Use of io.fabric8.kubernetes.api.model.rbac.RoleBuilder in project stackgres by ongres.
From the class DbOpsRole, method createRole.
/**
 * Create the Role for the Job associated to the dbOps.
 *
 * <p>The Role is created in the dbOps namespace with the cluster labels and
 * grants the Job the access it needs: pod lifecycle, logs and exec, reads of
 * services and secrets, statefulset management, event reporting, and
 * read/update access to the StackGres custom resources.
 */
private Role createRole(StackGresDbOpsContext context) {
  final StackGresDbOps dbOps = context.getSource();
  final String dbOpsNamespace = dbOps.getMetadata().getNamespace();
  final Map<String, String> roleLabels = labelFactory.clusterLabels(context.getCluster());
  return new RoleBuilder()
      .withNewMetadata()
      .withName(roleName(context))
      .withNamespace(dbOpsNamespace)
      .withLabels(roleLabels)
      .endMetadata()
      // Pod lifecycle access for the dbOps Job.
      .addToRules(new PolicyRuleBuilder()
          .withApiGroups("")
          .withResources("pods")
          .withVerbs("get", "list", "watch", "delete")
          .build())
      .addToRules(new PolicyRuleBuilder()
          .withApiGroups("")
          .withResources("pods/log")
          .withVerbs("get")
          .build())
      .addToRules(new PolicyRuleBuilder()
          .withApiGroups("")
          .withResources("pods/exec")
          .withVerbs("create")
          .build())
      .addToRules(new PolicyRuleBuilder()
          .withApiGroups("")
          .withResources("services", "secrets")
          .withVerbs("get", "list")
          .build())
      .addToRules(new PolicyRuleBuilder()
          .withApiGroups("apps")
          .withResources("statefulsets")
          .withVerbs("get", "delete")
          .build())
      // Event reporting while the operation runs.
      .addToRules(new PolicyRuleBuilder()
          .withApiGroups("")
          .withResources("events")
          .withVerbs("get", "list", "create", "patch", "update")
          .build())
      // StackGres custom resources touched by the dbOps Job.
      .addToRules(new PolicyRuleBuilder()
          .withApiGroups(CommonDefinition.GROUP)
          .withResources(HasMetadata.getPlural(StackGresDbOps.class))
          .withVerbs("get", "list", "watch", "patch", "update")
          .build())
      .addToRules(new PolicyRuleBuilder()
          .withApiGroups(CommonDefinition.GROUP)
          .withResources(HasMetadata.getPlural(StackGresCluster.class))
          .withVerbs("get", "list", "watch", "patch", "update")
          .build())
      .addToRules(new PolicyRuleBuilder()
          .withApiGroups(CommonDefinition.GROUP)
          .withResources(HasMetadata.getPlural(StackGresCluster.class) + "/status")
          .withVerbs("update")
          .build())
      .build();
}
Use of io.fabric8.kubernetes.api.model.rbac.RoleBuilder in project stackgres by ongres.
From the class PatroniRole, method createRole.
/**
 * Create the Role for patroni associated to the cluster.
 *
 * <p>The Role is created in the distributed logs cluster namespace with the
 * cluster labels. It grants patroni the endpoint/configmap access it uses for
 * leader election and configuration, pod and exec access, cronjob patching,
 * service and event creation, and access to the StackGres custom resources.
 */
private Role createRole(StackGresDistributedLogsContext context) {
  final StackGresDistributedLogs cluster = context.getSource();
  final String clusterNamespace = cluster.getMetadata().getNamespace();
  final Map<String, String> roleLabels = labelFactory.clusterLabels(cluster);
  return new RoleBuilder()
      .withNewMetadata()
      .withName(roleName(context))
      .withNamespace(clusterNamespace)
      .withLabels(roleLabels)
      .endMetadata()
      // Endpoints and configmaps used by patroni for DCS state.
      .addToRules(new PolicyRuleBuilder()
          .withApiGroups("")
          .withResources("endpoints", "configmaps")
          .withVerbs("create", "get", "list", "patch", "update", "watch")
          .build())
      .addToRules(new PolicyRuleBuilder()
          .withApiGroups("")
          .withResources("secrets")
          .withVerbs("get")
          .build())
      .addToRules(new PolicyRuleBuilder()
          .withApiGroups("")
          .withResources("pods")
          .withVerbs("get", "list", "patch", "update", "watch")
          .build())
      .addToRules(new PolicyRuleBuilder()
          .withApiGroups("")
          .withResources("pods/exec")
          .withVerbs("create")
          .build())
      .addToRules(new PolicyRuleBuilder()
          .withApiGroups("batch")
          .withResources("cronjobs")
          .withVerbs("get", "patch")
          .build())
      .addToRules(new PolicyRuleBuilder()
          .withApiGroups("")
          .withResources("services")
          .withVerbs("create")
          .build())
      .addToRules(new PolicyRuleBuilder()
          .withApiGroups("")
          .withResources("events")
          .withVerbs("get", "list", "create", "patch", "update")
          .build())
      // Full management of backups plus read access to the remaining
      // StackGres custom resources.
      .addToRules(new PolicyRuleBuilder()
          .withApiGroups(CommonDefinition.GROUP)
          .withResources(HasMetadata.getPlural(StackGresBackup.class))
          .withVerbs("list", "get", "create", "patch", "delete")
          .build())
      .addToRules(new PolicyRuleBuilder()
          .withApiGroups(CommonDefinition.GROUP)
          .withResources(
              HasMetadata.getPlural(StackGresBackup.class),
              HasMetadata.getPlural(StackGresBackupConfig.class),
              HasMetadata.getPlural(StackGresCluster.class),
              HasMetadata.getPlural(StackGresPostgresConfig.class),
              HasMetadata.getPlural(StackGresPoolingConfig.class),
              HasMetadata.getPlural(StackGresProfile.class),
              HasMetadata.getPlural(StackGresDistributedLogs.class),
              HasMetadata.getPlural(StackGresDbOps.class))
          .withVerbs("get", "list", "watch", "patch")
          .build())
      .addToRules(new PolicyRuleBuilder()
          .withApiGroups(CommonDefinition.GROUP)
          .withResources(HasMetadata.getPlural(StackGresDistributedLogs.class))
          .withVerbs("update", "patch")
          .build())
      .addToRules(new PolicyRuleBuilder()
          .withApiGroups(CommonDefinition.GROUP)
          .withResources(HasMetadata.getPlural(StackGresDistributedLogs.class) + "/status")
          .withVerbs("update")
          .build())
      .addToRules(new PolicyRuleBuilder()
          .withApiGroups(CommonDefinition.GROUP)
          .withResources(HasMetadata.getPlural(StackGresCluster.class) + "/status")
          .withVerbs("update")
          .build())
      .build();
}
Use of io.fabric8.kubernetes.api.model.rbac.RoleBuilder in project strimzi by strimzi.
From the class ConfigProviderST, method testConnectWithConnectorUsingConfigAndEnvProvider.
// End-to-end test: a KafkaConnector resolves its configuration both from a
// ConfigMap (via the Kubernetes Config Provider) and from an environment
// variable (via the EnvVar Config Provider), then sinks messages to a file.
@ParallelNamespaceTest
void testConnectWithConnectorUsingConfigAndEnvProvider(ExtensionContext extensionContext) {
// Per-test names are resolved from the display name so parallel runs do not collide.
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
final String producerName = "producer-" + ClientUtils.generateRandomConsumerGroup();
final String customFileSinkPath = "/tmp/my-own-path.txt";
// 3-broker ephemeral Kafka cluster for the test.
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
// Connector settings served through the ConfigMap provider: topics, sink file
// path and the converter class names.
Map<String, String> configData = new HashMap<>();
configData.put("topics", topicName);
configData.put("file", customFileSinkPath);
configData.put("key", "org.apache.kafka.connect.storage.StringConverter");
configData.put("value", "org.apache.kafka.connect.storage.StringConverter");
String cmName = "connector-config";
String configRoleName = "connector-config-role";
ConfigMap connectorConfig = new ConfigMapBuilder().editOrNewMetadata().withName(cmName).endMetadata().withData(configData).build();
// Created directly with the client (not via resourceManager), so it is not
// tracked for automatic cleanup — presumably removed with the namespace.
kubeClient().getClient().configMaps().inNamespace(namespaceName).create(connectorConfig);
// KafkaConnect with the file plugin, both config providers registered, and the
// "file" CM key exposed to the pods as the FILE_SINK_FILE environment variable.
resourceManager.createResource(extensionContext, KafkaConnectTemplates.kafkaConnectWithFilePlugin(namespaceName, clusterName, 1).editOrNewMetadata().addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true").endMetadata().editOrNewSpec().addToConfig("key.converter.schemas.enable", false).addToConfig("value.converter.schemas.enable", false).addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter").addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter").addToConfig("config.providers", "configmaps,env").addToConfig("config.providers.configmaps.class", "io.strimzi.kafka.KubernetesConfigMapConfigProvider").addToConfig("config.providers.env.class", "io.strimzi.kafka.EnvVarConfigProvider").editOrNewExternalConfiguration().addNewEnv().withName("FILE_SINK_FILE").withNewValueFrom().withNewConfigMapKeyRef("file", cmName, false).endValueFrom().endEnv().endExternalConfiguration().endSpec().build());
LOGGER.info("Creating needed RoleBinding and Role for Kubernetes Config Provider");
// RBAC: bind the Connect service account to a Role that allows reading the CM.
ResourceManager.getInstance().createResource(extensionContext, new RoleBindingBuilder().editOrNewMetadata().withName("connector-config-rb").withNamespace(namespaceName).endMetadata().withSubjects(new SubjectBuilder().withKind("ServiceAccount").withName(clusterName + "-connect").withNamespace(namespaceName).build()).withRoleRef(new RoleRefBuilder().withKind("Role").withName(configRoleName).withApiGroup("rbac.authorization.k8s.io").build()).build());
// create a role granting "get" on exactly the connector-config ConfigMap
Role configRole = new RoleBuilder().editOrNewMetadata().withName(configRoleName).withNamespace(namespaceName).endMetadata().addNewRule().withApiGroups("").withResources("configmaps").withResourceNames(cmName).withVerbs("get").endRule().build();
kubeClient().getClient().resource(configRole).createOrReplace();
// Placeholder prefix understood by the Kubernetes Config Provider:
// "configmaps:<namespace>/<configmap-name>:<key>".
String configPrefix = "configmaps:" + namespaceName + "/connector-config:";
// Connector config mixes env-provider ("file") and CM-provider placeholders.
resourceManager.createResource(extensionContext, KafkaConnectorTemplates.kafkaConnector(clusterName).editSpec().withClassName("org.apache.kafka.connect.file.FileStreamSinkConnector").addToConfig("file", "${env:FILE_SINK_FILE}").addToConfig("key.converter", "${" + configPrefix + "key}").addToConfig("value.converter", "${" + configPrefix + "value}").addToConfig("topics", "${" + configPrefix + "topics}").endSpec().build());
KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder().withProducerName(producerName).withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName)).withTopicName(topicName).withMessageCount(MESSAGE_COUNT).withDelayMs(0).withNamespaceName(namespaceName).build();
resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
String kafkaConnectPodName = kubeClient().listPods(namespaceName, clusterName, Labels.STRIMZI_KIND_LABEL, KafkaConnect.RESOURCE_KIND).get(0).getMetadata().getName();
// Waiting for the last message ("Hello-world - 99") implies MESSAGE_COUNT is
// expected to be 100 here — TODO confirm against the test-suite constant.
KafkaConnectUtils.waitForMessagesInKafkaConnectFileSink(namespaceName, kafkaConnectPodName, customFileSinkPath, "Hello-world - 99");
}
Use of io.fabric8.kubernetes.api.model.rbac.RoleBuilder in project strimzi-kafka-operator by strimzi.
From the class SetupClusterOperator, method applyClusterOperatorInstallFiles.
/**
 * Perform application of ServiceAccount, Roles and CRDs needed for proper cluster operator
 * deployment.
 *
 * <p>Configuration files are loaded from the packaging/install/cluster-operator directory.
 * Binding and Deployment files are skipped; namespaced resource kinds (Role, ServiceAccount,
 * ConfigMap) are re-targeted to the given namespace before creation.
 *
 * @param namespace the namespace the namespaced install resources are created in
 */
public void applyClusterOperatorInstallFiles(String namespace) {
    final List<File> installFiles = Arrays.stream(new File(CO_INSTALL_DIR).listFiles())
            .sorted()
            .filter(File::isFile)
            // Bindings and Deployments are handled elsewhere, skip them here.
            .filter(file -> !file.getName().matches(".*(Binding|Deployment)-.*"))
            .collect(Collectors.toList());
    for (final File installFile : installFiles) {
        File createFile = installFile;
        if (createFile.getName().contains(Constants.CLUSTER_ROLE + "-")) {
            // Depending on the RBAC mode, ClusterRole files may be rewritten as Roles.
            createFile = switchClusterRolesToRolesIfNeeded(createFile);
        }
        // File names follow the "<index>-<Kind>-..." convention; the second segment
        // is the resource kind.
        final String resourceType = createFile.getName().split("-")[1];
        LOGGER.debug("Installation resource type: {}", resourceType);
        switch (resourceType) {
            case Constants.ROLE: {
                final Role role = TestUtils.configFromYaml(createFile, Role.class);
                ResourceManager.getInstance().createResource(extensionContext,
                        new RoleBuilder(role)
                                .editMetadata()
                                .withNamespace(namespace)
                                .endMetadata()
                                .build());
                break;
            }
            case Constants.CLUSTER_ROLE: {
                // Cluster-scoped, no namespace rewrite needed.
                final ClusterRole clusterRole = TestUtils.configFromYaml(createFile, ClusterRole.class);
                ResourceManager.getInstance().createResource(extensionContext, clusterRole);
                break;
            }
            case Constants.SERVICE_ACCOUNT: {
                final ServiceAccount serviceAccount = TestUtils.configFromYaml(createFile, ServiceAccount.class);
                ResourceManager.getInstance().createResource(extensionContext,
                        new ServiceAccountBuilder(serviceAccount)
                                .editMetadata()
                                .withNamespace(namespace)
                                .endMetadata()
                                .build());
                break;
            }
            case Constants.CONFIG_MAP: {
                final ConfigMap configMap = TestUtils.configFromYaml(createFile, ConfigMap.class);
                ResourceManager.getInstance().createResource(extensionContext,
                        new ConfigMapBuilder(configMap)
                                .editMetadata()
                                .withNamespace(namespace)
                                .endMetadata()
                                .build());
                break;
            }
            case Constants.CUSTOM_RESOURCE_DEFINITION_SHORT: {
                // Cluster-scoped, no namespace rewrite needed.
                final CustomResourceDefinition customResourceDefinition =
                        TestUtils.configFromYaml(createFile, CustomResourceDefinition.class);
                ResourceManager.getInstance().createResource(extensionContext, customResourceDefinition);
                break;
            }
            default:
                LOGGER.error("Unknown installation resource type: {}", resourceType);
                throw new RuntimeException("Unknown installation resource type:" + resourceType);
        }
    }
}
Use of io.fabric8.kubernetes.api.model.rbac.RoleBuilder in project strimzi by strimzi.
From the class RoleResource, method role.
/**
 * Loads a Role from the given YAML file, re-targets it to the given namespace
 * and creates it through the {@code ResourceManager}.
 *
 * @param extensionContext the JUnit extension context the resource is tracked under
 * @param yamlPath path of the YAML file containing the Role definition
 * @param namespace namespace the Role is created in
 */
public static void role(ExtensionContext extensionContext, String yamlPath, String namespace) {
    LOGGER.info("Creating Role from {} in namespace {}", yamlPath, namespace);
    final Role loadedRole = getRoleFromYaml(yamlPath);
    final Role namespacedRole = new RoleBuilder(loadedRole)
            .editMetadata()
            .withNamespace(namespace)
            .endMetadata()
            .build();
    ResourceManager.getInstance().createResource(extensionContext, namespacedRole);
}
Aggregations