Example usage of io.fabric8.kubernetes.api.model.rbac.Subject in the strimzi/strimzi project, taken from the getOriginal() method of the RoleBindingOperatorIT class.
// Builds the RoleBinding fixture that represents the initial ("original") state for the IT:
// it binds the "my-service-account" ServiceAccount in "my-namespace" to the
// "my-cluster-role" ClusterRole, labelled state=new so the test can track it.
@Override
protected RoleBinding getOriginal() {
    Subject serviceAccount = new SubjectBuilder()
            .withKind("ServiceAccount")
            .withNamespace("my-namespace")
            .withName("my-service-account")
            .build();
    RoleRef clusterRoleRef = new RoleRefBuilder()
            .withApiGroup("rbac.authorization.k8s.io")
            .withKind("ClusterRole")
            .withName("my-cluster-role")
            .build();
    return new RoleBindingBuilder()
            .withNewMetadata()
                .withName(resourceName)
                .withNamespace(namespace)
                .withLabels(singletonMap("state", "new"))
            .endMetadata()
            .withSubjects(serviceAccount)
            .withRoleRef(clusterRoleRef)
            .build();
}
Example usage of io.fabric8.kubernetes.api.model.rbac.Subject in the strimzi/strimzi project, taken from the resource() method of the RoleBindingOperatorTest class.
// Builds the RoleBinding resource used by the unit test: it binds the
// "some-service-account" ServiceAccount in NAMESPACE to the "some-role" ClusterRole,
// labelled foo=bar.
@Override
protected RoleBinding resource() {
    Subject serviceAccount = new SubjectBuilder()
            .withKind("ServiceAccount")
            .withNamespace(NAMESPACE)
            .withName("some-service-account")
            .build();
    RoleRef clusterRoleRef = new RoleRefBuilder()
            .withApiGroup("rbac.authorization.k8s.io")
            .withKind("ClusterRole")
            .withName("some-role")
            .build();
    return new RoleBindingBuilder()
            .withNewMetadata()
                .withName(RESOURCE_NAME)
                .withNamespace(NAMESPACE)
                .withLabels(singletonMap("foo", "bar"))
            .endMetadata()
            .withSubjects(singletonList(serviceAccount))
            .withRoleRef(clusterRoleRef)
            .build();
}
Example usage of io.fabric8.kubernetes.api.model.rbac.Subject in the strimzi/strimzi project, taken from the testTriggerRollingUpdateAfterOverrideBootstrap() method of the AlternativeReconcileTriggersST class.
// This test is affected by https://github.com/strimzi/strimzi-kafka-operator/issues/3913 so it needs longer operation timeout set in CO
@Description("Test for checking that overriding of bootstrap server, triggers the rolling update and verifying that" + " new bootstrap DNS is appended inside certificate in subject alternative names property.")
@ParallelNamespaceTest
@Tag(ROLLING_UPDATE)
void testTriggerRollingUpdateAfterOverrideBootstrap(ExtensionContext extensionContext) throws CertificateException {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String bootstrapDns = "kafka-test.XXXX.azure.XXXX.net";

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3).build());

    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    // Snapshot the broker pods before the change so the rolling update can be detected.
    final Map<String, String> kafkaPods = PodUtils.podSnapshot(namespaceName, kafkaSelector);

    // Add the alternative bootstrap name to the TLS listener configuration; this must
    // trigger re-issuing of broker certificates and therefore a rolling update.
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
        LOGGER.info("Adding new bootstrap dns: {} to external listeners", bootstrapDns);
        kafka.getSpec().getKafka().setListeners(asList(
            new GenericKafkaListenerBuilder()
                .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
                .withPort(9092)
                .withType(KafkaListenerType.INTERNAL)
                .withTls(false)
                .build(),
            new GenericKafkaListenerBuilder()
                .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                .withPort(9093)
                .withType(KafkaListenerType.INTERNAL)
                .withTls(true)
                .withNewConfiguration()
                    .withNewBootstrap()
                        .withAlternativeNames(bootstrapDns)
                    .endBootstrap()
                .endConfiguration()
                .build()));
    }, namespaceName);

    RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, kafkaSelector, 3, kafkaPods);
    KafkaUtils.waitForKafkaReady(namespaceName, clusterName);

    // Verify the new DNS name appears in the subject alternative names of every broker cert.
    Map<String, String> secretData = kubeClient().getSecret(namespaceName, KafkaResources.brokersServiceName(clusterName)).getData();
    // The factory is loop-invariant; create it once instead of per certificate.
    CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
    for (Map.Entry<String, String> item : secretData.entrySet()) {
        if (item.getKey().endsWith(".crt")) {
            LOGGER.info("Encoding {} cert", item.getKey());
            // decode(String) avoids the platform-default-charset getBytes() round trip.
            ByteArrayInputStream publicCert = new ByteArrayInputStream(Base64.getDecoder().decode(item.getValue()));
            Certificate certificate = certificateFactory.generateCertificate(publicCert);
            LOGGER.info("Verifying that new DNS is in certificate subject alternative names");
            assertThat(certificate.toString(), containsString(bootstrapDns));
        }
    }
}
Example usage of io.fabric8.kubernetes.api.model.rbac.Subject in the strimzi/strimzi project, taken from the testCustomClusterCAClientsCA() method of the SecurityST class.
/**
 * Deploys a Kafka cluster configured with custom (pre-generated) cluster and clients CAs
 * ({@code generateCertificateAuthority: false}), then verifies that broker, Zookeeper and
 * KafkaUser certificates are all issued by those custom CAs, and that TLS produce/consume
 * works end-to-end with the resulting certificates.
 */
@ParallelNamespaceTest
void testCustomClusterCAClientsCA(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String userName = mapWithTestUsers.get(extensionContext.getDisplayName());

    generateAndDeployCustomStrimziCA(namespaceName, clusterName);
    checkCustomCAsCorrectness(namespaceName, clusterName);

    LOGGER.info(" Deploy kafka with new certs/secrets.");
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 3)
        .editSpec()
            .withNewClusterCa()
                .withGenerateCertificateAuthority(false)
            .endClusterCa()
            .withNewClientsCa()
                .withGenerateCertificateAuthority(false)
            .endClientsCa()
            .editKafka()
                .withListeners(
                    new GenericKafkaListenerBuilder()
                        .withType(KafkaListenerType.INTERNAL)
                        .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
                        .withPort(9092)
                        .withTls(false)
                        .build(),
                    new GenericKafkaListenerBuilder()
                        .withType(KafkaListenerType.INTERNAL)
                        .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                        .withPort(9093)
                        .withTls(true)
                        .withNewKafkaListenerAuthenticationTlsAuth()
                        .endKafkaListenerAuthenticationTlsAuth()
                        .build())
            .endKafka()
        .endSpec()
        .build());

    LOGGER.info("Check Kafka(s) and Zookeeper(s) certificates.");
    // Each server certificate's issuer must match the custom cluster CA's subject DN.
    // getIssuerX500Principal() replaces the deprecated getIssuerDN().
    X509Certificate kafkaCert = SecretUtils.getCertificateFromSecret(kubeClient(namespaceName).getSecret(namespaceName, clusterName + "-kafka-brokers"), clusterName + "-kafka-0.crt");
    assertThat("KafkaCert does not have expected test Issuer: " + kafkaCert.getIssuerX500Principal(), SystemTestCertManager.containsAllDN(kafkaCert.getIssuerX500Principal().getName(), STRIMZI_TEST_CLUSTER_CA));
    X509Certificate zookeeperCert = SecretUtils.getCertificateFromSecret(kubeClient(namespaceName).getSecret(namespaceName, clusterName + "-zookeeper-nodes"), clusterName + "-zookeeper-0.crt");
    assertThat("ZookeeperCert does not have expected test Issuer: " + zookeeperCert.getIssuerX500Principal(), SystemTestCertManager.containsAllDN(zookeeperCert.getIssuerX500Principal().getName(), STRIMZI_TEST_CLUSTER_CA));

    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());

    LOGGER.info("Check KafkaUser certificate.");
    KafkaUser user = KafkaUserTemplates.tlsUser(clusterName, userName).build();
    resourceManager.createResource(extensionContext, user);
    // The user certificate must be issued by the custom clients CA.
    X509Certificate userCert = SecretUtils.getCertificateFromSecret(kubeClient(namespaceName).getSecret(namespaceName, userName), "user.crt");
    assertThat("UserCert does not have expected test Issuer: " + userCert.getIssuerX500Principal(), SystemTestCertManager.containsAllDN(userCert.getIssuerX500Principal().getName(), STRIMZI_TEST_CLIENTS_CA));

    LOGGER.info("Send and receive messages over TLS.");
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(true, clusterName + "-" + Constants.KAFKA_CLIENTS, user).build());
    final String kafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(kafkaClientsPodName)
        .withTopicName(topicName)
        .withNamespaceName(namespaceName)
        .withClusterName(clusterName)
        .withKafkaUsername(userName)
        .withMessageCount(MESSAGE_COUNT)
        .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
        .build();

    LOGGER.info("Check for certificates used within kafka pod internal clients (producer/consumer)");
    List<VolumeMount> volumeMounts = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getSpec().getContainers().get(0).getVolumeMounts();
    for (VolumeMount vm : volumeMounts) {
        if (vm.getMountPath().contains("user-secret-" + internalKafkaClient.getKafkaUsername())) {
            assertThat("UserCert Issuer DN in clients pod is incorrect!", checkMountVolumeSecret(namespaceName, kafkaClientsPodName, vm, "issuer", STRIMZI_INTERMEDIATE_CA));
            assertThat("UserCert Subject DN in clients pod is incorrect!", checkMountVolumeSecret(namespaceName, kafkaClientsPodName, vm, "subject", STRIMZI_TEST_CLIENTS_CA));
        } else if (vm.getMountPath().contains("cluster-ca-" + internalKafkaClient.getKafkaUsername())) {
            assertThat("ClusterCA Issuer DN in clients pod is incorrect!", checkMountVolumeSecret(namespaceName, kafkaClientsPodName, vm, "issuer", STRIMZI_INTERMEDIATE_CA));
            assertThat("ClusterCA Subject DN in clients pod is incorrect!", checkMountVolumeSecret(namespaceName, kafkaClientsPodName, vm, "subject", STRIMZI_TEST_CLUSTER_CA));
        }
    }

    LOGGER.info("Checking produced and consumed messages via TLS to pod:{}", kafkaClientsPodName);
    internalKafkaClient.checkProducedAndConsumedMessages(internalKafkaClient.sendMessagesTls(), internalKafkaClient.receiveMessagesTls());
}
Example usage of io.fabric8.kubernetes.api.model.rbac.Subject in the strimzi/strimzi project, taken from the generateClusterRoleBinding() method of the KafkaCluster class.
/**
 * Creates the ClusterRoleBinding which is used to bind the Kafka SA to the ClusterRole
 * which permissions the Kafka init container to access K8S nodes (necessary for rack-awareness).
 *
 * @param assemblyNamespace The namespace.
 * @return The cluster role binding, or {@code null} when neither rack-awareness nor a
 *         node-port listener is configured and no binding is needed.
 */
public ClusterRoleBinding generateClusterRoleBinding(String assemblyNamespace) {
    // The binding is only required when the init container needs to read node metadata:
    // either rack-awareness is configured, or the cluster is exposed via node ports.
    if (rack == null && !isExposedWithNodePort()) {
        return null;
    }
    Subject serviceAccount = new SubjectBuilder()
            .withKind("ServiceAccount")
            .withName(getServiceAccountName())
            .withNamespace(assemblyNamespace)
            .build();
    RoleRef brokerClusterRole = new RoleRefBuilder()
            .withApiGroup("rbac.authorization.k8s.io")
            .withKind("ClusterRole")
            .withName("strimzi-kafka-broker")
            .build();
    return getClusterRoleBinding(KafkaResources.initContainerClusterRoleBindingName(cluster, namespace), serviceAccount, brokerClusterRole);
}
Aggregations