Use of io.strimzi.api.kafka.model.KafkaResources.clientsCaKeySecretName in project strimzi (strimzi-kafka-operator) by strimzi.
From the class SecurityST, method testOwnerReferenceOfCASecrets.
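The helpers referenced above map a cluster name to the names of the CA key Secrets managed by the Cluster Operator. A minimal sketch of what they resolve to, assuming Strimzi's usual <cluster>-cluster-ca / <cluster>-clients-ca naming (the exact strings are an assumption, not shown in the snippet below):
// Assumed naming of the CA key Secrets; verify against the Strimzi version in use.
String clusterCaKeySecret = KafkaResources.clusterCaKeySecretName("my-cluster");  // expected "my-cluster-cluster-ca"
String clientsCaKeySecret = KafkaResources.clientsCaKeySecretName("my-cluster");  // expected "my-cluster-clients-ca"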
@ParallelNamespaceTest
void testOwnerReferenceOfCASecrets(ExtensionContext extensionContext) {
/* Use a different name for the Kafka cluster to keep the test quick: KafkaRoller otherwise waits up to
   5 minutes for the pods of "my-cluster" to become ready, and a unique name avoids that wait. */
final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String secondClusterName = "my-second-cluster-" + clusterName;
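// First deployment: generateSecretOwnerReference is false for both CAs, so the CA key Secrets are created without an ownerReference to this Kafka resource.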
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3)
    .editOrNewSpec()
        .withNewClusterCa()
            .withGenerateSecretOwnerReference(false)
        .endClusterCa()
        .withNewClientsCa()
            .withGenerateSecretOwnerReference(false)
        .endClientsCa()
    .endSpec()
    .build());
LOGGER.info("Listing all cluster CAs for {}", clusterName);
List<Secret> caSecrets = kubeClient(namespaceName).listSecrets(namespaceName).stream().filter(secret -> secret.getMetadata().getName().contains(KafkaResources.clusterCaKeySecretName(clusterName)) || secret.getMetadata().getName().contains(KafkaResources.clientsCaKeySecretName(clusterName))).collect(Collectors.toList());
LOGGER.info("Deleting Kafka:{}", clusterName);
KafkaResource.kafkaClient().inNamespace(namespaceName).withName(clusterName).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete();
KafkaUtils.waitForKafkaDeletion(namespaceName, clusterName);
LOGGER.info("Checking actual secrets after Kafka deletion");
caSecrets.forEach(caSecret -> {
String secretName = caSecret.getMetadata().getName();
LOGGER.info("Checking that {} secret is still present", secretName);
assertNotNull(kubeClient(namespaceName).getSecret(namespaceName, secretName));
LOGGER.info("Deleting secret: {}", secretName);
kubeClient(namespaceName).deleteSecret(namespaceName, secretName);
});
LOGGER.info("Deploying Kafka with generateSecretOwnerReference set to true");
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(secondClusterName, 3)
    .editOrNewSpec()
        .editOrNewClusterCa()
            .withGenerateSecretOwnerReference(true)
        .endClusterCa()
        .editOrNewClientsCa()
            .withGenerateSecretOwnerReference(true)
        .endClientsCa()
    .endSpec()
    .build());
caSecrets = kubeClient(namespaceName).listSecrets(namespaceName).stream()
    .filter(secret -> secret.getMetadata().getName().contains(KafkaResources.clusterCaKeySecretName(secondClusterName))
        || secret.getMetadata().getName().contains(KafkaResources.clientsCaKeySecretName(secondClusterName)))
    .collect(Collectors.toList());
LOGGER.info("Deleting Kafka:{}", secondClusterName);
KafkaResource.kafkaClient().inNamespace(namespaceName).withName(secondClusterName).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete();
KafkaUtils.waitForKafkaDeletion(namespaceName, secondClusterName);
LOGGER.info("Checking actual secrets after Kafka deletion");
caSecrets.forEach(caSecret -> {
String secretName = caSecret.getMetadata().getName();
LOGGER.info("Checking that {} secret is deleted", secretName);
TestUtils.waitFor("secret " + secretName + " deletion", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT,
    () -> kubeClient(namespaceName).getSecret(namespaceName, secretName) == null);
});
}
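The test hinges on the generateSecretOwnerReference flag: when it is false the CA key Secrets carry no ownerReference and survive deletion of the Kafka resource, and when it is true they reference the Kafka resource and are garbage-collected with it. A minimal sketch, assuming the fabric8 Secret model used elsewhere in the test, of how such an ownerReference could be inspected directly (CaSecretOwnershipCheck and isOwnedByKafka are hypothetical, not part of SecurityST):

import io.fabric8.kubernetes.api.model.OwnerReference;
import io.fabric8.kubernetes.api.model.Secret;
import java.util.List;

class CaSecretOwnershipCheck {
    // Hypothetical helper: true when the Secret carries an ownerReference
    // pointing at the Kafka resource with the given name.
    static boolean isOwnedByKafka(Secret secret, String clusterName) {
        List<OwnerReference> refs = secret.getMetadata().getOwnerReferences();
        return refs != null && refs.stream()
            .anyMatch(ref -> "Kafka".equals(ref.getKind()) && clusterName.equals(ref.getName()));
    }
}

With generateSecretOwnerReference=true this check would be expected to return true for both CA key Secrets of the second cluster while it exists; after the Kafka resource is deleted, the Secrets themselves are expected to disappear.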