Example usage of io.strimzi.systemtest.annotations.KRaftNotSupported in the strimzi/strimzi project:
class SecurityST, method testCertRenewalInMaintenanceTimeWindow.
/**
 * Verifies that CA certificate renewal and the resulting rolling update are deferred until the
 * Kafka CR's {@code maintenanceTimeWindows} allow them: outside the window nothing may change,
 * and once the window opens the CAs are renewed, the cluster rolls, and clients still work.
 */
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
void testCertRenewalInMaintenanceTimeWindow(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext, namespace);
    final String clusterSecretName = KafkaResources.clusterCaCertificateSecretName(testStorage.getClusterName());
    final String clientsSecretName = KafkaResources.clientsCaCertificateSecretName(testStorage.getClusterName());

    // Schedule the maintenance window to open 15 minutes from now so that the CA change made
    // below is guaranteed to happen OUTSIDE the window first.
    LocalDateTime maintenanceWindowStart = LocalDateTime.now().withSecond(0).plusMinutes(15);
    final long maintenanceWindowDuration = 14;
    final long windowStartMin = maintenanceWindowStart.getMinute();
    // Wrap the stop minute past the top of the hour if needed.
    // NOTE(review): when the range wraps (stop < start) the generated cron range is descending —
    // confirm the cron parser used by the operator accepts wrapping ranges.
    final long windowStopMin = windowStartMin + maintenanceWindowDuration > 59
        ? windowStartMin + maintenanceWindowDuration - 60
        : windowStartMin + maintenanceWindowDuration;

    String maintenanceWindowCron = "* " + windowStartMin + "-" + windowStopMin + " * * * ? *";
    LOGGER.info("Initial maintenanceTimeWindow is: {}", maintenanceWindowCron);

    // Deliberately short validity (20d) and renewal (15d) so the CAs are always "due" for
    // renewal; the maintenance window is what gates when the renewal actually happens.
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3, 1)
        .editSpec()
            .addToMaintenanceTimeWindows(maintenanceWindowCron)
            .editKafka()
                .withListeners(new GenericKafkaListenerBuilder()
                    .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                    .withPort(9093)
                    .withType(KafkaListenerType.INTERNAL)
                    .withTls(true)
                    .withAuth(new KafkaListenerAuthenticationTls())
                    .build())
            .endKafka()
            .withNewClusterCa()
                .withRenewalDays(15)
                .withValidityDays(20)
            .endClusterCa()
            .withNewClientsCa()
                .withRenewalDays(15)
                .withValidityDays(20)
            .endClientsCa()
        .endSpec()
        .build());
    resourceManager.createResource(extensionContext,
        KafkaUserTemplates.tlsUser(testStorage.getClusterName(), testStorage.getUserName()).build(),
        KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName()).build());

    // Snapshot the user secret before renewal so we can check it changed afterwards.
    Secret kafkaUserSecret = kubeClient(testStorage.getNamespaceName()).getSecret(testStorage.getNamespaceName(), testStorage.getUserName());

    KafkaClients kafkaClients = new KafkaClientsBuilder()
        .withTopicName(testStorage.getTopicName())
        .withMessageCount(MESSAGE_COUNT)
        .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName()))
        .withProducerName(testStorage.getProducerName())
        .withConsumerName(testStorage.getConsumerName())
        .withNamespaceName(testStorage.getNamespaceName())
        .withUserName(testStorage.getUserName())
        .build();

    Map<String, String> kafkaPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getKafkaSelector());

    // Prolong validity/renewal — this change should NOT take effect until the window opens.
    CertificateAuthority newCAValidity = new CertificateAuthority();
    newCAValidity.setRenewalDays(150);
    newCAValidity.setValidityDays(200);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), k -> {
        k.getSpec().setClusterCa(newCAValidity);
        k.getSpec().setClientsCa(newCAValidity);
    }, testStorage.getNamespaceName());
    KafkaUtils.waitForKafkaStatusUpdate(testStorage.getNamespaceName(), testStorage.getClusterName());

    // Outside the window: CA generations must stay at 0 and no Kafka pod may have rolled.
    Secret secretCaCluster = kubeClient(testStorage.getNamespaceName()).getSecret(testStorage.getNamespaceName(), clusterSecretName);
    Secret secretCaClients = kubeClient(testStorage.getNamespaceName()).getSecret(testStorage.getNamespaceName(), clientsSecretName);
    assertThat("Cluster CA certificate has been renewed outside of maintenanceTimeWindows", secretCaCluster.getMetadata().getAnnotations().get(Ca.ANNO_STRIMZI_IO_CA_CERT_GENERATION), is("0"));
    assertThat("Clients CA certificate has been renewed outside of maintenanceTimeWindows", secretCaClients.getMetadata().getAnnotations().get(Ca.ANNO_STRIMZI_IO_CA_CERT_GENERATION), is("0"));
    assertThat("Rolling update was performed outside of maintenanceTimeWindows", kafkaPods, is(PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getKafkaSelector())));

    // Add a second window that starts now so the deferred renewal can proceed without waiting.
    maintenanceWindowCron = "* " + LocalDateTime.now().getMinute() + "-" + windowStopMin + " * * * ? *";
    LOGGER.info("Set maintenanceTimeWindow to start now to save time: {}", maintenanceWindowCron);
    List<String> maintenanceTimeWindows = KafkaResource.kafkaClient().inNamespace(testStorage.getNamespaceName()).withName(testStorage.getClusterName()).get().getSpec().getMaintenanceTimeWindows();
    maintenanceTimeWindows.add(maintenanceWindowCron);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), kafka -> kafka.getSpec().setMaintenanceTimeWindows(maintenanceTimeWindows), testStorage.getNamespaceName());
    // NOTE(review): the cluster already exists and the new window was just added via the replace
    // above — confirm this createResource re-apply of the same Kafka CR is actually needed.
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3, 1)
        .editSpec()
            .addToMaintenanceTimeWindows(maintenanceWindowCron)
        .endSpec()
        .build());

    LOGGER.info("Wait until rolling update is triggered during maintenanceTimeWindows");
    RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), 3, kafkaPods);

    // Inside the window: both CA generations must have advanced and the user cert must differ.
    Secret kafkaUserSecretRolled = kubeClient(testStorage.getNamespaceName()).getSecret(testStorage.getNamespaceName(), testStorage.getUserName());
    secretCaCluster = kubeClient(testStorage.getNamespaceName()).getSecret(testStorage.getNamespaceName(), clusterSecretName);
    secretCaClients = kubeClient(testStorage.getNamespaceName()).getSecret(testStorage.getNamespaceName(), clientsSecretName);
    assertThat("Cluster CA certificate has not been renewed within maintenanceTimeWindows", secretCaCluster.getMetadata().getAnnotations().get(Ca.ANNO_STRIMZI_IO_CA_CERT_GENERATION), is("1"));
    assertThat("Clients CA certificate has not been renewed within maintenanceTimeWindows", secretCaClients.getMetadata().getAnnotations().get(Ca.ANNO_STRIMZI_IO_CA_CERT_GENERATION), is("1"));
    // BUG FIX: the original used not(sameInstance(...)), which always passes because the two
    // Secret objects come from separate API calls and are never the same instance. Compare the
    // secret data so the assertion actually fails when the user certificate was not renewed.
    assertThat("KafkaUser certificate has not been renewed within maintenanceTimeWindows",
        kafkaUserSecretRolled.getData(), is(not(kafkaUserSecret.getData())));

    // Finally, prove the renewed certificates are usable by TLS clients.
    resourceManager.createResource(extensionContext, kafkaClients.producerTlsStrimzi(testStorage.getClusterName()), kafkaClients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
}
Example usage of io.strimzi.systemtest.annotations.KRaftNotSupported in the strimzi/strimzi project:
class SecurityST, method testClientsCACertRenew.
/**
 * Verifies that prolonging the Clients CA validity/renewal days triggers renewal of the Clients CA
 * certificate and of KafkaUser certificates, rolling only the components that use the Clients CA
 * (Kafka and the Entity Operator, but not ZooKeeper).
 */
@ParallelNamespaceTest
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
void testClientsCACertRenew(ExtensionContext extensionContext) {
    final TestStorage ts = new TestStorage(extensionContext, namespace);

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(ts.getClusterName(), 3)
        .editOrNewSpec()
            .withNewClientsCa()
                .withRenewalDays(15)
                .withValidityDays(20)
            .endClientsCa()
        .endSpec()
        .build());

    String username = "strimzi-tls-user-" + new Random().nextInt(Integer.MAX_VALUE);
    resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(ts.getClusterName(), username).build());

    // Snapshots taken before the CA change, used to detect which components roll.
    final Map<String, String> zkPods = PodUtils.podSnapshot(ts.getNamespaceName(), ts.getZookeeperSelector());
    final Map<String, String> kafkaPods = PodUtils.podSnapshot(ts.getNamespaceName(), ts.getKafkaSelector());
    final Map<String, String> eoPod = DeploymentUtils.depSnapshot(ts.getNamespaceName(), ts.getEoDeploymentName());

    // Check initial clientsCA validity days
    Secret clientsCASecret = kubeClient(ts.getNamespaceName()).getSecret(ts.getNamespaceName(), KafkaResources.clientsCaCertificateSecretName(ts.getClusterName()));
    X509Certificate cacert = SecretUtils.getCertificateFromSecret(clientsCASecret, "ca.crt");
    Date initialCertStartTime = cacert.getNotBefore();
    Date initialCertEndTime = cacert.getNotAfter();

    // Check initial kafkauser validity days
    X509Certificate userCert = SecretUtils.getCertificateFromSecret(kubeClient(ts.getNamespaceName()).getSecret(ts.getNamespaceName(), username), "user.crt");
    Date initialKafkaUserCertStartTime = userCert.getNotBefore();
    Date initialKafkaUserCertEndTime = userCert.getNotAfter();

    LOGGER.info("Change of kafka validity and renewal days - reconciliation should start.");
    CertificateAuthority newClientsCA = new CertificateAuthority();
    newClientsCA.setRenewalDays(150);
    newClientsCA.setValidityDays(200);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(ts.getClusterName(), k -> k.getSpec().setClientsCa(newClientsCA), ts.getNamespaceName());

    // On the next reconciliation the Cluster Operator rolls only components using the Clients CA:
    // a) ZooKeeper must not roll
    RollingUpdateUtils.waitForNoRollingUpdate(ts.getNamespaceName(), ts.getZookeeperSelector(), zkPods);
    // b) Kafka has to roll
    RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(ts.getNamespaceName(), ts.getKafkaSelector(), 3, kafkaPods);
    // c) EO must roll (because User Operator uses Clients CA for issuing user certificates)
    DeploymentUtils.waitTillDepHasRolled(ts.getNamespaceName(), ts.getEoDeploymentName(), 1, eoPod);

    // Read renewed secret/certs again
    clientsCASecret = kubeClient(ts.getNamespaceName()).getSecret(ts.getNamespaceName(), KafkaResources.clientsCaCertificateSecretName(ts.getClusterName()));
    cacert = SecretUtils.getCertificateFromSecret(clientsCASecret, "ca.crt");
    Date changedCertStartTime = cacert.getNotBefore();
    Date changedCertEndTime = cacert.getNotAfter();
    userCert = SecretUtils.getCertificateFromSecret(kubeClient(ts.getNamespaceName()).getSecret(ts.getNamespaceName(), username), "user.crt");
    Date changedKafkaUserCertStartTime = userCert.getNotBefore();
    Date changedKafkaUserCertEndTime = userCert.getNotAfter();

    // Parameterized SLF4J logging instead of string concatenation.
    LOGGER.info("Initial ClientsCA cert dates: {} --> {}", initialCertStartTime, initialCertEndTime);
    LOGGER.info("Changed ClientsCA cert dates: {} --> {}", changedCertStartTime, changedCertEndTime);
    LOGGER.info("Initial userCert dates: {} --> {}", initialKafkaUserCertStartTime, initialKafkaUserCertEndTime);
    LOGGER.info("Changed userCert dates: {} --> {}", changedKafkaUserCertStartTime, changedKafkaUserCertEndTime);

    // BUG FIX: the failure messages previously described the success condition ("has been
    // renewed"), which is misleading when the assertion fails; they now describe the failure.
    String msg = "ClientsCA cert end date '" + changedCertEndTime + "' has not been prolonged beyond the original end date '" + initialCertEndTime + "'";
    assertThat(msg, initialCertEndTime.compareTo(changedCertEndTime) < 0);
    assertThat("UserCert start date has not been renewed", initialKafkaUserCertStartTime.compareTo(changedKafkaUserCertStartTime) < 0);
    assertThat("UserCert end date has not been renewed", initialKafkaUserCertEndTime.compareTo(changedKafkaUserCertEndTime) < 0);
}
Example usage of io.strimzi.systemtest.annotations.KRaftNotSupported in the strimzi/strimzi project:
class AlternativeReconcileTriggersST, method testManualTriggeringRollingUpdate.
/**
 * Verifies that the {@code strimzi.io/manual-rolling-update} annotation triggers a rolling update
 * of Kafka and ZooKeeper, that the operator removes the annotation afterwards, and that clients
 * producing/consuming continuously throughout the rolls are not disrupted.
 */
@ParallelNamespaceTest
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
@SuppressWarnings("checkstyle:MethodLength")
void testManualTriggeringRollingUpdate(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext, namespace);
    final String continuousTopicName = "continuous-topic";
    final String continuousProducerName = "continuous-" + testStorage.getProducerName();
    final String continuousConsumerName = "continuous-" + testStorage.getConsumerName();
    // 500 messages will take 500 seconds in that case
    final int continuousClientsMessageCount = 500;

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3, 3).build());

    String kafkaName = KafkaResources.kafkaStatefulSetName(testStorage.getClusterName());
    String zkName = KafkaResources.zookeeperStatefulSetName(testStorage.getClusterName());
    Map<String, String> kafkaPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getKafkaSelector());
    Map<String, String> zkPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getZookeeperSelector());

    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName()).build());

    // ##############################
    // Attach clients which will continuously produce/consume messages to/from Kafka brokers during rolling update
    // ##############################
    // Setup topic, which has 3 replicas and 2 min.isr to see if producer will be able to work during rolling update
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), continuousTopicName, 3, 3, 2).build());
    String producerAdditionConfiguration = "delivery.timeout.ms=20000\nrequest.timeout.ms=20000";
    // Add transactional id to make producer transactional
    producerAdditionConfiguration = producerAdditionConfiguration.concat("\ntransactional.id=" + continuousTopicName + ".1");
    producerAdditionConfiguration = producerAdditionConfiguration.concat("\nenable.idempotence=true");

    KafkaClients continuousClients = new KafkaClientsBuilder()
        .withProducerName(continuousProducerName)
        .withConsumerName(continuousConsumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
        .withTopicName(continuousTopicName)
        .withMessageCount(continuousClientsMessageCount)
        .withAdditionalConfig(producerAdditionConfiguration)
        .withDelayMs(1000)
        .withNamespaceName(testStorage.getNamespaceName())
        .build();
    resourceManager.createResource(extensionContext, continuousClients.producerStrimzi(), continuousClients.consumerStrimzi());
    // ##############################

    resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(testStorage.getClusterName(), testStorage.getUserName()).build());

    KafkaClients clients = new KafkaClientsBuilder()
        .withProducerName(testStorage.getProducerName())
        .withConsumerName(testStorage.getConsumerName())
        .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName()))
        .withTopicName(testStorage.getTopicName())
        .withMessageCount(MESSAGE_COUNT)
        .withNamespaceName(testStorage.getNamespaceName())
        .withUserName(testStorage.getUserName())
        .build();
    resourceManager.createResource(extensionContext, clients.producerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(testStorage.getProducerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);

    // rolling update for kafka
    // set annotation to trigger Kafka rolling update
    LOGGER.info("Annotate Kafka {} {} with manual rolling update annotation", Environment.isStrimziPodSetEnabled() ? StrimziPodSet.RESOURCE_KIND : Constants.STATEFUL_SET, kafkaName);
    StUtils.annotateStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), kafkaName, Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"));
    // check annotation to trigger rolling update
    assertThat(Boolean.parseBoolean(StUtils.getAnnotationsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), kafkaName).get(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE)), is(true));
    RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), 3, kafkaPods);
    // wait when annotation will be removed
    // BUG FIX: this wait previously polled the ZooKeeper set (zkName) although the annotation
    // was placed on the Kafka set — it must watch kafkaName here.
    TestUtils.waitFor("CO removes rolling update annotation", Constants.WAIT_FOR_ROLLING_UPDATE_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> StUtils.getAnnotationsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), kafkaName) == null || !StUtils.getAnnotationsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), kafkaName).containsKey(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE));

    resourceManager.createResource(extensionContext, clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);

    // rolling update for zookeeper
    // set annotation to trigger Zookeeper rolling update
    LOGGER.info("Annotate Zookeeper {} {} with manual rolling update annotation", Environment.isStrimziPodSetEnabled() ? StrimziPodSet.RESOURCE_KIND : Constants.STATEFUL_SET, zkName);
    StUtils.annotateStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), zkName, Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"));
    // check annotation to trigger rolling update
    assertThat(Boolean.parseBoolean(StUtils.getAnnotationsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), zkName).get(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE)), is(true));
    RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getZookeeperSelector(), 3, zkPods);
    // wait when annotation will be removed
    TestUtils.waitFor("CO removes rolling update annotation", Constants.WAIT_FOR_ROLLING_UPDATE_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> StUtils.getAnnotationsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), zkName) == null || !StUtils.getAnnotationsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), zkName).containsKey(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE));

    // Fresh consumer group so the consumer re-reads the topic after the ZK roll.
    clients = new KafkaClientsBuilder(clients).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
    resourceManager.createResource(extensionContext, clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);

    // Create new topic to ensure, that ZK is working properly
    String newTopicName = KafkaTopicUtils.generateRandomNameOfTopic();
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), newTopicName, 1, 1).build());

    clients = new KafkaClientsBuilder(clients)
        .withTopicName(newTopicName)
        .withConsumerGroup(ClientUtils.generateRandomConsumerGroup())
        .build();
    resourceManager.createResource(extensionContext, clients.producerTlsStrimzi(testStorage.getClusterName()), clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);

    // ##############################
    // Validate that continuous clients finished successfully
    // ##############################
    ClientUtils.waitForClientsSuccess(continuousProducerName, continuousConsumerName, testStorage.getNamespaceName(), continuousClientsMessageCount);
    // ##############################
}
Example usage of io.strimzi.systemtest.annotations.KRaftNotSupported in the strimzi/strimzi project:
class AlternativeReconcileTriggersST, method testAddingAndRemovingJbodVolumes.
/**
 * Adding and removing JBOD volumes requires the rolling updates to happen in sequential order,
 * otherwise the StatefulSet rejects the change. This test adds a volume to the JBOD storage and
 * then removes it again, covering both situations, while clients keep producing and consuming.
 */
@ParallelNamespaceTest
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case. JBOD is not supported as well.")
void testAddingAndRemovingJbodVolumes(ExtensionContext extensionContext) {
    final TestStorage ts = new TestStorage(extensionContext, namespace);
    final String contTopicName = "continuous-topic";
    final String contProducerName = "continuous-" + ts.getProducerName();
    final String contConsumerName = "continuous-" + ts.getConsumerName();
    // 500 messages will take 500 seconds in that case
    final int contMessageCount = 500;

    final PersistentClaimStorage vol0 = new PersistentClaimStorageBuilder()
        .withId(0)
        .withSize("1Gi")
        .withDeleteClaim(true)
        .build();
    final PersistentClaimStorage vol1 = new PersistentClaimStorageBuilder()
        .withId(1)
        .withSize("1Gi")
        .withDeleteClaim(true)
        .build();

    resourceManager.createResource(extensionContext,
        KafkaTemplates.kafkaJBOD(ts.getClusterName(), 3, 3, new JbodStorageBuilder().addToVolumes(vol0).build()).build());

    final String kafkaName = KafkaResources.kafkaStatefulSetName(ts.getClusterName());
    Map<String, String> kafkaPods = PodUtils.podSnapshot(ts.getNamespaceName(), ts.getKafkaSelector());

    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(ts.getClusterName(), ts.getTopicName()).build());

    // ##############################
    // Attach clients which will continuously produce/consume messages to/from Kafka brokers during rolling update
    // ##############################
    // Setup topic, which has 3 replicas and 2 min.isr to see if producer will be able to work during rolling update
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(ts.getClusterName(), contTopicName, 3, 3, 2).build());
    // Transactional, idempotent producer with tighter timeouts so failures surface quickly.
    final String producerConfig = "delivery.timeout.ms=20000\nrequest.timeout.ms=20000"
        + "\ntransactional.id=" + contTopicName + ".1"
        + "\nenable.idempotence=true";

    final KafkaClients continuousJob = new KafkaClientsBuilder()
        .withProducerName(contProducerName)
        .withConsumerName(contConsumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(ts.getClusterName()))
        .withTopicName(contTopicName)
        .withMessageCount(contMessageCount)
        .withAdditionalConfig(producerConfig)
        .withDelayMs(1000)
        .withNamespaceName(ts.getNamespaceName())
        .build();
    resourceManager.createResource(extensionContext, continuousJob.producerStrimzi(), continuousJob.consumerStrimzi());
    // ##############################

    resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(ts.getClusterName(), ts.getUserName()).build());

    KafkaClients tlsClients = new KafkaClientsBuilder()
        .withProducerName(ts.getProducerName())
        .withConsumerName(ts.getConsumerName())
        .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(ts.getClusterName()))
        .withTopicName(ts.getTopicName())
        .withMessageCount(MESSAGE_COUNT)
        .withNamespaceName(ts.getNamespaceName())
        .withUserName(ts.getUserName())
        .build();
    resourceManager.createResource(extensionContext, tlsClients.producerTlsStrimzi(ts.getClusterName()));
    ClientUtils.waitForClientSuccess(ts.getProducerName(), ts.getNamespaceName(), MESSAGE_COUNT);

    // Add Jbod volume to Kafka => triggers RU
    LOGGER.info("Add JBOD volume to the Kafka cluster {}", kafkaName);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(ts.getClusterName(), kafka -> {
        JbodStorage storage = (JbodStorage) kafka.getSpec().getKafka().getStorage();
        storage.getVolumes().add(vol1);
    }, ts.getNamespaceName());
    // Wait util it rolls
    kafkaPods = RollingUpdateUtils.waitTillComponentHasRolled(ts.getNamespaceName(), ts.getKafkaSelector(), 3, kafkaPods);

    // Remove Jbod volume to Kafka => triggers RU
    LOGGER.info("Remove JBOD volume to the Kafka cluster {}", kafkaName);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(ts.getClusterName(), kafka -> {
        JbodStorage storage = (JbodStorage) kafka.getSpec().getKafka().getStorage();
        storage.getVolumes().remove(vol1);
    }, ts.getNamespaceName());
    // Wait util it rolls
    RollingUpdateUtils.waitTillComponentHasRolled(ts.getNamespaceName(), ts.getKafkaSelector(), 3, kafkaPods);

    resourceManager.createResource(extensionContext, tlsClients.consumerTlsStrimzi(ts.getClusterName()));
    ClientUtils.waitForClientSuccess(ts.getConsumerName(), ts.getNamespaceName(), MESSAGE_COUNT);

    // ##############################
    // Validate that continuous clients finished successfully
    // ##############################
    ClientUtils.waitForClientsSuccess(contProducerName, contConsumerName, ts.getNamespaceName(), contMessageCount);
    // ##############################
}
Example usage of io.strimzi.systemtest.annotations.KRaftNotSupported in the strimzi/strimzi project:
class KafkaRollerIsolatedST, method testKafkaRollsWhenTopicIsUnderReplicated.
// Verifies the KafkaRoller can roll a cluster that has an under-replicated topic:
// scale 3 -> 4 brokers, create a topic with RF=4, scale back down to 3 (leaving the topic
// under-replicated), then force a manual rolling update and check the operator logs no errors.
// The whole run is bracketed with Instants so the CO log check covers exactly this test.
@ParallelNamespaceTest
@KRaftNotSupported("TopicOperator is not supported by KRaft mode and is used in this test class")
void testKafkaRollsWhenTopicIsUnderReplicated(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(clusterOperator.getDeploymentNamespace(), extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final String kafkaStsName = KafkaResources.kafkaStatefulSetName(clusterName);
final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, kafkaStsName);
// Start of the interval checked for Cluster Operator log errors at the end of the test.
Instant startTime = Instant.now();
// We need to start with 3 replicas / brokers,
// so that KafkaStreamsTopicStore topic gets set/distributed on this first 3 [0, 1, 2],
// since this topic has replication-factor 3 and minISR 2.
// auto.create.topics.enable is disabled so only explicitly created topics exist.
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3).editSpec().editKafka().addToConfig("auto.create.topics.enable", "false").endKafka().endSpec().build());
Map<String, String> kafkaPods = PodUtils.podSnapshot(namespaceName, kafkaSelector);
LOGGER.info("Running kafkaScaleUpScaleDown {}", clusterName);
final int initialReplicas = kubeClient(namespaceName).listPods(kafkaSelector).size();
assertEquals(3, initialReplicas);
// Now that KafkaStreamsTopicStore topic is set on the first 3 brokers, lets spin-up another one.
int scaledUpReplicas = 4;
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getKafka().setReplicas(scaledUpReplicas), namespaceName);
// Re-snapshot after each scaling step so later waits compare against the current pods.
kafkaPods = RollingUpdateUtils.waitForComponentScaleUpOrDown(namespaceName, kafkaSelector, scaledUpReplicas, kafkaPods);
// Topic with 4 partitions / RF=4 / minISR=4 spans all brokers; scaling back down below
// will leave it under-replicated, which is the condition under test.
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName, 4, 4, 4).build());
// Test that the new pod does not have errors or failures in events
String uid = kubeClient(namespaceName).getPodUid(KafkaResources.kafkaPodName(clusterName, 3));
List<Event> events = kubeClient(namespaceName).listEventsByResourceUid(uid);
assertThat(events, hasAllOfReasons(Scheduled, Pulled, Created, Started));
// scale down
final int scaledDownReplicas = 3;
LOGGER.info("Scaling down to {}", scaledDownReplicas);
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getKafka().setReplicas(scaledDownReplicas), namespaceName);
kafkaPods = RollingUpdateUtils.waitForComponentScaleUpOrDown(namespaceName, kafkaSelector, scaledDownReplicas, kafkaPods);
PodUtils.verifyThatRunningPodsAreStable(namespaceName, clusterName);
// set annotation to trigger Kafka rolling update
StUtils.annotateStatefulSetOrStrimziPodSet(namespaceName, kafkaStsName, Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"));
// The roller must complete even though the RF=4 topic is now under-replicated.
RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, kafkaSelector, scaledDownReplicas, kafkaPods);
// Test that CO doesn't have any exceptions in log
Instant endTime = Instant.now();
long duration = Duration.between(startTime, endTime).toSeconds();
assertNoCoErrorsLogged(duration);
}
Aggregations