use of io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder in project strimzi-kafka-operator by strimzi.
the class AlternativeReconcileTriggersST method testAddingAndRemovingJbodVolumes.
/**
 * Adding and removing JBOD volumes requires rolling updates in sequential order; otherwise the StatefulSet
 * rejects the change. This test adds a volume to and removes a volume from a JBOD array to cover both situations.
 */
@ParallelNamespaceTest
@KRaftNotSupported("The UserOperator, which this test case uses, is not supported in KRaft mode; JBOD is not supported either.")
void testAddingAndRemovingJbodVolumes(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext, namespace);

    final String continuousTopicName = "continuous-topic";
    final String continuousProducerName = "continuous-" + testStorage.getProducerName();
    final String continuousConsumerName = "continuous-" + testStorage.getConsumerName();
    // With a 1000 ms delay between messages, 500 messages take roughly 500 seconds
    final int continuousClientsMessageCount = 500;

    PersistentClaimStorage vol0 = new PersistentClaimStorageBuilder()
        .withId(0)
        .withSize("1Gi")
        .withDeleteClaim(true)
        .build();
    PersistentClaimStorage vol1 = new PersistentClaimStorageBuilder()
        .withId(1)
        .withSize("1Gi")
        .withDeleteClaim(true)
        .build();

    // Deploy the cluster with a single-volume JBOD array
    resourceManager.createResource(extensionContext,
        KafkaTemplates.kafkaJBOD(testStorage.getClusterName(), 3, 3,
            new JbodStorageBuilder().addToVolumes(vol0).build()).build());

    final String kafkaName = KafkaResources.kafkaStatefulSetName(testStorage.getClusterName());

    Map<String, String> kafkaPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getKafkaSelector());

    resourceManager.createResource(extensionContext,
        KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName()).build());

    // ##############################
    // Attach clients which will continuously produce/consume messages to/from Kafka brokers during the rolling update
    // ##############################
    // Set up a topic with 3 replicas and min.insync.replicas=2 to check that the producer keeps working during the rolling update
    resourceManager.createResource(extensionContext,
        KafkaTopicTemplates.topic(testStorage.getClusterName(), continuousTopicName, 3, 3, 2).build());

    String producerAdditionConfiguration = "delivery.timeout.ms=20000\nrequest.timeout.ms=20000";
    // Add a transactional id to make the producer transactional
    producerAdditionConfiguration = producerAdditionConfiguration.concat("\ntransactional.id=" + continuousTopicName + ".1");
    producerAdditionConfiguration = producerAdditionConfiguration.concat("\nenable.idempotence=true");

    KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
        .withProducerName(continuousProducerName)
        .withConsumerName(continuousConsumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
        .withTopicName(continuousTopicName)
        .withMessageCount(continuousClientsMessageCount)
        .withAdditionalConfig(producerAdditionConfiguration)
        .withDelayMs(1000)
        .withNamespaceName(testStorage.getNamespaceName())
        .build();

    resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi(), kafkaBasicClientJob.consumerStrimzi());
    // ##############################

    resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(testStorage.getClusterName(), testStorage.getUserName()).build());

    KafkaClients clients = new KafkaClientsBuilder()
        .withProducerName(testStorage.getProducerName())
        .withConsumerName(testStorage.getConsumerName())
        .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName()))
        .withTopicName(testStorage.getTopicName())
        .withMessageCount(MESSAGE_COUNT)
        .withNamespaceName(testStorage.getNamespaceName())
        .withUserName(testStorage.getUserName())
        .build();

    resourceManager.createResource(extensionContext, clients.producerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(testStorage.getProducerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);

    // Add a JBOD volume to Kafka => triggers a rolling update
    LOGGER.info("Add JBOD volume to the Kafka cluster {}", kafkaName);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), kafka -> {
        JbodStorage storage = (JbodStorage) kafka.getSpec().getKafka().getStorage();
        storage.getVolumes().add(vol1);
    }, testStorage.getNamespaceName());

    // Wait until it rolls
    kafkaPods = RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), 3, kafkaPods);

    // Remove the JBOD volume from Kafka => triggers a rolling update
    LOGGER.info("Remove JBOD volume from the Kafka cluster {}", kafkaName);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), kafka -> {
        JbodStorage storage = (JbodStorage) kafka.getSpec().getKafka().getStorage();
        storage.getVolumes().remove(vol1);
    }, testStorage.getNamespaceName());

    // Wait until it rolls
    RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), 3, kafkaPods);

    resourceManager.createResource(extensionContext, clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);

    // ##############################
    // Validate that the continuous clients finished successfully
    // ##############################
    ClientUtils.waitForClientsSuccess(continuousProducerName, continuousConsumerName, testStorage.getNamespaceName(), continuousClientsMessageCount);
    // ##############################
}
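Stripped of the test harness, the storage mutation that drives each rolling update reduces to the pattern below. This is a minimal sketch using the same builders and helpers as above; the cluster name, namespace, and volume id are placeholders, not values from the test:

// Minimal sketch: add one JBOD volume to a running cluster (hypothetical names)
PersistentClaimStorage extraVolume = new PersistentClaimStorageBuilder()
    .withId(2)                  // ids must be unique within the JBOD volume array
    .withSize("1Gi")
    .withDeleteClaim(true)      // delete the backing PVC together with the cluster
    .build();

KafkaResource.replaceKafkaResourceInSpecificNamespace("my-cluster", kafka -> {
    JbodStorage storage = (JbodStorage) kafka.getSpec().getKafka().getStorage();
    storage.getVolumes().add(extraVolume);   // the operator reacts with a rolling update
}, "my-namespace");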
use of io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder in project strimzi-kafka-operator by strimzi.
the class KafkaClusterTest method testStorageValidationAfterInitialDeployment.
@ParallelTest
public void testStorageValidationAfterInitialDeployment() {
    assertThrows(InvalidResourceException.class, () -> {
        // Shrinking a one-volume JBOD array down to an empty array is not a valid
        // storage change after the initial deployment
        Storage oldStorage = new JbodStorageBuilder()
            .withVolumes(new PersistentClaimStorageBuilder().withSize("100Gi").build())
            .build();
        Kafka kafkaAssembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, jmxMetricsConfig, configuration, emptyMap()))
            .editSpec().editKafka()
                .withStorage(new JbodStorageBuilder().withVolumes(List.of()).build())
            .endKafka().endSpec()
            .build();
        KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, oldStorage, replicas, false);
    });
}
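The failing case above is the transition from a one-volume JBOD array to an empty one after the initial deployment. By contrast, a change that keeps the existing volume and adds a new one is the supported path exercised by testAddingAndRemovingJbodVolumes. A sketch of that storage pair, reusing the same builders (ids and sizes are illustrative, and the expectation that this shape validates is inferred from the JBOD test above, not asserted here):

// Sketch: a JBOD update that keeps the original volume and adds a second one
Storage oldStorage = new JbodStorageBuilder()
    .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build())
    .build();
Storage newStorage = new JbodStorageBuilder()
    .withVolumes(
        new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build(),    // unchanged volume
        new PersistentClaimStorageBuilder().withId(1).withSize("100Gi").build())    // newly added volume
    .build();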
use of io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder in project strimzi-kafka-operator by strimzi.
the class KafkaClusterTest method testPvcNames.
@ParallelTest
public void testPvcNames() {
    // Plain persistent-claim storage: one PVC template per broker
    Kafka assembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, jmxMetricsConfig, configuration, emptyMap()))
        .editSpec().editKafka()
            .withStorage(new PersistentClaimStorageBuilder().withDeleteClaim(false).withSize("100Gi").build())
        .endKafka().endSpec()
        .build();
    KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, assembly, VERSIONS);
    List<PersistentVolumeClaim> pvcs = kc.getPersistentVolumeClaimTemplates();

    for (int i = 0; i < replicas; i++) {
        assertThat(pvcs.get(0).getMetadata().getName() + "-" + KafkaResources.kafkaPodName(cluster, i),
            is(KafkaCluster.VOLUME_NAME + "-" + KafkaResources.kafkaPodName(cluster, i)));
    }

    // JBOD storage: the volume id becomes part of the PVC name
    assembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, jmxMetricsConfig, configuration, emptyMap()))
        .editSpec().editKafka()
            .withStorage(new JbodStorageBuilder()
                .withVolumes(
                    new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize("100Gi").build(),
                    new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize("100Gi").build())
                .build())
        .endKafka().endSpec()
        .build();
    kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, assembly, VERSIONS);
    pvcs = kc.getPersistentVolumeClaimTemplates();

    for (int i = 0; i < replicas; i++) {
        int id = 0;
        for (PersistentVolumeClaim pvc : pvcs) {
            assertThat(pvc.getMetadata().getName() + "-" + KafkaResources.kafkaPodName(cluster, i),
                is(KafkaCluster.VOLUME_NAME + "-" + id++ + "-" + KafkaResources.kafkaPodName(cluster, i)));
        }
    }
}
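The naming scheme these assertions encode: with plain persistent-claim storage the PVC name is the volume name plus the pod name, while with JBOD the volume id is inserted between the two. Assuming KafkaCluster.VOLUME_NAME resolves to "data" (the usual Strimzi convention, but an assumption here) and a cluster called my-cluster, the expected names look like this:

// Illustration only; "data" and "my-cluster" are assumed placeholder values
String singleVolumePvc = "data" + "-" + KafkaResources.kafkaPodName("my-cluster", 0);
// -> "data-my-cluster-kafka-0"
String jbodVolumePvc = "data" + "-" + 1 + "-" + KafkaResources.kafkaPodName("my-cluster", 0);
// -> "data-1-my-cluster-kafka-0"  (JBOD volume with id 1)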
use of io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder in project strimzi-kafka-operator by strimzi.
the class KafkaClusterTest method testStorageReverting.
@ParallelTest
public void testStorageReverting() {
    Storage jbod = new JbodStorageBuilder()
        .withVolumes(
            new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("100Gi").build(),
            new PersistentClaimStorageBuilder().withStorageClass("gp2-st1").withDeleteClaim(true).withId(1).withSize("1000Gi").build())
        .build();
    Storage ephemeral = new EphemeralStorageBuilder().build();
    Storage persistent = new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("100Gi").build();

    // Test storage changes and how they are reverted: a disallowed storage change keeps
    // the existing storage and raises a warning condition instead of failing

    // JBOD in the custom resource, ephemeral in the existing cluster
    Kafka kafkaAssembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, jmxMetricsConfig, configuration, emptyMap()))
        .editSpec().editKafka()
            .withStorage(jbod)
            .withConfig(Map.of("default.replication.factor", 3, "min.insync.replicas", 2))
        .endKafka().endSpec()
        .build();
    KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, ephemeral, replicas, false);
    // Storage is reverted
    assertThat(kc.getStorage(), is(ephemeral));
    // Warning status condition is set
    assertThat(kc.getWarningConditions().size(), is(1));
    assertThat(kc.getWarningConditions().get(0).getReason(), is("KafkaStorage"));

    // JBOD in the custom resource, persistent-claim in the existing cluster
    kafkaAssembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, jmxMetricsConfig, configuration, emptyMap()))
        .editSpec().editKafka()
            .withStorage(jbod)
            .withConfig(Map.of("default.replication.factor", 3, "min.insync.replicas", 2))
        .endKafka().endSpec()
        .build();
    kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, persistent, replicas, false);
    // Storage is reverted
    assertThat(kc.getStorage(), is(persistent));
    // Warning status condition is set
    assertThat(kc.getWarningConditions().size(), is(1));
    assertThat(kc.getWarningConditions().get(0).getReason(), is("KafkaStorage"));

    // Ephemeral in the custom resource, JBOD in the existing cluster
    kafkaAssembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, jmxMetricsConfig, configuration, emptyMap()))
        .editSpec().editKafka()
            .withStorage(ephemeral)
            .withConfig(Map.of("default.replication.factor", 3, "min.insync.replicas", 2))
        .endKafka().endSpec()
        .build();
    kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, jbod, replicas, false);
    // Storage is reverted
    assertThat(kc.getStorage(), is(jbod));
    // Warning status condition is set
    assertThat(kc.getWarningConditions().size(), is(1));
    assertThat(kc.getWarningConditions().get(0).getReason(), is("KafkaStorage"));

    // Persistent-claim in the custom resource, JBOD in the existing cluster
    kafkaAssembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, jmxMetricsConfig, configuration, emptyMap()))
        .editSpec().editKafka()
            .withStorage(persistent)
            .withConfig(Map.of("default.replication.factor", 3, "min.insync.replicas", 2))
        .endKafka().endSpec()
        .build();
    kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, jbod, replicas, false);
    // Storage is reverted
    assertThat(kc.getStorage(), is(jbod));
    // Warning status condition is set
    assertThat(kc.getWarningConditions().size(), is(1));
    assertThat(kc.getWarningConditions().get(0).getReason(), is("KafkaStorage"));
}
use of io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder in project strimzi-kafka-operator by strimzi.
the class KafkaClusterTest method testGeneratePersistentVolumeClaimsJbod.
@ParallelTest
public void testGeneratePersistentVolumeClaimsJbod() {
    Kafka kafkaAssembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, jmxMetricsConfig, configuration, emptyMap()))
        .editSpec().editKafka()
            .withStorage(new JbodStorageBuilder()
                .withVolumes(
                    new PersistentClaimStorageBuilder()
                        .withStorageClass("gp2-ssd")
                        .withDeleteClaim(false)
                        .withId(0)
                        .withSize("100Gi")
                        .withOverrides(new PersistentClaimStorageOverrideBuilder().withBroker(1).withStorageClass("gp2-ssd-az1").build())
                        .build(),
                    new PersistentClaimStorageBuilder()
                        .withStorageClass("gp2-st1")
                        .withDeleteClaim(true)
                        .withId(1)
                        .withSize("1000Gi")
                        .withOverrides(new PersistentClaimStorageOverrideBuilder().withBroker(1).withStorageClass("gp2-st1-az1").build())
                        .build())
                .build())
        .endKafka().endSpec()
        .build();
    KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS);

    // Check the storage annotation on the StatefulSet
    assertThat(kc.generateStatefulSet(true, ImagePullPolicy.NEVER, null, null).getMetadata().getAnnotations().get(AbstractModel.ANNO_STRIMZI_IO_STORAGE),
        is(ModelUtils.encodeStorageToJson(kafkaAssembly.getSpec().getKafka().getStorage())));

    // Check the PVCs: 2 volumes x 3 brokers = 6 claims
    List<PersistentVolumeClaim> pvcs = kc.generatePersistentVolumeClaims(kc.getStorage());
    assertThat(pvcs.size(), is(6));

    // PVCs for volume 0: deleteClaim=false, broker 1 overridden to gp2-ssd-az1
    for (int i = 0; i < 3; i++) {
        PersistentVolumeClaim pvc = pvcs.get(i);
        assertThat(pvc.getSpec().getResources().getRequests().get("storage"), is(new Quantity("100Gi")));
        if (i != 1) {
            assertThat(pvc.getSpec().getStorageClassName(), is("gp2-ssd"));
        } else {
            assertThat(pvc.getSpec().getStorageClassName(), is("gp2-ssd-az1"));
        }
        assertThat(pvc.getMetadata().getName().startsWith(KafkaCluster.VOLUME_NAME), is(true));
        assertThat(pvc.getMetadata().getOwnerReferences().size(), is(0));
        assertThat(pvc.getMetadata().getAnnotations().get(AbstractModel.ANNO_STRIMZI_IO_DELETE_CLAIM), is("false"));
    }

    // PVCs for volume 1: deleteClaim=true, broker 1 overridden to gp2-st1-az1
    for (int i = 3; i < 6; i++) {
        PersistentVolumeClaim pvc = pvcs.get(i);
        assertThat(pvc.getSpec().getResources().getRequests().get("storage"), is(new Quantity("1000Gi")));
        if (i != 4) {
            assertThat(pvc.getSpec().getStorageClassName(), is("gp2-st1"));
        } else {
            assertThat(pvc.getSpec().getStorageClassName(), is("gp2-st1-az1"));
        }
        assertThat(pvc.getMetadata().getName().startsWith(KafkaCluster.VOLUME_NAME), is(true));
        assertThat(pvc.getMetadata().getOwnerReferences().size(), is(1));
        assertThat(pvc.getMetadata().getAnnotations().get(AbstractModel.ANNO_STRIMZI_IO_DELETE_CLAIM), is("true"));
    }
}
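Taken out of the larger builder chain, the per-broker override used above is a small builder in its own right. A sketch of a single volume whose storage class differs for broker 1 only (all class names are illustrative):

// One JBOD volume: brokers default to "gp2-ssd", but broker 1 gets "gp2-ssd-az1"
PersistentClaimStorage volume = new PersistentClaimStorageBuilder()
    .withId(0)
    .withSize("100Gi")
    .withStorageClass("gp2-ssd")                        // default storage class
    .withOverrides(new PersistentClaimStorageOverrideBuilder()
        .withBroker(1)                                  // broker index to override
        .withStorageClass("gp2-ssd-az1")                // class for broker 1 only
        .build())
    .build();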