use of io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder in project strimzi-kafka-operator by strimzi.
the class StorageDiffTest method testSizeChanges.
@ParallelTest
public void testSizeChanges() {
    Storage persistent = new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("100Gi").build();
    Storage persistent2 = new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("1000Gi").build();
    Storage persistent3 = new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("10Gi").build();
    // Used to test millibyte handling: 3.2Ti with a binary suffix ...
    Storage persistent4 = new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("3.2Ti").build();
    // ... and the same capacity expressed in millibytes (3518437208883200m == 3.2Ti)
    Storage persistent5 = new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("3518437208883200m").build();
    assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent, 3, 3).shrinkSize(), is(false));
    assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent2, 3, 3).shrinkSize(), is(false));
    assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent3, 3, 3).shrinkSize(), is(true));
    assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent4, persistent5, 3, 3).shrinkSize(), is(false));
    assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent4, persistent, 3, 3).shrinkSize(), is(true));
    assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent5, persistent, 3, 3).shrinkSize(), is(true));
}
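For reference, the two capacities used above are numerically identical, which is why no shrink is detected between persistent4 and persistent5. A minimal, self-contained sketch of the conversion in plain Java (illustrative only, independent of Strimzi's own size-parsing code):
import java.math.BigDecimal;

public class MillibyteExample {
    public static void main(String[] args) {
        // 3.2Ti = 3.2 * 1024^4 bytes
        BigDecimal tebi = BigDecimal.valueOf(1024L * 1024L * 1024L * 1024L);
        BigDecimal bytesFromTi = new BigDecimal("3.2").multiply(tebi);

        // The Kubernetes "m" suffix means millibytes, i.e. 1/1000 of a byte
        BigDecimal bytesFromMilli = new BigDecimal("3518437208883200").divide(BigDecimal.valueOf(1000));

        System.out.println(bytesFromTi);                              // 3518437208883.2
        System.out.println(bytesFromMilli);                           // 3518437208883.2
        System.out.println(bytesFromTi.compareTo(bytesFromMilli) == 0);  // true
    }
}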
use of io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder in project strimzi-kafka-operator by strimzi.
the class ZookeeperClusterTest method testStorageValidationAfterInitialDeployment.
@ParallelTest
public void testStorageValidationAfterInitialDeployment() {
    assertThrows(InvalidResourceException.class, () -> {
        Storage oldStorage = new PersistentClaimStorageBuilder().withSize("100Gi").build();
        Kafka kafkaAssembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, jmxMetricsConfig, configurationJson, zooConfigurationJson))
            .editSpec().editZookeeper()
                .withStorage(new PersistentClaimStorageBuilder().build())
            .endZookeeper().endSpec().build();
        ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, oldStorage, replicas);
    });
}
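The assertion above relies on the operator rejecting a persistent-claim storage whose size was removed after the initial deployment. A minimal sketch of that kind of guard, illustrative only and not Strimzi's actual validation code (the hypothetical validateStorageUpdate helper and the IllegalArgumentException stand in for the real InvalidResourceException path):
import io.strimzi.api.kafka.model.storage.PersistentClaimStorage;
import io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder;

public class StorageUpdateCheckExample {
    // Hypothetical helper, for illustration only: reject a new persistent-claim
    // storage that no longer declares a size once the cluster already exists.
    static void validateStorageUpdate(PersistentClaimStorage oldStorage, PersistentClaimStorage newStorage) {
        if (oldStorage.getSize() != null && newStorage.getSize() == null) {
            throw new IllegalArgumentException("The size of a persistent claim storage cannot be removed after the initial deployment");
        }
    }

    public static void main(String[] args) {
        PersistentClaimStorage oldStorage = new PersistentClaimStorageBuilder().withSize("100Gi").build();
        PersistentClaimStorage newStorage = new PersistentClaimStorageBuilder().build();
        validateStorageUpdate(oldStorage, newStorage);   // throws, mirroring the failure asserted in the test
    }
}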
use of io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder in project strimzi by strimzi.
the class AlternativeReconcileTriggersST method testAddingAndRemovingJbodVolumes.
/**
 * Adding and removing JBOD volumes requires the rolling updates to happen in sequential order, otherwise the
 * StatefulSet does not accept the change. This test tries to add a volume to and remove a volume from the JBOD
 * storage to cover both situations.
 */
@ParallelNamespaceTest
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case. JBOD is not supported as well.")
void testAddingAndRemovingJbodVolumes(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext, namespace);
    final String continuousTopicName = "continuous-topic";
    final String continuousProducerName = "continuous-" + testStorage.getProducerName();
    final String continuousConsumerName = "continuous-" + testStorage.getConsumerName();
    // With the 1000 ms delay configured below, 500 messages take roughly 500 seconds
    final int continuousClientsMessageCount = 500;
    PersistentClaimStorage vol0 = new PersistentClaimStorageBuilder().withId(0).withSize("1Gi").withDeleteClaim(true).build();
    PersistentClaimStorage vol1 = new PersistentClaimStorageBuilder().withId(1).withSize("1Gi").withDeleteClaim(true).build();
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaJBOD(testStorage.getClusterName(), 3, 3, new JbodStorageBuilder().addToVolumes(vol0).build()).build());
    final String kafkaName = KafkaResources.kafkaStatefulSetName(testStorage.getClusterName());
    Map<String, String> kafkaPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getKafkaSelector());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName()).build());
    // ##############################
    // Attach clients which will continuously produce/consume messages to/from Kafka brokers during the rolling updates
    // ##############################
    // Set up a topic with 3 replicas and min.insync.replicas=2 to verify that the producer keeps working during the rolling updates
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), continuousTopicName, 3, 3, 2).build());
    String producerAdditionConfiguration = "delivery.timeout.ms=20000\nrequest.timeout.ms=20000";
    // Add a transactional ID to make the producer transactional (the equivalent plain-client settings are sketched after this test)
    producerAdditionConfiguration = producerAdditionConfiguration.concat("\ntransactional.id=" + continuousTopicName + ".1");
    producerAdditionConfiguration = producerAdditionConfiguration.concat("\nenable.idempotence=true");
    KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
        .withProducerName(continuousProducerName)
        .withConsumerName(continuousConsumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
        .withTopicName(continuousTopicName)
        .withMessageCount(continuousClientsMessageCount)
        .withAdditionalConfig(producerAdditionConfiguration)
        .withDelayMs(1000)
        .withNamespaceName(testStorage.getNamespaceName())
        .build();
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi(), kafkaBasicClientJob.consumerStrimzi());
    // ##############################
    resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(testStorage.getClusterName(), testStorage.getUserName()).build());
    KafkaClients clients = new KafkaClientsBuilder()
        .withProducerName(testStorage.getProducerName())
        .withConsumerName(testStorage.getConsumerName())
        .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName()))
        .withTopicName(testStorage.getTopicName())
        .withMessageCount(MESSAGE_COUNT)
        .withNamespaceName(testStorage.getNamespaceName())
        .withUserName(testStorage.getUserName())
        .build();
    resourceManager.createResource(extensionContext, clients.producerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(testStorage.getProducerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    // Add a JBOD volume to the Kafka cluster => triggers a rolling update
    LOGGER.info("Add JBOD volume to the Kafka cluster {}", kafkaName);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), kafka -> {
        JbodStorage storage = (JbodStorage) kafka.getSpec().getKafka().getStorage();
        storage.getVolumes().add(vol1);
    }, testStorage.getNamespaceName());
    // Wait until it rolls
    kafkaPods = RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), 3, kafkaPods);
    // Remove the JBOD volume from the Kafka cluster => triggers a rolling update
    LOGGER.info("Remove JBOD volume from the Kafka cluster {}", kafkaName);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), kafka -> {
        JbodStorage storage = (JbodStorage) kafka.getSpec().getKafka().getStorage();
        storage.getVolumes().remove(vol1);
    }, testStorage.getNamespaceName());
    // Wait until it rolls
    RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), 3, kafkaPods);
    resourceManager.createResource(extensionContext, clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    // ##############################
    // Validate that continuous clients finished successfully
    // ##############################
    ClientUtils.waitForClientsSuccess(continuousProducerName, continuousConsumerName, testStorage.getNamespaceName(), continuousClientsMessageCount);
    // ##############################
}
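The additional configuration string built in the test corresponds to standard Kafka producer settings. A minimal sketch of the equivalent plain kafka-clients Properties (illustrative only; the transactional id value assumes the same "continuous-topic.1" used above):
import java.util.Properties;
import org.apache.kafka.clients.producer.ProducerConfig;

public class ContinuousProducerConfigExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Same settings as the producerAdditionConfiguration string in the test
        props.put(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, "20000");
        props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, "20000");
        // A transactional id enables transactions; idempotence is required for them
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "continuous-topic.1");
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true");
        props.forEach((k, v) -> System.out.println(k + "=" + v));
    }
}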
use of io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder in project strimzi by strimzi.
the class KafkaBrokerConfigurationBuilderTest method testJbodStorageLogDirs.
@ParallelTest
public void testJbodStorageLogDirs() {
    SingleVolumeStorage vol1 = new PersistentClaimStorageBuilder().withId(1).withSize("1Ti").withStorageClass("aws-ebs").withDeleteClaim(true).build();
    SingleVolumeStorage vol2 = new EphemeralStorageBuilder().withId(2).withSizeLimit("5Gi").build();
    SingleVolumeStorage vol5 = new PersistentClaimStorageBuilder().withId(5).withSize("10Ti").withStorageClass("aws-ebs").withDeleteClaim(false).build();
    Storage storage = new JbodStorageBuilder().withVolumes(vol1, vol2, vol5).build();
    String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION)
        .withLogDirs(VolumeUtils.createVolumeMounts(storage, "/var/lib/kafka", false))
        .build();
    assertThat(configuration, isEquivalent("log.dirs=/var/lib/kafka/data-1/kafka-log${STRIMZI_BROKER_ID},/var/lib/kafka/data-2/kafka-log${STRIMZI_BROKER_ID},/var/lib/kafka/data-5/kafka-log${STRIMZI_BROKER_ID}"));
}
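The expected value shows the mapping the test asserts: each JBOD volume id becomes a data-<id> directory under the mount root, with a per-broker kafka-log subdirectory. A minimal sketch of that mapping (illustrative helper only, not the operator's VolumeUtils code):
import java.util.List;
import java.util.stream.Collectors;

public class LogDirsExample {
    // Hypothetical helper: derive the log.dirs value from JBOD volume ids,
    // mirroring the pattern asserted in the test above.
    static String logDirs(String mountRoot, List<Integer> volumeIds) {
        return "log.dirs=" + volumeIds.stream()
                .map(id -> mountRoot + "/data-" + id + "/kafka-log${STRIMZI_BROKER_ID}")
                .collect(Collectors.joining(","));
    }

    public static void main(String[] args) {
        // Prints the same value the test expects for volumes 1, 2 and 5
        System.out.println(logDirs("/var/lib/kafka", List.of(1, 2, 5)));
    }
}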
use of io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder in project strimzi by strimzi.
the class KafkaClusterTest method testPvcNames.
@ParallelTest
public void testPvcNames() {
    Kafka assembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, jmxMetricsConfig, configuration, emptyMap()))
        .editSpec().editKafka()
            .withStorage(new PersistentClaimStorageBuilder().withDeleteClaim(false).withSize("100Gi").build())
        .endKafka().endSpec().build();
    KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, assembly, VERSIONS);
    List<PersistentVolumeClaim> pvcs = kc.getPersistentVolumeClaimTemplates();
    for (int i = 0; i < replicas; i++) {
        assertThat(pvcs.get(0).getMetadata().getName() + "-" + KafkaResources.kafkaPodName(cluster, i), is(KafkaCluster.VOLUME_NAME + "-" + KafkaResources.kafkaPodName(cluster, i)));
    }
    assembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, jmxMetricsConfig, configuration, emptyMap()))
        .editSpec().editKafka()
            .withStorage(new JbodStorageBuilder().withVolumes(
                    new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize("100Gi").build(),
                    new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize("100Gi").build()).build())
        .endKafka().endSpec().build();
    kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, assembly, VERSIONS);
    pvcs = kc.getPersistentVolumeClaimTemplates();
    for (int i = 0; i < replicas; i++) {
        int id = 0;
        for (PersistentVolumeClaim pvc : pvcs) {
            assertThat(pvc.getMetadata().getName() + "-" + KafkaResources.kafkaPodName(cluster, i), is(KafkaCluster.VOLUME_NAME + "-" + id++ + "-" + KafkaResources.kafkaPodName(cluster, i)));
        }
    }
}
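The assertions encode the PVC naming convention checked by the test: a single persistent-claim volume yields one claim per pod, while each JBOD volume adds its id to the name. A small illustrative sketch (hypothetical helpers, assuming the "data" volume-name prefix that the expected values are built from):
public class PvcNameExample {
    // Hypothetical helpers mirroring the naming pattern asserted above
    static String pvcName(String podName) {
        return "data-" + podName;                     // single persistent-claim volume
    }

    static String pvcName(int volumeId, String podName) {
        return "data-" + volumeId + "-" + podName;    // JBOD volume with the given id
    }

    public static void main(String[] args) {
        System.out.println(pvcName("my-cluster-kafka-0"));      // data-my-cluster-kafka-0
        System.out.println(pvcName(1, "my-cluster-kafka-0"));   // data-1-my-cluster-kafka-0
    }
}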