Use of io.strimzi.api.kafka.model.storage.JbodStorageBuilder in project strimzi-kafka-operator by strimzi.
The class StorageUtilsTest, method testEphemeralStorage:
@ParallelTest
public void testEphemeralStorage() {
    Storage notEphemeral = new PersistentClaimStorageBuilder().build();
    Storage isEphemeral = new EphemeralStorageBuilder().build();
    Storage includesEphemeral = new JbodStorageBuilder()
            .withVolumes(
                    new EphemeralStorageBuilder().withId(1).build(),
                    new EphemeralStorageBuilder().withId(2).build())
            .build();

    assertThat(StorageUtils.usesEphemeral(notEphemeral), is(false));
    assertThat(StorageUtils.usesEphemeral(isEphemeral), is(true));
    assertThat(StorageUtils.usesEphemeral(includesEphemeral), is(true));
}
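The interesting case above is the JBOD one: the predicate must look inside the volume list, not just at the top-level storage type. A minimal sketch of such a predicate, written as a hypothetical helper (not Strimzi's actual implementation), could look like this:

// Hypothetical sketch: true if the storage is ephemeral itself, or is a
// JBOD array containing at least one ephemeral volume.
static boolean usesEphemeralSketch(Storage storage) {
    if (storage instanceof EphemeralStorage) {
        return true;
    }
    if (storage instanceof JbodStorage) {
        return ((JbodStorage) storage).getVolumes().stream()
                .anyMatch(vol -> vol instanceof EphemeralStorage);
    }
    return false;
}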
Use of io.strimzi.api.kafka.model.storage.JbodStorageBuilder in project strimzi-kafka-operator by strimzi.
The class ModelUtilsTest, method testStorageSerializationAndDeserialization:
@ParallelTest
public void testStorageSerializationAndDeserialization() {
    Storage jbod = new JbodStorageBuilder()
            .withVolumes(
                    new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("100Gi").build(),
                    new PersistentClaimStorageBuilder().withStorageClass("gp2-st1").withDeleteClaim(true).withId(1).withSize("1000Gi").build())
            .build();
    Storage ephemeral = new EphemeralStorageBuilder().build();
    Storage persistent = new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("100Gi").build();

    assertThat(ModelUtils.decodeStorageFromJson(ModelUtils.encodeStorageToJson(jbod)), is(jbod));
    assertThat(ModelUtils.decodeStorageFromJson(ModelUtils.encodeStorageToJson(ephemeral)), is(ephemeral));
    assertThat(ModelUtils.decodeStorageFromJson(ModelUtils.encodeStorageToJson(persistent)), is(persistent));
}
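The test exercises encode/decode as a round trip and asserts equality for all three storage shapes. Conceptually this amounts to a Jackson round trip, since the Storage model carries a "type" discriminator for its subtypes. A hedged sketch of such helpers, assuming Jackson polymorphic deserialization works for Storage (the helper names are hypothetical; the real ModelUtils may differ):

import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;

// Hypothetical round-trip helpers, assuming Storage's subtypes
// (ephemeral, persistent-claim, jbod) are resolvable from the JSON "type" field.
static String encodeStorageSketch(Storage storage) throws IOException {
    return new ObjectMapper().writeValueAsString(storage);
}

static Storage decodeStorageSketch(String json) throws IOException {
    return new ObjectMapper().readValue(json, Storage.class);
}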
Use of io.strimzi.api.kafka.model.storage.JbodStorageBuilder in project strimzi-kafka-operator by strimzi.
The class AlternativeReconcileTriggersST, method testAddingAndRemovingJbodVolumes:
/**
 * Adding and removing JBOD volumes requires rolling updates in sequential order; otherwise the
 * StatefulSet does not accept the change. This test adds a volume to, and then removes a volume
 * from, a JBOD array to exercise both situations.
 */
@ParallelNamespaceTest
void testAddingAndRemovingJbodVolumes(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    final String continuousTopicName = "continuous-topic";
    // With the 1000 ms delay configured below, 500 messages take roughly 500 seconds
    final int continuousClientsMessageCount = 500;
    final String producerName = "hello-world-producer";
    final String consumerName = "hello-world-consumer";

    PersistentClaimStorage vol0 = new PersistentClaimStorageBuilder().withId(0).withSize("1Gi").withDeleteClaim(true).build();
    PersistentClaimStorage vol1 = new PersistentClaimStorageBuilder().withId(1).withSize("1Gi").withDeleteClaim(true).build();

    resourceManager.createResource(extensionContext,
            KafkaTemplates.kafkaJBOD(clusterName, 3, 3, new JbodStorageBuilder().addToVolumes(vol0).build()).build());

    final String kafkaName = KafkaResources.kafkaStatefulSetName(clusterName);
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, kafkaName);
    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespaceName, kafkaSelector);

    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());

    // ##############################
    // Attach clients which will continuously produce/consume messages to/from Kafka brokers during the rolling update
    // ##############################
    // Set up a topic with 3 replicas and min.insync.replicas=2 so the producer can keep working during the rolling update
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, continuousTopicName, 3, 3, 2).build());

    String producerAdditionConfiguration = "delivery.timeout.ms=20000\nrequest.timeout.ms=20000";
    // Add a transactional id to make the producer transactional
    producerAdditionConfiguration = producerAdditionConfiguration.concat("\ntransactional.id=" + continuousTopicName + ".1");
    producerAdditionConfiguration = producerAdditionConfiguration.concat("\nenable.idempotence=true");

    KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
            .withProducerName(producerName)
            .withConsumerName(consumerName)
            .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
            .withTopicName(continuousTopicName)
            .withMessageCount(continuousClientsMessageCount)
            .withAdditionalConfig(producerAdditionConfiguration)
            .withDelayMs(1000)
            .withNamespaceName(namespaceName)
            .build();

    resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.consumerStrimzi());
    // ##############################

    String userName = KafkaUserUtils.generateRandomNameOfKafkaUser();
    KafkaUser user = KafkaUserTemplates.tlsUser(clusterName, userName).build();

    resourceManager.createResource(extensionContext, user);
    resourceManager.createResource(extensionContext, false, KafkaClientsTemplates.kafkaClients(true, kafkaClientsName, user).build());

    final String kafkaClientsPodName = PodUtils.getPodsByPrefixInNameWithDynamicWait(namespaceName, kafkaClientsName).get(0).getMetadata().getName();

    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
            .withUsingPodName(kafkaClientsPodName)
            .withTopicName(topicName)
            .withNamespaceName(namespaceName)
            .withClusterName(clusterName)
            .withMessageCount(MESSAGE_COUNT)
            .withKafkaUsername(userName)
            .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
            .build();

    internalKafkaClient.produceTlsMessagesUntilOperationIsSuccessful(MESSAGE_COUNT);

    // Add a JBOD volume to Kafka => triggers a rolling update
    LOGGER.info("Add JBOD volume to the Kafka cluster {}", kafkaName);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
        JbodStorage storage = (JbodStorage) kafka.getSpec().getKafka().getStorage();
        storage.getVolumes().add(vol1);
    }, namespaceName);

    // Wait until it rolls
    kafkaPods = RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, kafkaSelector, 3, kafkaPods);

    // Remove the JBOD volume from Kafka => triggers a rolling update
    LOGGER.info("Remove JBOD volume from the Kafka cluster {}", kafkaName);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
        JbodStorage storage = (JbodStorage) kafka.getSpec().getKafka().getStorage();
        storage.getVolumes().remove(vol1);
    }, namespaceName);

    // Wait until it rolls
    RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, kafkaSelector, 3, kafkaPods);

    // ##############################
    // Validate that the continuous clients finished successfully
    // ##############################
    ClientUtils.waitTillContinuousClientsFinish(producerName, consumerName, namespaceName, continuousClientsMessageCount);
    // ##############################
}
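The additional producer configuration in this test (bounded timeouts, a transactional.id, and idempotence) is what lets the continuous producer ride out broker restarts during the roll. Expressed as plain Kafka producer properties, it corresponds roughly to the following sketch; the values mirror the test, the surrounding Properties usage is illustrative:

import java.util.Properties;

// Sketch of the settings the test passes via withAdditionalConfig(...).
Properties props = new Properties();
props.put("delivery.timeout.ms", "20000");            // fail a record after 20 s instead of hanging
props.put("request.timeout.ms", "20000");
props.put("transactional.id", "continuous-topic.1");  // makes the producer transactional
props.put("enable.idempotence", "true");              // no duplicates on retry across broker restarts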
Use of io.strimzi.api.kafka.model.storage.JbodStorageBuilder in project strimzi-kafka-operator by strimzi.
The class JbodStorageTest, method testReconcileWithVolumeRemovedFromJbodStorage:
@Test
public void testReconcileWithVolumeRemovedFromJbodStorage(VertxTestContext context) {
    Checkpoint async = context.checkpoint();

    // Remove a volume from the JBOD storage
    volumes.remove(0);

    Kafka kafkaWithRemovedJbodVolume = new KafkaBuilder(this.kafka)
            .editSpec()
                .editKafka()
                    .withStorage(new JbodStorageBuilder().withVolumes(volumes).build())
                .endKafka()
            .endSpec()
            .build();

    Set<String> expectedPvcs = expectedPvcs(kafka);
    Set<String> expectedPvcsWithRemovedJbodStorageVolume = expectedPvcs(kafkaWithRemovedJbodVolume);

    // Reconcile for the Kafka cluster creation
    operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, NAME))
            .onComplete(context.succeeding(v -> context.verify(() -> {
                List<PersistentVolumeClaim> pvcs = getPvcs(NAMESPACE, NAME);
                Set<String> pvcsNames = pvcs.stream().map(pvc -> pvc.getMetadata().getName()).collect(Collectors.toSet());
                assertThat(pvcsNames, is(expectedPvcs));
            })))
            .compose(v -> {
                Crds.kafkaOperation(mockClient).inNamespace(NAMESPACE).withName(NAME).patch(kafkaWithRemovedJbodVolume);
                // Reconcile the Kafka cluster with a JBOD storage volume removed
                return operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, NAME));
            })
            .onComplete(context.succeeding(v -> context.verify(() -> {
                List<PersistentVolumeClaim> pvcs = getPvcs(NAMESPACE, NAME);
                Set<String> pvcsNames = pvcs.stream().map(pvc -> pvc.getMetadata().getName()).collect(Collectors.toSet());
                assertThat(pvcsNames, is(expectedPvcsWithRemovedJbodStorageVolume));
                async.flag();
            })));
}
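The expectedPvcs helper is not shown in this excerpt. For JBOD storage, Strimzi names the claims after the volume id, cluster, and pod index, so a hedged sketch of computing the expected set could look like the one below; the helper name and the assumption that the convention is data-<volumeId>-<cluster>-kafka-<podIndex> are mine, not taken from the excerpt:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Hypothetical sketch: expected PVC names for a JBOD-backed Kafka cluster,
// assuming the naming convention data-<volumeId>-<cluster>-kafka-<podIndex>.
static Set<String> expectedJbodPvcs(String cluster, int replicas, List<Integer> volumeIds) {
    Set<String> names = new HashSet<>();
    for (int pod = 0; pod < replicas; pod++) {
        for (int id : volumeIds) {
            names.add("data-" + id + "-" + cluster + "-kafka-" + pod);
        }
    }
    return names;
}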