
Example 26 with JbodStorageBuilder

use of io.strimzi.api.kafka.model.storage.JbodStorageBuilder in project strimzi by strimzi.

the class ModelUtilsTest method testStorageSerializationAndDeserialization.

@ParallelTest
public void testStorageSerializationAndDeserialization() {
    Storage jbod = new JbodStorageBuilder().withVolumes(
            new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("100Gi").build(),
            new PersistentClaimStorageBuilder().withStorageClass("gp2-st1").withDeleteClaim(true).withId(1).withSize("1000Gi").build())
            .build();
    Storage ephemeral = new EphemeralStorageBuilder().build();
    Storage persistent = new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("100Gi").build();
    assertThat(ModelUtils.decodeStorageFromJson(ModelUtils.encodeStorageToJson(jbod)), is(jbod));
    assertThat(ModelUtils.decodeStorageFromJson(ModelUtils.encodeStorageToJson(ephemeral)), is(ephemeral));
    assertThat(ModelUtils.decodeStorageFromJson(ModelUtils.encodeStorageToJson(persistent)), is(persistent));
}
Also used : JbodStorageBuilder(io.strimzi.api.kafka.model.storage.JbodStorageBuilder) Storage(io.strimzi.api.kafka.model.storage.Storage) PersistentClaimStorageBuilder(io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder) EphemeralStorageBuilder(io.strimzi.api.kafka.model.storage.EphemeralStorageBuilder) ParallelTest(io.strimzi.test.annotations.ParallelTest)
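
The round trip above only works because the Storage subtypes carry a JSON type discriminator that lets the deserializer pick the right concrete class. Below is a minimal stand-alone sketch of the same idea using plain Jackson; the class name is made up for illustration, and ModelUtils' actual encode/decode implementation may differ.

// Sketch only: assumes the Strimzi storage classes are on the classpath and that
// Jackson can resolve their "type" discriminator (ModelUtils may use its own mapper).
import com.fasterxml.jackson.databind.ObjectMapper;
import io.strimzi.api.kafka.model.storage.JbodStorageBuilder;
import io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder;
import io.strimzi.api.kafka.model.storage.Storage;

public class StorageRoundTripSketch {
    public static void main(String[] args) throws Exception {
        Storage jbod = new JbodStorageBuilder().withVolumes(
                new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withId(0).withSize("100Gi").build())
                .build();
        ObjectMapper mapper = new ObjectMapper();
        String json = mapper.writeValueAsString(jbod);            // e.g. {"type":"jbod","volumes":[...]}
        Storage decoded = mapper.readValue(json, Storage.class);  // resolved via the type discriminator
        System.out.println(decoded.equals(jbod));                 // expected: true, as the test asserts
    }
}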

Example 27 with JbodStorageBuilder

use of io.strimzi.api.kafka.model.storage.JbodStorageBuilder in project strimzi by strimzi.

the class AlternativeReconcileTriggersST method testAddingAndRemovingJbodVolumes.

/**
 * Adding and removing JBOD volumes requires rolling updates in sequential order; otherwise the StatefulSet
 * does not accept the change. This test adds and then removes a volume from the JBOD storage to cover both situations.
 */
@ParallelNamespaceTest
void testAddingAndRemovingJbodVolumes(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    final String continuousTopicName = "continuous-topic";
    // With a 1000 ms delay between messages, 500 messages take about 500 seconds
    final int continuousClientsMessageCount = 500;
    final String producerName = "hello-world-producer";
    final String consumerName = "hello-world-consumer";
    PersistentClaimStorage vol0 = new PersistentClaimStorageBuilder().withId(0).withSize("1Gi").withDeleteClaim(true).build();
    PersistentClaimStorage vol1 = new PersistentClaimStorageBuilder().withId(1).withSize("1Gi").withDeleteClaim(true).build();
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaJBOD(clusterName, 3, 3, new JbodStorageBuilder().addToVolumes(vol0).build()).build());
    final String kafkaName = KafkaResources.kafkaStatefulSetName(clusterName);
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, kafkaName);
    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespaceName, kafkaSelector);
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
    // ##############################
    // Attach clients which will continuously produce/consume messages to/from Kafka brokers during rolling update
    // ##############################
    // Set up a topic with 3 replicas and min.insync.replicas=2 to check that the producer keeps working during the rolling update
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, continuousTopicName, 3, 3, 2).build());
    String producerAdditionConfiguration = "delivery.timeout.ms=20000\nrequest.timeout.ms=20000";
    // Add transactional id to make producer transactional
    producerAdditionConfiguration = producerAdditionConfiguration.concat("\ntransactional.id=" + continuousTopicName + ".1");
    producerAdditionConfiguration = producerAdditionConfiguration.concat("\nenable.idempotence=true");
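    // Kafka requires idempotence to be enabled for transactional producers, hence the extra property above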
    KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
        .withProducerName(producerName)
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
        .withTopicName(continuousTopicName)
        .withMessageCount(continuousClientsMessageCount)
        .withAdditionalConfig(producerAdditionConfiguration)
        .withDelayMs(1000)
        .withNamespaceName(namespaceName)
        .build();
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.consumerStrimzi());
    // ##############################
    String userName = KafkaUserUtils.generateRandomNameOfKafkaUser();
    KafkaUser user = KafkaUserTemplates.tlsUser(clusterName, userName).build();
    resourceManager.createResource(extensionContext, user);
    resourceManager.createResource(extensionContext, false, KafkaClientsTemplates.kafkaClients(true, kafkaClientsName, user).build());
    final String kafkaClientsPodName = PodUtils.getPodsByPrefixInNameWithDynamicWait(namespaceName, kafkaClientsName).get(0).getMetadata().getName();
    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(kafkaClientsPodName)
        .withTopicName(topicName)
        .withNamespaceName(namespaceName)
        .withClusterName(clusterName)
        .withMessageCount(MESSAGE_COUNT)
        .withKafkaUsername(userName)
        .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
        .build();
    internalKafkaClient.produceTlsMessagesUntilOperationIsSuccessful(MESSAGE_COUNT);
    // Add a JBOD volume to the Kafka cluster => triggers a rolling update
    LOGGER.info("Add JBOD volume to the Kafka cluster {}", kafkaName);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
        JbodStorage storage = (JbodStorage) kafka.getSpec().getKafka().getStorage();
        storage.getVolumes().add(vol1);
    }, namespaceName);
    // Wait until it rolls
    kafkaPods = RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, kafkaSelector, 3, kafkaPods);
    // Remove the JBOD volume from the Kafka cluster => triggers a rolling update
    LOGGER.info("Remove JBOD volume from the Kafka cluster {}", kafkaName);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
        JbodStorage storage = (JbodStorage) kafka.getSpec().getKafka().getStorage();
        storage.getVolumes().remove(vol1);
    }, namespaceName);
    // Wait until it rolls
    RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, kafkaSelector, 3, kafkaPods);
    // ##############################
    // Validate that continuous clients finished successfully
    // ##############################
    ClientUtils.waitTillContinuousClientsFinish(producerName, consumerName, namespaceName, continuousClientsMessageCount);
// ##############################
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) PodBuilder(io.fabric8.kubernetes.api.model.PodBuilder) JbodStorageBuilder(io.strimzi.api.kafka.model.storage.JbodStorageBuilder) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) PersistentClaimStorageBuilder(io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) JbodStorage(io.strimzi.api.kafka.model.storage.JbodStorage) PersistentClaimStorage(io.strimzi.api.kafka.model.storage.PersistentClaimStorage) InternalKafkaClient(io.strimzi.systemtest.kafkaclients.clients.InternalKafkaClient) KafkaUser(io.strimzi.api.kafka.model.KafkaUser) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest)
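
Stripped of the test harness, the storage change that drives both rolling updates is just a mutation of the JBOD volume list that is later applied to .spec.kafka.storage of the Kafka custom resource. A minimal sketch of that edit alone, reusing the ids and sizes from the test (the class name is illustrative only):

// Sketch only: mirrors the volume add/remove performed by the test, without the
// resource manager, clients, or rolling-update waits.
import io.strimzi.api.kafka.model.storage.JbodStorage;
import io.strimzi.api.kafka.model.storage.JbodStorageBuilder;
import io.strimzi.api.kafka.model.storage.PersistentClaimStorage;
import io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder;

public class JbodVolumeEditSketch {
    public static void main(String[] args) {
        PersistentClaimStorage vol0 = new PersistentClaimStorageBuilder().withId(0).withSize("1Gi").withDeleteClaim(true).build();
        PersistentClaimStorage vol1 = new PersistentClaimStorageBuilder().withId(1).withSize("1Gi").withDeleteClaim(true).build();

        // Start with a single-volume JBOD, as the cluster is deployed in the test
        JbodStorage storage = new JbodStorageBuilder().addToVolumes(vol0).build();

        // Adding a volume; applied to the Kafka CR this triggers the first rolling update
        storage.getVolumes().add(vol1);
        System.out.println(storage.getVolumes().size()); // 2

        // Removing it again triggers the second rolling update
        storage.getVolumes().remove(vol1);
        System.out.println(storage.getVolumes().size()); // 1
    }
}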

Example 28 with JbodStorageBuilder

use of io.strimzi.api.kafka.model.storage.JbodStorageBuilder in project strimzi by strimzi.

the class StorageDiffTest method testJbodDiffWithNewVolume.

@ParallelTest
public void testJbodDiffWithNewVolume() {
    Storage jbod = new JbodStorageBuilder().withVolumes(
            new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("1000Gi").build())
            .build();
    Storage jbod2 = new JbodStorageBuilder().withVolumes(
            new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("1000Gi").build(),
            new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(true).withId(1).withSize("1000Gi").build())
            .build();
    Storage jbod3 = new JbodStorageBuilder().withVolumes(
            new PersistentClaimStorageBuilder().withStorageClass("gp2-st1").withDeleteClaim(false).withId(0).withSize("100Gi").build(),
            new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(true).withId(1).withSize("1000Gi").build())
            .build();
    Storage jbod4 = new JbodStorageBuilder().withVolumes(
            new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(2).withSize("1000Gi").build())
            .build();
    Storage jbod5 = new JbodStorageBuilder().withVolumes(
            new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(true).withId(1).withSize("1000Gi").build())
            .build();
    Storage jbod6 = new JbodStorageBuilder().withVolumes(
            new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("1000Gi").build(),
            new PersistentClaimStorageBuilder().withStorageClass("gp2-st1").withDeleteClaim(true).withId(2).withSize("5000Gi").build())
            .build();
    // Volume added
    StorageDiff diff = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod, jbod2, 3, 3);
    assertThat(diff.changesType(), is(false));
    assertThat(diff.isEmpty(), is(true));
    assertThat(diff.shrinkSize(), is(false));
    assertThat(diff.isVolumesAddedOrRemoved(), is(true));
    // Volume removed
    diff = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod2, jbod, 3, 3);
    assertThat(diff.changesType(), is(false));
    assertThat(diff.isEmpty(), is(true));
    assertThat(diff.shrinkSize(), is(false));
    assertThat(diff.isVolumesAddedOrRemoved(), is(true));
    // Volume added with changes
    diff = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod, jbod3, 3, 3);
    assertThat(diff.changesType(), is(false));
    assertThat(diff.isEmpty(), is(false));
    assertThat(diff.shrinkSize(), is(true));
    assertThat(diff.isVolumesAddedOrRemoved(), is(true));
    // No volume added, but with changes
    diff = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod2, jbod3, 3, 3);
    assertThat(diff.changesType(), is(false));
    assertThat(diff.isEmpty(), is(false));
    assertThat(diff.shrinkSize(), is(true));
    assertThat(diff.isVolumesAddedOrRemoved(), is(false));
    // Volume removed from the beginning
    diff = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod3, jbod5, 3, 3);
    assertThat(diff.changesType(), is(false));
    assertThat(diff.isEmpty(), is(true));
    assertThat(diff.shrinkSize(), is(false));
    assertThat(diff.isVolumesAddedOrRemoved(), is(true));
    // Volume added to the beginning
    diff = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod5, jbod3, 3, 3);
    assertThat(diff.changesType(), is(false));
    assertThat(diff.isEmpty(), is(true));
    assertThat(diff.shrinkSize(), is(false));
    assertThat(diff.isVolumesAddedOrRemoved(), is(true));
    // Volume replaced with another ID while the remaining volume is also changed
    diff = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod3, jbod6, 3, 3);
    assertThat(diff.changesType(), is(false));
    assertThat(diff.isEmpty(), is(false));
    assertThat(diff.shrinkSize(), is(false));
    assertThat(diff.isVolumesAddedOrRemoved(), is(true));
    // Volume replaced with another ID in a single-volume broker
    diff = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod, jbod4, 3, 3);
    assertThat(diff.changesType(), is(false));
    assertThat(diff.isEmpty(), is(true));
    assertThat(diff.shrinkSize(), is(false));
    assertThat(diff.isVolumesAddedOrRemoved(), is(true));
    // Volume replaced with another ID without changing the volumes that are kept
    diff = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod2, jbod6, 3, 3);
    assertThat(diff.changesType(), is(false));
    assertThat(diff.isEmpty(), is(true));
    assertThat(diff.shrinkSize(), is(false));
    assertThat(diff.isVolumesAddedOrRemoved(), is(true));
}
Also used : JbodStorageBuilder(io.strimzi.api.kafka.model.storage.JbodStorageBuilder) Storage(io.strimzi.api.kafka.model.storage.Storage) PersistentClaimStorageBuilder(io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder) ParallelTest(io.strimzi.test.annotations.ParallelTest)
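
The flags asserted above are the signals an operator can use to accept or reject a storage change. The helper below is hypothetical and written only to spell out that interpretation; it is not Strimzi's actual reconciliation code, and the package locations of StorageDiff and Reconciliation are assumed from the test's context.

// Hypothetical validation helper interpreting the StorageDiff flags exercised above.
import io.strimzi.api.kafka.model.storage.JbodStorageBuilder;
import io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder;
import io.strimzi.api.kafka.model.storage.Storage;
import io.strimzi.operator.cluster.model.StorageDiff;
import io.strimzi.operator.common.Reconciliation;

public class StorageChangeCheckSketch {

    static void validate(StorageDiff diff) {
        if (diff.changesType()) {
            throw new IllegalArgumentException("Changing the storage type is not supported");
        }
        if (diff.shrinkSize()) {
            throw new IllegalArgumentException("Shrinking a persistent volume is not supported");
        }
        if (!diff.isEmpty()) {
            throw new IllegalArgumentException("Unsupported change to an existing volume");
        }
        // A diff that only adds or removes JBOD volumes stays "empty" (allowed), but
        // isVolumesAddedOrRemoved() still signals that a rolling update is needed.
    }

    public static void main(String[] args) {
        Storage current = new JbodStorageBuilder().withVolumes(
                new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withId(0).withSize("1000Gi").build()).build();
        Storage desired = new JbodStorageBuilder().withVolumes(
                new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withId(0).withSize("1000Gi").build(),
                new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withId(1).withSize("1000Gi").build()).build();
        // Pure volume addition: passes validation, although the brokers will still roll
        validate(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, current, desired, 3, 3));
    }
}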

Example 29 with JbodStorageBuilder

use of io.strimzi.api.kafka.model.storage.JbodStorageBuilder in project strimzi by strimzi.

the class StorageDiffTest method testJbodDiff.

@ParallelTest
public void testJbodDiff() {
    Storage jbod = new JbodStorageBuilder().withVolumes(
            new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("100Gi").build(),
            new PersistentClaimStorageBuilder().withStorageClass("gp2-st1").withDeleteClaim(true).withId(1).withSize("1000Gi").build())
            .build();
    Storage jbod2 = new JbodStorageBuilder().withVolumes(
            new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("1000Gi").build(),
            new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(true).withId(1).withSize("1000Gi").build())
            .build();
    StorageDiff diff = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod, jbod, 3, 3);
    assertThat(diff.changesType(), is(false));
    assertThat(diff.isEmpty(), is(true));
    assertThat(diff.shrinkSize(), is(false));
    assertThat(diff.isVolumesAddedOrRemoved(), is(false));
    diff = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod, jbod2, 3, 3);
    assertThat(diff.changesType(), is(false));
    assertThat(diff.isEmpty(), is(false));
    assertThat(diff.shrinkSize(), is(false));
    assertThat(diff.isVolumesAddedOrRemoved(), is(false));
}
Also used : JbodStorageBuilder(io.strimzi.api.kafka.model.storage.JbodStorageBuilder) Storage(io.strimzi.api.kafka.model.storage.Storage) PersistentClaimStorageBuilder(io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder) ParallelTest(io.strimzi.test.annotations.ParallelTest)

Example 30 with JbodStorageBuilder

use of io.strimzi.api.kafka.model.storage.JbodStorageBuilder in project strimzi by strimzi.

the class KafkaST method testLabelsAndAnnotationForPVC.

@ParallelNamespaceTest
void testLabelsAndAnnotationForPVC(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String labelAnnotationKey = "testKey";
    final String firstValue = "testValue";
    final String changedValue = "editedTestValue";
    Map<String, String> pvcLabel = new HashMap<>();
    pvcLabel.put(labelAnnotationKey, firstValue);
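    // Intentionally the same Map instance is reused for annotations below, so labels and annotations always share the same key and value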
    Map<String, String> pvcAnnotation = pvcLabel;
    Map<String, String> statefulSetLabels = new HashMap<>();
    statefulSetLabels.put("app.kubernetes.io/part-of", "some-app");
    statefulSetLabels.put("app.kubernetes.io/managed-by", "some-app");
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 1)
        .editSpec()
            .editKafka()
                .withNewTemplate()
                    .withNewStatefulset()
                        .withNewMetadata()
                            .withLabels(statefulSetLabels)
                        .endMetadata()
                    .endStatefulset()
                    .withNewPodSet()
                        .withNewMetadata()
                            .withLabels(statefulSetLabels)
                        .endMetadata()
                    .endPodSet()
                    .withNewPersistentVolumeClaim()
                        .withNewMetadata()
                            .addToLabels(pvcLabel)
                            .addToAnnotations(pvcAnnotation)
                        .endMetadata()
                    .endPersistentVolumeClaim()
                .endTemplate()
                .withStorage(new JbodStorageBuilder().withVolumes(
                    new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize("20Gi").build(),
                    new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize("10Gi").build()).build())
            .endKafka()
            .editZookeeper()
                .withNewTemplate()
                    .withNewPersistentVolumeClaim()
                        .withNewMetadata()
                            .addToLabels(pvcLabel)
                            .addToAnnotations(pvcAnnotation)
                        .endMetadata()
                    .endPersistentVolumeClaim()
                .endTemplate()
                .withNewPersistentClaimStorage()
                    .withDeleteClaim(false)
                    .withSize("3Gi")
                .endPersistentClaimStorage()
            .endZookeeper()
        .endSpec()
        .build());
    LOGGER.info("Check if Kubernetes labels are applied");
    Map<String, String> actualStatefulSetLabels = StUtils.getLabelsOfStatefulSetOrStrimziPodSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName));
    assertThat(actualStatefulSetLabels.get("app.kubernetes.io/part-of"), is("some-app"));
    assertThat(actualStatefulSetLabels.get("app.kubernetes.io/managed-by"), is("some-app"));
    LOGGER.info("Kubernetes labels are correctly set and present");
    List<PersistentVolumeClaim> pvcs = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream().filter(persistentVolumeClaim -> persistentVolumeClaim.getMetadata().getName().contains(clusterName)).collect(Collectors.toList());
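    // 3 Kafka brokers x 2 JBOD volumes + 1 ZooKeeper node = 7 PVCs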
    assertThat(pvcs.size(), is(7));
    for (PersistentVolumeClaim pvc : pvcs) {
        LOGGER.info("Verifying that PVC label {} - {} = {}", pvc.getMetadata().getName(), firstValue, pvc.getMetadata().getLabels().get(labelAnnotationKey));
        assertThat(firstValue, is(pvc.getMetadata().getLabels().get(labelAnnotationKey)));
        assertThat(firstValue, is(pvc.getMetadata().getAnnotations().get(labelAnnotationKey)));
    }
    pvcLabel.put(labelAnnotationKey, changedValue);
    pvcAnnotation.put(labelAnnotationKey, changedValue);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
        LOGGER.info("Replacing kafka && zookeeper labels and annotations from {} to {}", labelAnnotationKey, changedValue);
        kafka.getSpec().getKafka().getTemplate().getPersistentVolumeClaim().getMetadata().setLabels(pvcLabel);
        kafka.getSpec().getKafka().getTemplate().getPersistentVolumeClaim().getMetadata().setAnnotations(pvcAnnotation);
        kafka.getSpec().getZookeeper().getTemplate().getPersistentVolumeClaim().getMetadata().setLabels(pvcLabel);
        kafka.getSpec().getZookeeper().getTemplate().getPersistentVolumeClaim().getMetadata().setAnnotations(pvcAnnotation);
    }, namespaceName);
    PersistentVolumeClaimUtils.waitUntilPVCLabelsChange(namespaceName, clusterName, pvcLabel, labelAnnotationKey);
    PersistentVolumeClaimUtils.waitUntilPVCAnnotationChange(namespaceName, clusterName, pvcAnnotation, labelAnnotationKey);
    KafkaUtils.waitForKafkaReady(namespaceName, clusterName);
    pvcs = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream().filter(persistentVolumeClaim -> persistentVolumeClaim.getMetadata().getName().contains(clusterName)).collect(Collectors.toList());
    LOGGER.info(pvcs.toString());
    assertThat(pvcs.size(), is(7));
    for (PersistentVolumeClaim pvc : pvcs) {
        LOGGER.info("Verifying replaced PVC label {} - {} = {}", pvc.getMetadata().getName(), firstValue, pvc.getMetadata().getLabels().get(labelAnnotationKey));
        assertThat(pvc.getMetadata().getLabels().get(labelAnnotationKey), is(changedValue));
        assertThat(pvc.getMetadata().getAnnotations().get(labelAnnotationKey), is(changedValue));
    }
}
Also used : JbodStorageBuilder(io.strimzi.api.kafka.model.storage.JbodStorageBuilder) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) PersistentClaimStorageBuilder(io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder) PersistentVolumeClaim(io.fabric8.kubernetes.api.model.PersistentVolumeClaim) Matchers.containsString(org.hamcrest.Matchers.containsString) Matchers.emptyOrNullString(org.hamcrest.Matchers.emptyOrNullString) TestUtils.fromYamlString(io.strimzi.test.TestUtils.fromYamlString) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest)

Aggregations

JbodStorageBuilder (io.strimzi.api.kafka.model.storage.JbodStorageBuilder)54 PersistentClaimStorageBuilder (io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder)50 ParallelTest (io.strimzi.test.annotations.ParallelTest)32 Kafka (io.strimzi.api.kafka.model.Kafka)30 KafkaBuilder (io.strimzi.api.kafka.model.KafkaBuilder)26 PersistentVolumeClaim (io.fabric8.kubernetes.api.model.PersistentVolumeClaim)20 JbodStorage (io.strimzi.api.kafka.model.storage.JbodStorage)20 Storage (io.strimzi.api.kafka.model.storage.Storage)20 GenericKafkaListenerBuilder (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder)16 EphemeralStorageBuilder (io.strimzi.api.kafka.model.storage.EphemeralStorageBuilder)14 KafkaListenerType (io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType)12 ArrayList (java.util.ArrayList)12 List (java.util.List)12 CoreMatchers.is (org.hamcrest.CoreMatchers.is)12 MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat)12 Quantity (io.fabric8.kubernetes.api.model.Quantity)10 TopologySpreadConstraint (io.fabric8.kubernetes.api.model.TopologySpreadConstraint)10 Labels (io.strimzi.operator.common.model.Labels)10 ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest)10 Collectors (java.util.stream.Collectors)10