Usage of io.strimzi.api.kafka.model.storage.JbodStorageBuilder in the strimzi/strimzi project — class StorageDiffTest, method testCrossDiff:
@ParallelTest
public void testCrossDiff() {
    // Fixture storages: a two-volume JBOD, a plain ephemeral volume, and a single persistent claim.
    Storage jbodStorage = new JbodStorageBuilder()
            .withVolumes(
                    new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("100Gi").build(),
                    new PersistentClaimStorageBuilder().withStorageClass("gp2-st1").withDeleteClaim(true).withId(1).withSize("1000Gi").build())
            .build();
    Storage ephemeralStorage = new EphemeralStorageBuilder().build();
    Storage persistentStorage = new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("100Gi").build();

    // Diff every pair of differing storage types, with 3 replicas on both sides.
    StorageDiff jbodVsEphemeral = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbodStorage, ephemeralStorage, 3, 3);
    StorageDiff persistentVsEphemeral = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistentStorage, ephemeralStorage, 3, 3);
    StorageDiff jbodVsPersistent = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbodStorage, persistentStorage, 3, 3);

    // Any cross-type change must be flagged as a type change and a non-empty diff,
    // but must not count as adding or removing JBOD volumes.
    for (StorageDiff diff : new StorageDiff[]{jbodVsEphemeral, persistentVsEphemeral, jbodVsPersistent}) {
        assertThat(diff.changesType(), is(true));
        assertThat(diff.isEmpty(), is(false));
        assertThat(diff.isVolumesAddedOrRemoved(), is(false));
    }
}
Usage of io.strimzi.api.kafka.model.storage.JbodStorageBuilder in the strimzi/strimzi project — class KafkaSpecCheckerTest, method checkKafkaJbodStorage:
@Test
public void checkKafkaJbodStorage() {
    // Single-replica Kafka backed by a two-volume ephemeral JBOD; ZooKeeper keeps 3 replicas.
    Storage jbodStorage = new JbodStorageBuilder()
            .withVolumes(
                    new EphemeralStorageBuilder().withId(1).build(),
                    new EphemeralStorageBuilder().withId(2).build())
            .build();
    Kafka kafka = new KafkaBuilder(ResourceUtils.createKafka(NAMESPACE, NAME, 1, IMAGE, HEALTH_DELAY, HEALTH_TIMEOUT,
            null, emptyMap(), emptyMap(), jbodStorage, new EphemeralStorage(), null, null, null, null))
            .editSpec()
                .editZookeeper()
                    .withReplicas(3)
                .endZookeeper()
            .endSpec()
            .build();

    KafkaSpecChecker checker = generateChecker(kafka);
    List<Condition> warnings = checker.run();

    // Exactly one warning is expected: single replica + ephemeral storage is lossy.
    assertThat(warnings, hasSize(1));
    Condition storageWarning = warnings.get(0);
    assertThat(storageWarning.getReason(), is("KafkaStorage"));
    assertThat(storageWarning.getStatus(), is("True"));
    assertThat(storageWarning.getMessage(), is("A Kafka cluster with a single replica and ephemeral storage will lose topic messages after any restart or rolling update."));
}
Usage of io.strimzi.api.kafka.model.storage.JbodStorageBuilder in the strimzi/strimzi project — class KafkaST, method testKafkaJBODDeleteClaimsTrue:
@ParallelNamespaceTest
void testKafkaJBODDeleteClaimsTrue(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final int replicaCount = 2;
    final String diskSizeGi = "10";

    // Two JBOD volumes, both with deleteClaim=true, so their PVCs must be removed with the cluster.
    JbodStorage jbodStorage = new JbodStorageBuilder()
            .withVolumes(
                    new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(0).withSize(diskSizeGi + "Gi").build(),
                    new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize(diskSizeGi + "Gi").build())
            .build();

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaJBOD(clusterName, replicaCount, jbodStorage).build());
    // kafka cluster already deployed
    verifyVolumeNamesAndLabels(namespaceName, clusterName, replicaCount, 2, diskSizeGi);

    final int pvcCount = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).size();

    LOGGER.info("Deleting cluster");
    cmdKubeClient(namespaceName).deleteByName("kafka", clusterName);

    LOGGER.info("Waiting for PVC deletion");
    PersistentVolumeClaimUtils.waitForPVCDeletion(namespaceName, pvcCount, jbodStorage, clusterName);
}
Usage of io.strimzi.api.kafka.model.storage.JbodStorageBuilder in the strimzi/strimzi project — class AbstractModelTest, method testCreatePersistentVolumeClaims:
@ParallelTest
public void testCreatePersistentVolumeClaims() {
    // Two-broker cluster with a plain internal listener; storage is swapped per scenario below.
    Kafka kafka = new KafkaBuilder()
            .withNewMetadata()
                .withName("my-cluster")
                .withNamespace("my-namespace")
            .endMetadata()
            .withNewSpec()
                .withNewKafka()
                    .withListeners(new GenericKafkaListenerBuilder().withName("plain").withPort(9092).withTls(false).withType(KafkaListenerType.INTERNAL).build())
                    .withReplicas(2)
                    .withNewEphemeralStorage().endEphemeralStorage()
                .endKafka()
            .endSpec()
            .build();
    KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, KafkaVersionTestUtils.getKafkaVersionLookup());

    // JBOD Storage: one PVC per (volume, broker) combination.
    Storage storage = new JbodStorageBuilder()
            .withVolumes(
                    new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize("20Gi").build(),
                    new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize("10Gi").build())
            .build();
    List<PersistentVolumeClaim> pvcs = kafkaCluster.generatePersistentVolumeClaims(storage);
    List<String> expectedNames = List.of(
            "data-0-my-cluster-kafka-0",
            "data-0-my-cluster-kafka-1",
            "data-1-my-cluster-kafka-0",
            "data-1-my-cluster-kafka-1");
    assertThat(pvcs.size(), is(4));
    for (int i = 0; i < expectedNames.size(); i++) {
        assertThat(pvcs.get(i).getMetadata().getName(), is(expectedNames.get(i)));
    }

    // JBOD mixing persistent and ephemeral volumes: only the persistent volume yields PVCs.
    storage = new JbodStorageBuilder()
            .withVolumes(
                    new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize("20Gi").build(),
                    new EphemeralStorageBuilder().withId(1).build())
            .build();
    pvcs = kafkaCluster.generatePersistentVolumeClaims(storage);
    assertThat(pvcs.size(), is(2));
    assertThat(pvcs.get(0).getMetadata().getName(), is("data-0-my-cluster-kafka-0"));
    assertThat(pvcs.get(1).getMetadata().getName(), is("data-0-my-cluster-kafka-1"));

    // Plain persistent-claim storage: one PVC per broker, no volume index in the name.
    storage = new PersistentClaimStorageBuilder().withDeleteClaim(false).withSize("20Gi").build();
    pvcs = kafkaCluster.generatePersistentVolumeClaims(storage);
    assertThat(pvcs.size(), is(2));
    assertThat(pvcs.get(0).getMetadata().getName(), is("data-my-cluster-kafka-0"));
    assertThat(pvcs.get(1).getMetadata().getName(), is("data-my-cluster-kafka-1"));

    // Persistent-claim storage with an explicit id: the id does not change PVC names.
    storage = new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize("20Gi").build();
    pvcs = kafkaCluster.generatePersistentVolumeClaims(storage);
    assertThat(pvcs.size(), is(2));
    assertThat(pvcs.get(0).getMetadata().getName(), is("data-my-cluster-kafka-0"));
    assertThat(pvcs.get(1).getMetadata().getName(), is("data-my-cluster-kafka-1"));

    // Ephemeral storage: no PVCs at all.
    storage = new EphemeralStorageBuilder().build();
    pvcs = kafkaCluster.generatePersistentVolumeClaims(storage);
    assertThat(pvcs.size(), is(0));

    // A JBOD volume without an id is invalid and must be rejected.
    final Storage finalStorage = new JbodStorageBuilder()
            .withVolumes(new PersistentClaimStorageBuilder().withDeleteClaim(false).withSize("20Gi").build())
            .build();
    InvalidResourceException ex = Assertions.assertThrows(InvalidResourceException.class,
            () -> kafkaCluster.generatePersistentVolumeClaims(finalStorage));
    assertThat(ex.getMessage(), is("The 'id' property is required for volumes in JBOD storage."));
}
Usage of io.strimzi.api.kafka.model.storage.JbodStorageBuilder in the strimzi/strimzi project — class KafkaClusterTest, method testStorageReverting:
@ParallelTest
public void testStorageReverting() {
    // Fixture storages: two-volume JBOD, plain ephemeral, and a single persistent claim.
    Storage jbod = new JbodStorageBuilder()
            .withVolumes(
                    new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("100Gi").build(),
                    new PersistentClaimStorageBuilder().withStorageClass("gp2-st1").withDeleteClaim(true).withId(1).withSize("1000Gi").build())
            .build();
    Storage ephemeral = new EphemeralStorageBuilder().build();
    Storage persistent = new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("100Gi").build();

    // When the desired storage type differs from the existing one, the operator must
    // keep the old storage and surface a warning instead of applying the change.
    assertStorageIsReverted(jbod, ephemeral);
    assertStorageIsReverted(jbod, persistent);
    assertStorageIsReverted(ephemeral, jbod);
    assertStorageIsReverted(persistent, jbod);
}

/**
 * Builds a Kafka CR requesting {@code desired} storage while {@code oldStorage} is the
 * currently used storage, and asserts that the model keeps the old storage and raises
 * exactly one "KafkaStorage" warning condition.
 */
private void assertStorageIsReverted(Storage desired, Storage oldStorage) {
    Kafka kafkaAssembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, jmxMetricsConfig, configuration, emptyMap()))
            .editSpec()
                .editKafka()
                    .withStorage(desired)
                .endKafka()
            .endSpec()
            .build();
    KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, oldStorage, replicas);

    // Storage is reverted to the previously used configuration
    assertThat(kc.getStorage(), is(oldStorage));
    // Warning status condition is set
    assertThat(kc.getWarningConditions().size(), is(1));
    assertThat(kc.getWarningConditions().get(0).getReason(), is("KafkaStorage"));
}
Aggregations