use of io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder in project strimzi by strimzi.
the class JbodStorageTest method testReconcileWithNewVolumeAddedToJbodStorage.
@Test
public void testReconcileWithNewVolumeAddedToJbodStorage(VertxTestContext context) {
    Checkpoint async = context.checkpoint();
    // Add a new volume to the JBOD storage
    volumes.add(new PersistentClaimStorageBuilder()
            .withId(2)
            .withDeleteClaim(false)
            .withSize("100Gi")
            .build());
    Kafka kafkaWithNewJbodVolume = new KafkaBuilder(kafka)
            .editSpec()
                .editKafka()
                    .withStorage(new JbodStorageBuilder().withVolumes(volumes).build())
                .endKafka()
            .endSpec()
            .build();
    Set<String> expectedPvcs = expectedPvcs(kafka);
    Set<String> expectedPvcsWithNewJbodStorageVolume = expectedPvcs(kafkaWithNewJbodVolume);
    // Reconcile for the Kafka cluster creation
    operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, NAME))
        .onComplete(context.succeeding(v -> context.verify(() -> {
            List<PersistentVolumeClaim> pvcs = getPvcs(NAMESPACE, NAME);
            Set<String> pvcsNames = pvcs.stream()
                    .map(pvc -> pvc.getMetadata().getName())
                    .collect(Collectors.toSet());
            assertThat(pvcsNames, is(expectedPvcs));
        })))
        .compose(v -> {
            Crds.kafkaOperation(mockClient).inNamespace(NAMESPACE).withName(NAME).patch(kafkaWithNewJbodVolume);
            // Reconcile the Kafka cluster with the new JBOD storage
            return operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, NAME));
        })
        .onComplete(context.succeeding(v -> context.verify(() -> {
            List<PersistentVolumeClaim> pvcs = getPvcs(NAMESPACE, NAME);
            Set<String> pvcsNames = pvcs.stream()
                    .map(pvc -> pvc.getMetadata().getName())
                    .collect(Collectors.toSet());
            assertThat(pvcsNames, is(expectedPvcsWithNewJbodStorageVolume));
            async.flag();
        })));
}
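For context, the storage object this test patches in is assembled as follows. This is a minimal sketch using only the builder calls that appear in the test above; the local variable names are illustrative.

// A persistent-claim volume wrapped in a JBOD storage object. The id must be
// unique within the JBOD volume list; deleteClaim controls whether the PVC is
// deleted together with the cluster.
PersistentClaimStorage volume = new PersistentClaimStorageBuilder()
        .withId(2)
        .withDeleteClaim(false)
        .withSize("100Gi")
        .build();
JbodStorage jbod = new JbodStorageBuilder()
        .withVolumes(volume)
        .build();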
use of io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder in project strimzi by strimzi.
the class KRaftUtilsTest method testInvalidKafka.
@ParallelTest
public void testInvalidKafka() {
    KafkaSpec spec = new KafkaSpecBuilder()
            .withNewKafka()
                .withListeners(new GenericKafkaListenerBuilder()
                        .withName("listener")
                        .withPort(9092)
                        .withTls(true)
                        .withType(KafkaListenerType.INTERNAL)
                        .withNewKafkaListenerAuthenticationScramSha512Auth()
                        .endKafkaListenerAuthenticationScramSha512Auth()
                        .build())
                .withNewJbodStorage()
                    .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build(),
                                 new PersistentClaimStorageBuilder().withId(1).withSize("100Gi").build())
                .endJbodStorage()
                .withNewKafkaAuthorizationSimple()
                .endKafkaAuthorizationSimple()
            .endKafka()
            .build();
    InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> KRaftUtils.validateKafkaCrForKRaft(spec));
    assertThat(ex.getMessage(), is("Kafka configuration is not valid: [Authentication of type 'scram-sha-512' is currently not supported when the UseKRaft feature gate is enabled, Using more than one disk in a JBOD storage is currently not supported when the UseKRaft feature gate is enabled]"));
}
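For contrast, here is a sketch of a spec that avoids both reported problems: a single JBOD disk and a TLS listener without SCRAM authentication. This is an assumption derived from the error messages above, not a test taken from the Strimzi suite.

// Hedged sketch: with a single JBOD volume and no SCRAM listener
// authentication, validateKafkaCrForKRaft is expected not to throw.
KafkaSpec validSpec = new KafkaSpecBuilder()
        .withNewKafka()
            .withListeners(new GenericKafkaListenerBuilder()
                    .withName("listener")
                    .withPort(9092)
                    .withTls(true)
                    .withType(KafkaListenerType.INTERNAL)
                    .build())
            .withNewJbodStorage()
                .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build())
            .endJbodStorage()
        .endKafka()
        .build();
KRaftUtils.validateKafkaCrForKRaft(validSpec); // expected: no exception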
use of io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder in project strimzi by strimzi.
the class KRaftUtilsTest method testJbodStorageWithMultipleDisks.
@ParallelTest
public void testJbodStorageWithMultipleDisks() {
    Set<String> errors = new HashSet<>(0);
    KafkaClusterSpec kcs = new KafkaClusterSpecBuilder()
            .withNewJbodStorage()
                .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build(),
                             new PersistentClaimStorageBuilder().withId(1).withSize("100Gi").build())
            .endJbodStorage()
            .build();
    KRaftUtils.validateKafkaSpec(errors, kcs);
    assertThat(errors, is(Set.of("Using more than one disk in a JBOD storage is currently not supported when the UseKRaft feature gate is enabled")));
}
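The complementary single-disk case, sketched here as an assumption rather than quoted from the test class: the same validation should leave the error set empty.

// Hedged sketch: a single JBOD volume is expected to add no errors.
Set<String> noErrors = new HashSet<>();
KafkaClusterSpec singleDisk = new KafkaClusterSpecBuilder()
        .withNewJbodStorage()
            .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build())
        .endJbodStorage()
        .build();
KRaftUtils.validateKafkaSpec(noErrors, singleDisk);
assertThat(noErrors, is(Set.of()));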
use of io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder in project strimzi by strimzi.
the class AbstractModelTest method testCreatePersistentVolumeClaims.
@ParallelTest
public void testCreatePersistentVolumeClaims() {
    Kafka kafka = new KafkaBuilder()
            .withNewMetadata()
                .withName("my-cluster")
                .withNamespace("my-namespace")
            .endMetadata()
            .withNewSpec()
                .withNewKafka()
                    .withListeners(new GenericKafkaListenerBuilder()
                            .withName("plain")
                            .withPort(9092)
                            .withTls(false)
                            .withType(KafkaListenerType.INTERNAL)
                            .build())
                    .withReplicas(2)
                    .withNewEphemeralStorage()
                    .endEphemeralStorage()
                .endKafka()
            .endSpec()
            .build();
    KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, KafkaVersionTestUtils.getKafkaVersionLookup());
    // JBOD storage
    Storage storage = new JbodStorageBuilder()
            .withVolumes(new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize("20Gi").build(),
                         new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize("10Gi").build())
            .build();
    List<PersistentVolumeClaim> pvcs = kc.generatePersistentVolumeClaims(storage);
    assertThat(pvcs.size(), is(4));
    assertThat(pvcs.get(0).getMetadata().getName(), is("data-0-my-cluster-kafka-0"));
    assertThat(pvcs.get(1).getMetadata().getName(), is("data-0-my-cluster-kafka-1"));
    assertThat(pvcs.get(2).getMetadata().getName(), is("data-1-my-cluster-kafka-0"));
    assertThat(pvcs.get(3).getMetadata().getName(), is("data-1-my-cluster-kafka-1"));
    // JBOD with ephemeral storage
    storage = new JbodStorageBuilder()
            .withVolumes(new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize("20Gi").build(),
                         new EphemeralStorageBuilder().withId(1).build())
            .build();
    pvcs = kc.generatePersistentVolumeClaims(storage);
    assertThat(pvcs.size(), is(2));
    assertThat(pvcs.get(0).getMetadata().getName(), is("data-0-my-cluster-kafka-0"));
    assertThat(pvcs.get(1).getMetadata().getName(), is("data-0-my-cluster-kafka-1"));
    // Persistent-claim storage
    storage = new PersistentClaimStorageBuilder().withDeleteClaim(false).withSize("20Gi").build();
    pvcs = kc.generatePersistentVolumeClaims(storage);
    assertThat(pvcs.size(), is(2));
    assertThat(pvcs.get(0).getMetadata().getName(), is("data-my-cluster-kafka-0"));
    assertThat(pvcs.get(1).getMetadata().getName(), is("data-my-cluster-kafka-1"));
    // Persistent-claim storage with an ID
    storage = new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize("20Gi").build();
    pvcs = kc.generatePersistentVolumeClaims(storage);
    assertThat(pvcs.size(), is(2));
    assertThat(pvcs.get(0).getMetadata().getName(), is("data-my-cluster-kafka-0"));
    assertThat(pvcs.get(1).getMetadata().getName(), is("data-my-cluster-kafka-1"));
    // Ephemeral storage
    storage = new EphemeralStorageBuilder().build();
    pvcs = kc.generatePersistentVolumeClaims(storage);
    assertThat(pvcs.size(), is(0));
    // JBOD storage without an ID
    final Storage finalStorage = new JbodStorageBuilder()
            .withVolumes(new PersistentClaimStorageBuilder().withDeleteClaim(false).withSize("20Gi").build())
            .build();
    InvalidResourceException ex = Assertions.assertThrows(InvalidResourceException.class, () -> kc.generatePersistentVolumeClaims(finalStorage));
    assertThat(ex.getMessage(), is("The 'id' property is required for volumes in JBOD storage."));
}
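The assertions above pin down the PVC naming scheme: JBOD volumes embed the volume id in the name, while plain persistent-claim storage does not (even when an id is set). A sketch of the pattern, reconstructed from the assertions rather than from a documented API:

// Reconstructed from the assertions above (not a documented API):
//   JBOD volume:      data-<volumeId>-<clusterName>-kafka-<brokerIndex>
//   Persistent claim: data-<clusterName>-kafka-<brokerIndex>
String jbodPvcName = String.format("data-%d-%s-kafka-%d", 0, "my-cluster", 1); // data-0-my-cluster-kafka-1
String plainPvcName = String.format("data-%s-kafka-%d", "my-cluster", 0);      // data-my-cluster-kafka-0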
use of io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder in project strimzi by strimzi.
the class KafkaBrokerConfigurationBuilderTest method testPersistentStorageLogDirs.
@ParallelTest
public void testPersistentStorageLogDirs() {
    Storage storage = new PersistentClaimStorageBuilder()
            .withSize("1Ti")
            .withStorageClass("aws-ebs")
            .withDeleteClaim(true)
            .build();
    String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION)
            .withLogDirs(VolumeUtils.createVolumeMounts(storage, "/var/lib/kafka", false))
            .build();
    assertThat(configuration, isEquivalent("log.dirs=/var/lib/kafka/data/kafka-log${STRIMZI_BROKER_ID}"));
}
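The expected value decomposes into the mount path handed to createVolumeMounts plus a per-broker log directory; a sketch of that composition, with the segments taken from the assertion above:

// Segments taken from the asserted string; the broker id placeholder is
// resolved per broker at runtime.
String mountPath = "/var/lib/kafka"; // passed to createVolumeMounts above
String logDir = mountPath + "/data/kafka-log" + "${STRIMZI_BROKER_ID}";
// => log.dirs=/var/lib/kafka/data/kafka-log${STRIMZI_BROKER_ID}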