Use of io.strimzi.api.kafka.model.storage.JbodStorage in project strimzi by strimzi.
The class KafkaST, method testPersistentStorageSize.
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testPersistentStorageSize(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String[] diskSizes = {"70Gi", "20Gi"};
    final int kafkaRepl = 2;
    final int diskCount = 2;

    JbodStorage jbodStorage = new JbodStorageBuilder().withVolumes(
            new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize(diskSizes[0]).build(),
            new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(1).withSize(diskSizes[1]).build())
        .build();

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, kafkaRepl)
        .editSpec()
            .editKafka().withStorage(jbodStorage).endKafka()
            .editZookeeper().withReplicas(1).endZookeeper()
        .endSpec().build());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());

    List<PersistentVolumeClaim> volumes = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream()
        .filter(persistentVolumeClaim -> persistentVolumeClaim.getMetadata().getName().contains(clusterName))
        .collect(Collectors.toList());
    checkStorageSizeForVolumes(volumes, diskSizes, kafkaRepl, diskCount);

    String kafkaClientsPodName = kubeClient(namespaceName)
        .listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS)
        .get(0).getMetadata().getName();

    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(kafkaClientsPodName)
        .withTopicName(topicName)
        .withNamespaceName(namespaceName)
        .withClusterName(clusterName)
        .withMessageCount(MESSAGE_COUNT)
        .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
        .build();

    LOGGER.info("Checking produced and consumed messages to pod: {}", kafkaClientsPodName);
    internalKafkaClient.checkProducedAndConsumedMessages(internalKafkaClient.sendMessagesPlain(), internalKafkaClient.receiveMessagesPlain());
}
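The helper checkStorageSizeForVolumes is not shown on this page. A minimal sketch of the kind of assertion it presumably performs; the PVC name pattern data-<volumeId>-...-<replica> and the helper body below are assumptions for illustration, not the project's actual implementation:

private static void checkStorageSizeForVolumes(List<PersistentVolumeClaim> volumes, String[] diskSizes, int kafkaRepl, int diskCount) {
    // For every JBOD volume id there should be one PVC per Kafka replica,
    // and each PVC should request the size configured for that volume.
    for (int id = 0; id < diskCount; id++) {
        for (int replica = 0; replica < kafkaRepl; replica++) {
            final String prefix = "data-" + id;   // assumed JBOD PVC name prefix
            final String suffix = "-" + replica;  // assumed pod ordinal suffix
            PersistentVolumeClaim pvc = volumes.stream()
                .filter(v -> v.getMetadata().getName().startsWith(prefix))
                .filter(v -> v.getMetadata().getName().endsWith(suffix))
                .findFirst()
                .orElseThrow(() -> new AssertionError("Missing PVC " + prefix + "..." + suffix));
            assertThat(pvc.getSpec().getResources().getRequests().get("storage").toString(),
                containsString(diskSizes[id]));
        }
    }
}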
Use of io.strimzi.api.kafka.model.storage.JbodStorage in project strimzi by strimzi.
The class CruiseControlST, method testCruiseControlIntraBrokerBalancing.
@ParallelNamespaceTest
void testCruiseControlIntraBrokerBalancing(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext);
    String diskSize = "6Gi";

    JbodStorage jbodStorage = new JbodStorageBuilder().withVolumes(
            new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(0).withSize(diskSize).build(),
            new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize(diskSize).build())
        .build();

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaWithCruiseControl(testStorage.getClusterName(), 3, 3)
        .editMetadata().withNamespace(testStorage.getNamespaceName()).endMetadata()
        .editOrNewSpec().editKafka().withStorage(jbodStorage).endKafka().endSpec()
        .build());
    resourceManager.createResource(extensionContext, KafkaRebalanceTemplates.kafkaRebalance(testStorage.getClusterName())
        .editMetadata().withNamespace(testStorage.getNamespaceName()).endMetadata()
        .editOrNewSpec().withRebalanceDisk(true).endSpec()
        .build());

    KafkaRebalanceUtils.waitForKafkaRebalanceCustomResourceState(testStorage.getNamespaceName(), testStorage.getClusterName(), KafkaRebalanceState.ProposalReady);

    LOGGER.info("Checking status of KafkaRebalance");
    // The provision status should be "UNDECIDED" for an intra-broker disk balance,
    // because provisioning is not relevant to that mode
    KafkaRebalanceStatus kafkaRebalanceStatus = KafkaRebalanceResource.kafkaRebalanceClient()
        .inNamespace(testStorage.getNamespaceName())
        .withName(testStorage.getClusterName())
        .get().getStatus();
    assertThat(kafkaRebalanceStatus.getOptimizationResult().get("provisionStatus").toString(), containsString("UNDECIDED"));
}
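For reference, the same KafkaRebalance resource can be built directly with the model API instead of the test template. This is a sketch assuming the standard io.strimzi.api.kafka.model.KafkaRebalanceBuilder; the strimzi.io/cluster label is what ties the rebalance to its Kafka cluster:

KafkaRebalance rebalance = new KafkaRebalanceBuilder()
    .withNewMetadata()
        .withName(testStorage.getClusterName())
        .withNamespace(testStorage.getNamespaceName())
        .addToLabels("strimzi.io/cluster", testStorage.getClusterName())
    .endMetadata()
    .withNewSpec()
        .withRebalanceDisk(true)  // intra-broker (disk-to-disk) balancing mode
    .endSpec()
    .build();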
Use of io.strimzi.api.kafka.model.storage.JbodStorage in project strimzi by strimzi.
The class AbstractModel, method validatePersistentStorage.
/**
 * Validates persistent storage.
 * If the storage is of a persistent type, its configuration is validated;
 * otherwise, validation passes.
 *
 * @param storage Persistent storage configuration
 * @throws InvalidResourceException if validation fails for any reason
 */
protected static void validatePersistentStorage(Storage storage) {
    if (storage instanceof PersistentClaimStorage) {
        PersistentClaimStorage persistentClaimStorage = (PersistentClaimStorage) storage;
        checkPersistentStorageSizeIsValid(persistentClaimStorage);
    } else if (storage instanceof JbodStorage) {
        JbodStorage jbodStorage = (JbodStorage) storage;
        if (jbodStorage.getVolumes().size() == 0) {
            throw new InvalidResourceException("JbodStorage needs to contain at least one volume!");
        }
        for (Storage jbodVolume : jbodStorage.getVolumes()) {
            if (jbodVolume instanceof PersistentClaimStorage) {
                PersistentClaimStorage persistentClaimStorage = (PersistentClaimStorage) jbodVolume;
                checkPersistentStorageSizeIsValid(persistentClaimStorage);
            }
        }
    }
}
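As an illustration of the JBOD branch above, a storage object with no volumes is rejected. A minimal sketch, assuming JbodStorageBuilder.withVolumes() with no arguments yields an empty list (rather than null) and that the call site can access this protected method, e.g. from a subclass of AbstractModel:

JbodStorage emptyJbod = new JbodStorageBuilder().withVolumes().build();
try {
    validatePersistentStorage(emptyJbod);
} catch (InvalidResourceException e) {
    // expected: "JbodStorage needs to contain at least one volume!"
}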
Use of io.strimzi.api.kafka.model.storage.JbodStorage in project strimzi by strimzi.
The class AbstractModel, method createPersistentVolumeClaims.
/**
 * Creates the list of PersistentVolumeClaims required by stateful deployments (Kafka and ZooKeeper). This method
 * calls itself recursively to handle the volumes inside JBOD storage. When it calls itself to handle the volumes
 * inside the JBOD array, the {@code jbod} flag should be set to {@code true}. When called from outside, it should
 * be set to {@code false}.
 *
 * @param storage The storage configuration
 * @param jbod    Indicates whether the {@code storage} is part of a JBOD array or not
 *
 * @return List of Persistent Volume Claims
 */
protected List<PersistentVolumeClaim> createPersistentVolumeClaims(Storage storage, boolean jbod) {
    List<PersistentVolumeClaim> pvcs = new ArrayList<>();

    if (storage != null) {
        if (storage instanceof PersistentClaimStorage) {
            PersistentClaimStorage persistentStorage = (PersistentClaimStorage) storage;
            String pvcBaseName = VolumeUtils.createVolumePrefix(persistentStorage.getId(), jbod) + "-" + name;

            for (int i = 0; i < replicas; i++) {
                pvcs.add(createPersistentVolumeClaim(i, pvcBaseName + "-" + i, persistentStorage));
            }
        } else if (storage instanceof JbodStorage) {
            for (SingleVolumeStorage volume : ((JbodStorage) storage).getVolumes()) {
                // called recursively to create the PVCs for the current JBOD volume
                pvcs.addAll(createPersistentVolumeClaims(volume, true));
            }
        }
    }

    return pvcs;
}
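The PVC names this produces depend on VolumeUtils.createVolumePrefix, which is not shown here. A short sketch of the assumed naming convention (data-<volumeId>-<name>-<replica> for JBOD volumes, data-<name>-<replica> otherwise); this mirrors the assumed prefix logic and is not the actual VolumeUtils implementation:

static String volumePrefix(Integer id, boolean jbod) {
    // JBOD volumes carry their per-volume id in the PVC name prefix
    return jbod ? "data-" + id : "data";
}

// For a JBOD volume with id 0, name "my-cluster-kafka" and 3 replicas this yields:
//   data-0-my-cluster-kafka-0
//   data-0-my-cluster-kafka-1
//   data-0-my-cluster-kafka-2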