Use of io.strimzi.api.kafka.model.storage.JbodStorage in project strimzi by strimzi.
The class Capacity, method generateJbodDiskCapacity.
/**
 * Generate JBOD disk capacity configuration for a broker using the supplied storage configuration
 *
 * @param storage Storage configuration for Kafka cluster
 * @param idx     Index of the broker
 * @return Disk capacity configuration value as a JsonObject for broker idx
 */
private JsonObject generateJbodDiskCapacity(Storage storage, int idx) {
    JsonObject json = new JsonObject();
    String size = "";

    for (SingleVolumeStorage volume : ((JbodStorage) storage).getVolumes()) {
        String name = VolumeUtils.createVolumePrefix(volume.getId(), true);
        String path = AbstractModel.KAFKA_MOUNT_PATH + "/" + name + "/" + AbstractModel.KAFKA_LOG_DIR + idx;

        if (volume instanceof PersistentClaimStorage) {
            size = ((PersistentClaimStorage) volume).getSize();
        } else if (volume instanceof EphemeralStorage) {
            size = ((EphemeralStorage) volume).getSizeLimit();
        }

        json.put(path, String.valueOf(Capacity.getSizeInMiB(size)));
    }
    return json;
}
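For orientation, a rough sketch of the output: with a JBOD configuration of two 100Gi persistent volumes (IDs 0 and 1), the JsonObject for broker 0 maps each broker log directory to its capacity in MiB. The mount path constants and the exact number formatting come from AbstractModel and Capacity.getSizeInMiB, which are not shown here, so the values below are illustrative only:

{
  "/var/lib/kafka/data-0/kafka-log0" : "102400.0",
  "/var/lib/kafka/data-1/kafka-log0" : "102400.0"
}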
Use of io.strimzi.api.kafka.model.storage.JbodStorage in project strimzi by strimzi.
The class Capacity, method generateCapacityConfig.
/**
 * Generate a capacity configuration for the cluster
 *
 * @return Cruise Control capacity configuration as a String
 */
public String generateCapacityConfig() {
    JsonArray brokerList = new JsonArray();

    if (storage instanceof JbodStorage) {
        // A JBOD configuration needs a distinct capacity entry for every broker, because the
        // disk paths include the broker pod index in their names.
        for (int idx = 0; idx < replicas; idx++) {
            JsonObject diskConfig = new JsonObject().put("DISK", generateJbodDiskCapacity(storage, idx));
            JsonObject brokerEntry = generateBrokerCapacity(idx, diskConfig, "Capacity for Broker " + idx);
            brokerList.add(brokerEntry);
        }
    } else {
        // A capacity configuration for a cluster without a JBOD configuration
        // can rely on a generic broker entry for all brokers
        JsonObject diskConfig = new JsonObject().put("DISK", String.valueOf(diskMiB));
        JsonObject defaultBrokerCapacity = generateBrokerCapacity(DEFAULT_BROKER_ID, diskConfig, DEFAULT_BROKER_DOC);
        brokerList.add(defaultBrokerCapacity);
    }

    JsonObject config = new JsonObject();
    config.put("brokerCapacities", brokerList);

    return config.encodePrettily();
}
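generateBrokerCapacity, DEFAULT_BROKER_ID and DEFAULT_BROKER_DOC are not shown in this excerpt, so the following is only a hedged sketch of what the encoded result of the non-JBOD branch could look like, following the general layout of a Cruise Control brokerCapacities file. The CPU and network keys, the broker id and all numeric values are assumptions made for illustration:

{
  "brokerCapacities" : [ {
    "brokerId" : "-1",
    "capacity" : {
      "DISK" : "102400.0",
      "CPU" : "100",
      "NW_IN" : "10000",
      "NW_OUT" : "10000"
    },
    "doc" : "The default capacity entry applied to all brokers"
  } ]
}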
Use of io.strimzi.api.kafka.model.storage.JbodStorage in project strimzi by strimzi.
The class VolumeUtils, method createPodSetVolumes.
/**
 * Generates the list of data volumes as used in PodSets and individual Pods. This includes both ephemeral and
 * persistent data volumes. This method calls itself recursively to create the volumes from a JBOD storage array.
 * When it does so, it sets the {@code jbod} parameter to {@code true}. When called from outside, it should be set
 * to {@code false}.
 *
 * @param podName Name of the pod used to name the volumes
 * @param storage Storage configuration
 * @param jbod    Indicates that the storage is part of JBOD storage and volume names are created accordingly
 *
 * @return List of data volumes to be included in the StrimziPodSet pod
 */
public static List<Volume> createPodSetVolumes(String podName, Storage storage, boolean jbod) {
    List<Volume> volumes = new ArrayList<>();

    if (storage != null) {
        if (storage instanceof JbodStorage) {
            for (SingleVolumeStorage volume : ((JbodStorage) storage).getVolumes()) {
                // Recurse to create the data volume for each individual JBOD volume
                volumes.addAll(createPodSetVolumes(podName, volume, true));
            }
        } else if (storage instanceof EphemeralStorage) {
            EphemeralStorage ephemeralStorage = (EphemeralStorage) storage;
            volumes.add(createEmptyDirVolume(createVolumePrefix(ephemeralStorage.getId(), jbod), ephemeralStorage.getSizeLimit(), null));
        } else if (storage instanceof PersistentClaimStorage) {
            String name = createVolumePrefix(((PersistentClaimStorage) storage).getId(), jbod);
            volumes.add(createPvcVolume(name, name + "-" + podName));
        }
    }

    return volumes;
}
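As a minimal, hypothetical usage sketch (the method name, pod name and storage sizes below are illustrative, not taken from the snippets above), a caller passes jbod = false at the top level and lets the recursion handle the individual JBOD volumes:

import java.util.List;
import io.fabric8.kubernetes.api.model.Volume;
import io.strimzi.api.kafka.model.storage.EphemeralStorageBuilder;
import io.strimzi.api.kafka.model.storage.JbodStorage;
import io.strimzi.api.kafka.model.storage.JbodStorageBuilder;
import io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder;

void exampleCreatePodSetVolumes() {
    // Hypothetical JBOD storage with one persistent and one ephemeral volume
    JbodStorage storage = new JbodStorageBuilder()
            .withVolumes(
                    new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build(),
                    new EphemeralStorageBuilder().withId(1).withSizeLimit("10Gi").build())
            .build();

    // jbod is false at the top level; createPodSetVolumes() passes true when it recurses into the JBOD volumes
    List<Volume> dataVolumes = VolumeUtils.createPodSetVolumes("my-cluster-kafka-0", storage, false);
}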
Use of io.strimzi.api.kafka.model.storage.JbodStorage in project strimzi by strimzi.
The class KafkaST, method testKafkaJBODDeleteClaimsFalse.
@ParallelNamespaceTest
void testKafkaJBODDeleteClaimsFalse(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final int kafkaReplicas = 2;
    final String diskSizeGi = "10";

    JbodStorage jbodStorage = new JbodStorageBuilder().withVolumes(
            new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize(diskSizeGi + "Gi").build(),
            new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(1).withSize(diskSizeGi + "Gi").build()).build();

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaJBOD(clusterName, kafkaReplicas, jbodStorage).build());

    // Kafka cluster already deployed
    verifyVolumeNamesAndLabels(namespaceName, clusterName, kafkaReplicas, 2, diskSizeGi);

    int volumesCount = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).size();

    LOGGER.info("Deleting cluster");
    cmdKubeClient(namespaceName).deleteByName("kafka", clusterName);

    LOGGER.info("Waiting for PVC deletion");
    PersistentVolumeClaimUtils.waitForPVCDeletion(namespaceName, volumesCount, jbodStorage, clusterName);
}
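Assuming Strimzi's usual data-&lt;volumeId&gt;-&lt;clusterName&gt;-kafka-&lt;podIndex&gt; naming for JBOD claims (an assumption, the naming is not shown in this test), two brokers with two JBOD volumes each should produce four PersistentVolumeClaims, roughly:

data-0-<clusterName>-kafka-0
data-1-<clusterName>-kafka-0
data-0-<clusterName>-kafka-1
data-1-<clusterName>-kafka-1

Because both volumes are created with deleteClaim(false), these claims are expected to survive the deletion of the Kafka custom resource.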
Use of io.strimzi.api.kafka.model.storage.JbodStorage in project strimzi by strimzi.
The class KafkaST, method testKafkaJBODDeleteClaimsTrueFalse.
@ParallelNamespaceTest
void testKafkaJBODDeleteClaimsTrueFalse(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final int kafkaReplicas = 2;
    final String diskSizeGi = "10";

    JbodStorage jbodStorage = new JbodStorageBuilder().withVolumes(
            new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize(diskSizeGi + "Gi").build(),
            new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize(diskSizeGi + "Gi").build()).build();

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaJBOD(clusterName, kafkaReplicas, jbodStorage).build());

    // Kafka cluster already deployed
    verifyVolumeNamesAndLabels(namespaceName, clusterName, kafkaReplicas, 2, diskSizeGi);

    final int volumesCount = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).size();

    LOGGER.info("Deleting cluster");
    cmdKubeClient(namespaceName).deleteByName("kafka", clusterName);

    LOGGER.info("Waiting for PVC deletion");
    PersistentVolumeClaimUtils.waitForPVCDeletion(namespaceName, volumesCount, jbodStorage, clusterName);
}
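The only difference from the previous test is deleteClaim(true) on volume 1. After the Kafka resource is deleted, the claims for volume 1 are expected to be removed while those for volume 0 remain, so (again assuming the data-&lt;volumeId&gt;-&lt;clusterName&gt;-kafka-&lt;podIndex&gt; naming) the expected end state looks roughly like:

data-0-<clusterName>-kafka-0   retained (deleteClaim = false)
data-0-<clusterName>-kafka-1   retained (deleteClaim = false)
data-1-<clusterName>-kafka-0   deleted  (deleteClaim = true)
data-1-<clusterName>-kafka-1   deleted  (deleteClaim = true)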