Use of io.strimzi.api.kafka.model.storage.JbodStorage in project strimzi by strimzi.
The class KafkaST, method testKafkaJBODDeleteClaimsTrue.
@ParallelNamespaceTest
void testKafkaJBODDeleteClaimsTrue(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final int kafkaReplicas = 2;
    final String diskSizeGi = "10";

    JbodStorage jbodStorage = new JbodStorageBuilder().withVolumes(
            new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(0).withSize(diskSizeGi + "Gi").build(),
            new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize(diskSizeGi + "Gi").build()
        ).build();

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaJBOD(clusterName, kafkaReplicas, jbodStorage).build());

    // kafka cluster already deployed
    verifyVolumeNamesAndLabels(namespaceName, clusterName, kafkaReplicas, 2, diskSizeGi);

    final int volumesCount = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).size();

    LOGGER.info("Deleting cluster");
    cmdKubeClient(namespaceName).deleteByName("kafka", clusterName);

    LOGGER.info("Waiting for PVC deletion");
    PersistentVolumeClaimUtils.waitForPVCDeletion(namespaceName, volumesCount, jbodStorage, clusterName);
}
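The verification and deletion steps hinge on the number and names of the PersistentVolumeClaims that a JBOD cluster creates. As a minimal sketch, assuming the usual Strimzi naming scheme data-<volumeId>-<clusterName>-kafka-<podIndex> for JBOD claims (the helper name below is hypothetical and not part of the test above):

import java.util.ArrayList;
import java.util.List;

// Sketch only: enumerate the PVC names a JBOD cluster is expected to create,
// assuming the data-<volumeId>-<clusterName>-kafka-<podIndex> naming scheme.
static List<String> expectedJbodPvcNames(String clusterName, int replicas, int volumesPerBroker) {
    List<String> names = new ArrayList<>();
    for (int podIndex = 0; podIndex < replicas; podIndex++) {
        for (int volumeId = 0; volumeId < volumesPerBroker; volumeId++) {
            names.add("data-" + volumeId + "-" + clusterName + "-kafka-" + podIndex);
        }
    }
    return names;  // 2 replicas x 2 volumes => 4 claims for the test above
}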
Use of io.strimzi.api.kafka.model.storage.JbodStorage in project strimzi by strimzi.
The class Capacity, method generateDiskCapacity.
/**
* Generate total disk capacity using the supplied storage configuration
*
* @param storage Storage configuration for Kafka cluster
* @return Disk capacity per broker as a Double
*/
public static Double generateDiskCapacity(Storage storage) {
    if (storage instanceof PersistentClaimStorage) {
        return getSizeInMiB(((PersistentClaimStorage) storage).getSize());
    } else if (storage instanceof EphemeralStorage) {
        if (((EphemeralStorage) storage).getSizeLimit() != null) {
            return getSizeInMiB(((EphemeralStorage) storage).getSizeLimit());
        } else {
            return DEFAULT_BROKER_DISK_CAPACITY_IN_MIB;
        }
    } else if (storage instanceof JbodStorage) {
        // The value generated here for JBOD storage is used for tracking the total
        // disk capacity per broker. This will NOT be used for the final disk capacity
        // configuration since JBOD storage requires a special disk configuration.
        List<SingleVolumeStorage> volumeList = ((JbodStorage) storage).getVolumes();
        double size = 0;
        for (SingleVolumeStorage volume : volumeList) {
            size += generateDiskCapacity(volume);
        }
        return size;
    } else {
        throw new IllegalStateException("The declared storage '" + storage.getType() + "' is not supported");
    }
}
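As a quick illustration of the JBOD branch, the method sums the per-volume capacities. The sketch below reuses the builders from the test above and assumes getSizeInMiB("10Gi") resolves to 10 * 1024 = 10240 MiB:

// Illustrative only: how generateDiskCapacity adds up a JBOD configuration.
// Assumes getSizeInMiB("10Gi") returns 10240.0 (MiB).
JbodStorage jbod = new JbodStorageBuilder().withVolumes(
        new PersistentClaimStorageBuilder().withId(0).withSize("10Gi").build(),
        new PersistentClaimStorageBuilder().withId(1).withSize("10Gi").build()
    ).build();

Double totalMiB = Capacity.generateDiskCapacity(jbod);  // 10240.0 + 10240.0 = 20480.0 MiB per broker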
Use of io.strimzi.api.kafka.model.storage.JbodStorage in project strimzi by strimzi.
The class KafkaRebalanceAssemblyOperator, method reconcileRebalance.
/**
* Reconcile loop for the KafkaRebalance
*/
/* test */
Future<Void> reconcileRebalance(Reconciliation reconciliation, KafkaRebalance kafkaRebalance) {
    if (kafkaRebalance == null) {
        LOGGER.infoCr(reconciliation, "Rebalance resource deleted");
        return Future.succeededFuture();
    }

    String clusterName = kafkaRebalance.getMetadata().getLabels() == null ? null : kafkaRebalance.getMetadata().getLabels().get(Labels.STRIMZI_CLUSTER_LABEL);
    String clusterNamespace = kafkaRebalance.getMetadata().getNamespace();
    if (clusterName == null) {
        LOGGER.warnCr(reconciliation, "Resource lacks label '{}': No cluster related to a possible rebalance.", Labels.STRIMZI_CLUSTER_LABEL);
        return updateStatus(reconciliation, kafkaRebalance, new KafkaRebalanceStatus(),
                new InvalidResourceException("Resource lacks label '" + Labels.STRIMZI_CLUSTER_LABEL + "': No cluster related to a possible rebalance.")).mapEmpty();
    }

    // Get associated Kafka cluster state
    return kafkaOperator.getAsync(clusterNamespace, clusterName).compose(kafka -> {
        if (kafka == null) {
            LOGGER.warnCr(reconciliation, "Kafka resource '{}' identified by label '{}' does not exist in namespace {}.", clusterName, Labels.STRIMZI_CLUSTER_LABEL, clusterNamespace);
            return updateStatus(reconciliation, kafkaRebalance, new KafkaRebalanceStatus(),
                    new NoSuchResourceException("Kafka resource '" + clusterName + "' identified by label '" + Labels.STRIMZI_CLUSTER_LABEL + "' does not exist in namespace " + clusterNamespace + ".")).mapEmpty();
        } else if (!Util.matchesSelector(kafkaSelector, kafka)) {
            LOGGER.debugCr(reconciliation, "{} {} in namespace {} belongs to a Kafka cluster {} which does not match label selector {} and will be ignored", kind(), kafkaRebalance.getMetadata().getName(), clusterNamespace, clusterName, kafkaSelector.get().getMatchLabels());
            return Future.succeededFuture();
        } else if (kafka.getSpec().getCruiseControl() == null) {
            LOGGER.warnCr(reconciliation, "Kafka resource lacks 'cruiseControl' declaration : No deployed Cruise Control for doing a rebalance.");
            return updateStatus(reconciliation, kafkaRebalance, new KafkaRebalanceStatus(),
                    new InvalidResourceException("Kafka resource lacks 'cruiseControl' declaration " + ": No deployed Cruise Control for doing a rebalance.")).mapEmpty();
        }

        if (kafka.getSpec().getKafka().getStorage() instanceof JbodStorage) {
            usingJbodStorage = true;
        }

        String ccSecretName = CruiseControlResources.secretName(clusterName);
        String ccApiSecretName = CruiseControlResources.apiSecretName(clusterName);

        Future<Secret> ccSecretFuture = secretOperations.getAsync(clusterNamespace, ccSecretName);
        Future<Secret> ccApiSecretFuture = secretOperations.getAsync(clusterNamespace, ccApiSecretName);

        return CompositeFuture.join(ccSecretFuture, ccApiSecretFuture).compose(compositeFuture -> {
            Secret ccSecret = compositeFuture.resultAt(0);
            if (ccSecret == null) {
                return Future.failedFuture(Util.missingSecretException(clusterNamespace, ccSecretName));
            }

            Secret ccApiSecret = compositeFuture.resultAt(1);
            if (ccApiSecret == null) {
                return Future.failedFuture(Util.missingSecretException(clusterNamespace, ccApiSecretName));
            }

            CruiseControlConfiguration c = new CruiseControlConfiguration(reconciliation, kafka.getSpec().getCruiseControl().getConfig().entrySet());
            boolean apiAuthEnabled = CruiseControl.isApiAuthEnabled(c);
            boolean apiSslEnabled = CruiseControl.isApiSslEnabled(c);
            CruiseControlApi apiClient = cruiseControlClientProvider(ccSecret, ccApiSecret, apiAuthEnabled, apiSslEnabled);

            // Get the latest KafkaRebalance state as it may have changed
            return kafkaRebalanceOperator.getAsync(kafkaRebalance.getMetadata().getNamespace(), kafkaRebalance.getMetadata().getName()).compose(currentKafkaRebalance -> {
                KafkaRebalanceStatus kafkaRebalanceStatus = currentKafkaRebalance.getStatus();
                KafkaRebalanceState currentState;
                // The cluster rebalance is new or it is in one of the other states
                if (kafkaRebalanceStatus == null || kafkaRebalanceStatus.getConditions().stream().filter(cond -> "ReconciliationPaused".equals(cond.getType())).findAny().isPresent()) {
                    currentState = KafkaRebalanceState.New;
                } else {
                    String rebalanceStateType = rebalanceStateConditionType(kafkaRebalanceStatus);
                    if (rebalanceStateType == null) {
                        throw new RuntimeException("Unable to find KafkaRebalance State in current KafkaRebalance status");
                    }
                    currentState = KafkaRebalanceState.valueOf(rebalanceStateType);
                }
                // Check annotation
                KafkaRebalanceAnnotation rebalanceAnnotation = rebalanceAnnotation(reconciliation, currentKafkaRebalance);
                return reconcile(reconciliation, cruiseControlHost(clusterName, clusterNamespace), apiClient, currentKafkaRebalance, currentState, rebalanceAnnotation).mapEmpty();
            }, exception -> Future.failedFuture(exception).mapEmpty());
        });
    }, exception -> updateStatus(reconciliation, kafkaRebalance, new KafkaRebalanceStatus(), exception).mapEmpty());
}
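The first guard above rejects any KafkaRebalance that is not linked to a Kafka cluster through the Labels.STRIMZI_CLUSTER_LABEL ("strimzi.io/cluster") label. A minimal sketch of a correctly labelled resource, assuming the generated KafkaRebalanceBuilder from the Strimzi API model; resource and namespace names are illustrative:

// Minimal sketch: a KafkaRebalance linked to the Kafka cluster "my-cluster" via the
// strimzi.io/cluster label, so the clusterName lookup in reconcileRebalance succeeds.
KafkaRebalance rebalance = new KafkaRebalanceBuilder()
        .withNewMetadata()
            .withName("my-rebalance")
            .withNamespace("my-namespace")
            .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, "my-cluster")
        .endMetadata()
        .withNewSpec()
        .endSpec()
        .build();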
Use of io.strimzi.api.kafka.model.storage.JbodStorage in project strimzi-kafka-operator by strimzi.
The class Capacity, method generateJbodDiskCapacity.
/**
* Generate JBOD disk capacity configuration for a broker using the supplied storage configuration
*
* @param storage Storage configuration for Kafka cluster
* @param idx Index of the broker
* @return Disk capacity configuration value as a JsonObject for broker idx
*/
private JsonObject generateJbodDiskCapacity(Storage storage, int idx) {
    JsonObject json = new JsonObject();
    String size = "";

    for (SingleVolumeStorage volume : ((JbodStorage) storage).getVolumes()) {
        String name = VolumeUtils.createVolumePrefix(volume.getId(), true);
        String path = AbstractModel.KAFKA_MOUNT_PATH + "/" + name + "/" + AbstractModel.KAFKA_LOG_DIR + idx;

        if (volume instanceof PersistentClaimStorage) {
            size = ((PersistentClaimStorage) volume).getSize();
        } else if (volume instanceof EphemeralStorage) {
            size = ((EphemeralStorage) volume).getSizeLimit();
        }
        json.put(path, String.valueOf(Capacity.getSizeInMiB(size)));
    }
    return json;
}
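For orientation, the loop produces one entry per JBOD volume, keyed by the broker-specific log dir path. The sketch below shows the expected result for the two 10Gi volumes and broker index 0, assuming the usual Strimzi layout (KAFKA_MOUNT_PATH "/var/lib/kafka", volume prefix "data-<id>", KAFKA_LOG_DIR "kafka-log"); the concrete paths are illustrative:

// Sketch of what generateJbodDiskCapacity(jbod, 0) is expected to return for the
// two-volume 10Gi JBOD example, under the assumed path constants above.
JsonObject expected = new JsonObject()
        .put("/var/lib/kafka/data-0/kafka-log0", "10240.0")
        .put("/var/lib/kafka/data-1/kafka-log0", "10240.0");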
Use of io.strimzi.api.kafka.model.storage.JbodStorage in project strimzi-kafka-operator by strimzi.
The class Capacity, method generateCapacityConfig.
/**
* Generate a capacity configuration for cluster
*
* @return Cruise Control capacity configuration as a String
*/
public String generateCapacityConfig() {
    JsonArray brokerList = new JsonArray();
    if (storage instanceof JbodStorage) {
        // A JBOD configuration requires a distinct capacity entry per broker because
        // the log dir paths are broker-specific: they include
        // the broker pod index in their names.
        for (int idx = 0; idx < replicas; idx++) {
            JsonObject diskConfig = new JsonObject().put("DISK", generateJbodDiskCapacity(storage, idx));
            JsonObject brokerEntry = generateBrokerCapacity(idx, diskConfig, "Capacity for Broker " + idx);
            brokerList.add(brokerEntry);
        }
    } else {
        // A capacity configuration for a cluster without a JBOD configuration
        // can rely on a generic broker entry for all brokers
        JsonObject diskConfig = new JsonObject().put("DISK", String.valueOf(diskMiB));
        JsonObject defaultBrokerCapacity = generateBrokerCapacity(DEFAULT_BROKER_ID, diskConfig, DEFAULT_BROKER_DOC);
        brokerList.add(defaultBrokerCapacity);
    }

    JsonObject config = new JsonObject();
    config.put("brokerCapacities", brokerList);

    return config.encodePrettily();
}
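Put together, the JBOD branch yields one broker entry per replica, while the non-JBOD branch emits a single default entry shared by all brokers. As a rough illustration only, assuming the standard Cruise Control capacity file fields ("brokerId", "capacity", "doc") and the disk paths sketched above, the pretty-printed output for the two-broker JBOD example looks roughly like this:

// Sketch only: shape of generateCapacityConfig() for the two-broker JBOD example.
// Non-disk resources and the exact field formatting are elided.
//
// {
//   "brokerCapacities" : [ {
//     "brokerId" : "0",
//     "capacity" : {
//       "DISK" : {
//         "/var/lib/kafka/data-0/kafka-log0" : "10240.0",
//         "/var/lib/kafka/data-1/kafka-log0" : "10240.0"
//       }
//     },
//     "doc" : "Capacity for Broker 0"
//   }, {
//     "brokerId" : "1",
//     ...                                  // same shape, log dirs end in "kafka-log1"
//   } ]
// }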