Use of io.strimzi.api.kafka.model.storage.PersistentClaimStorage in project strimzi-kafka-operator by strimzi.
The class KafkaAssemblyOperatorMockTest, method testReconcileUpdatesKafkaPersistentVolumes.
@ParameterizedTest
@MethodSource("data")
public void testReconcileUpdatesKafkaPersistentVolumes(Params params, VertxTestContext context) {
    init(params);
    assumeTrue(kafkaStorage instanceof PersistentClaimStorage, "Parameterized Test only runs for Params with Kafka Persistent storage");
    String originalStorageClass = Storage.storageClass(kafkaStorage);
    Checkpoint async = context.checkpoint();
    initialReconcile(context)
        .onComplete(context.succeeding(v -> context.verify(() -> {
            assertStorageClass(context, KafkaCluster.kafkaClusterName(CLUSTER_NAME), originalStorageClass);
            // Try to update the storage class
            String changedClass = originalStorageClass + "2";
            Kafka patchedPersistenceKafka = new KafkaBuilder(cluster)
                    .editSpec()
                        .editKafka()
                            .withNewPersistentClaimStorage()
                                .withStorageClass(changedClass)
                                .withSize("123")
                            .endPersistentClaimStorage()
                        .endKafka()
                    .endSpec()
                    .build();
            kafkaAssembly(NAMESPACE, CLUSTER_NAME).patch(patchedPersistenceKafka);
            LOGGER.info("Updating with changed storage class");
        })))
        .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)))
        .onComplete(context.succeeding(v -> context.verify(() -> {
            // Check the storage class was not changed
            assertStorageClass(context, KafkaCluster.kafkaClusterName(CLUSTER_NAME), originalStorageClass);
            async.flag();
        })));
}
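For reference, the persistent storage that the patch above swaps in can also be built directly with the generated fluent builder. A minimal, self-contained sketch; the storage class name is illustrative, while "123" matches the size string used in the test:

import io.strimzi.api.kafka.model.storage.PersistentClaimStorage;
import io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder;

public class PersistentClaimStorageExample {

    public static void main(String[] args) {
        // Stand-alone equivalent of the withNewPersistentClaimStorage()...endPersistentClaimStorage() block above
        PersistentClaimStorage storage = new PersistentClaimStorageBuilder()
                .withStorageClass("standard2") // illustrative storage class name
                .withSize("123")               // same size string the test patches in
                .withDeleteClaim(false)        // keep the PVC when the Kafka cluster is deleted
                .build();

        System.out.println(storage.getStorageClass() + " / " + storage.getSize());
    }
}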
Use of io.strimzi.api.kafka.model.storage.PersistentClaimStorage in project strimzi-kafka-operator by strimzi.
The class CruiseControlTest, method testBrokerCapacities.
@ParallelTest
public void testBrokerCapacities() {
    // Test user-defined capacities
    BrokerCapacity userDefinedBrokerCapacity = new BrokerCapacity();
    userDefinedBrokerCapacity.setInboundNetwork("50000KB/s");
    userDefinedBrokerCapacity.setOutboundNetwork("50000KB/s");
    CruiseControlSpec cruiseControlSpec = new CruiseControlSpecBuilder()
            .withImage(ccImage)
            .withBrokerCapacity(userDefinedBrokerCapacity)
            .build();
    Kafka resource = createKafka(cruiseControlSpec);
    Capacity capacity = new Capacity(resource.getSpec(), kafkaStorage);
    assertThat(getCapacityConfigurationFromEnvVar(resource, ENV_VAR_CRUISE_CONTROL_CAPACITY_CONFIGURATION), is(capacity.generateCapacityConfig()));

    // Test generated disk capacity
    JbodStorage jbodStorage = new JbodStorage();
    List<SingleVolumeStorage> volumes = new ArrayList<>();
    PersistentClaimStorage p1 = new PersistentClaimStorage();
    p1.setId(0);
    p1.setSize("50Gi");
    volumes.add(p1);
    PersistentClaimStorage p2 = new PersistentClaimStorage();
    p2.setId(1);
    volumes.add(p2);
    jbodStorage.setVolumes(volumes);
    resource = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout))
            .editSpec()
                .editKafka()
                    .withVersion(version)
                    .withStorage(jbodStorage)
                .endKafka()
                .withCruiseControl(cruiseControlSpec)
            .endSpec()
            .build();
    capacity = new Capacity(resource.getSpec(), jbodStorage);
    assertThat(getCapacityConfigurationFromEnvVar(resource, ENV_VAR_CRUISE_CONTROL_CAPACITY_CONFIGURATION), is(capacity.generateCapacityConfig()));
}
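The same two-volume JBOD layout can be declared more compactly with the generated builders instead of setters; a minimal sketch, assuming the builder classes that ship with the Strimzi API model:

import io.strimzi.api.kafka.model.storage.JbodStorage;
import io.strimzi.api.kafka.model.storage.JbodStorageBuilder;
import io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder;

public class JbodStorageExample {

    public static void main(String[] args) {
        // Volume 0 with an explicit size and volume 1 without one, mirroring p1 and p2 in the test
        JbodStorage jbodStorage = new JbodStorageBuilder()
                .withVolumes(
                        new PersistentClaimStorageBuilder().withId(0).withSize("50Gi").build(),
                        new PersistentClaimStorageBuilder().withId(1).build())
                .build();

        System.out.println(jbodStorage.getVolumes().size() + " JBOD volumes configured");
    }
}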
Use of io.strimzi.api.kafka.model.storage.PersistentClaimStorage in project strimzi-kafka-operator by strimzi.
The class Capacity, method generateJbodDiskCapacity.
/**
 * Generate the JBOD disk capacity configuration for a broker using the supplied storage configuration.
 *
 * @param storage Storage configuration for the Kafka cluster
 * @param idx     Index of the broker
 * @return Disk capacity configuration value as a JsonObject for broker idx
 */
private JsonObject generateJbodDiskCapacity(Storage storage, int idx) {
    JsonObject json = new JsonObject();
    String size = "";
    for (SingleVolumeStorage volume : ((JbodStorage) storage).getVolumes()) {
        String name = VolumeUtils.createVolumePrefix(volume.getId(), true);
        String path = AbstractModel.KAFKA_MOUNT_PATH + "/" + name + "/" + AbstractModel.KAFKA_LOG_DIR + idx;
        if (volume instanceof PersistentClaimStorage) {
            size = ((PersistentClaimStorage) volume).getSize();
        } else if (volume instanceof EphemeralStorage) {
            size = ((EphemeralStorage) volume).getSizeLimit();
        }
        json.put(path, String.valueOf(Capacity.getSizeInMiB(size)));
    }
    return json;
}
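As a worked example, for broker 0 and the 50Gi JBOD volume from the test above, the generated entry maps a log dir path to the volume size in MiB. The path shape below is an assumption based on Strimzi's usual mount path and volume naming; the MiB arithmetic uses the Fabric8 Quantity API:

import io.fabric8.kubernetes.api.model.Quantity;

public class DiskCapacityExample {

    public static void main(String[] args) {
        // 50Gi expressed in MiB, as the size ends up in the capacity configuration
        long bytes = Quantity.getAmountInBytes(Quantity.parse("50Gi")).longValue();
        long mib = bytes / (1024L * 1024L);

        // Assumed path shape: <mount path>/data-<volume id>/kafka-log<broker idx>
        System.out.println("/var/lib/kafka/data-0/kafka-log0 -> " + mib); // 51200
    }
}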
Use of io.strimzi.api.kafka.model.storage.PersistentClaimStorage in project strimzi-kafka-operator by strimzi.
The class StorageDiff, method isOverrideChangeAllowed.
/**
 * Validates the changes to the storage overrides and decides whether they are allowed or not. Allowed changes are
 * those to nodes which will be added, removed, or which do not exist yet.
 *
 * @param current         Current storage configuration
 * @param desired         New storage configuration
 * @param currentReplicas Current number of replicas
 * @param desiredReplicas Desired number of replicas
 * @return True if only allowed override changes were made, false otherwise
 */
private boolean isOverrideChangeAllowed(Storage current, Storage desired, int currentReplicas, int desiredReplicas) {
    List<PersistentClaimStorageOverride> currentOverrides = ((PersistentClaimStorage) current).getOverrides();
    if (currentOverrides == null) {
        currentOverrides = Collections.emptyList();
    }
    List<PersistentClaimStorageOverride> desiredOverrides = ((PersistentClaimStorage) desired).getOverrides();
    if (desiredOverrides == null) {
        desiredOverrides = Collections.emptyList();
    }

    // We care only about the nodes which existed before this reconciliation and will still exist after it
    int existedAndWillExist = Math.min(currentReplicas, desiredReplicas);

    for (int i = 0; i < existedAndWillExist; i++) {
        int nodeId = i;
        PersistentClaimStorageOverride currentOverride = currentOverrides.stream()
                .filter(override -> override.getBroker() == nodeId)
                .findFirst()
                .orElse(null);
        PersistentClaimStorageOverride desiredOverride = desiredOverrides.stream()
                .filter(override -> override.getBroker() == nodeId)
                .findFirst()
                .orElse(null);

        if (currentOverride != null && desiredOverride != null) {
            // Both overrides exist but are not equal
            if (!currentOverride.equals(desiredOverride)) {
                return false;
            }
        } else if (currentOverride != null || desiredOverride != null) {
            // One of them is null while the other is not => they differ
            return false;
        }
    }

    return true;
}
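A minimal sketch of the per-node comparison this method performs, built with the generated PersistentClaimStorageOverrideBuilder; the broker ID and storage class names are illustrative:

import io.strimzi.api.kafka.model.storage.PersistentClaimStorageOverride;
import io.strimzi.api.kafka.model.storage.PersistentClaimStorageOverrideBuilder;

public class OverrideDiffExample {

    public static void main(String[] args) {
        PersistentClaimStorageOverride current = new PersistentClaimStorageOverrideBuilder()
                .withBroker(0)
                .withStorageClass("gp2")
                .build();

        // Same broker, different storage class => isOverrideChangeAllowed would return false
        PersistentClaimStorageOverride desired = new PersistentClaimStorageOverrideBuilder()
                .withBroker(0)
                .withStorageClass("gp3")
                .build();

        System.out.println("override changed for existing node: " + !current.equals(desired));
    }
}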
Use of io.strimzi.api.kafka.model.storage.PersistentClaimStorage in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class KafkaCluster, method getAdjustedMaxDataRetentionSize.
/**
 * Get the effective volume size, considering extra padding and the existing size.
 */
private Quantity getAdjustedMaxDataRetentionSize(ManagedKafka managedKafka, Kafka current) {
    long bytes = getPerBrokerBytes(managedKafka, current,
            managedKafka.getSpec().getCapacity().getMaxDataRetentionSize(),
            () -> this.config.getKafka().getVolumeSize());

    // pad to give a margin before soft/hard limits kick in
    bytes += getStoragePadding(managedKafka, current);

    // Strimzi won't allow the size to be reduced, so read the existing size if possible
    if (current != null) {
        Storage storage = current.getSpec().getKafka().getStorage();
        if (storage instanceof JbodStorage) {
            JbodStorage jbodStorage = (JbodStorage) storage;
            for (SingleVolumeStorage singleVolumeStorage : jbodStorage.getVolumes()) {
                if (singleVolumeStorage instanceof PersistentClaimStorage
                        && Integer.valueOf(JBOD_VOLUME_ID).equals(singleVolumeStorage.getId())) {
                    String existingSize = ((PersistentClaimStorage) singleVolumeStorage).getSize();
                    long existingBytes = Quantity.getAmountInBytes(Quantity.parse(existingSize)).longValue();
                    // TODO: if not changed, a warning may be appropriate, but it would be best as a status condition
                    bytes = Math.max(existingBytes, bytes);
                    break;
                }
            }
        }
    }

    return new Quantity(String.valueOf(bytes));
}
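The size-clamping step at the heart of this method can be shown in isolation with the Fabric8 Quantity API; both sizes below are illustrative:

import io.fabric8.kubernetes.api.model.Quantity;

public class VolumeSizeClampExample {

    public static void main(String[] args) {
        // Desired size computed from capacity plus padding (illustrative value)
        long desiredBytes = Quantity.getAmountInBytes(Quantity.parse("100Gi")).longValue();

        // Existing PVC size read from the current Kafka resource (illustrative value)
        long existingBytes = Quantity.getAmountInBytes(Quantity.parse("120Gi")).longValue();

        // Strimzi will not shrink a PersistentClaimStorage volume, so never go below the existing size
        long effectiveBytes = Math.max(existingBytes, desiredBytes);

        System.out.println(new Quantity(String.valueOf(effectiveBytes)).getAmount());
    }
}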