Use of io.strimzi.api.kafka.model.storage.JbodStorage in project strimzi-kafka-operator by strimzi.
The class CruiseControlTest, method testBrokerCapacities.
@ParallelTest
public void testBrokerCapacities() {
    // Test user defined capacities
    BrokerCapacity userDefinedBrokerCapacity = new BrokerCapacity();
    userDefinedBrokerCapacity.setInboundNetwork("50000KB/s");
    userDefinedBrokerCapacity.setOutboundNetwork("50000KB/s");

    CruiseControlSpec cruiseControlSpec = new CruiseControlSpecBuilder()
            .withImage(ccImage)
            .withBrokerCapacity(userDefinedBrokerCapacity)
            .build();

    Kafka resource = createKafka(cruiseControlSpec);
    Capacity capacity = new Capacity(resource.getSpec(), kafkaStorage);

    assertThat(getCapacityConfigurationFromEnvVar(resource, ENV_VAR_CRUISE_CONTROL_CAPACITY_CONFIGURATION), is(capacity.generateCapacityConfig()));

    // Test generated disk capacity
    JbodStorage jbodStorage = new JbodStorage();
    List<SingleVolumeStorage> volumes = new ArrayList<>();

    PersistentClaimStorage p1 = new PersistentClaimStorage();
    p1.setId(0);
    p1.setSize("50Gi");
    volumes.add(p1);

    PersistentClaimStorage p2 = new PersistentClaimStorage();
    p2.setId(1);
    volumes.add(p2);

    jbodStorage.setVolumes(volumes);

    resource = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout))
            .editSpec()
                .editKafka()
                    .withVersion(version)
                    .withStorage(jbodStorage)
                .endKafka()
                .withCruiseControl(cruiseControlSpec)
            .endSpec()
            .build();

    capacity = new Capacity(resource.getSpec(), jbodStorage);

    assertThat(getCapacityConfigurationFromEnvVar(resource, ENV_VAR_CRUISE_CONTROL_CAPACITY_CONFIGURATION), is(capacity.generateCapacityConfig()));
}
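The helper getCapacityConfigurationFromEnvVar used above belongs to the test class and is not shown in this excerpt. As rough orientation, a lookup like it could be written against the fabric8 Container and EnvVar types (io.fabric8.kubernetes.api.model); the method name and shape below are illustrative assumptions, not the test's actual helper:

// Hypothetical sketch only: pull an env var value (e.g. the capacity
// configuration) out of a generated Cruise Control container. The method
// name and signature are illustrative, not the test's real helper.
private static String capacityFromEnv(Container ccContainer, String envVarName) {
    return ccContainer.getEnv().stream()                      // List<EnvVar> from fabric8
            .filter(env -> envVarName.equals(env.getName()))  // match the requested variable
            .map(EnvVar::getValue)
            .findFirst()
            .orElse(null);                                    // null if the variable is absent
}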
Use of io.strimzi.api.kafka.model.storage.JbodStorage in project strimzi-kafka-operator by strimzi.
The class Capacity, method generateJbodDiskCapacity.
/**
 * Generate JBOD disk capacity configuration for a broker using the supplied storage configuration
 *
 * @param storage Storage configuration for the Kafka cluster
 * @param idx     Index of the broker
 * @return Disk capacity configuration value as a JsonObject for broker idx
 */
private JsonObject generateJbodDiskCapacity(Storage storage, int idx) {
    JsonObject json = new JsonObject();
    String size = "";

    for (SingleVolumeStorage volume : ((JbodStorage) storage).getVolumes()) {
        String name = VolumeUtils.createVolumePrefix(volume.getId(), true);
        String path = AbstractModel.KAFKA_MOUNT_PATH + "/" + name + "/" + AbstractModel.KAFKA_LOG_DIR + idx;

        if (volume instanceof PersistentClaimStorage) {
            size = ((PersistentClaimStorage) volume).getSize();
        } else if (volume instanceof EphemeralStorage) {
            size = ((EphemeralStorage) volume).getSizeLimit();
        }

        json.put(path, String.valueOf(Capacity.getSizeInMiB(size)));
    }
    return json;
}
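To make the generated keys concrete: assuming Strimzi's usual constants (KAFKA_MOUNT_PATH = "/var/lib/kafka", a JBOD volume prefix of data-<id> from VolumeUtils.createVolumePrefix, and KAFKA_LOG_DIR = "kafka-log"), the loop above produces one entry per volume, keyed by a path such as /var/lib/kafka/data-0/kafka-log0. A minimal sketch of that arithmetic, as simplified stand-ins for the real VolumeUtils/Capacity code rather than the actual implementations:

// Sketch under the assumption that the mount path is /var/lib/kafka, the
// JBOD volume prefix is "data-<volumeId>" and the log dir is
// "kafka-log<brokerIdx>"; simplified stand-in for the code above.
static String diskCapacityKey(int volumeId, int brokerIdx) {
    return "/var/lib/kafka/data-" + volumeId + "/kafka-log" + brokerIdx;  // e.g. /var/lib/kafka/data-0/kafka-log0
}

// Minimal Gi -> MiB conversion for sizes such as "50Gi"; the real
// Capacity.getSizeInMiB handles the full range of Kubernetes quantities.
static double gibibytesToMiB(String size) {
    return Double.parseDouble(size.replace("Gi", "")) * 1024;  // "50Gi" -> 51200.0
}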
Use of io.strimzi.api.kafka.model.storage.JbodStorage in project strimzi-kafka-operator by strimzi.
The class Capacity, method generateCapacityConfig.
/**
 * Generate a capacity configuration for the cluster
 *
 * @return Cruise Control capacity configuration as a String
 */
public String generateCapacityConfig() {
    JsonArray brokerList = new JsonArray();

    if (storage instanceof JbodStorage) {
        // A JBOD deployment needs a separate capacity entry per broker,
        // because the log dir paths include the broker pod index in their names.
        for (int idx = 0; idx < replicas; idx++) {
            JsonObject diskConfig = new JsonObject().put("DISK", generateJbodDiskCapacity(storage, idx));
            JsonObject brokerEntry = generateBrokerCapacity(idx, diskConfig, "Capacity for Broker " + idx);
            brokerList.add(brokerEntry);
        }
    } else {
        // A capacity configuration for a cluster without a JBOD configuration
        // can rely on a generic broker entry for all brokers
        JsonObject diskConfig = new JsonObject().put("DISK", String.valueOf(diskMiB));
        JsonObject defaultBrokerCapacity = generateBrokerCapacity(DEFAULT_BROKER_ID, diskConfig, DEFAULT_BROKER_DOC);
        brokerList.add(defaultBrokerCapacity);
    }

    JsonObject config = new JsonObject();
    config.put("brokerCapacities", brokerList);

    return config.encodePrettily();
}
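For orientation, the encoded result for one JBOD broker might look like the structure below. This is an illustrative sketch only: the per-broker fields come from generateBrokerCapacity, which is not shown here, the brokerId/capacity/doc layout is assumed from Cruise Control's conventional capacity file format, and the DISK path and numbers reuse the example volume from the test above.

// Illustrative only: the assumed shape of one brokerCapacities entry,
// built with the same Vert.x JsonObject/JsonArray types as the code above.
// Values are examples, not real generateCapacityConfig output.
JsonObject illustrativeConfig = new JsonObject()
        .put("brokerCapacities", new JsonArray()
                .add(new JsonObject()
                        .put("brokerId", "0")
                        .put("capacity", new JsonObject()
                                .put("DISK", new JsonObject()
                                        .put("/var/lib/kafka/data-0/kafka-log0", "51200.0"))
                                .put("NW_IN", "50000")    // from the user-defined inboundNetwork
                                .put("NW_OUT", "50000"))  // from the user-defined outboundNetwork
                        .put("doc", "Capacity for Broker 0")));
System.out.println(illustrativeConfig.encodePrettily());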
Use of io.strimzi.api.kafka.model.storage.JbodStorage in project strimzi by strimzi.
The class AlternativeReconcileTriggersST, method testAddingAndRemovingJbodVolumes.
/**
 * Adding and removing JBOD volumes requires rolling updates in sequential order; otherwise the StatefulSet
 * rejects the change. This test adds and then removes a volume from the JBOD storage to cover both situations.
 */
@ParallelNamespaceTest
void testAddingAndRemovingJbodVolumes(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    final String continuousTopicName = "continuous-topic";
    // With a 1000 ms delay between messages, 500 messages take roughly 500 seconds
    final int continuousClientsMessageCount = 500;
    final String producerName = "hello-world-producer";
    final String consumerName = "hello-world-consumer";

    PersistentClaimStorage vol0 = new PersistentClaimStorageBuilder().withId(0).withSize("1Gi").withDeleteClaim(true).build();
    PersistentClaimStorage vol1 = new PersistentClaimStorageBuilder().withId(1).withSize("1Gi").withDeleteClaim(true).build();

    resourceManager.createResource(extensionContext,
            KafkaTemplates.kafkaJBOD(clusterName, 3, 3, new JbodStorageBuilder().addToVolumes(vol0).build()).build());

    final String kafkaName = KafkaResources.kafkaStatefulSetName(clusterName);
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, kafkaName);
    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespaceName, kafkaSelector);

    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());

    // ##############################
    // Attach clients which will continuously produce/consume messages to/from Kafka brokers during the rolling update
    // ##############################
    // Set up a topic with 3 replicas and min.insync.replicas=2 to check that the producer keeps working during the rolling update
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, continuousTopicName, 3, 3, 2).build());

    String producerAdditionConfiguration = "delivery.timeout.ms=20000\nrequest.timeout.ms=20000";
    // Add a transactional id to make the producer transactional
    producerAdditionConfiguration = producerAdditionConfiguration.concat("\ntransactional.id=" + continuousTopicName + ".1");
    producerAdditionConfiguration = producerAdditionConfiguration.concat("\nenable.idempotence=true");

    KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
            .withProducerName(producerName)
            .withConsumerName(consumerName)
            .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
            .withTopicName(continuousTopicName)
            .withMessageCount(continuousClientsMessageCount)
            .withAdditionalConfig(producerAdditionConfiguration)
            .withDelayMs(1000)
            .withNamespaceName(namespaceName)
            .build();

    resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.consumerStrimzi());
    // ##############################

    String userName = KafkaUserUtils.generateRandomNameOfKafkaUser();
    KafkaUser user = KafkaUserTemplates.tlsUser(clusterName, userName).build();

    resourceManager.createResource(extensionContext, user);
    resourceManager.createResource(extensionContext, false, KafkaClientsTemplates.kafkaClients(true, kafkaClientsName, user).build());

    final String kafkaClientsPodName = PodUtils.getPodsByPrefixInNameWithDynamicWait(namespaceName, kafkaClientsName).get(0).getMetadata().getName();

    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
            .withUsingPodName(kafkaClientsPodName)
            .withTopicName(topicName)
            .withNamespaceName(namespaceName)
            .withClusterName(clusterName)
            .withMessageCount(MESSAGE_COUNT)
            .withKafkaUsername(userName)
            .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
            .build();

    internalKafkaClient.produceTlsMessagesUntilOperationIsSuccessful(MESSAGE_COUNT);

    // Add a JBOD volume to Kafka => triggers a rolling update
    LOGGER.info("Add JBOD volume to the Kafka cluster {}", kafkaName);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
        JbodStorage storage = (JbodStorage) kafka.getSpec().getKafka().getStorage();
        storage.getVolumes().add(vol1);
    }, namespaceName);

    // Wait until it rolls
    kafkaPods = RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, kafkaSelector, 3, kafkaPods);

    // Remove the JBOD volume from the Kafka cluster => triggers a rolling update
    LOGGER.info("Remove JBOD volume from the Kafka cluster {}", kafkaName);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
        JbodStorage storage = (JbodStorage) kafka.getSpec().getKafka().getStorage();
        storage.getVolumes().remove(vol1);
    }, namespaceName);

    // Wait until it rolls
    RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, kafkaSelector, 3, kafkaPods);

    // ##############################
    // Validate that the continuous clients finished successfully
    // ##############################
    ClientUtils.waitTillContinuousClientsFinish(producerName, consumerName, namespaceName, continuousClientsMessageCount);
    // ##############################
}
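One detail worth noting: storage.getVolumes().remove(vol1) only removes the volume because the storage model classes appear to compare by value, so the locally built vol1 matches the instance deserialized from the cluster. When the original builder object is no longer at hand, removing by volume id is a more defensive alternative; a minimal sketch, assuming the volume to drop carries id 1:

// Sketch: remove the JBOD volume by id instead of by object equality;
// assumes the volume added earlier carries id 1.
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
    JbodStorage storage = (JbodStorage) kafka.getSpec().getKafka().getStorage();
    storage.getVolumes().removeIf(v -> v.getId() != null && v.getId() == 1);
}, namespaceName);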