use of io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder in project strimzi by strimzi.
the class CruiseControlST method testCruiseControlIntraBrokerBalancing.
@ParallelNamespaceTest
@KRaftNotSupported("JBOD is not supported by KRaft mode and is used in this test case.")
void testCruiseControlIntraBrokerBalancing(ExtensionContext extensionContext) {
final TestStorage testStorage = new TestStorage(extensionContext);
String diskSize = "6Gi";
JbodStorage jbodStorage = new JbodStorageBuilder()
    .withVolumes(
        new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(0).withSize(diskSize).build(),
        new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize(diskSize).build())
    .build();
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaWithCruiseControl(testStorage.getClusterName(), 3, 3)
    .editMetadata()
        .withNamespace(testStorage.getNamespaceName())
    .endMetadata()
    .editOrNewSpec()
        .editKafka()
            .withStorage(jbodStorage)
        .endKafka()
    .endSpec()
    .build());
resourceManager.createResource(extensionContext, KafkaRebalanceTemplates.kafkaRebalance(testStorage.getClusterName())
    .editMetadata()
        .withNamespace(testStorage.getNamespaceName())
    .endMetadata()
    .editOrNewSpec()
        .withRebalanceDisk(true)
    .endSpec()
    .build());
KafkaRebalanceUtils.waitForKafkaRebalanceCustomResourceState(testStorage.getNamespaceName(), testStorage.getClusterName(), KafkaRebalanceState.ProposalReady);
LOGGER.info("Checking status of KafkaRebalance");
// The provision status should be "UNDECIDED" for an intra-broker disk balance, because disk balancing is irrelevant to provisioning
KafkaRebalanceStatus kafkaRebalanceStatus = KafkaRebalanceResource.kafkaRebalanceClient()
    .inNamespace(testStorage.getNamespaceName())
    .withName(testStorage.getClusterName())
    .get().getStatus();
assertThat(kafkaRebalanceStatus.getOptimizationResult().get("provisionStatus").toString(), containsString("UNDECIDED"));
}
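For orientation, a minimal standalone sketch of the JBOD declaration this test hands to the Kafka template. The JbodStorage and JbodStorageBuilder imports are assumed to live in the same io.strimzi.api.kafka.model.storage package as PersistentClaimStorageBuilder, and the variable name is illustrative:
import io.strimzi.api.kafka.model.storage.JbodStorage;
import io.strimzi.api.kafka.model.storage.JbodStorageBuilder;
import io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder;

// Two persistent volumes per broker; the ids must be unique within the JBOD
// array, and deleteClaim=true asks the operator to delete the PVCs together
// with the cluster.
JbodStorage twoDiskJbod = new JbodStorageBuilder()
    .withVolumes(
        new PersistentClaimStorageBuilder().withId(0).withSize("6Gi").withDeleteClaim(true).build(),
        new PersistentClaimStorageBuilder().withId(1).withSize("6Gi").withDeleteClaim(true).build())
    .build();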
use of io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder in project strimzi by strimzi.
the class KafkaAssemblyOperatorTest method data.
public static Iterable<Params> data() {
boolean[] metricsOpenShiftAndEntityOperatorOptions = { true, false };
SingleVolumeStorage[] storageConfig = {
    new EphemeralStorage(),
    new PersistentClaimStorageBuilder().withSize("123").withStorageClass("foo").withDeleteClaim(true).build()
};
List<Map<String, Object>> configs = asList(null, emptyMap(), singletonMap("foo", "bar"));
List<Params> result = new ArrayList<>();
for (boolean metricsOpenShiftAndEntityOperator : metricsOpenShiftAndEntityOperatorOptions) {
for (Map<String, Object> config : configs) {
for (SingleVolumeStorage storage : storageConfig) {
EntityOperatorSpec eoConfig;
if (metricsOpenShiftAndEntityOperator) {
eoConfig = new EntityOperatorSpecBuilder()
    .withUserOperator(new EntityUserOperatorSpecBuilder().build())
    .withTopicOperator(new EntityTopicOperatorSpecBuilder().build())
    .build();
} else {
eoConfig = null;
}
List<GenericKafkaListener> listeners = new ArrayList<>(3);
listeners.add(new GenericKafkaListenerBuilder()
    .withName("plain")
    .withPort(9092)
    .withType(KafkaListenerType.INTERNAL)
    .withTls(false)
    .withNewKafkaListenerAuthenticationScramSha512Auth()
    .endKafkaListenerAuthenticationScramSha512Auth()
    .build());
listeners.add(new GenericKafkaListenerBuilder()
    .withName("tls")
    .withPort(9093)
    .withType(KafkaListenerType.INTERNAL)
    .withTls(true)
    .withNewKafkaListenerAuthenticationTlsAuth()
    .endKafkaListenerAuthenticationTlsAuth()
    .build());
if (metricsOpenShiftAndEntityOperator) {
// On OpenShift, use Routes
listeners.add(new GenericKafkaListenerBuilder()
    .withName("external")
    .withPort(9094)
    .withType(KafkaListenerType.ROUTE)
    .withTls(true)
    .withNewKafkaListenerAuthenticationTlsAuth()
    .endKafkaListenerAuthenticationTlsAuth()
    .build());
} else {
// On Kubernetes, use node ports
listeners.add(new GenericKafkaListenerBuilder()
    .withName("external")
    .withPort(9094)
    .withType(KafkaListenerType.NODEPORT)
    .withTls(true)
    .withNewKafkaListenerAuthenticationTlsAuth()
    .endKafkaListenerAuthenticationTlsAuth()
    .build());
}
result.add(new Params(metricsOpenShiftAndEntityOperator, metricsOpenShiftAndEntityOperator,
    listeners, config, config, storage, storage, eoConfig));
}
}
}
return result;
}
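The three nested loops expand to 2 booleans × 3 configs × 2 storage variants = 12 Params. For reference, a hedged sketch isolating the two storage variants the matrix toggles between (variable names illustrative; SingleVolumeStorage is the common supertype used above):
// Ephemeral storage lives and dies with the pod; the persistent variant
// provisions a PVC of size "123" from storage class "foo" whose claim is
// removed with the cluster because deleteClaim is true.
SingleVolumeStorage ephemeral = new EphemeralStorage();
SingleVolumeStorage persistent = new PersistentClaimStorageBuilder()
    .withSize("123")
    .withStorageClass("foo")
    .withDeleteClaim(true)
    .build();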
use of io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder in project strimzi by strimzi.
the class JbodStorageMockTest method testReconcileWithNewVolumeAddedToJbodStorage.
@Test
public void testReconcileWithNewVolumeAddedToJbodStorage(VertxTestContext context) {
Checkpoint async = context.checkpoint();
// Add a new volume to the JBOD storage
volumes.add(new PersistentClaimStorageBuilder().withId(2).withDeleteClaim(false).withSize("100Gi").build());
Kafka kafkaWithNewJbodVolume = new KafkaBuilder(kafka)
    .editSpec()
        .editKafka()
            .withStorage(new JbodStorageBuilder().withVolumes(volumes).build())
        .endKafka()
    .endSpec()
    .build();
Set<String> expectedPvcs = expectedPvcs(kafka);
Set<String> expectedPvcsWithNewJbodStorageVolume = expectedPvcs(kafkaWithNewJbodVolume);
// Reconcile for the initial Kafka cluster creation
operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, NAME))
    .onComplete(context.succeeding(v -> context.verify(() -> {
List<PersistentVolumeClaim> pvcs = getPvcs(NAMESPACE, NAME);
Set<String> pvcsNames = pvcs.stream().map(pvc -> pvc.getMetadata().getName()).collect(Collectors.toSet());
assertThat(pvcsNames, is(expectedPvcs));
}))).compose(v -> {
Crds.kafkaOperation(client).inNamespace(NAMESPACE).withName(NAME).patch(kafkaWithNewJbodVolume);
// Reconcile the Kafka cluster with the new JBOD storage
return operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, NAME));
}).onComplete(context.succeeding(v -> context.verify(() -> {
List<PersistentVolumeClaim> pvcs = getPvcs(NAMESPACE, NAME);
Set<String> pvcsNames = pvcs.stream().map(pvc -> pvc.getMetadata().getName()).collect(Collectors.toSet());
assertThat(pvcsNames, is(expectedPvcsWithNewJbodStorageVolume));
async.flag();
})));
}
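A hedged sketch of how the extra PVC names for the new volume could be derived, assuming Strimzi's usual JBOD naming scheme data-<volumeId>-<clusterName>-kafka-<podIndex>; the scheme and the replica count are assumptions, not shown in the test above:
import java.util.HashSet;
import java.util.Set;

// For volume id 2 on a 3-broker cluster, this yields data-2-<NAME>-kafka-0,
// data-2-<NAME>-kafka-1 and data-2-<NAME>-kafka-2.
Set<String> newVolumePvcs = new HashSet<>();
for (int podIndex = 0; podIndex < 3; podIndex++) {
    newVolumePvcs.add("data-2-" + NAME + "-kafka-" + podIndex);
}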
use of io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder in project strimzi by strimzi.
the class KafkaAssemblyOperatorMockTest method data.
public static Iterable<KafkaAssemblyOperatorMockTest.Params> data() {
int[] replicas = { 1, 3 };
int[] storageOptions = { 0, 1, 2 };
Storage[] kafkaStorageConfigs = {
    new EphemeralStorage(),
    new PersistentClaimStorageBuilder().withSize("123").withStorageClass("foo").withDeleteClaim(true).build(),
    new PersistentClaimStorageBuilder().withSize("123").withStorageClass("foo").withDeleteClaim(false).build()
};
SingleVolumeStorage[] zkStorageConfigs = {
    new EphemeralStorage(),
    new PersistentClaimStorageBuilder().withSize("123").withStorageClass("foo").withDeleteClaim(true).build(),
    new PersistentClaimStorageBuilder().withSize("123").withStorageClass("foo").withDeleteClaim(false).build()
};
ResourceRequirements[] resources = {
    new ResourceRequirementsBuilder()
        .addToLimits("cpu", new Quantity("5000m"))
        .addToLimits("memory", new Quantity("5000m"))
        .addToRequests("cpu", new Quantity("5000m"))
        .addToRequests("memory", new Quantity("5000m"))
        .build()
};
List<KafkaAssemblyOperatorMockTest.Params> result = new ArrayList<>();
for (int replicaCount : replicas) {
for (int storage : storageOptions) {
for (ResourceRequirements resource : resources) {
result.add(new KafkaAssemblyOperatorMockTest.Params(replicaCount, zkStorageConfigs[storage], replicaCount, kafkaStorageConfigs[storage], resource));
}
}
}
return result;
}
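A hedged sketch of how such a data() iterable is typically consumed in a JUnit 5 parameterized test; the @ParameterizedTest/@MethodSource wiring and the test-method name are assumptions, not shown above:
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;

// Each of the 2 replica counts x 3 storage options x 1 resource profile = 6
// Params values becomes one test invocation.
@ParameterizedTest
@MethodSource("data")
public void testReconcile(KafkaAssemblyOperatorMockTest.Params params) {
    // ...exercise the assembly operator with this combination
}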
use of io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder in project strimzi by strimzi.
the class ZookeeperPodSetTest method testPodSet.
@ParallelTest
public void testPodSet() {
ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS);
StrimziPodSet ps = zc.generatePodSet(3, true, null, null, Map.of());
assertThat(ps.getMetadata().getName(), is(KafkaResources.zookeeperStatefulSetName(CLUSTER)));
assertThat(ps.getMetadata().getLabels().entrySet()
    .containsAll(zc.getLabelsWithStrimziName(zc.getName(), null).toMap().entrySet()), is(true));
assertThat(ps.getMetadata().getAnnotations().get(AbstractModel.ANNO_STRIMZI_IO_STORAGE),
    is(ModelUtils.encodeStorageToJson(new PersistentClaimStorageBuilder().withSize("100Gi").withDeleteClaim(false).build())));
assertThat(ps.getMetadata().getOwnerReferences().size(), is(1));
assertThat(ps.getMetadata().getOwnerReferences().get(0), is(zc.createOwnerReference()));
assertThat(ps.getSpec().getSelector().getMatchLabels(), is(zc.getSelectorLabels().toMap()));
assertThat(ps.getSpec().getPods().size(), is(3));
// We need to loop through the pods to make sure they have the right values
List<Pod> pods = PodSetUtils.mapsToPods(ps.getSpec().getPods());
for (Pod pod : pods) {
assertThat(pod.getMetadata().getLabels().entrySet()
    .containsAll(zc.getLabelsWithStrimziNameAndPodName(zc.getName(), pod.getMetadata().getName(), null)
        .withStatefulSetPod(pod.getMetadata().getName())
        .withStrimziPodSetController(zc.getName())
        .toMap().entrySet()), is(true));
assertThat(pod.getMetadata().getAnnotations().size(), is(1));
assertThat(pod.getMetadata().getAnnotations().get(PodRevision.STRIMZI_REVISION_ANNOTATION), is(notNullValue()));
assertThat(pod.getSpec().getHostname(), is(pod.getMetadata().getName()));
assertThat(pod.getSpec().getSubdomain(), is(zc.getHeadlessServiceName()));
assertThat(pod.getSpec().getRestartPolicy(), is("Always"));
assertThat(pod.getSpec().getTerminationGracePeriodSeconds(), is(30L));
assertThat(pod.getSpec().getVolumes().stream()
    .filter(volume -> volume.getName().equalsIgnoreCase("strimzi-tmp"))
    .findFirst().orElse(null)
    .getEmptyDir().getSizeLimit(), is(new Quantity(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_SIZE)));
assertThat(pod.getSpec().getContainers().size(), is(1));
assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getTimeoutSeconds(), is(5));
assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getInitialDelaySeconds(), is(15));
assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getTimeoutSeconds(), is(5));
assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getInitialDelaySeconds(), is(15));
assertThat(AbstractModel.containerEnvVars(pod.getSpec().getContainers().get(0)).get(ZookeeperCluster.ENV_VAR_STRIMZI_KAFKA_GC_LOG_ENABLED),
    is(Boolean.toString(AbstractModel.DEFAULT_JVM_GC_LOGGING_ENABLED)));
assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(0).getName(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_VOLUME_NAME));
assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(0).getMountPath(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_MOUNT_PATH));
assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(1).getName(), is(AbstractModel.VOLUME_NAME));
assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(1).getMountPath(), is("/var/lib/zookeeper"));
assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(2).getName(), is("zookeeper-metrics-and-logging"));
assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(2).getMountPath(), is("/opt/kafka/custom-config/"));
assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(3).getName(), is(ZookeeperCluster.ZOOKEEPER_NODE_CERTIFICATES_VOLUME_NAME));
assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(3).getMountPath(), is(ZookeeperCluster.ZOOKEEPER_NODE_CERTIFICATES_VOLUME_MOUNT));
assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(4).getName(), is(ZookeeperCluster.ZOOKEEPER_CLUSTER_CA_VOLUME_NAME));
assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(4).getMountPath(), is(ZookeeperCluster.ZOOKEEPER_CLUSTER_CA_VOLUME_MOUNT));
// Config
OrderedProperties expectedConfig = new OrderedProperties().addMapPairs(ZookeeperConfiguration.DEFAULTS);
OrderedProperties actual = new OrderedProperties()
    .addStringPairs(AbstractModel.containerEnvVars(pod.getSpec().getContainers().get(0)).get(ZookeeperCluster.ENV_VAR_ZOOKEEPER_CONFIGURATION));
assertThat(actual, is(expectedConfig));
}
}
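For reference, a hedged sketch of the round-trip behind the storage-annotation assertion above; the variable names and the PersistentClaimStorage result type are assumptions, and only calls already shown in the snippet are used:
// The declared storage is serialized to JSON and pinned to the StrimziPodSet
// as the strimzi.io storage annotation, so a later reconciliation can compare
// it against the current spec and detect storage changes.
PersistentClaimStorage declared = new PersistentClaimStorageBuilder()
    .withSize("100Gi")
    .withDeleteClaim(false)
    .build();
String storageAnnotation = ModelUtils.encodeStorageToJson(declared);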