Use of io.strimzi.operator.common.MetricsAndLogging in project strimzi by strimzi.
From the class KafkaMirrorMaker2ClusterTest, method testMetricsConfigMap.
@ParallelTest
public void testMetricsConfigMap() {
    ConfigMap metricsCm = kmm2.generateMetricsAndLogConfigMap(new MetricsAndLogging(metricsCM, null));
    checkMetricsConfigMap(metricsCm);
}
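The test assumes two fixtures from the test class: kmm2 (the KafkaMirrorMaker2Cluster model under test) and metricsCM (a ConfigMap holding JMX Prometheus exporter rules). A minimal sketch of how such a metrics ConfigMap could be built and wrapped in MetricsAndLogging is shown below; the class name, ConfigMap name, namespace and data key are illustrative assumptions, not taken from the test class.

import io.fabric8.kubernetes.api.model.ConfigMap;
import io.fabric8.kubernetes.api.model.ConfigMapBuilder;
import io.strimzi.operator.common.MetricsAndLogging;

public class MetricsAndLoggingFixtureSketch {
    // Builds a hypothetical metrics ConfigMap and wraps it the same way the test above does
    public static MetricsAndLogging metricsOnly() {
        ConfigMap metricsCm = new ConfigMapBuilder()
                .withNewMetadata()
                    .withName("my-mm2-metrics")        // assumed name
                    .withNamespace("my-namespace")     // assumed namespace
                .endMetadata()
                .addToData("metrics-config.yml", "{}") // assumed key; empty JMX exporter rules
                .build();
        // The logging ConfigMap is null, matching new MetricsAndLogging(metricsCM, null) above
        return new MetricsAndLogging(metricsCm, null);
    }
}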
Use of io.strimzi.operator.common.MetricsAndLogging in project strimzi by strimzi.
From the class KafkaMirrorMakerClusterTest, method testMetricsConfigMap.
@ParallelTest
public void testMetricsConfigMap() {
    ConfigMap metricsCm = mm.generateMetricsAndLogConfigMap(new MetricsAndLogging(metricsCM, null));
    checkMetricsConfigMap(metricsCm);
}
Use of io.strimzi.operator.common.MetricsAndLogging in project strimzi by strimzi.
From the class KafkaClusterTest, method testMetricsConfigMap.
@ParallelTest
public void testMetricsConfigMap() {
    ConfigMap metricsCm = kc.generateMetricsAndLogConfigMap(new MetricsAndLogging(metricsCM, null));
    checkMetricsConfigMap(metricsCm);
    checkOwnerReference(kc.createOwnerReference(), metricsCm);
}
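In addition to the metrics check, this variant verifies the owner reference set on the generated ConfigMap. A hedged sketch of what such a checkOwnerReference helper might look like, using the Fabric8 OwnerReference model and Hamcrest; the helper body is illustrative, not the project's actual implementation.

import io.fabric8.kubernetes.api.model.ConfigMap;
import io.fabric8.kubernetes.api.model.OwnerReference;

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.is;

public class OwnerReferenceCheckSketch {
    // Illustrative check: the ConfigMap should carry exactly the cluster's owner reference
    static void checkOwnerReference(OwnerReference expected, ConfigMap resource) {
        assertThat(resource.getMetadata().getOwnerReferences().size(), is(1));
        assertThat(resource.getMetadata().getOwnerReferences(), contains(expected));
    }
}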
Use of io.strimzi.operator.common.MetricsAndLogging in project strimzi-kafka-operator by strimzi.
From the class KafkaReconciler, method perBrokerKafkaConfiguration.
/**
 * Generates and creates the ConfigMaps with per-broker configuration for the Kafka brokers used in PodSets. It will
 * also delete the ConfigMaps of any scaled-down brokers (the scale-down is done before this method is called in the
 * reconciliation).
 *
 * @param metricsAndLogging Metrics and Logging configuration
 *
 * @return Future which completes when the Kafka configuration is prepared
 */
protected Future<Void> perBrokerKafkaConfiguration(MetricsAndLogging metricsAndLogging) {
    return configMapOperator.listAsync(reconciliation.namespace(), kafka.getSelectorLabels()).compose(existingConfigMaps -> {
        // This is used during Kafka rolling updates -> we have to store it for later
        this.logging = kafka.loggingConfiguration(kafka.getLogging(), metricsAndLogging.getLoggingCm());
        this.loggingHash = Util.hashStub(Util.getLoggingDynamicallyUnmodifiableEntries(logging));

        List<ConfigMap> desiredConfigMaps = kafka.generatePerBrokerConfigurationConfigMaps(metricsAndLogging, listenerReconciliationResults.advertisedHostnames, listenerReconciliationResults.advertisedPorts, featureGates.controlPlaneListenerEnabled());

        // Has to use the raw type because of the CompositeFuture
        @SuppressWarnings({ "rawtypes" })
        List<Future> ops = new ArrayList<>(existingConfigMaps.size() + kafka.getReplicas());

        // Delete all existing ConfigMaps which are not desired and are not the shared config map
        List<String> desiredNames = new ArrayList<>(desiredConfigMaps.size() + 1);
        // We do not want to delete the shared ConfigMap, so we add it here
        desiredNames.add(kafka.getAncillaryConfigMapName());
        desiredNames.addAll(desiredConfigMaps.stream().map(cm -> cm.getMetadata().getName()).collect(Collectors.toList()));

        for (ConfigMap cm : existingConfigMaps) {
            // We delete the ConfigMaps which are not on the desired names list
            if (!desiredNames.contains(cm.getMetadata().getName())) {
                ops.add(configMapOperator.deleteAsync(reconciliation, reconciliation.namespace(), cm.getMetadata().getName(), true));
            }
        }

        // Create / update the desired ConfigMaps
        for (ConfigMap cm : desiredConfigMaps) {
            String cmName = cm.getMetadata().getName();
            int brokerId = getPodIndexFromPodName(cmName);

            // The advertised hostname and port might change. If they change, we need to roll the pods.
            // Here we collect their hash to trigger the rolling update. For per-broker configuration,
            // we need just the advertised hostnames / ports for the given broker.
            String brokerConfiguration = listenerReconciliationResults.advertisedHostnames.get(brokerId).entrySet().stream().map(kv -> kv.getKey() + "://" + kv.getValue()).sorted().collect(Collectors.joining(" "));
            brokerConfiguration += listenerReconciliationResults.advertisedPorts.get(brokerId).entrySet().stream().map(kv -> kv.getKey() + "://" + kv.getValue()).sorted().collect(Collectors.joining(" "));
            brokerConfiguration += cm.getData().getOrDefault(KafkaCluster.BROKER_LISTENERS_FILENAME, "");

            // Changes to the regular Kafka configuration are handled through the KafkaRoller, which decides whether to roll the pod or not.
            // In addition to that, we have to handle changes to configuration unknown to Kafka -> different plugins (Authorization, Quotas etc.).
            // This is captured here with the unknown configurations, and the hash is used to roll the pod when it changes.
            KafkaConfiguration kc = KafkaConfiguration.unvalidated(reconciliation, cm.getData().getOrDefault(KafkaCluster.BROKER_CONFIGURATION_FILENAME, ""));

            // We store the hash of the broker configurations for later use in the Pod and in rolling updates
            this.brokerConfigurationHash.put(brokerId, Util.hashStub(brokerConfiguration + kc.unknownConfigsWithValues(kafka.getKafkaVersion()).toString()));

            ops.add(configMapOperator.reconcile(reconciliation, reconciliation.namespace(), cmName, cm));
        }

        return CompositeFuture.join(ops).map((Void) null);
    });
}
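The hash stored in brokerConfigurationHash is what later triggers a rolling update when the advertised hostnames, advertised ports, or plugin configuration unknown to Kafka change for a broker. The following self-contained sketch illustrates the idea with a plain SHA-256 stub; it is not Strimzi's Util.hashStub, and the class and helper names are assumptions.

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Map;
import java.util.TreeMap;
import java.util.stream.Collectors;

public class BrokerConfigHashSketch {
    // Concatenates the per-broker advertised listeners deterministically and hashes them,
    // so any change in hostname or port produces a different stub and rolls the pod
    static String configurationHash(Map<String, String> advertisedHostnames, Map<String, Integer> advertisedPorts) {
        String hostnames = new TreeMap<>(advertisedHostnames).entrySet().stream()
                .map(kv -> kv.getKey() + "://" + kv.getValue())
                .collect(Collectors.joining(" "));
        String ports = new TreeMap<>(advertisedPorts).entrySet().stream()
                .map(kv -> kv.getKey() + "://" + kv.getValue())
                .collect(Collectors.joining(" "));
        return hashStub(hostnames + ports);
    }

    // Short hash stub: first 8 hex characters of a SHA-256 digest (illustrative, not Util.hashStub)
    static String hashStub(String input) {
        try {
            byte[] digest = MessageDigest.getInstance("SHA-256").digest(input.getBytes(StandardCharsets.UTF_8));
            StringBuilder hex = new StringBuilder();
            for (int i = 0; i < 4; i++) {
                hex.append(String.format("%02x", digest[i] & 0xff));
            }
            return hex.toString();
        } catch (NoSuchAlgorithmException e) {
            throw new RuntimeException(e);
        }
    }
}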
Use of io.strimzi.operator.common.MetricsAndLogging in project strimzi-kafka-operator by strimzi.
From the class KafkaAssemblyOperatorPodSetTest, method testScaleDown.
/**
 * Tests reconciliation with a scale-down of ZooKeeper and Kafka from 5 to 3 pods
 *
 * @param context Test context
 */
@Test
public void testScaleDown(VertxTestContext context) {
    Kafka oldKafka = new KafkaBuilder(KAFKA).editSpec().editZookeeper().withReplicas(5).endZookeeper().editKafka().withReplicas(5).endKafka().endSpec().build();
    ZookeeperCluster oldZkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, VERSIONS);
    StrimziPodSet oldZkPodSet = oldZkCluster.generatePodSet(oldKafka.getSpec().getZookeeper().getReplicas(), false, null, null, null);
    KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, VERSIONS);
    StrimziPodSet oldKafkaPodSet = oldKafkaCluster.generatePodSet(oldKafka.getSpec().getKafka().getReplicas(), false, null, null, brokerId -> null);

    ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS);
    KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS);

    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);

    SecretOperator secretOps = supplier.secretOperations;
    when(secretOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture());
    when(secretOps.getAsync(any(), any())).thenReturn(Future.succeededFuture(new Secret()));

    ConfigMapOperator mockCmOps = supplier.configMapOperations;
    when(mockCmOps.listAsync(any(), eq(oldKafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(oldKafkaCluster.generatePerBrokerConfigurationConfigMaps(new MetricsAndLogging(null, null), ADVERTISED_HOSTNAMES, ADVERTISED_PORTS, true)));
    ArgumentCaptor<String> cmReconciliationCaptor = ArgumentCaptor.forClass(String.class);
    when(mockCmOps.reconcile(any(), any(), cmReconciliationCaptor.capture(), any())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<String> cmDeletionCaptor = ArgumentCaptor.forClass(String.class);
    when(mockCmOps.deleteAsync(any(), any(), cmDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture());

    StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator;
    // Zoo
    when(mockPodSetOps.getAsync(any(), eq(zkCluster.getName()))).thenReturn(Future.succeededFuture(oldZkPodSet));
    ArgumentCaptor<StrimziPodSet> zkPodSetCaptor = ArgumentCaptor.forClass(StrimziPodSet.class);
    when(mockPodSetOps.reconcile(any(), any(), eq(zkCluster.getName()), zkPodSetCaptor.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3))));
    // Kafka
    when(mockPodSetOps.getAsync(any(), eq(kafkaCluster.getName()))).thenReturn(Future.succeededFuture(oldKafkaPodSet));
    ArgumentCaptor<StrimziPodSet> kafkaPodSetCaptor = ArgumentCaptor.forClass(StrimziPodSet.class);
    when(mockPodSetOps.reconcile(any(), any(), eq(kafkaCluster.getName()), kafkaPodSetCaptor.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3))));

    StatefulSetOperator mockStsOps = supplier.stsOperations;
    // The Zoo STS is queried and deleted if it still exists
    when(mockStsOps.getAsync(any(), eq(zkCluster.getName()))).thenReturn(Future.succeededFuture(null));
    // The Kafka STS is queried and deleted if it still exists
    when(mockStsOps.getAsync(any(), eq(kafkaCluster.getName()))).thenReturn(Future.succeededFuture(null));

    PodOperator mockPodOps = supplier.podOperations;
    when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList()));
    when(mockPodOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList()));
    when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList()));
    when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    when(mockPodOps.waitFor(any(), any(), any(), any(), anyLong(), anyLong(), any())).thenReturn(Future.succeededFuture());

    CrdOperator<KubernetesClient, Kafka, KafkaList> mockKafkaOps = supplier.kafkaOperator;
    when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(KAFKA));
    when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(KAFKA);
    when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture());

    ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS, ClusterOperatorConfig.DEFAULT_OPERATION_TIMEOUT_MS, "+UseStrimziPodSets");
    MockZooKeeperReconciler zr = new MockZooKeeperReconciler(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), vertx, config, supplier, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), KAFKA, VERSION_CHANGE, null, 5, CLUSTER_CA);
    MockKafkaReconciler kr = new MockKafkaReconciler(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), vertx, config, supplier, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), KAFKA, VERSION_CHANGE, null, 5, CLUSTER_CA, CLIENTS_CA);
    MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), CERT_MANAGER, PASSWORD_GENERATOR, supplier, config, zr, kr);

    Checkpoint async = context.checkpoint();
    kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME))
            .onComplete(context.succeeding(v -> context.verify(() -> {
                // Scale-down of Zoo is done pod by pod => the reconcile method is called 3 times with 5, 4 and 3 pods.
                assertThat(zkPodSetCaptor.getAllValues().size(), is(3));
                // => first capture is from zkPodSet() with the old replica count
                assertThat(zkPodSetCaptor.getAllValues().get(0).getSpec().getPods().size(), is(5));
                // => second capture is from zkScalingDown() with the new replica count
                assertThat(zkPodSetCaptor.getAllValues().get(1).getSpec().getPods().size(), is(4));
                // => third capture is from zkScalingDown() with the new replica count
                assertThat(zkPodSetCaptor.getAllValues().get(2).getSpec().getPods().size(), is(3));

                // Still one maybe-roll invocation
                assertThat(zr.maybeRollZooKeeperInvocations, is(1));

                // Scale-down of Kafka is done in one go => we should see two invocations (first from regular patching and second from scale-down)
                assertThat(kafkaPodSetCaptor.getAllValues().size(), is(2));
                // => first capture is from kafkaScaleDown() with old replica count
                assertThat(kafkaPodSetCaptor.getAllValues().get(0).getSpec().getPods().size(), is(3));
                // => second capture is from kafkaPodSet() with new replica count
                assertThat(kafkaPodSetCaptor.getAllValues().get(1).getSpec().getPods().size(), is(3));

                // Still one maybe-roll invocation
                assertThat(kr.maybeRollKafkaInvocations, is(1));

                // CMs for all remaining pods are reconciled
                assertThat(cmReconciliationCaptor.getAllValues().size(), is(3));
                assertThat(cmReconciliationCaptor.getAllValues(), is(List.of("my-cluster-kafka-0", "my-cluster-kafka-1", "my-cluster-kafka-2")));

                // The shared CM + the CMs for the scaled-down pods are deleted
                assertThat(cmDeletionCaptor.getAllValues().size(), is(3));
                assertThat(cmDeletionCaptor.getAllValues(), is(List.of("my-cluster-kafka-3", "my-cluster-kafka-4", "my-cluster-kafka-config")));

                async.flag();
            })));
}
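The ConfigMap assertions follow from the per-broker naming scheme <cluster>-kafka-<brokerId>: scaling from 5 to 3 brokers leaves my-cluster-kafka-0 to my-cluster-kafka-2 to reconcile and removes my-cluster-kafka-3 and my-cluster-kafka-4, while the shared my-cluster-kafka-config ConfigMap is deleted separately, as the test's own comment notes. A small illustrative sketch of deriving the two name sets (the class and helper are hypothetical, not part of the project):

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class ScaleDownConfigMapNamesSketch {
    // Per-broker ConfigMaps are named <cluster>-kafka-<brokerId>
    static List<String> perBrokerConfigMapNames(String clusterName, int replicas) {
        return IntStream.range(0, replicas)
                .mapToObj(i -> clusterName + "-kafka-" + i)
                .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        List<String> old = perBrokerConfigMapNames("my-cluster", 5);
        List<String> desired = perBrokerConfigMapNames("my-cluster", 3);
        // Prints: [my-cluster-kafka-0, my-cluster-kafka-1, my-cluster-kafka-2]
        System.out.println("Reconciled: " + desired);
        // Prints: [my-cluster-kafka-3, my-cluster-kafka-4]
        List<String> deleted = old.stream().filter(name -> !desired.contains(name)).collect(Collectors.toList());
        System.out.println("Deleted per-broker CMs: " + deleted);
    }
}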