Use of io.strimzi.controller.cluster.model.KafkaCluster in project strimzi by strimzi.
The class KafkaAssemblyOperatorTest, method updateCluster.
private void updateCluster(TestContext context, ConfigMap originalCm, ConfigMap clusterCm, boolean kafkaRolling, boolean zkRolling) {
KafkaCluster originalKafkaCluster = KafkaCluster.fromConfigMap(originalCm);
KafkaCluster updatedKafkaCluster = KafkaCluster.fromConfigMap(clusterCm);
ZookeeperCluster originalZookeeperCluster = ZookeeperCluster.fromConfigMap(originalCm);
ZookeeperCluster updatedZookeeperCluster = ZookeeperCluster.fromConfigMap(clusterCm);
TopicController originalTopicController = TopicController.fromConfigMap(originalCm);
// create CM, Service, headless service, statefulset and so on
ConfigMapOperator mockCmOps = mock(ConfigMapOperator.class);
ServiceOperator mockServiceOps = mock(ServiceOperator.class);
ZookeeperSetOperator mockZsOps = mock(ZookeeperSetOperator.class);
KafkaSetOperator mockKsOps = mock(KafkaSetOperator.class);
PvcOperator mockPvcOps = mock(PvcOperator.class);
DeploymentOperator mockDepOps = mock(DeploymentOperator.class);
String clusterCmName = clusterCm.getMetadata().getName();
String clusterCmNamespace = clusterCm.getMetadata().getNamespace();
// Mock CM get
when(mockCmOps.get(clusterCmNamespace, clusterCmName)).thenReturn(clusterCm);
ConfigMap metricsCm = new ConfigMapBuilder().withNewMetadata().withName(KafkaCluster.metricConfigsName(clusterCmName)).withNamespace(clusterCmNamespace).endMetadata().withData(Collections.singletonMap(AbstractModel.METRICS_CONFIG_FILE, METRICS_CONFIG)).build();
when(mockCmOps.get(clusterCmNamespace, KafkaCluster.metricConfigsName(clusterCmName))).thenReturn(metricsCm);
ConfigMap zkMetricsCm = new ConfigMapBuilder().withNewMetadata().withName(ZookeeperCluster.zookeeperMetricsName(clusterCmName)).withNamespace(clusterCmNamespace).endMetadata().withData(Collections.singletonMap(AbstractModel.METRICS_CONFIG_FILE, METRICS_CONFIG)).build();
when(mockCmOps.get(clusterCmNamespace, ZookeeperCluster.zookeeperMetricsName(clusterCmName))).thenReturn(zkMetricsCm);
// Mock Service gets
when(mockServiceOps.get(clusterCmNamespace, KafkaCluster.kafkaClusterName(clusterCmName))).thenReturn(originalKafkaCluster.generateService());
when(mockServiceOps.get(clusterCmNamespace, KafkaCluster.headlessName(clusterCmName))).thenReturn(originalKafkaCluster.generateHeadlessService());
when(mockServiceOps.get(clusterCmNamespace, ZookeeperCluster.zookeeperClusterName(clusterCmName))).thenReturn(originalZookeeperCluster.generateService());
when(mockServiceOps.get(clusterCmNamespace, ZookeeperCluster.zookeeperHeadlessName(clusterCmName))).thenReturn(originalZookeeperCluster.generateHeadlessService());
when(mockServiceOps.endpointReadiness(eq(clusterCmNamespace), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
// Mock StatefulSet get
when(mockKsOps.get(clusterCmNamespace, KafkaCluster.kafkaClusterName(clusterCmName))).thenReturn(originalKafkaCluster.generateStatefulSet(openShift));
when(mockZsOps.get(clusterCmNamespace, ZookeeperCluster.zookeeperClusterName(clusterCmName))).thenReturn(originalZookeeperCluster.generateStatefulSet(openShift));
// Mock Deployment get
if (originalTopicController != null) {
when(mockDepOps.get(clusterCmNamespace, TopicController.topicControllerName(clusterCmName))).thenReturn(originalTopicController.generateDeployment());
}
// Mock CM patch
Set<String> metricsCms = set();
doAnswer(invocation -> {
metricsCms.add(invocation.getArgument(1));
return Future.succeededFuture();
}).when(mockCmOps).reconcile(eq(clusterCmNamespace), anyString(), any());
// Mock Service patch (both service and headless service)
ArgumentCaptor<String> patchedServicesCaptor = ArgumentCaptor.forClass(String.class);
when(mockServiceOps.reconcile(eq(clusterCmNamespace), patchedServicesCaptor.capture(), any())).thenReturn(Future.succeededFuture());
// Mock StatefulSet patch
when(mockZsOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture(ReconcileResult.patched(zkRolling)));
when(mockKsOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture(ReconcileResult.patched(kafkaRolling)));
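// ReconcileResult.patched(true) tells the operator the StatefulSet actually changed, which is what triggers a rolling update;
// patched(false) means "no differences", so no roll is expected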
// Mock StatefulSet rollingUpdate
Set<String> rollingRestarts = set();
// Mock StatefulSet scaleUp
ArgumentCaptor<String> scaledUpCaptor = ArgumentCaptor.forClass(String.class);
when(mockZsOps.scaleUp(anyString(), scaledUpCaptor.capture(), anyInt())).thenReturn(Future.succeededFuture(42));
// Mock StatefulSet scaleDown
ArgumentCaptor<String> scaledDownCaptor = ArgumentCaptor.forClass(String.class);
when(mockZsOps.scaleDown(anyString(), scaledDownCaptor.capture(), anyInt())).thenReturn(Future.succeededFuture(42));
when(mockZsOps.rollingUpdate(anyString(), anyString())).thenAnswer(i -> {
if (!zkRolling) {
context.fail("Unexpected rolling update");
}
return Future.succeededFuture();
});
// ArgumentCaptor<String> scaledUpCaptor = ArgumentCaptor.forClass(String.class);
when(mockKsOps.scaleUp(anyString(), scaledUpCaptor.capture(), anyInt())).thenReturn(Future.succeededFuture(42));
// Mock StatefulSet scaleDown
// ArgumentCaptor<String> scaledDownCaptor = ArgumentCaptor.forClass(String.class);
when(mockKsOps.scaleDown(anyString(), scaledDownCaptor.capture(), anyInt())).thenReturn(Future.succeededFuture(42));
when(mockKsOps.rollingUpdate(anyString(), anyString())).thenAnswer(i -> {
if (!kafkaRolling) {
context.fail("Unexpected rolling update");
}
return Future.succeededFuture();
});
// Mock Deployment patch
ArgumentCaptor<String> depCaptor = ArgumentCaptor.forClass(String.class);
when(mockDepOps.reconcile(anyString(), depCaptor.capture(), any())).thenReturn(Future.succeededFuture());
KafkaAssemblyOperator ops = new KafkaAssemblyOperator(vertx, openShift, ClusterControllerConfig.DEFAULT_OPERATION_TIMEOUT_MS, mockCmOps, mockServiceOps, mockZsOps, mockKsOps, mockPvcOps, mockDepOps);
// Now try to update a KafkaCluster based on this CM
Async async = context.async();
ops.createOrUpdate(new Reconciliation("test-trigger", AssemblyType.KAFKA, clusterCmNamespace, clusterCmName), clusterCm, createResult -> {
if (createResult.failed()) {
createResult.cause().printStackTrace();
}
context.assertTrue(createResult.succeeded());
// rolling restart
Set<String> expectedRollingRestarts = set();
if (KafkaSetOperator.needsRollingUpdate(new StatefulSetDiff(originalKafkaCluster.generateStatefulSet(openShift), updatedKafkaCluster.generateStatefulSet(openShift)))) {
expectedRollingRestarts.add(originalKafkaCluster.getName());
}
if (ZookeeperSetOperator.needsRollingUpdate(new StatefulSetDiff(originalZookeeperCluster.generateStatefulSet(openShift), updatedZookeeperCluster.generateStatefulSet(openShift)))) {
expectedRollingRestarts.add(originalZookeeperCluster.getName());
}
// No metrics config => no CMs created
verify(mockCmOps, never()).createOrUpdate(any());
verifyNoMoreInteractions(mockPvcOps);
async.complete();
});
}
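The tests in this class call small helpers such as set(...) and captured(...) that are not part of the excerpt. Below is a minimal sketch of plausible implementations matching how they are used above (relying on java.util.HashSet/Arrays and Mockito's ArgumentCaptor, which the test already imports); the actual bodies in the Strimzi test class may differ.

private static <T> Set<T> set(T... elements) {
    // Collect the varargs into a mutable HashSet; assertions then compare unordered contents
    return new HashSet<>(Arrays.asList(elements));
}

private static <T> Set<T> captured(ArgumentCaptor<T> captor) {
    // Mockito records every value the captor saw; turn them into a set for order-insensitive comparison
    return new HashSet<>(captor.getAllValues());
}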
Use of io.strimzi.controller.cluster.model.KafkaCluster in project strimzi by strimzi.
The class KafkaAssemblyOperatorTest, method deleteCluster.
private void deleteCluster(TestContext context, ConfigMap clusterCm) {
ZookeeperCluster zookeeperCluster = ZookeeperCluster.fromConfigMap(clusterCm);
KafkaCluster kafkaCluster = KafkaCluster.fromConfigMap(clusterCm);
TopicController topicController = TopicController.fromConfigMap(clusterCm);
// create CM, Service, headless service, statefulset
ConfigMapOperator mockCmOps = mock(ConfigMapOperator.class);
ServiceOperator mockServiceOps = mock(ServiceOperator.class);
ZookeeperSetOperator mockZsOps = mock(ZookeeperSetOperator.class);
KafkaSetOperator mockKsOps = mock(KafkaSetOperator.class);
PvcOperator mockPvcOps = mock(PvcOperator.class);
DeploymentOperator mockDepOps = mock(DeploymentOperator.class);
String clusterCmName = clusterCm.getMetadata().getName();
String clusterCmNamespace = clusterCm.getMetadata().getNamespace();
StatefulSet kafkaSs = kafkaCluster.generateStatefulSet(true);
StatefulSet zkSs = zookeeperCluster.generateStatefulSet(true);
when(mockKsOps.get(clusterCmNamespace, KafkaCluster.kafkaClusterName(clusterCmName))).thenReturn(kafkaSs);
when(mockZsOps.get(clusterCmNamespace, ZookeeperCluster.zookeeperClusterName(clusterCmName))).thenReturn(zkSs);
when(mockCmOps.get(clusterCmNamespace, clusterCmName)).thenReturn(clusterCm);
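// A reconcile(namespace, name, desired) call with a null desired resource means the resource should be deleted;
// the captors below therefore record the names of everything the delete path removes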
ArgumentCaptor<String> serviceCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<String> ssCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<String> metricsCaptor = ArgumentCaptor.forClass(String.class);
when(mockCmOps.reconcile(eq(clusterCmNamespace), metricsCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
when(mockServiceOps.reconcile(eq(clusterCmNamespace), serviceCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
when(mockKsOps.reconcile(anyString(), ssCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
when(mockZsOps.reconcile(anyString(), ssCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
ArgumentCaptor<String> pvcCaptor = ArgumentCaptor.forClass(String.class);
when(mockPvcOps.reconcile(eq(clusterCmNamespace), pvcCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
ArgumentCaptor<String> depCaptor = ArgumentCaptor.forClass(String.class);
when(mockDepOps.reconcile(eq(clusterCmNamespace), depCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
if (topicController != null) {
Deployment tcDep = topicController.generateDeployment();
when(mockDepOps.get(clusterCmNamespace, TopicController.topicControllerName(clusterCmName))).thenReturn(tcDep);
}
KafkaAssemblyOperator ops = new KafkaAssemblyOperator(vertx, openShift, ClusterControllerConfig.DEFAULT_OPERATION_TIMEOUT_MS, mockCmOps, mockServiceOps, mockZsOps, mockKsOps, mockPvcOps, mockDepOps);
// Now try to delete a KafkaCluster based on this CM
Async async = context.async();
ops.delete(new Reconciliation("test-trigger", AssemblyType.KAFKA, clusterCmNamespace, clusterCmName), createResult -> {
context.assertTrue(createResult.succeeded());
/*Set<String> metricsNames = new HashSet<>();
if (kafkaCluster.isMetricsEnabled()) {
metricsNames.add(KafkaCluster.metricConfigsName(clusterCmName));
}
if (zookeeperCluster.isMetricsEnabled()) {
metricsNames.add(ZookeeperCluster.zookeeperMetricsName(clusterCmName));
}
context.assertEquals(metricsNames, captured(metricsCaptor));*/
verify(mockZsOps).reconcile(eq(clusterCmNamespace), eq(ZookeeperCluster.zookeeperClusterName(clusterCmName)), isNull());
context.assertEquals(set(ZookeeperCluster.zookeeperHeadlessName(clusterCmName), ZookeeperCluster.zookeeperClusterName(clusterCmName), KafkaCluster.kafkaClusterName(clusterCmName), KafkaCluster.headlessName(clusterCmName)), captured(serviceCaptor));
// verify deleted Statefulsets
context.assertEquals(set(zookeeperCluster.getName(), kafkaCluster.getName()), captured(ssCaptor));
// PvcOperations only used for deletion
Set<String> expectedPvcDeletions = new HashSet<>();
for (int i = 0; deleteClaim && i < kafkaCluster.getReplicas(); i++) {
expectedPvcDeletions.add("data-" + clusterCmName + "-kafka-" + i);
}
for (int i = 0; deleteClaim && i < zookeeperCluster.getReplicas(); i++) {
expectedPvcDeletions.add("data-" + clusterCmName + "-zookeeper-" + i);
}
context.assertEquals(expectedPvcDeletions, captured(pvcCaptor));
// if topic controller configuration was defined in the CM
if (topicController != null) {
Set<String> expectedDepNames = new HashSet<>();
expectedDepNames.add(TopicController.topicControllerName(clusterCmName));
context.assertEquals(expectedDepNames, captured(depCaptor));
}
async.complete();
});
}
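The expected PVC names above follow the StatefulSet volume-claim naming convention, data-<cluster>-kafka-<ordinal> and data-<cluster>-zookeeper-<ordinal>, and deleteClaim is a field of the (parameterized) test class rather than a local variable. A hypothetical helper that derives the same names as the two loops in the assertion, shown only to make the convention explicit:

private static Set<String> expectedPvcNames(String clusterName, String nodeType, int replicas, boolean deleteClaim) {
    Set<String> names = new HashSet<>();
    if (deleteClaim) {
        for (int i = 0; i < replicas; i++) {
            // e.g. data-my-cluster-kafka-0
            names.add("data-" + clusterName + "-" + nodeType + "-" + i);
        }
    }
    return names;
}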
Use of io.strimzi.controller.cluster.model.KafkaCluster in project strimzi by strimzi.
The class KafkaAssemblyOperator, method createOrUpdateKafka.
private final Future<Void> createOrUpdateKafka(Reconciliation reconciliation, ConfigMap assemblyCm) {
String namespace = assemblyCm.getMetadata().getNamespace();
String name = assemblyCm.getMetadata().getName();
log.info("{}: create/update kafka {}", reconciliation, name);
KafkaCluster kafka = KafkaCluster.fromConfigMap(assemblyCm);
Service service = kafka.generateService();
Service headlessService = kafka.generateHeadlessService();
ConfigMap metricsConfigMap = kafka.generateMetricsConfigMap();
StatefulSet statefulSet = kafka.generateStatefulSet(isOpenShift);
Future<Void> chainFuture = Future.future();
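// Order matters here: scale down first, reconcile the services and the metrics ConfigMap, patch the StatefulSet,
// roll the pods only if the patch reported differences, then scale up and wait for the service endpoints to become ready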
kafkaSetOperations.scaleDown(namespace, kafka.getName(), kafka.getReplicas())
.compose(scale -> serviceOperations.reconcile(namespace, kafka.getName(), service))
.compose(i -> serviceOperations.reconcile(namespace, kafka.getHeadlessName(), headlessService))
.compose(i -> configMapOperations.reconcile(namespace, kafka.getMetricsConfigName(), metricsConfigMap))
.compose(i -> kafkaSetOperations.reconcile(namespace, kafka.getName(), statefulSet))
.compose(diffs -> {
if (diffs instanceof ReconcileResult.Patched && ((ReconcileResult.Patched<Boolean>) diffs).differences()) {
return kafkaSetOperations.rollingUpdate(namespace, kafka.getName());
} else {
return Future.succeededFuture();
}
})
.compose(i -> kafkaSetOperations.scaleUp(namespace, kafka.getName(), kafka.getReplicas()))
.compose(scale -> serviceOperations.endpointReadiness(namespace, service, 1_000, operationTimeoutMs))
.compose(i -> serviceOperations.endpointReadiness(namespace, headlessService, 1_000, operationTimeoutMs))
.compose(chainFuture::complete, chainFuture);
return chainFuture;
}
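The chain above is terminated with the Vert.x 3 idiom compose(chainFuture::complete, chainFuture), which funnels both the success and the failure of the whole chain into the single future the method returns. A minimal self-contained sketch of the same pattern (step names are made up for illustration):

import io.vertx.core.Future;

public class ChainSketch {
    static Future<String> stepOne() { return Future.succeededFuture("one"); }
    static Future<Void> stepTwo(String previous) { return Future.succeededFuture(); }

    static Future<Void> runChain() {
        // Vert.x 3.x-era factory; newer versions prefer Promise
        Future<Void> chainFuture = Future.future();
        stepOne()
            .compose(ChainSketch::stepTwo)
            // Success of the last step completes chainFuture; a failure anywhere in the chain fails it instead
            .compose(chainFuture::complete, chainFuture);
        return chainFuture;
    }
}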
Use of io.strimzi.controller.cluster.model.KafkaCluster in project strimzi by strimzi.
The class KafkaAssemblyOperatorTest, method createCluster.
private void createCluster(TestContext context, ConfigMap clusterCm) {
// create CM, Service, headless service, statefulset and so on
ConfigMapOperator mockCmOps = mock(ConfigMapOperator.class);
ServiceOperator mockServiceOps = mock(ServiceOperator.class);
ZookeeperSetOperator mockZsOps = mock(ZookeeperSetOperator.class);
KafkaSetOperator mockKsOps = mock(KafkaSetOperator.class);
PvcOperator mockPvcOps = mock(PvcOperator.class);
DeploymentOperator mockDepOps = mock(DeploymentOperator.class);
// Create a CM
String clusterCmName = clusterCm.getMetadata().getName();
String clusterCmNamespace = clusterCm.getMetadata().getNamespace();
when(mockCmOps.get(clusterCmNamespace, clusterCmName)).thenReturn(clusterCm);
ArgumentCaptor<Service> serviceCaptor = ArgumentCaptor.forClass(Service.class);
when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created()));
when(mockServiceOps.endpointReadiness(anyString(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
ArgumentCaptor<StatefulSet> ssCaptor = ArgumentCaptor.forClass(StatefulSet.class);
when(mockZsOps.reconcile(anyString(), anyString(), ssCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created()));
when(mockZsOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(null));
when(mockZsOps.rollingUpdate(anyString(), anyString())).thenReturn(Future.succeededFuture());
when(mockZsOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
when(mockKsOps.reconcile(anyString(), anyString(), ssCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created()));
when(mockKsOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(null));
when(mockKsOps.rollingUpdate(anyString(), anyString())).thenReturn(Future.succeededFuture());
when(mockKsOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
ArgumentCaptor<Deployment> depCaptor = ArgumentCaptor.forClass(Deployment.class);
when(mockDepOps.reconcile(anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created()));
// when(mockSsOps.readiness(any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
// when(mockPodOps.readiness(any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
// when(mockEndpointOps.readiness(any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
KafkaCluster kafkaCluster = KafkaCluster.fromConfigMap(clusterCm);
ZookeeperCluster zookeeperCluster = ZookeeperCluster.fromConfigMap(clusterCm);
TopicController topicController = TopicController.fromConfigMap(clusterCm);
ArgumentCaptor<ConfigMap> metricsCaptor = ArgumentCaptor.forClass(ConfigMap.class);
ArgumentCaptor<String> metricsNameCaptor = ArgumentCaptor.forClass(String.class);
when(mockCmOps.reconcile(anyString(), metricsNameCaptor.capture(), metricsCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created()));
KafkaAssemblyOperator ops = new KafkaAssemblyOperator(vertx, openShift, ClusterControllerConfig.DEFAULT_OPERATION_TIMEOUT_MS, mockCmOps, mockServiceOps, mockZsOps, mockKsOps, mockPvcOps, mockDepOps);
// Now try to create a KafkaCluster based on this CM
Async async = context.async();
ops.createOrUpdate(new Reconciliation("test-trigger", AssemblyType.KAFKA, clusterCmNamespace, clusterCmName), clusterCm, createResult -> {
if (createResult.failed()) {
createResult.cause().printStackTrace();
}
context.assertTrue(createResult.succeeded());
// Metrics ConfigMaps are only expected when metrics are enabled for the Kafka or ZooKeeper cluster
Set<String> metricsNames = new HashSet<>();
if (kafkaCluster.isMetricsEnabled()) {
metricsNames.add(KafkaCluster.metricConfigsName(clusterCmName));
}
if (zookeeperCluster.isMetricsEnabled()) {
metricsNames.add(ZookeeperCluster.zookeeperMetricsName(clusterCmName));
}
/*
Map<String, ConfigMap> cmsByName = new HashMap<>();
Iterator<ConfigMap> it2 = metricsCaptor.getAllValues().iterator();
for (Iterator<String> it = metricsNameCaptor.getAllValues().iterator(); it.hasNext(); ) {
cmsByName.put(it.next(), it2.next());
}
context.assertEquals(metricsNames, cmsByName.keySet(),
"Unexpected metrics ConfigMaps");
if (kafkaCluster.isMetricsEnabled()) {
ConfigMap kafkaMetricsCm = cmsByName.get(KafkaCluster.metricConfigsName(clusterCmName));
context.assertEquals(ResourceUtils.labels(Labels.STRIMZI_TYPE_LABEL, "kafka",
Labels.STRIMZI_CLUSTER_LABEL, clusterCmName,
"my-user-label", "cromulent"), kafkaMetricsCm.getMetadata().getLabels());
}
if (zookeeperCluster.isMetricsEnabled()) {
ConfigMap zookeeperMetricsCm = cmsByName.get(ZookeeperCluster.zookeeperMetricsName(clusterCmName));
context.assertEquals(ResourceUtils.labels(Labels.STRIMZI_TYPE_LABEL, "zookeeper",
Labels.STRIMZI_CLUSTER_LABEL, clusterCmName,
"my-user-label", "cromulent"), zookeeperMetricsCm.getMetadata().getLabels());
}*/
// We expect a client-facing and a headless service for both Kafka and ZooKeeper (four in total)
List<Service> capturedServices = serviceCaptor.getAllValues();
context.assertEquals(4, capturedServices.size());
context.assertEquals(set(KafkaCluster.kafkaClusterName(clusterCmName), KafkaCluster.headlessName(clusterCmName), ZookeeperCluster.zookeeperClusterName(clusterCmName), ZookeeperCluster.zookeeperHeadlessName(clusterCmName)), capturedServices.stream().map(svc -> svc.getMetadata().getName()).collect(Collectors.toSet()));
// Assertions on the statefulset
List<StatefulSet> capturedSs = ssCaptor.getAllValues();
// We expect a statefulSet for kafka and zookeeper...
context.assertEquals(set(KafkaCluster.kafkaClusterName(clusterCmName), ZookeeperCluster.zookeeperClusterName(clusterCmName)), capturedSs.stream().map(ss -> ss.getMetadata().getName()).collect(Collectors.toSet()));
// if topic controller configuration was defined in the CM
if (topicController != null) {
List<Deployment> capturedDeps = depCaptor.getAllValues();
context.assertEquals(1, capturedDeps.size());
context.assertEquals(TopicController.topicControllerName(clusterCmName), capturedDeps.get(0).getMetadata().getName());
}
// PvcOperations only used for deletion
verifyNoMoreInteractions(mockPvcOps);
async.complete();
});
}
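These test methods also rely on vertx and openShift fields plus the vertx-unit runner, which are set up elsewhere in the test class. A minimal sketch of such a fixture, with the field names assumed from the code above (the real class is parameterized, so openShift would normally come from the test parameters):

import io.vertx.core.Vertx;
import io.vertx.ext.unit.junit.VertxUnitRunner;
import org.junit.After;
import org.junit.Before;
import org.junit.runner.RunWith;

@RunWith(VertxUnitRunner.class)
public class KafkaAssemblyOperatorTestFixtureSketch {
    // Assumed fields; in the real test these come from the test parameters
    private final boolean openShift = false;
    private Vertx vertx;

    @Before
    public void setUp() {
        vertx = Vertx.vertx();
    }

    @After
    public void tearDown() {
        vertx.close();
    }
}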
Use of io.strimzi.controller.cluster.model.KafkaCluster in project strimzi by strimzi.
The class KafkaAssemblyOperator, method deleteKafka.
private final Future<CompositeFuture> deleteKafka(Reconciliation reconciliation) {
String namespace = reconciliation.namespace();
String name = reconciliation.assemblyName();
log.info("{}: delete kafka {}", reconciliation, name);
StatefulSet ss = kafkaSetOperations.get(namespace, KafkaCluster.kafkaClusterName(name));
final KafkaCluster kafka = ss == null ? null : KafkaCluster.fromAssembly(ss, namespace, name);
boolean deleteClaims = kafka != null && kafka.getStorage().type() == Storage.StorageType.PERSISTENT_CLAIM && kafka.getStorage().isDeleteClaim();
List<Future> result = new ArrayList<>(4 + (deleteClaims ? kafka.getReplicas() : 0));
result.add(configMapOperations.reconcile(namespace, KafkaCluster.metricConfigsName(name), null));
result.add(serviceOperations.reconcile(namespace, KafkaCluster.kafkaClusterName(name), null));
result.add(serviceOperations.reconcile(namespace, KafkaCluster.headlessName(name), null));
result.add(kafkaSetOperations.reconcile(namespace, KafkaCluster.kafkaClusterName(name), null));
if (deleteClaims) {
for (int i = 0; i < kafka.getReplicas(); i++) {
result.add(pvcOperations.reconcile(namespace, kafka.getPersistentVolumeClaimName(i), null));
}
}
return CompositeFuture.join(result);
}
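CompositeFuture.join waits for every deletion to complete and only fails afterwards if any of them failed, unlike CompositeFuture.all, which fails fast. A small sketch of how a caller might collapse the joined result back into a single Future<Void>; the class and method names here are illustrative only:

import io.vertx.core.CompositeFuture;
import io.vertx.core.Future;

class DeleteResultSketch {
    // Wait for all joined deletions, then report one overall success or failure
    static Future<Void> toVoid(CompositeFuture deletions) {
        Future<Void> result = Future.future();
        deletions.setHandler(ar -> {
            if (ar.succeeded()) {
                result.complete();
            } else {
                result.fail(ar.cause());
            }
        });
        return result;
    }
}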