Usage example of io.fabric8.kubernetes.api.Controller in the strimzi/strimzi project:
class KafkaAssemblyOperatorTest, method deleteCluster.
/**
 * Exercises {@code KafkaAssemblyOperator.delete(...)} for the assembly described by
 * {@code clusterCm} and asserts that the operator reconciles every owned resource to
 * absent (a reconcile with a {@code null} desired state): Zookeeper/Kafka services and
 * stateful sets, persistent volume claims (only when the enclosing test's
 * {@code deleteClaim} flag is set), and the topic controller Deployment when one is
 * configured in the ConfigMap.
 *
 * @param context   vertx-unit test context used for the async assertions
 * @param clusterCm the cluster ConfigMap describing the assembly under test
 */
private void deleteCluster(TestContext context, ConfigMap clusterCm) {
    // Derive the expected model objects from the CM; topicController may be null
    // when the CM has no topic controller configuration (checked further below).
    ZookeeperCluster zookeeperCluster = ZookeeperCluster.fromConfigMap(clusterCm);
    KafkaCluster kafkaCluster = KafkaCluster.fromConfigMap(clusterCm);
    TopicController topicController = TopicController.fromConfigMap(clusterCm);
    // create CM, Service, headless service, statefulset
    ConfigMapOperator mockCmOps = mock(ConfigMapOperator.class);
    ServiceOperator mockServiceOps = mock(ServiceOperator.class);
    ZookeeperSetOperator mockZsOps = mock(ZookeeperSetOperator.class);
    KafkaSetOperator mockKsOps = mock(KafkaSetOperator.class);
    PvcOperator mockPvcOps = mock(PvcOperator.class);
    DeploymentOperator mockDepOps = mock(DeploymentOperator.class);
    String clusterCmName = clusterCm.getMetadata().getName();
    String clusterCmNamespace = clusterCm.getMetadata().getNamespace();
    // Stub the "get existing resource" lookups the operator performs before deleting.
    StatefulSet kafkaSs = kafkaCluster.generateStatefulSet(true);
    StatefulSet zkSs = zookeeperCluster.generateStatefulSet(true);
    when(mockKsOps.get(clusterCmNamespace, KafkaCluster.kafkaClusterName(clusterCmName))).thenReturn(kafkaSs);
    when(mockZsOps.get(clusterCmNamespace, ZookeeperCluster.zookeeperClusterName(clusterCmName))).thenReturn(zkSs);
    when(mockCmOps.get(clusterCmNamespace, clusterCmName)).thenReturn(clusterCm);
    // Captors record the name of every resource reconciled to null (i.e. deleted).
    ArgumentCaptor<String> serviceCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> ssCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> metricsCaptor = ArgumentCaptor.forClass(String.class);
    when(mockCmOps.reconcile(eq(clusterCmNamespace), metricsCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
    when(mockServiceOps.reconcile(eq(clusterCmNamespace), serviceCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
    when(mockKsOps.reconcile(anyString(), ssCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
    when(mockZsOps.reconcile(anyString(), ssCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<String> pvcCaptor = ArgumentCaptor.forClass(String.class);
    when(mockPvcOps.reconcile(eq(clusterCmNamespace), pvcCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<String> depCaptor = ArgumentCaptor.forClass(String.class);
    when(mockDepOps.reconcile(eq(clusterCmNamespace), depCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
    if (topicController != null) {
        Deployment tcDep = topicController.generateDeployment();
        when(mockDepOps.get(clusterCmNamespace, TopicController.topicControllerName(clusterCmName))).thenReturn(tcDep);
    }
    // vertx and openShift are fields of the enclosing test class (not visible in this chunk).
    KafkaAssemblyOperator ops = new KafkaAssemblyOperator(vertx, openShift, ClusterControllerConfig.DEFAULT_OPERATION_TIMEOUT_MS, mockCmOps, mockServiceOps, mockZsOps, mockKsOps, mockPvcOps, mockDepOps);
    // Now try to delete a KafkaCluster based on this CM
    Async async = context.async();
    ops.delete(new Reconciliation("test-trigger", AssemblyType.KAFKA, clusterCmNamespace, clusterCmName), createResult -> {
        context.assertTrue(createResult.succeeded());
        // NOTE(review): metrics CM assertion is disabled; metricsCaptor is still wired
        // above so the stub matches — confirm whether this check should be restored.
        /*Set<String> metricsNames = new HashSet<>();
        if (kafkaCluster.isMetricsEnabled()) {
            metricsNames.add(KafkaCluster.metricConfigsName(clusterCmName));
        }
        if (zookeeperCluster.isMetricsEnabled()) {
            metricsNames.add(ZookeeperCluster.zookeeperMetricsName(clusterCmName));
        }
        context.assertEquals(metricsNames, captured(metricsCaptor));*/
        verify(mockZsOps).reconcile(eq(clusterCmNamespace), eq(ZookeeperCluster.zookeeperClusterName(clusterCmName)), isNull());
        // All four services (regular + headless, for both ZK and Kafka) must be deleted.
        context.assertEquals(set(ZookeeperCluster.zookeeperHeadlessName(clusterCmName), ZookeeperCluster.zookeeperClusterName(clusterCmName), KafkaCluster.kafkaClusterName(clusterCmName), KafkaCluster.headlessName(clusterCmName)), captured(serviceCaptor));
        // verify deleted Statefulsets
        context.assertEquals(set(zookeeperCluster.getName(), kafkaCluster.getName()), captured(ssCaptor));
        // PvcOperations only used for deletion
        // deleteClaim is a field of the enclosing test class; when it is false the
        // loop bodies never run, so no PVC deletions are expected.
        Set<String> expectedPvcDeletions = new HashSet<>();
        for (int i = 0; deleteClaim && i < kafkaCluster.getReplicas(); i++) {
            expectedPvcDeletions.add("data-" + clusterCmName + "-kafka-" + i);
        }
        for (int i = 0; deleteClaim && i < zookeeperCluster.getReplicas(); i++) {
            expectedPvcDeletions.add("data-" + clusterCmName + "-zookeeper-" + i);
        }
        context.assertEquals(expectedPvcDeletions, captured(pvcCaptor));
        // if topic controller configuration was defined in the CM
        if (topicController != null) {
            Set<String> expectedDepNames = new HashSet<>();
            expectedDepNames.add(TopicController.topicControllerName(clusterCmName));
            context.assertEquals(expectedDepNames, captured(depCaptor));
        }
        async.complete();
    });
}
Usage example of io.fabric8.kubernetes.api.Controller in the strimzi/strimzi project:
class KafkaAssemblyOperator, method createOrUpdateTopicController.
/**
 * Reconciles the topic controller Deployment belonging to the given assembly
 * ConfigMap: when the CM declares a topic controller, the desired Deployment is
 * generated from it; otherwise the desired state is {@code null}, which causes the
 * reconcile to remove any existing Deployment.
 *
 * @param reconciliation the reconciliation trigger, used for log correlation
 * @param assemblyCm     the assembly ConfigMap being reconciled
 * @return a future completed with the reconcile result for the Deployment
 */
private final Future<ReconcileResult<Void>> createOrUpdateTopicController(Reconciliation reconciliation, ConfigMap assemblyCm) {
    String assemblyNamespace = assemblyCm.getMetadata().getNamespace();
    String assemblyName = assemblyCm.getMetadata().getName();
    log.info("{}: create/update topic controller {}", reconciliation, assemblyName);
    TopicController tc = TopicController.fromConfigMap(assemblyCm);
    // A null desired Deployment means "delete if present".
    Deployment desiredDeployment;
    if (tc == null) {
        desiredDeployment = null;
    } else {
        desiredDeployment = tc.generateDeployment();
    }
    return deploymentOperations.reconcile(assemblyNamespace, topicControllerName(assemblyName), desiredDeployment);
}
Usage example of io.fabric8.kubernetes.api.Controller in the strimzi/strimzi project:
class ControllerTest, method testReconcile_withCm_withKafka_noPrivate_configsReconcilable.
/**
 * Test reconciliation when a cm has been added both in kafka and in k8s while the controller was down, and
 * the topic configs are reconcilable (no conflicting keys): the configs from both sides are merged,
 * and the merged topic is propagated to the topic store, k8s and kafka.
 */
@Test
public void testReconcile_withCm_withKafka_noPrivate_configsReconcilable(TestContext context) {
    // Same partitions/replicas on both sides; each side contributes a distinct config
    // key, so the two are mergeable rather than conflicting.
    Topic kubeTopic = new Topic.Builder(topicName.toString(), 10, (short) 2, map("cleanup.policy", "bar")).build();
    Topic kafkaTopic = new Topic.Builder(topicName.toString(), 10, (short) 2, map("unclean.leader.election.enable", "true")).build();
    Topic privateTopic = null;
    // Expected outcome: union of both config maps.
    Topic mergedTopic = new Topic.Builder(topicName.toString(), 10, (short) 2, map("unclean.leader.election.enable", "true", "cleanup.policy", "bar")).build();
    // Seed the mocks: topic already exists in Kafka and its CM already exists in k8s.
    Async async0 = context.async(2);
    mockKafka.setCreateTopicResponse(topicName -> Future.succeededFuture());
    mockKafka.createTopic(kafkaTopic, ar -> async0.countDown());
    mockKafka.setUpdateTopicResponse(topicName -> Future.succeededFuture());
    ConfigMap cm = TopicSerialization.toConfigMap(kubeTopic, cmPredicate);
    mockK8s.setCreateResponse(topicName.asMapName(), null);
    mockK8s.createConfigMap(cm, ar -> async0.countDown());
    mockK8s.setModifyResponse(topicName.asMapName(), null);
    mockTopicStore.setCreateTopicResponse(topicName, null);
    async0.await();
    Async async = context.async(2);
    // privateTopic == null models the controller having been down when both were created.
    controller.reconcile(cm, kubeTopic, kafkaTopic, privateTopic, reconcileResult -> {
        assertSucceeded(context, reconcileResult);
        mockTopicStore.assertExists(context, topicName);
        mockK8s.assertExists(context, topicName.asMapName());
        mockKafka.assertExists(context, topicName);
        // The merged topic must be what ends up in the topic store...
        mockTopicStore.read(topicName, readResult -> {
            assertSucceeded(context, readResult);
            context.assertEquals(mergedTopic, readResult.result());
            async.countDown();
        });
        // ...and in the k8s ConfigMap...
        mockK8s.getFromName(topicName.asMapName(), readResult -> {
            assertSucceeded(context, readResult);
            context.assertEquals(mergedTopic, TopicSerialization.fromConfigMap(readResult.result()));
            async.countDown();
        });
        // ...and in Kafka itself.
        context.assertEquals(mergedTopic, mockKafka.getTopicState(topicName));
    });
}
Usage example of io.fabric8.kubernetes.api.Controller in the strimzi/strimzi project:
class ControllerTest, method testOnConfigMapAdded_ClusterAuthorizationException.
/**
 * 1. controller is notified that a ConfigMap is created
 * 2. error when creating topic in kafka
 */
@Test
public void testOnConfigMapAdded_ClusterAuthorizationException(TestContext context) {
    // Simulate Kafka rejecting the topic creation with an authorization failure;
    // configMapAdded (defined elsewhere in this test class) drives the notification
    // and asserts the error path.
    Exception createException = new ClusterAuthorizationException("");
    // `op` is currently unused; it is kept for the follow-up checks below.
    Controller op = configMapAdded(context, createException, null);
    // TODO check a k8s event got created
    // TODO what happens when we subsequently reconcile?
}
Usage example of io.fabric8.kubernetes.api.Controller in the strimzi/strimzi project:
class ControllerTest, method testReconcile_withCm_withKafka_noPrivate_irreconcilable.
/**
 * Test reconciliation when a cm has been added both in kafka and in k8s while the controller was down, and
 * the topics are irreconcilably different: Kafka wins
 */
@Test
public void testReconcile_withCm_withKafka_noPrivate_irreconcilable(TestContext context) {
    // The two sides disagree on partition count (10 vs 12) and on the value of the
    // same config key — an irreconcilable conflict, so the Kafka state must win.
    Topic kubeTopic = new Topic.Builder(topicName.toString(), 10, (short) 2, map("cleanup.policy", "bar")).build();
    Topic kafkaTopic = new Topic.Builder(topicName.toString(), 12, (short) 2, map("cleanup.policy", "baz")).build();
    Topic privateTopic = null;
    // Seed the mocks: topic already exists in Kafka and its CM already exists in k8s.
    Async async0 = context.async(2);
    mockKafka.setCreateTopicResponse(topicName -> Future.succeededFuture());
    mockKafka.createTopic(kafkaTopic, ar -> async0.countDown());
    ConfigMap cm = TopicSerialization.toConfigMap(kubeTopic, cmPredicate);
    mockK8s.setCreateResponse(topicName.asMapName(), null);
    mockK8s.createConfigMap(cm, ar -> async0.countDown());
    mockK8s.setModifyResponse(topicName.asMapName(), null);
    mockTopicStore.setCreateTopicResponse(topicName, null);
    async0.await();
    Async async = context.async(2);
    // privateTopic == null models the controller having been down when both were created.
    controller.reconcile(cm, kubeTopic, kafkaTopic, privateTopic, reconcileResult -> {
        assertSucceeded(context, reconcileResult);
        // A warning event must be raised explaining that the CM was overridden.
        mockK8s.assertContainsEvent(context, e -> e.getMessage().contains("ConfigMap is incompatible with the topic metadata. " + "The topic metadata will be treated as canonical."));
        mockTopicStore.assertExists(context, topicName);
        mockK8s.assertExists(context, topicName.asMapName());
        mockKafka.assertExists(context, topicName);
        // Kafka's version of the topic must be what ends up in the topic store...
        mockTopicStore.read(topicName, readResult -> {
            assertSucceeded(context, readResult);
            context.assertEquals(kafkaTopic, readResult.result());
            async.countDown();
        });
        // ...and in the k8s ConfigMap...
        mockK8s.getFromName(topicName.asMapName(), readResult -> {
            assertSucceeded(context, readResult);
            context.assertEquals(kafkaTopic, TopicSerialization.fromConfigMap(readResult.result()));
            async.countDown();
        });
        // ...and unchanged in Kafka itself.
        context.assertEquals(kafkaTopic, mockKafka.getTopicState(topicName));
    });
}
Aggregations