Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi by strimzi.
From the class TopicOperatorTest, method testReconcile_withResource_withKafka_noPrivate_overriddenName.
/**
 * Test reconciliation when a topic has been added both in Kafka and in K8s while the operator was down,
 * the two topics are identical, and the KafkaTopic resource name ("consumer-offsets") overrides the
 * actual Kafka topic name ("__consumer_offsets"). The reconcile must keep the override: the stored
 * topic carries the kube resource name, and no resource is created under the derived kube name.
 */
@Test
public void testReconcile_withResource_withKafka_noPrivate_overriddenName(VertxTestContext context) throws InterruptedException {
    TopicName topicName = new TopicName("__consumer_offsets");
    ResourceName kubeName = new ResourceName("consumer-offsets");
    // Same topic on both sides; only the kube topic carries the explicit resource-name override.
    Topic kubeTopic = new Topic.Builder(topicName, kubeName, 10, (short) 2, map("cleanup.policy", "bar"), metadata).build();
    Topic kafkaTopic = new Topic.Builder(topicName, 10, (short) 2, map("cleanup.policy", "bar"), metadata).build();
    Topic privateTopic = null; // no private (topic-store) state: simulates operator having been down
    CountDownLatch topicCreatedInKafkaAndK8s = new CountDownLatch(2);
    mockKafka.setCreateTopicResponse(topicName_ -> Future.succeededFuture());
    mockKafka.createTopic(Reconciliation.DUMMY_RECONCILIATION, kafkaTopic).onComplete(ar -> topicCreatedInKafkaAndK8s.countDown());
    mockK8s.setCreateResponse(kubeName, null);
    KafkaTopic topicResource = TopicSerialization.toTopicResource(kubeTopic, labels);
    LogContext logContext = LogContext.periodic(topicName.toString(), topicOperator.getNamespace(), topicName.toString());
    mockK8s.createResource(topicResource).onComplete(ar -> topicCreatedInKafkaAndK8s.countDown());
    mockTopicStore.setCreateTopicResponse(topicName, null);
    // Bound the wait so a broken mock cannot hang the test run indefinitely
    // (consistent with the asserted, timed awaits used by the other tests in this class).
    assertThat(topicCreatedInKafkaAndK8s.await(10_000, TimeUnit.MILLISECONDS), is(true));
    Checkpoint async = context.checkpoint();
    topicOperator.reconcile(reconciliation(logContext), logContext, null, kubeTopic, kafkaTopic, privateTopic).onComplete(reconcileResult -> {
        assertSucceeded(context, reconcileResult);
        mockTopicStore.assertExists(context, topicName);
        mockK8s.assertExists(context, kubeName);
        // The derived name must NOT exist: the override name was honoured instead.
        mockK8s.assertNotExists(context, topicName.asKubeName());
        mockK8s.assertNoEvents(context);
        mockKafka.assertExists(context, topicName);
        mockTopicStore.read(topicName).onComplete(readResult -> {
            context.verify(() -> assertThat(readResult.result(), is(kubeTopic)));
            context.verify(() -> assertThat(readResult.result().getResourceName(), is(kubeName)));
            async.flag();
        });
        context.verify(() -> assertThat(mockKafka.getTopicState(topicName), is(kafkaTopic)));
        // A single successful reconciliation should be reflected in the metrics.
        context.verify(() -> {
            MeterRegistry registry = metrics.meterRegistry();
            assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations").tag("kind", "KafkaTopic").counter().count(), is(1.0));
            assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.successful").tag("kind", "KafkaTopic").counter().count(), is(1.0));
            assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.failed").tag("kind", "KafkaTopic").counter().count(), is(0.0));
            assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", "KafkaTopic").timer().count(), is(1L));
            assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", "KafkaTopic").timer().totalTime(TimeUnit.MILLISECONDS), greaterThan(0.0));
        });
    });
}
Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi by strimzi.
From the class TopicOperatorTest, method testReconcileMetricsWithPausedTopic.
/**
 * Verifies the reconciliation metrics when a KafkaTopic is first active and then paused via the
 * {@code strimzi.io/pause-reconciliation} annotation: the {@code resources.paused} gauge must move
 * from 0 to 1 while counters/timers keep accumulating across the two periodic reconciliations.
 */
@Test
public void testReconcileMetricsWithPausedTopic(VertxTestContext context) throws InterruptedException {
    mockKafka.setTopicsListResponse(Future.succeededFuture(emptySet()));
    mockKafka.setUpdateTopicResponse(topicName -> Future.succeededFuture());
    // First pass: topic explicitly NOT paused.
    metadata.getAnnotations().put("strimzi.io/pause-reconciliation", "false");
    resourceAdded(context, null, null, null);
    Future<?> reconcileFuture = topicOperator.reconcileAllTopics("periodic");
    // workaround for the vertx junit5 integration
    CountDownLatch latch = new CountDownLatch(2);
    // Gates the second reconciliation until the first one's assertions have run.
    CountDownLatch splitLatch = new CountDownLatch(1);
    reconcileFuture.onComplete(context.succeeding(e -> context.verify(() -> {
        MeterRegistry registry = metrics.meterRegistry();
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations").tag("kind", "KafkaTopic").counter().count(), is(1.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "resources.paused").tag("kind", "KafkaTopic").gauge().value(), is(0.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.successful").tag("kind", "KafkaTopic").counter().count(), is(1.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.failed").tag("kind", "KafkaTopic").counter().count(), is(0.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", "KafkaTopic").timer().count(), is(1L));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", "KafkaTopic").timer().totalTime(TimeUnit.MILLISECONDS), greaterThan(0.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "resource.state").tag("kind", "KafkaTopic").tag("name", topicName.toString()).tag("resource-namespace", "default-namespace").gauge().value(), is(1.0));
        latch.countDown();
        splitLatch.countDown();
    })));
    // Fail fast if the first reconciliation never completes, instead of silently
    // carrying on and producing confusing downstream assertion failures.
    assertThat(splitLatch.await(10_000, TimeUnit.MILLISECONDS), is(true));
    // Second pass: same topic, now paused.
    metadata.getAnnotations().put("strimzi.io/pause-reconciliation", "true");
    resourceAdded(context, null, null, null);
    topicOperator.reconcileAllTopics("periodic").onComplete(context.succeeding(f -> context.verify(() -> {
        MeterRegistry registry = metrics.meterRegistry();
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations").tag("kind", "KafkaTopic").counter().count(), is(2.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "resources.paused").tag("kind", "KafkaTopic").gauge().value(), is(1.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.successful").tag("kind", "KafkaTopic").counter().count(), is(2.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.failed").tag("kind", "KafkaTopic").counter().count(), is(0.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", "KafkaTopic").timer().count(), is(2L));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", "KafkaTopic").timer().totalTime(TimeUnit.MILLISECONDS), greaterThan(0.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "resource.state").tag("kind", "KafkaTopic").tag("name", topicName.toString()).tag("resource-namespace", "default-namespace").gauge().value(), is(1.0));
        latch.countDown();
    })));
    assertThat(latch.await(10_000, TimeUnit.MILLISECONDS), is(true));
    context.completeNow();
}
Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi by strimzi.
From the class TopicOperatorTest, method testOnTopicCreated.
// TODO ^^ but a disconnected/loss of session error
/**
* 1. operator is notified that a topic is created
* 2. operator successfully queries kafka to get topic metadata
* 3. operator successfully creates KafkaTopic
* 4. operator successfully creates in topic store
*/
@Test
public void testOnTopicCreated(VertxTestContext context) {
    // Topic exists in Kafka (empty config) but has no KafkaTopic resource and no store entry yet.
    TopicMetadata topicMetadata = Utils.getTopicMetadata(topicName.toString(), new org.apache.kafka.clients.admin.Config(Collections.emptyList()));
    mockKafka.setTopicExistsResult(t -> Future.succeededFuture(true));
    mockKafka.setTopicMetadataResponse(topicName, topicMetadata, null);
    mockK8s.setCreateResponse(resourceName, null);
    mockTopicStore.setCreateTopicResponse(topicName, null);
    LogContext logContext = LogContext.zkWatch("///", topicName.toString(), topicOperator.getNamespace(), topicName.toString());
    Checkpoint async = context.checkpoint();
    // The ZK-watch event should fan out into a KafkaTopic resource and a store entry,
    // and flip the per-resource state gauge to 1 (healthy).
    topicOperator.onTopicCreated(logContext, topicName).onComplete(ar -> {
        assertSucceeded(context, ar);
        mockK8s.assertExists(context, resourceName);
        Topic expected = TopicSerialization.fromTopicMetadata(topicMetadata);
        mockTopicStore.assertContains(context, expected);
        context.verify(() -> {
            MeterRegistry registry = metrics.meterRegistry();
            assertThat(registry.get(TopicOperator.METRICS_PREFIX + "resource.state").tag("kind", "KafkaTopic").tag("name", topicName.toString()).tag("resource-namespace", "default-namespace").gauge().value(), is(1.0));
        });
        async.flag();
    });
}
Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi by strimzi.
From the class TopicOperatorTest, method resourceRemoved.
// TODO 3way reconcilation where kafka and kube agree
// TODO 3way reconcilation where all three agree
// TODO 3way reconcilation with conflict
// TODO reconciliation where only private state exists => delete the private state
// TODO tests for the other reconciliation cases
// + non-matching predicate
// + error cases
/**
 * Drives {@code topicOperator.onResourceEvent} for a DELETED KafkaTopic whose topic exists in Kafka
 * and in the topic store, then asserts the resulting state of both mocks.
 *
 * @param context the vertx test context
 * @param latch counted down after the assertions run, or null if no synchronization is needed
 * @param deleteTopicException exception the mock Kafka throws on topic deletion, or null for success
 * @param storeException exception the mock topic store throws on deletion, or null for success
 */
private void resourceRemoved(VertxTestContext context, CountDownLatch latch, Exception deleteTopicException, Exception storeException) {
    Topic kubeTopic = new Topic.Builder(topicName.toString(), 10, (short) 2, map("cleanup.policy", "bar")).build();
    Topic kafkaTopic = kubeTopic;
    Topic privateTopic = kubeTopic;
    mockKafka.setCreateTopicResponse(topicName.toString(), null).createTopic(Reconciliation.DUMMY_RECONCILIATION, kafkaTopic);
    mockKafka.setTopicMetadataResponse(topicName, Utils.getTopicMetadata(kubeTopic), null);
    mockKafka.setDeleteTopicResponse(topicName, deleteTopicException);
    mockTopicStore.setCreateTopicResponse(topicName, null).create(privateTopic);
    mockTopicStore.setDeleteTopicResponse(topicName, storeException);
    KafkaTopic resource = TopicSerialization.toTopicResource(kubeTopic, labels);
    LogContext logContext = LogContext.kubeWatch(Watcher.Action.DELETED, resource);
    // instanceof is null-safe, so the original's extra "!= null &&" guard was redundant;
    // hoisting the duplicated check into one named local keeps the branches readable.
    boolean deletionDisabled = deleteTopicException instanceof TopicDeletionDisabledException;
    Checkpoint async = context.checkpoint();
    topicOperator.onResourceEvent(logContext, resource, DELETED).onComplete(ar -> {
        if (deleteTopicException != null || storeException != null) {
            if (deletionDisabled) {
                // For the specific topic deletion disabled exception the exception will be caught
                // and the resource event will be processed successfully.
                assertSucceeded(context, ar);
            } else {
                // For all other exceptions the resource event will fail.
                assertFailed(context, ar);
            }
            if (deleteTopicException != null) {
                // If there was a broker deletion exception the broker topic should still exist.
                mockKafka.assertExists(context, kafkaTopic.getTopicName());
            } else {
                mockKafka.assertNotExists(context, kafkaTopic.getTopicName());
            }
            if (deletionDisabled) {
                // If there was a topic deletion disabled exception then the store topic would still be deleted.
                mockTopicStore.assertNotExists(context, kafkaTopic.getTopicName());
            } else {
                mockTopicStore.assertExists(context, kafkaTopic.getTopicName());
            }
        } else {
            // Happy path: topic gone from both Kafka and the store.
            assertSucceeded(context, ar);
            mockKafka.assertNotExists(context, kafkaTopic.getTopicName());
            mockTopicStore.assertNotExists(context, kafkaTopic.getTopicName());
        }
        async.flag();
        if (latch != null) {
            latch.countDown();
        }
    });
}
Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi by strimzi.
From the class TopicOperatorTest, method testReconcileMetrics.
@Test
public void testReconcileMetrics(VertxTestContext context) throws InterruptedException {
    // One KafkaTopic resource, no topics on the broker: a single periodic pass should
    // count one reconciliation (neither successful nor failed counters are bumped here)
    // and record one timed sample plus a healthy resource-state gauge.
    mockKafka.setTopicsListResponse(Future.succeededFuture(emptySet()));
    mockKafka.setUpdateTopicResponse(topicName -> Future.succeededFuture());
    resourceAdded(context, null, null, null);
    Checkpoint async = context.checkpoint();
    Future<?> reconciliation = topicOperator.reconcileAllTopics("periodic");
    reconciliation.onComplete(context.succeeding(v -> context.verify(() -> {
        String prefix = TopicOperator.METRICS_PREFIX;
        MeterRegistry registry = metrics.meterRegistry();
        assertThat(registry.get(prefix + "reconciliations").tag("kind", "KafkaTopic").counter().count(), is(1.0));
        assertThat(registry.get(prefix + "resources.paused").tag("kind", "KafkaTopic").gauge().value(), is(0.0));
        assertThat(registry.get(prefix + "reconciliations.successful").tag("kind", "KafkaTopic").counter().count(), is(0.0));
        assertThat(registry.get(prefix + "reconciliations.failed").tag("kind", "KafkaTopic").counter().count(), is(0.0));
        assertThat(registry.get(prefix + "reconciliations.duration").tag("kind", "KafkaTopic").timer().count(), is(1L));
        assertThat(registry.get(prefix + "reconciliations.duration").tag("kind", "KafkaTopic").timer().totalTime(TimeUnit.MILLISECONDS), greaterThan(0.0));
        assertThat(registry.get(prefix + "resource.state").tag("kind", "KafkaTopic").tag("name", topicName.toString()).tag("resource-namespace", "default-namespace").gauge().value(), is(1.0));
        async.flag();
    })));
}
Aggregations