
Example 31 with KafkaTopic

Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi by strimzi.

The class TopicOperatorTest, method testReconcile_withResource_withKafka_noPrivate_overriddenName.

/**
 * Test reconciliation when a resource has been added both in Kafka and in k8s while the operator was down, and both
 * topics are identical. In this variant the KafkaTopic resource is named "consumer-offsets" while the Kafka topic
 * itself is "__consumer_offsets" (the overridden-name case).
 */
@Test
public void testReconcile_withResource_withKafka_noPrivate_overriddenName(VertxTestContext context) throws InterruptedException {
    TopicName topicName = new TopicName("__consumer_offsets");
    ResourceName kubeName = new ResourceName("consumer-offsets");
    Topic kubeTopic = new Topic.Builder(topicName, kubeName, 10, (short) 2, map("cleanup.policy", "bar"), metadata).build();
    Topic kafkaTopic = new Topic.Builder(topicName, 10, (short) 2, map("cleanup.policy", "bar"), metadata).build();
    Topic privateTopic = null;
    CountDownLatch topicCreatedInKafkaAndK8s = new CountDownLatch(2);
    mockKafka.setCreateTopicResponse(topicName_ -> Future.succeededFuture());
    mockKafka.createTopic(Reconciliation.DUMMY_RECONCILIATION, kafkaTopic).onComplete(ar -> topicCreatedInKafkaAndK8s.countDown());
    mockK8s.setCreateResponse(kubeName, null);
    KafkaTopic topicResource = TopicSerialization.toTopicResource(kubeTopic, labels);
    LogContext logContext = LogContext.periodic(topicName.toString(), topicOperator.getNamespace(), topicName.toString());
    mockK8s.createResource(topicResource).onComplete(ar -> topicCreatedInKafkaAndK8s.countDown());
    mockTopicStore.setCreateTopicResponse(topicName, null);
    topicCreatedInKafkaAndK8s.await();
    Checkpoint async = context.checkpoint();
    topicOperator.reconcile(reconciliation(logContext), logContext, null, kubeTopic, kafkaTopic, privateTopic).onComplete(reconcileResult -> {
        assertSucceeded(context, reconcileResult);
        mockTopicStore.assertExists(context, topicName);
        mockK8s.assertExists(context, kubeName);
        mockK8s.assertNotExists(context, topicName.asKubeName());
        mockK8s.assertNoEvents(context);
        mockKafka.assertExists(context, topicName);
        mockTopicStore.read(topicName).onComplete(readResult -> {
            context.verify(() -> assertThat(readResult.result(), is(kubeTopic)));
            context.verify(() -> assertThat(readResult.result().getResourceName(), is(kubeName)));
            async.flag();
        });
        context.verify(() -> assertThat(mockKafka.getTopicState(topicName), is(kafkaTopic)));
        context.verify(() -> {
            MeterRegistry registry = metrics.meterRegistry();
            assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations").tag("kind", "KafkaTopic").counter().count(), is(1.0));
            assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.successful").tag("kind", "KafkaTopic").counter().count(), is(1.0));
            assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.failed").tag("kind", "KafkaTopic").counter().count(), is(0.0));
            assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", "KafkaTopic").timer().count(), is(1L));
            assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", "KafkaTopic").timer().totalTime(TimeUnit.MILLISECONDS), greaterThan(0.0));
        });
    });
}
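
In the test, TopicSerialization.toTopicResource(kubeTopic, labels) produces the KafkaTopic resource with the overridden name. Expressed directly with the generated KafkaTopicBuilder, the same resource might be sketched as follows (a minimal sketch; the builder call names are assumed from the standard generated API, and the labels handling is omitted):

static KafkaTopic consumerOffsetsTopic() {
    // Sketch only: the Kubernetes resource name differs from the Kafka topic it manages.
    return new KafkaTopicBuilder()
        .withNewMetadata()
            // kubeName in the test above
            .withName("consumer-offsets")
        .endMetadata()
        .withNewSpec()
            // topicName in the test above, carried in spec.topicName
            .withTopicName("__consumer_offsets")
            .withPartitions(10)
            .withReplicas(2)
            .addToConfig("cleanup.policy", "bar")
        .endSpec()
        .build();
}

The reconcile assertions then check that the topic store entry keeps the overridden resource name (getResourceName() returns kubeName) while Kafka only ever sees __consumer_offsets.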

Example 32 with KafkaTopic

Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi by strimzi.

The class TopicOperatorTest, method testReconcileMetricsWithPausedTopic.

@Test
public void testReconcileMetricsWithPausedTopic(VertxTestContext context) throws InterruptedException {
    mockKafka.setTopicsListResponse(Future.succeededFuture(emptySet()));
    mockKafka.setUpdateTopicResponse(topicName -> Future.succeededFuture());
    metadata.getAnnotations().put("strimzi.io/pause-reconciliation", "false");
    resourceAdded(context, null, null, null);
    Future<?> reconcileFuture = topicOperator.reconcileAllTopics("periodic");
    // workaround for the vertx junit5 integration
    CountDownLatch latch = new CountDownLatch(2);
    CountDownLatch splitLatch = new CountDownLatch(1);
    reconcileFuture.onComplete(context.succeeding(e -> context.verify(() -> {
        MeterRegistry registry = metrics.meterRegistry();
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations").tag("kind", "KafkaTopic").counter().count(), is(1.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "resources.paused").tag("kind", "KafkaTopic").gauge().value(), is(0.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.successful").tag("kind", "KafkaTopic").counter().count(), is(1.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.failed").tag("kind", "KafkaTopic").counter().count(), is(0.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", "KafkaTopic").timer().count(), is(1L));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", "KafkaTopic").timer().totalTime(TimeUnit.MILLISECONDS), greaterThan(0.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "resource.state").tag("kind", "KafkaTopic").tag("name", topicName.toString()).tag("resource-namespace", "default-namespace").gauge().value(), is(1.0));
        latch.countDown();
        splitLatch.countDown();
    })));
    splitLatch.await(10_000, TimeUnit.MILLISECONDS);
    metadata.getAnnotations().put("strimzi.io/pause-reconciliation", "true");
    resourceAdded(context, null, null, null);
    topicOperator.reconcileAllTopics("periodic").onComplete(context.succeeding(f -> context.verify(() -> {
        MeterRegistry registry = metrics.meterRegistry();
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations").tag("kind", "KafkaTopic").counter().count(), is(2.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "resources.paused").tag("kind", "KafkaTopic").gauge().value(), is(1.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.successful").tag("kind", "KafkaTopic").counter().count(), is(2.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.failed").tag("kind", "KafkaTopic").counter().count(), is(0.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", "KafkaTopic").timer().count(), is(2L));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", "KafkaTopic").timer().totalTime(TimeUnit.MILLISECONDS), greaterThan(0.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "resource.state").tag("kind", "KafkaTopic").tag("name", topicName.toString()).tag("resource-namespace", "default-namespace").gauge().value(), is(1.0));
        latch.countDown();
    })));
    assertThat(latch.await(10_000, TimeUnit.MILLISECONDS), is(true));
    context.completeNow();
}
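
The pausing itself is driven by the strimzi.io/pause-reconciliation annotation, which the test flips from "false" to "true" on the resource metadata. A minimal sketch of a KafkaTopic carrying that annotation, again assuming the generated KafkaTopicBuilder API (the resource name, partitions, and replicas below are illustrative):

static KafkaTopic pausedTopic() {
    return new KafkaTopicBuilder()
        .withNewMetadata()
            .withName("my-topic")
            // Annotation key taken from the test above; "true" pauses reconciliation for this resource.
            .addToAnnotations("strimzi.io/pause-reconciliation", "true")
        .endMetadata()
        .withNewSpec()
            .withPartitions(1)
            .withReplicas(1)
        .endSpec()
        .build();
}

With the annotation set to "true", the second reconcileAllTopics run expects the resources.paused gauge to move to 1.0 while the reconciliation counters continue to increase.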

Example 33 with KafkaTopic

Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi by strimzi.

The class TopicOperatorTest, method testOnTopicCreated.

// TODO ^^ but a disconnected/loss of session error
/**
 * 1. operator is notified that a topic is created
 * 2. operator successfully queries Kafka to get the topic metadata
 * 3. operator successfully creates the KafkaTopic resource
 * 4. operator successfully creates the topic in the topic store
 */
@Test
public void testOnTopicCreated(VertxTestContext context) {
    TopicMetadata topicMetadata = Utils.getTopicMetadata(topicName.toString(), new org.apache.kafka.clients.admin.Config(Collections.emptyList()));
    mockTopicStore.setCreateTopicResponse(topicName, null);
    mockKafka.setTopicExistsResult(t -> Future.succeededFuture(true));
    mockKafka.setTopicMetadataResponse(topicName, topicMetadata, null);
    mockK8s.setCreateResponse(resourceName, null);
    LogContext logContext = LogContext.zkWatch("///", topicName.toString(), topicOperator.getNamespace(), topicName.toString());
    Checkpoint async = context.checkpoint();
    topicOperator.onTopicCreated(logContext, topicName).onComplete(ar -> {
        assertSucceeded(context, ar);
        mockK8s.assertExists(context, resourceName);
        Topic t = TopicSerialization.fromTopicMetadata(topicMetadata);
        mockTopicStore.assertContains(context, t);
        context.verify(() -> {
            MeterRegistry registry = metrics.meterRegistry();
            assertThat(registry.get(TopicOperator.METRICS_PREFIX + "resource.state").tag("kind", "KafkaTopic").tag("name", topicName.toString()).tag("resource-namespace", "default-namespace").gauge().value(), is(1.0));
        });
        async.flag();
    });
}
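
The metrics assertions here and in the other examples use Micrometer's required-search API: registry.get(name) narrowed by tag(...) resolves a single meter, or throws MeterNotFoundException if none matches. A self-contained sketch of that lookup pattern against a SimpleMeterRegistry (the meter name below is illustrative, not the operator's actual METRICS_PREFIX-based name):

import io.micrometer.core.instrument.Counter;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;

public class MeterLookupSketch {
    public static void main(String[] args) {
        MeterRegistry registry = new SimpleMeterRegistry();
        // Register a counter with a "kind" tag, mirroring how the tests tag meters with kind=KafkaTopic.
        Counter reconciliations = registry.counter("example.reconciliations", "kind", "KafkaTopic");
        reconciliations.increment();
        // Resolve the same meter by name and tag, then read its count.
        double count = registry.get("example.reconciliations")
            .tag("kind", "KafkaTopic")
            .counter()
            .count();
        System.out.println(count); // 1.0
    }
}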

Example 34 with KafkaTopic

Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi by strimzi.

The class TopicOperatorTest, method resourceRemoved.

// TODO 3way reconciliation where kafka and kube agree
// TODO 3way reconciliation where all three agree
// TODO 3way reconciliation with conflict
// TODO reconciliation where only private state exists => delete the private state
// TODO tests for the other reconciliation cases
// + non-matching predicate
// + error cases
private void resourceRemoved(VertxTestContext context, CountDownLatch latch, Exception deleteTopicException, Exception storeException) {
    Topic kubeTopic = new Topic.Builder(topicName.toString(), 10, (short) 2, map("cleanup.policy", "bar")).build();
    Topic kafkaTopic = kubeTopic;
    Topic privateTopic = kubeTopic;
    mockKafka.setCreateTopicResponse(topicName.toString(), null).createTopic(Reconciliation.DUMMY_RECONCILIATION, kafkaTopic);
    mockKafka.setTopicMetadataResponse(topicName, Utils.getTopicMetadata(kubeTopic), null);
    mockKafka.setDeleteTopicResponse(topicName, deleteTopicException);
    mockTopicStore.setCreateTopicResponse(topicName, null).create(privateTopic);
    mockTopicStore.setDeleteTopicResponse(topicName, storeException);
    KafkaTopic resource = TopicSerialization.toTopicResource(kubeTopic, labels);
    LogContext logContext = LogContext.kubeWatch(Watcher.Action.DELETED, resource);
    Checkpoint async = context.checkpoint();
    topicOperator.onResourceEvent(logContext, resource, DELETED).onComplete(ar -> {
        if (deleteTopicException != null || storeException != null) {
            if (deleteTopicException != null && deleteTopicException instanceof TopicDeletionDisabledException) {
                // For the specific topic deletion disabled exception, the exception is caught and the
                // resource event is still processed successfully
                assertSucceeded(context, ar);
            } else {
                // For all other exceptions the resource event will fail.
                assertFailed(context, ar);
            }
            if (deleteTopicException != null) {
                // If there was a broker deletion exception the broker topic should still exist
                mockKafka.assertExists(context, kafkaTopic.getTopicName());
            } else {
                mockKafka.assertNotExists(context, kafkaTopic.getTopicName());
            }
            if (deleteTopicException != null && deleteTopicException instanceof TopicDeletionDisabledException) {
                // If there was a topic deletion disabled exception, the topic store entry should still have been deleted.
                mockTopicStore.assertNotExists(context, kafkaTopic.getTopicName());
            } else {
                mockTopicStore.assertExists(context, kafkaTopic.getTopicName());
            }
        } else {
            assertSucceeded(context, ar);
            mockKafka.assertNotExists(context, kafkaTopic.getTopicName());
            mockTopicStore.assertNotExists(context, kafkaTopic.getTopicName());
        }
        async.flag();
        if (latch != null) {
            latch.countDown();
        }
    });
}
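
resourceRemoved is a parameterized helper rather than a test: the deleteTopicException and storeException arguments select which of the branches above a caller exercises. A hypothetical caller (the test name is made up for illustration; the actual callers are not part of this listing) could look like this:

@Test
public void testResourceRemovedWithTopicDeletionDisabled(VertxTestContext context) {
    // Broker-side topic deletion is disabled: the event should still succeed,
    // the Kafka topic should remain, and the topic store entry should be gone.
    resourceRemoved(context, null, new TopicDeletionDisabledException(), null);
}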

Example 35 with KafkaTopic

Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi by strimzi.

The class TopicOperatorTest, method testReconcileMetrics.

@Test
public void testReconcileMetrics(VertxTestContext context) throws InterruptedException {
    mockKafka.setTopicsListResponse(Future.succeededFuture(emptySet()));
    mockKafka.setUpdateTopicResponse(topicName -> Future.succeededFuture());
    resourceAdded(context, null, null, null);
    Future<?> reconcileFuture = topicOperator.reconcileAllTopics("periodic");
    Checkpoint async = context.checkpoint();
    reconcileFuture.onComplete(context.succeeding(e -> context.verify(() -> {
        MeterRegistry registry = metrics.meterRegistry();
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations").tag("kind", "KafkaTopic").counter().count(), is(1.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "resources.paused").tag("kind", "KafkaTopic").gauge().value(), is(0.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.successful").tag("kind", "KafkaTopic").counter().count(), is(0.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.failed").tag("kind", "KafkaTopic").counter().count(), is(0.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", "KafkaTopic").timer().count(), is(1L));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", "KafkaTopic").timer().totalTime(TimeUnit.MILLISECONDS), greaterThan(0.0));
        assertThat(registry.get(TopicOperator.METRICS_PREFIX + "resource.state").tag("kind", "KafkaTopic").tag("name", topicName.toString()).tag("resource-namespace", "default-namespace").gauge().value(), is(1.0));
        async.flag();
    })));
}

Aggregations

KafkaTopic (io.strimzi.api.kafka.model.KafkaTopic): 187
Test (org.junit.jupiter.api.Test): 92
KafkaTopicBuilder (io.strimzi.api.kafka.model.KafkaTopicBuilder): 80
Checkpoint (io.vertx.junit5.Checkpoint): 46
ObjectMetaBuilder (io.fabric8.kubernetes.api.model.ObjectMetaBuilder): 38
HashMap (java.util.HashMap): 32
ObjectMeta (io.fabric8.kubernetes.api.model.ObjectMeta): 30
CountDownLatch (java.util.concurrent.CountDownLatch): 28
NewTopic (org.apache.kafka.clients.admin.NewTopic): 28
List (java.util.List): 26
Map (java.util.Map): 26
MeterRegistry (io.micrometer.core.instrument.MeterRegistry): 22
KafkaTopicStatus (io.strimzi.api.kafka.model.status.KafkaTopicStatus): 22
AsyncResult (io.vertx.core.AsyncResult): 22
MaxAttemptsExceededException (io.strimzi.operator.common.MaxAttemptsExceededException): 20
Vertx (io.vertx.core.Vertx): 20
Matchers.containsString (org.hamcrest.Matchers.containsString): 20
Watcher (io.fabric8.kubernetes.client.Watcher): 18
KafkaClients (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients): 18
KafkaClientsBuilder (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder): 18