Example 6 with KafkaConsumer

use of io.vertx.kafka.client.consumer.KafkaConsumer in project hono by eclipse.

the class HonoKafkaConsumer method ensureTopicIsAmongSubscribedTopicPatternTopics.

/**
 * Tries to ensure that the given topic is part of the list of topics this consumer is subscribed to.
 * <p>
 * The successful completion of the returned Future means that the topic exists and is among the subscribed topics.
 * It also means that if partitions of this topic have been assigned to this consumer and if the offset reset config
 * is set to "latest", the positions for these partitions have already been fetched and the consumer will receive
 * records published thereafter.
 * If all partitions of this topic are assigned to other consumers, a best-effort approach is taken to complete the
 * returned Future only after that assignment has been done.
 * <p>
 * Note that this method is only applicable if a topic pattern subscription is used, otherwise an
 * {@link IllegalStateException} is thrown.
 * <p>
 * This method is needed for scenarios where the given topic either has just been created and this consumer doesn't
 * know about it yet or the topic doesn't exist yet. In the latter case, this method will try to trigger creation of
 * the topic, which may succeed if topic auto-creation is enabled, and then wait for the following rebalance to
 * check whether the topic is among the subscribed topics.
 *
 * @param topic The topic to use.
 * @return A future indicating the outcome of the operation. The Future is succeeded if the topic exists and
 *         is among the subscribed topics. The Future is failed with a {@link ServerErrorException} if the topic
 *         doesn't exist or there was an error determining whether the topic is part of the subscription.
 * @throws IllegalArgumentException if the given topic doesn't match the topic pattern.
 * @throws IllegalStateException if this consumer has been created with a fixed set of topics to consume from,
 *         i.e. if it doesn't use a topic pattern subscription.
 * @throws NullPointerException if topic is {@code null}.
 */
public final Future<Void> ensureTopicIsAmongSubscribedTopicPatternTopics(final String topic) {
    Objects.requireNonNull(topic);
    if (topics != null) {
        throw new IllegalStateException("consumer doesn't use topic pattern");
    } else if (!topicPattern.matcher(topic).find()) {
        throw new IllegalArgumentException("topic doesn't match pattern");
    }
    if (kafkaConsumer == null) {
        return Future.failedFuture(new ServerErrorException(HttpURLConnection.HTTP_INTERNAL_ERROR, "not started"));
    } else if (stopCalled.get()) {
        return Future.failedFuture(new ServerErrorException(HttpURLConnection.HTTP_UNAVAILABLE, "already stopped"));
    }
    // use previously updated topics list (less costly than invoking kafkaConsumer.subscription() here)
    if (subscribedTopicPatternTopics.contains(topic)) {
        log.debug("ensureTopicIsAmongSubscribedTopics: topic is already subscribed [{}]", topic);
        return Future.succeededFuture();
    }
    final Set<String> subscribedTopicPatternTopicsBefore = new HashSet<>(subscribedTopicPatternTopics);
    // check whether topic has been created since the last rebalance and if not, potentially create it here implicitly
    // (partitionsFor() will create the topic if it doesn't exist, provided "auto.create.topics.enable" is true)
    final Promise<Void> resultPromise = Promise.promise();
    final Future<Void> topicCheckFuture = HonoKafkaConsumerHelper.partitionsFor(kafkaConsumer, topic)
            .onFailure(thr -> log.warn("ensureTopicIsAmongSubscribedTopics: error getting partitions for topic [{}]",
                    topic, thr))
            .compose(partitions -> {
                if (partitions.isEmpty()) {
                    log.warn("ensureTopicIsAmongSubscribedTopics: topic doesn't exist and didn't get auto-created: {}", topic);
                    return Future.failedFuture(new ServerErrorException(HttpURLConnection.HTTP_UNAVAILABLE,
                            "command topic doesn't exist and didn't get auto-created"));
                }
                return Future.succeededFuture();
            })
            .onFailure(resultPromise::tryFail)
            .mapEmpty();
    // the topic list of a wildcard subscription only gets refreshed periodically by default (interval is defined by "metadata.max.age.ms");
    // therefore enforce a refresh here by again subscribing to the topic pattern
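    // (hypothetical tuning, not done here: the refresh interval could generally be lowered when creating
    //  the consumer, e.g. consumerConfig.put(ConsumerConfig.METADATA_MAX_AGE_CONFIG, "5000"))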
    log.debug("ensureTopicIsAmongSubscribedTopics: wait for subscription update and rebalance [{}]", topic);
    // not composed with the topicCheckFuture outcome in order for both operations to be invoked directly after one another, with no poll() in between
    subscribeAndWaitForRebalance().compose(v -> {
        final boolean someTopicDeleted = subscribedTopicPatternTopicsBefore.stream().anyMatch(t -> !subscribedTopicPatternTopics.contains(t));
        if (!subscribedTopicPatternTopics.contains(topic)) {
            // first metadata refresh could have failed with a LEADER_NOT_AVAILABLE error for the new topic;
            // seems to happen when some other topics have just been deleted for example
            log.debug("ensureTopicIsAmongSubscribedTopics: subscription not updated with topic after rebalance; try again [topic: {}]", topic);
            return subscribeAndWaitForRebalance();
        } else if (isCooperativeRebalancingConfigured() && someTopicDeleted && isAutoOffsetResetConfigLatest()) {
            // (most probable case here is that the rebalance was triggered by a recent deletion of topics, other cases aren't handled here)
            return kafkaConsumer.assignment().compose(partitions -> {
                if (partitions.stream().anyMatch(p -> p.getTopic().equals(topic))) {
                    return Future.succeededFuture(v);
                } else {
                    log.debug("ensureTopicIsAmongSubscribedTopics: wait for another rebalance before considering update of topic subscription [{}] as done", topic);
                    final Promise<Void> rebalanceResultPromise = Promise.promise();
                    runOnKafkaWorkerThread(v2 -> {
                        getUnderlyingConsumer().enforceRebalance();
                        runOnContext(v3 -> subscribeAndWaitForRebalance().onComplete(rebalanceResultPromise));
                    });
                    return rebalanceResultPromise.future();
                }
            });
        }
        return Future.succeededFuture(v);
    }).compose(v -> {
        if (!subscribedTopicPatternTopics.contains(topic)) {
            log.warn("ensureTopicIsAmongSubscribedTopics: subscription not updated with topic after rebalance [topic: {}]", topic);
            return Future.failedFuture(new ServerErrorException(HttpURLConnection.HTTP_UNAVAILABLE, "subscription not updated with topic after rebalance"));
        }
        log.debug("ensureTopicIsAmongSubscribedTopics: done updating topic subscription [{}]", topic);
        return Future.succeededFuture(v);
    }).onComplete(ar -> Futures.tryHandleResult(resultPromise, ar));
    if (!isAutoOffsetResetConfigLatest()) {
        // offset reset policy is "earliest" - no need to wait for rebalance and offset reset before completing the result future
        // BUT: the rebalance-triggering logic above still needs to be done in the background - otherwise it could take much longer for the earliest record to actually be received
        // (note that topicCheckFuture isn't returned directly here because the subscribeAndWaitForRebalance() Future could still be completed sooner)
        topicCheckFuture.onSuccess(v -> resultPromise.tryComplete());
    }
    return resultPromise.future();
}
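For orientation, this is how a caller might use the method to make sure a freshly created topic is consumed before publishing to it. A minimal sketch, assuming a started consumer with a matching topic pattern; the topic name and the sendRecord helper are made up for illustration, not Hono API:

// sketch only: topic name and sendRecord helper are assumptions
final String newTopic = "hono.command_internal.my-tenant";
honoKafkaConsumer.ensureTopicIsAmongSubscribedTopicPatternTopics(newTopic)
        .compose(v -> sendRecord(newTopic, "deviceId", Buffer.buffer("cmd")))
        .onSuccess(v -> log.debug("consumer will receive records published on [{}]", newTopic))
        .onFailure(thr -> log.warn("topic [{}] not among subscribed topics", newTopic, thr));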

Example 7 with KafkaConsumer

use of io.vertx.kafka.client.consumer.KafkaConsumer in project hono by eclipse.

the class HonoKafkaConsumerIT method testConsumerReadsLatestRecordsPublishedAfterOutOfRangeOffsetReset.

/**
 * Verifies that a HonoKafkaConsumer configured with "latest" as offset reset strategy will receive
 * all still available records after the committed offset position has gone out of range
 * (because records have been deleted according to the retention config) and the consumer is restarted.
 *
 * @param ctx The vert.x test context.
 * @throws InterruptedException if test execution gets interrupted.
 */
@Test
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsLatestRecordsPublishedAfterOutOfRangeOffsetReset(final VertxTestContext ctx) throws InterruptedException {
    final int numTopics = 1;
    final int numTestRecordsPerTopicPerRound = 20;
    // has to be 1 here because we expect partition 0 to contain *all* the records published for a topic
    final int numPartitions = 1;
    // prepare topics
    final Set<String> topics = IntStream.range(0, numTopics).mapToObj(i -> "test_" + i + "_" + UUID.randomUUID()).collect(Collectors.toSet());
    final String publishTestTopic = topics.iterator().next();
    final VertxTestContext setup = new VertxTestContext();
    final Map<String, String> topicsConfig = Map.of(TopicConfig.RETENTION_MS_CONFIG, "300", TopicConfig.SEGMENT_BYTES_CONFIG, SMALL_TOPIC_SEGMENT_SIZE_BYTES);
    createTopics(topics, numPartitions, topicsConfig).onComplete(setup.succeedingThenComplete());
    assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }
    // prepare consumer
    final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    final VertxTestContext firstConsumerInstanceStartedAndStopped = new VertxTestContext();
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        receivedRecords.add(record);
        if (receivedRecords.size() == numTestRecordsPerTopicPerRound * topics.size()) {
            LOG.trace("first round of records received; stop consumer; committed offset afterwards shall be {}", numTestRecordsPerTopicPerRound);
            kafkaConsumer.stop().onFailure(ctx::failNow).onSuccess(v2 -> {
                LOG.trace("publish 2nd round of records (shall be deleted before the to-be-restarted consumer is able to receive them)");
                publishRecords(numTestRecordsPerTopicPerRound, "round2_", topics).onFailure(ctx::failNow).onSuccess(v3 -> {
                    LOG.trace("wait until records of first two rounds have been deleted according to the retention policy (committed offset will be out-of-range then)");
                    final int beginningOffsetToWaitFor = numTestRecordsPerTopicPerRound * 2;
                    waitForLogDeletion(new TopicPartition(publishTestTopic, 0), beginningOffsetToWaitFor, Duration.ofSeconds(5)).onComplete(firstConsumerInstanceStartedAndStopped.succeedingThenComplete());
                });
            });
        }
    };
    kafkaConsumer = new HonoKafkaConsumer(vertx, topics, recordHandler, consumerConfig);
    // first start of consumer, letting it commit offsets
    kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
        LOG.trace("consumer started, publish first round of records to be received by the consumer (so that it has offsets to commit)");
        publishRecords(numTestRecordsPerTopicPerRound, "round1_", topics);
    }));
    assertThat(firstConsumerInstanceStartedAndStopped.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (firstConsumerInstanceStartedAndStopped.failed()) {
        ctx.failNow(firstConsumerInstanceStartedAndStopped.causeOfFailure());
        return;
    }
    // preparation done, now start same consumer again and verify it reads all still available records - even though committed offset is out-of-range now
    receivedRecords.clear();
    final String lastRecordKey = "lastKey";
    // restarted consumer is expected to receive 3rd round of records + one extra record published after consumer start
    final int expectedNumberOfRecords = (numTestRecordsPerTopicPerRound * topics.size()) + 1;
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler2 = record -> {
        receivedRecords.add(record);
        if (receivedRecords.size() == expectedNumberOfRecords) {
            ctx.verify(() -> {
                assertThat(receivedRecords.get(0).key()).startsWith("round3");
                assertThat(receivedRecords.get(receivedRecords.size() - 1).key()).isEqualTo(lastRecordKey);
            });
            ctx.completeNow();
        }
    };
    LOG.trace("publish 3nd round of records (shall be received by to-be-restarted consumer)");
    publishRecords(numTestRecordsPerTopicPerRound, "round3_", topics).onFailure(ctx::failNow).onSuccess(v -> {
        kafkaConsumer = new HonoKafkaConsumer(vertx, topics, recordHandler2, consumerConfig);
        kafkaConsumer.start().onComplete(ctx.succeeding(v2 -> {
            LOG.debug("consumer started, publish another record to be received by the consumer");
            publish(publishTestTopic, lastRecordKey, Buffer.buffer("testPayload"));
        }));
    });
    if (!ctx.awaitCompletion(9, TimeUnit.SECONDS)) {
        ctx.failNow(new IllegalStateException(String.format("timeout waiting for expected number of records (%d) to be received; received records: %d", expectedNumberOfRecords, receivedRecords.size())));
    }
}
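The waitForLogDeletion helper isn't shown in this excerpt. A rough sketch of how such a check could be implemented by polling the partition's beginning offset; the signature, polling interval and the futurized Vert.x 4 beginningOffsets variant are assumptions, not the actual test support code:

// hypothetical helper: completes once retention has advanced the partition's
// beginning offset to at least the given value, fails after the timeout
private Future<Void> waitForLogDeletion(final KafkaConsumer<String, Buffer> consumer,
        final TopicPartition partition, final long expectedBeginningOffset, final Duration timeout) {
    final Promise<Void> result = Promise.promise();
    final long deadline = System.currentTimeMillis() + timeout.toMillis();
    vertx.setPeriodic(100L, timerId -> consumer.beginningOffsets(partition)
            .onSuccess(beginningOffset -> {
                if (beginningOffset >= expectedBeginningOffset) {
                    vertx.cancelTimer(timerId);
                    result.tryComplete();
                } else if (System.currentTimeMillis() > deadline) {
                    vertx.cancelTimer(timerId);
                    result.tryFail(new IllegalStateException("timed out waiting for log deletion"));
                }
            }));
    return result.future();
}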

Example 8 with KafkaConsumer

use of io.vertx.kafka.client.consumer.KafkaConsumer in project hono by eclipse.

the class HonoKafkaConsumerIT method testConsumerReadsLatestRecordsPublishedAfterTopicSubscriptionConfirmed.

/**
 * Verifies that a HonoKafkaConsumer configured with "latest" as offset reset strategy and a topic pattern
 * subscription only receives records published after the consumer <em>start()</em> method has completed.
 * <p>
 * Also verifies that all records published after the consumer <em>ensureTopicIsAmongSubscribedTopicPatternTopics()</em>
 * method has completed are received by the consumer, even if the topic was only created after the consumer
 * <em>start</em> method has completed.
 *
 * @param partitionAssignmentStrategy The partition assignment strategy to use for the consumer.
 * @param ctx The vert.x test context.
 * @throws InterruptedException if test execution gets interrupted.
 */
@ParameterizedTest
@MethodSource("partitionAssignmentStrategies")
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsLatestRecordsPublishedAfterTopicSubscriptionConfirmed(final String partitionAssignmentStrategy, final VertxTestContext ctx) throws InterruptedException {
    final String patternPrefix = "test_" + UUID.randomUUID() + "_";
    final int numTopics = 2;
    final Pattern topicPattern = Pattern.compile(Pattern.quote(patternPrefix) + ".*");
    final int numPartitions = 5;
    final int numTestRecordsPerTopic = 20;
    final Set<String> topics = IntStream.range(0, numTopics).mapToObj(i -> patternPrefix + i).collect(Collectors.toSet());
    final VertxTestContext setup = new VertxTestContext();
    createTopics(topics, numPartitions).compose(v -> publishRecords(numTestRecordsPerTopic, "key_", topics)).onComplete(setup.succeedingThenComplete());
    assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }
    LOG.debug("topics created and (to be ignored) test records published");
    // prepare consumer
    final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
    applyPartitionAssignmentStrategy(consumerConfig, partitionAssignmentStrategy);
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    final AtomicReference<Promise<Void>> nextRecordReceivedPromiseRef = new AtomicReference<>();
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        receivedRecords.add(record);
        Optional.ofNullable(nextRecordReceivedPromiseRef.get()).ifPresent(Promise::complete);
    };
    kafkaConsumer = new HonoKafkaConsumer(vertx, topicPattern, recordHandler, consumerConfig);
    // start consumer
    kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
        ctx.verify(() -> {
            assertThat(receivedRecords.size()).isEqualTo(0);
        });
        final Promise<Void> nextRecordReceivedPromise = Promise.promise();
        nextRecordReceivedPromiseRef.set(nextRecordReceivedPromise);
        LOG.debug("consumer started, create new topic implicitly by invoking ensureTopicIsAmongSubscribedTopicPatternTopics()");
        final String newTopic = patternPrefix + "new";
        final String recordKey = "addedAfterStartKey";
        kafkaConsumer.ensureTopicIsAmongSubscribedTopicPatternTopics(newTopic).onComplete(ctx.succeeding(v2 -> {
            LOG.debug("publish record to be received by the consumer");
            publish(newTopic, recordKey, Buffer.buffer("testPayload"));
        }));
        nextRecordReceivedPromise.future().onComplete(ar -> {
            ctx.verify(() -> {
                assertThat(receivedRecords.size()).isEqualTo(1);
                assertThat(receivedRecords.get(0).key()).isEqualTo(recordKey);
            });
            ctx.completeNow();
        });
    }));
}
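For comparison, at the level of the plain Apache Kafka consumer (which the vert.x client wraps) a topic pattern subscription looks roughly as follows; a sketch only, with placeholder config values:

// plain Apache Kafka consumer with a pattern subscription (placeholder config)
final Properties config = new Properties();
config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
config.put(ConsumerConfig.GROUP_ID_CONFIG, "test");
config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
try (org.apache.kafka.clients.consumer.KafkaConsumer<String, String> consumer =
        new org.apache.kafka.clients.consumer.KafkaConsumer<>(config)) {
    // the set of matching topics is only re-evaluated on metadata refresh ("metadata.max.age.ms")
    consumer.subscribe(Pattern.compile("test_.*"));
    consumer.poll(Duration.ofSeconds(1))
            .forEach(record -> System.out.println(record.topic() + ": " + record.value()));
}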

Example 9 with KafkaConsumer

use of io.vertx.kafka.client.consumer.KafkaConsumer in project hono by eclipse.

the class HonoKafkaConsumerIT method testConsumerReadsLatestRecordsPublishedAfterStart.

/**
 * Verifies that a HonoKafkaConsumer configured with "latest" as offset reset strategy only receives
 * records published after the consumer <em>start()</em> method has completed.
 *
 * @param partitionAssignmentStrategy The partition assignment strategy to use for the consumer.
 * @param ctx The vert.x test context.
 * @throws InterruptedException if test execution gets interrupted.
 */
@ParameterizedTest
@MethodSource("partitionAssignmentStrategies")
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsLatestRecordsPublishedAfterStart(final String partitionAssignmentStrategy, final VertxTestContext ctx) throws InterruptedException {
    final int numTopics = 2;
    final int numPartitions = 5;
    final int numTestRecordsPerTopic = 20;
    final Set<String> topics = IntStream.range(0, numTopics).mapToObj(i -> "test_" + i + "_" + UUID.randomUUID()).collect(Collectors.toSet());
    final String publishTestTopic = topics.iterator().next();
    final VertxTestContext setup = new VertxTestContext();
    createTopics(topics, numPartitions).compose(v -> publishRecords(numTestRecordsPerTopic, "key_", topics)).onComplete(setup.succeedingThenComplete());
    assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }
    LOG.debug("topics created and (to be ignored) test records published");
    // prepare consumer
    final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
    applyPartitionAssignmentStrategy(consumerConfig, partitionAssignmentStrategy);
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    final String publishedAfterStartRecordKey = "publishedAfterStartKey";
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        // verify received record
        ctx.verify(() -> assertThat(record.key()).isEqualTo(publishedAfterStartRecordKey));
        ctx.completeNow();
    };
    kafkaConsumer = new HonoKafkaConsumer(vertx, topics, recordHandler, consumerConfig);
    // start consumer
    kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
        LOG.debug("consumer started, publish record to be received by the consumer");
        publish(publishTestTopic, publishedAfterStartRecordKey, Buffer.buffer("testPayload"));
    }));
    if (!ctx.awaitCompletion(9, TimeUnit.SECONDS)) {
        ctx.failNow(new IllegalStateException("timeout waiting for record to be received"));
    }
}
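The applyPartitionAssignmentStrategy helper used above isn't shown in this excerpt either; a plausible minimal sketch (an assumption about the test support code, not verified against it):

// hypothetical: sets the assignment strategy (e.g. the CooperativeStickyAssignor class name)
// on the consumer config, if one was given by the parameterized test
private static void applyPartitionAssignmentStrategy(final Map<String, String> consumerConfig,
        final String partitionAssignmentStrategy) {
    Optional.ofNullable(partitionAssignmentStrategy)
            .ifPresent(strategy -> consumerConfig.put(
                    ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, strategy));
}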