Example 31 with ConsumerConfig

Use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by eclipse.

From the class AsyncHandlingAutoCommitKafkaConsumerTest, method testConsumerCommitsOffsetsOfSkippedExpiredRecords.

/**
 * Verifies that the consumer commits offsets for records whose ttl has expired.
 *
 * @param ctx The vert.x test context.
 * @throws InterruptedException if the test execution gets interrupted.
 */
@Test
public void testConsumerCommitsOffsetsOfSkippedExpiredRecords(final VertxTestContext ctx) throws InterruptedException {
    final int numNonExpiredTestRecords = 5;
    final VertxTestContext receivedRecordsCtx = new VertxTestContext();
    final Checkpoint expiredRecordCheckpoint = receivedRecordsCtx.checkpoint(1);
    final Checkpoint receivedRecordsCheckpoint = receivedRecordsCtx.checkpoint(numNonExpiredTestRecords);
    final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> handler = record -> {
        receivedRecordsCheckpoint.flag();
        return Future.succeededFuture();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    // periodic commit shall not play a role here
    consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "300000");
    consumer = new AsyncHandlingAutoCommitKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig) {

        @Override
        protected void onRecordHandlerSkippedForExpiredRecord(final KafkaConsumerRecord<String, Buffer> record) {
            super.onRecordHandlerSkippedForExpiredRecord(record);
            expiredRecordCheckpoint.flag();
        }
    };
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updatePartitions(TOPIC_PARTITION, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION));
    final Context consumerVertxContext = vertx.getOrCreateContext();
    consumerVertxContext.runOnContext(v -> {
        consumer.start().onComplete(ctx.succeeding(v2 -> {
            mockConsumer.schedulePollTask(() -> {
                // add record with elapsed ttl
                mockConsumer.addRecord(createRecordWithElapsedTtl());
                IntStream.range(1, numNonExpiredTestRecords + 1).forEach(offset -> {
                    mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer()));
                });
            });
        }));
    });
    assertWithMessage("records received in 5s").that(receivedRecordsCtx.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (receivedRecordsCtx.failed()) {
        ctx.failNow(receivedRecordsCtx.causeOfFailure());
        return;
    }
    final int numExpiredTestRecords = 1;
    final int latestFullyHandledOffset = numNonExpiredTestRecords + numExpiredTestRecords - 1;
    final VertxTestContext commitCheckContext = new VertxTestContext();
    final Checkpoint commitCheckpoint = commitCheckContext.checkpoint(1);
    consumer.setOnPartitionsAssignedHandler(partitions -> {
        final Map<TopicPartition, OffsetAndMetadata> committed = mockConsumer.committed(Set.of(TOPIC_PARTITION));
        ctx.verify(() -> {
            final OffsetAndMetadata offsetAndMetadata = committed.get(TOPIC_PARTITION);
            assertThat(offsetAndMetadata).isNotNull();
            assertThat(offsetAndMetadata.offset()).isEqualTo(latestFullyHandledOffset + 1L);
        });
        commitCheckpoint.flag();
    });
    // now force a rebalance, which should trigger the above onPartitionsAssignedHandler
    // (the rebalance is done as part of the poll() invocation; the vert.x consumer schedules that
    // invocation via an action executed on the event loop thread; do the same here, so that the record
    // handler running on the event loop thread has finished once the rebalance gets triggered).
    final CountDownLatch latch = new CountDownLatch(1);
    consumerVertxContext.runOnContext(v -> latch.countDown());
    latch.await();
    mockConsumer.rebalance(List.of(TOPIC_PARTITION));
    assertWithMessage("partition assigned in 5s for checking of commits").that(commitCheckContext.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (commitCheckContext.failed()) {
        ctx.failNow(commitCheckContext.causeOfFailure());
        return;
    }
    ctx.completeNow();
}
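
The createRecordWithElapsedTtl() helper is defined elsewhere in the test class and not reproduced on this page. A minimal sketch of what such a helper might look like, assuming a "ttl" header (value in seconds) combined with a CREATE_TIME timestamp far enough in the past that the ttl has already elapsed; the header name, encoding and constructor variant are assumptions based on the imports visible above, not code taken from Hono:

import java.time.Instant;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.record.TimestampType;
import io.vertx.core.buffer.Buffer;

// Hypothetical sketch of the helper referenced in the test above.
private ConsumerRecord<String, Buffer> createRecordWithElapsedTtl() {
    // ttl of 1 second (assumed header name and encoding)
    final Header ttlHeader = new RecordHeader("ttl", "1".getBytes());
    // record created a minute ago, so the 1s ttl has long elapsed when polled
    final long createTime = Instant.now().minusSeconds(60).toEpochMilli();
    // constructor signature as in Kafka 2.x clients; newer clients offer a
    // variant taking an Optional<Integer> leaderEpoch instead of the checksum
    return new ConsumerRecord<>(TOPIC, PARTITION, 0L, createTime, TimestampType.CREATE_TIME,
            -1L, -1, -1, "key_0", Buffer.buffer(),
            new RecordHeaders(new Header[] { ttlHeader }));
}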

Example 32 with ConsumerConfig

Use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by eclipse.

From the class AsyncHandlingAutoCommitKafkaConsumerTest, method testScenarioWithPartitionRevokedWhileHandlingIncomplete.

/**
 * Verifies that the consumer detects the scenario in which a partition is revoked and not assigned
 * again while some records have not been fully handled yet.
 *
 * @param ctx The vert.x test context.
 * @throws InterruptedException if the test execution gets interrupted.
 */
@Test
public void testScenarioWithPartitionRevokedWhileHandlingIncomplete(final VertxTestContext ctx) throws InterruptedException {
    final int numTestRecords = 5;
    final VertxTestContext receivedRecordsCtx = new VertxTestContext();
    final Checkpoint receivedRecordsCheckpoint = receivedRecordsCtx.checkpoint(numTestRecords);
    final Map<Long, Promise<Void>> recordsHandlingPromiseMap = new HashMap<>();
    final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> handler = record -> {
        final Promise<Void> promise = Promise.promise();
        if (recordsHandlingPromiseMap.put(record.offset(), promise) != null) {
            receivedRecordsCtx.failNow(new IllegalStateException("received record with duplicate offset"));
        }
        receivedRecordsCheckpoint.flag();
        return promise.future();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    // periodic commit shall not play a role here
    consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "300000");
    consumerConfig.put(AsyncHandlingAutoCommitKafkaConsumer.CONFIG_HONO_OFFSETS_COMMIT_RECORD_COMPLETION_TIMEOUT_MILLIS, "0");
    consumer = new AsyncHandlingAutoCommitKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig);
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updatePartitions(TOPIC_PARTITION, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION));
    consumer.start().onComplete(ctx.succeeding(v2 -> {
        mockConsumer.schedulePollTask(() -> {
            IntStream.range(0, numTestRecords).forEach(offset -> {
                mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer()));
            });
        });
    }));
    assertWithMessage("records received in 5s").that(receivedRecordsCtx.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (receivedRecordsCtx.failed()) {
        ctx.failNow(receivedRecordsCtx.causeOfFailure());
        return;
    }
    // records received, but their handling isn't completed yet;
    // trigger a rebalance after which the currently assigned partition is no longer assigned
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC2_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC2_PARTITION, 0L));
    mockConsumer.rebalance(List.of(TOPIC2_PARTITION));
    // mark the handling of some records as completed
    recordsHandlingPromiseMap.get(0L).complete();
    recordsHandlingPromiseMap.get(1L).complete();
    recordsHandlingPromiseMap.get(2L).complete();
    final Checkpoint commitCheckDone = ctx.checkpoint(1);
    consumer.setOnPartitionsAssignedHandler(partitions -> {
        final Map<TopicPartition, OffsetAndMetadata> committed = mockConsumer.committed(Set.of(TOPIC_PARTITION));
        ctx.verify(() -> {
            // the last rebalance where topicPartition got revoked should have just
            // triggered a commit of offset 0; the 3 records that only got completed
            // after the rebalance shouldn't have been taken into account in the commit
            final OffsetAndMetadata offsetAndMetadata = committed.get(TOPIC_PARTITION);
            assertThat(offsetAndMetadata).isNotNull();
            assertThat(offsetAndMetadata.offset()).isEqualTo(0);
        });
        commitCheckDone.flag();
    });
    // now force a rebalance which should trigger the above onPartitionsAssignedHandler
    mockConsumer.rebalance(List.of(TOPIC_PARTITION));
}
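
The expected value of 0 follows from the usual commit rule for out-of-order completion: the offset to commit is one past the highest contiguous completed offset, and at the moment the partition was revoked no record had completed yet. An illustrative sketch of that rule, not Hono's actual implementation:

import java.util.Set;

final class CommitOffsets {

    // Offset to commit, given the offsets whose handling has completed:
    // one past the last offset of the unbroken completed sequence that starts
    // at the first fetched offset. A Kafka commit always points at the next
    // offset to be consumed, not at the last one consumed.
    static long offsetToCommit(final long firstFetchedOffset, final Set<Long> completed) {
        long next = firstFetchedOffset;
        while (completed.contains(next)) {
            next++;
        }
        return next;
    }

    public static void main(final String[] args) {
        System.out.println(offsetToCommit(0, Set.of()));           // 0 - nothing completed (as in the test)
        System.out.println(offsetToCommit(0, Set.of(0L, 1L, 2L))); // 3 - offsets 0..2 fully handled
        System.out.println(offsetToCommit(0, Set.of(1L, 2L)));     // 0 - the gap at offset 0 blocks the commit
    }
}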

Example 33 with ConsumerConfig

Use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by eclipse.

From the class AsyncHandlingAutoCommitKafkaConsumerTest, method testConsumerCommitsOffsetsPeriodically.

/**
 * Verifies that the consumer commits the last fully handled records periodically.
 *
 * @param ctx The vert.x test context.
 */
@Test
public void testConsumerCommitsOffsetsPeriodically(final VertxTestContext ctx) {
    final Promise<Void> testRecordsReceived = Promise.promise();
    final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> handler = record -> {
        testRecordsReceived.complete();
        return Future.succeededFuture();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    // 1000ms commit interval - don't set the value too low, otherwise the frequent commit task
    // on the event loop thread will prevent the test main thread from getting things done
    consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
    consumer = new AsyncHandlingAutoCommitKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig);
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updatePartitions(TOPIC_PARTITION, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION));
    consumer.start().onComplete(ctx.succeeding(v2 -> {
        mockConsumer.schedulePollTask(() -> {
            mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, 0, "key_0", Buffer.buffer()));
        });
    }));
    testRecordsReceived.future().onComplete(v -> {
        // there is no hook to integrate into for the commit check,
        // therefore do the check multiple times with some delay in between
        final AtomicInteger checkCount = new AtomicInteger(0);
        vertx.setPeriodic(200, tid -> {
            checkCount.incrementAndGet();
            // check offsets
            final Map<TopicPartition, OffsetAndMetadata> committed = mockConsumer.committed(Set.of(TOPIC_PARTITION));
            if (!committed.isEmpty()) {
                ctx.verify(() -> {
                    final OffsetAndMetadata offsetAndMetadata = committed.get(TOPIC_PARTITION);
                    assertThat(offsetAndMetadata).isNotNull();
                    assertThat(offsetAndMetadata.offset()).isEqualTo(1L);
                });
                ctx.completeNow();
                vertx.cancelTimer(tid);
            } else {
                if (checkCount.get() >= 10) {
                    vertx.cancelTimer(tid);
                    ctx.failNow(new AssertionError("offset should have been committed"));
                }
            }
        });
    });
}
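
As the tests above imply, AsyncHandlingAutoCommitKafkaConsumer appears to reuse AUTO_COMMIT_INTERVAL_MS_CONFIG as the interval of its own periodic commit of fully handled offsets: 300000ms effectively disables periodic commits within a 5s test window, while 1000ms enables them. A hedged configuration sketch along those lines; whether the consumer internally turns off the Kafka client's own ENABLE_AUTO_COMMIT_CONFIG is an assumption, not shown on this page:

import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import org.apache.kafka.clients.consumer.ConsumerConfig;

final class PeriodicCommitConfig {

    static Map<String, String> periodicCommitConfig() {
        final Map<String, String> config = new HashMap<>();
        // fresh group id per test, so no previously committed offsets interfere
        config.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
        // drives the consumer's own commit timer (assumption: the consumer
        // manages commits itself rather than relying on client auto-commit)
        config.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        return config;
    }
}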

Example 34 with ConsumerConfig

Use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by eclipse.

From the class HonoKafkaConsumerIT, method testConsumerReadsLatestRecordsPublishedAfterOutOfRangeOffsetReset.

/**
 * Verifies that a HonoKafkaConsumer configured with "latest" as offset reset strategy will receive
 * all still available records after the committed offset position has gone out of range
 * (because records have been deleted according to the retention config) and the consumer is restarted.
 *
 * @param ctx The vert.x test context.
 * @throws InterruptedException if test execution gets interrupted.
 */
@Test
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsLatestRecordsPublishedAfterOutOfRangeOffsetReset(final VertxTestContext ctx) throws InterruptedException {
    final int numTopics = 1;
    final int numTestRecordsPerTopicPerRound = 20;
    // has to be 1 here because we expect partition 0 to contain *all* the records published for a topic
    final int numPartitions = 1;
    // prepare topics
    final Set<String> topics = IntStream.range(0, numTopics).mapToObj(i -> "test_" + i + "_" + UUID.randomUUID()).collect(Collectors.toSet());
    final String publishTestTopic = topics.iterator().next();
    final VertxTestContext setup = new VertxTestContext();
    final Map<String, String> topicsConfig = Map.of(TopicConfig.RETENTION_MS_CONFIG, "300", TopicConfig.SEGMENT_BYTES_CONFIG, SMALL_TOPIC_SEGMENT_SIZE_BYTES);
    createTopics(topics, numPartitions, topicsConfig).onComplete(setup.succeedingThenComplete());
    assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }
    // prepare consumer
    final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    final VertxTestContext firstConsumerInstanceStartedAndStopped = new VertxTestContext();
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        receivedRecords.add(record);
        if (receivedRecords.size() == numTestRecordsPerTopicPerRound * topics.size()) {
            LOG.trace("first round of records received; stop consumer; committed offset afterwards shall be {}", numTestRecordsPerTopicPerRound);
            kafkaConsumer.stop().onFailure(ctx::failNow).onSuccess(v2 -> {
                LOG.trace("publish 2nd round of records (shall be deleted before the to-be-restarted consumer is able to receive them)");
                publishRecords(numTestRecordsPerTopicPerRound, "round2_", topics).onFailure(ctx::failNow).onSuccess(v3 -> {
                    LOG.trace("wait until records of first two rounds have been deleted according to the retention policy (committed offset will be out-of-range then)");
                    final int beginningOffsetToWaitFor = numTestRecordsPerTopicPerRound * 2;
                    waitForLogDeletion(new TopicPartition(publishTestTopic, 0), beginningOffsetToWaitFor, Duration.ofSeconds(5)).onComplete(firstConsumerInstanceStartedAndStopped.succeedingThenComplete());
                });
            });
        }
    };
    kafkaConsumer = new HonoKafkaConsumer(vertx, topics, recordHandler, consumerConfig);
    // first start of consumer, letting it commit offsets
    kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
        LOG.trace("consumer started, publish first round of records to be received by the consumer (so that it has offsets to commit)");
        publishRecords(numTestRecordsPerTopicPerRound, "round1_", topics);
    }));
    assertThat(firstConsumerInstanceStartedAndStopped.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (firstConsumerInstanceStartedAndStopped.failed()) {
        ctx.failNow(firstConsumerInstanceStartedAndStopped.causeOfFailure());
        return;
    }
    // preparation done, now start the same consumer again and verify it reads all still available records - even though the committed offset is out of range now
    receivedRecords.clear();
    final String lastRecordKey = "lastKey";
    // restarted consumer is expected to receive 3rd round of records + one extra record published after consumer start
    final int expectedNumberOfRecords = (numTestRecordsPerTopicPerRound * topics.size()) + 1;
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler2 = record -> {
        receivedRecords.add(record);
        if (receivedRecords.size() == expectedNumberOfRecords) {
            ctx.verify(() -> {
                assertThat(receivedRecords.get(0).key()).startsWith("round3");
                assertThat(receivedRecords.get(receivedRecords.size() - 1).key()).isEqualTo(lastRecordKey);
            });
            ctx.completeNow();
        }
    };
    LOG.trace("publish 3nd round of records (shall be received by to-be-restarted consumer)");
    publishRecords(numTestRecordsPerTopicPerRound, "round3_", topics).onFailure(ctx::failNow).onSuccess(v -> {
        kafkaConsumer = new HonoKafkaConsumer(vertx, topics, recordHandler2, consumerConfig);
        kafkaConsumer.start().onComplete(ctx.succeeding(v2 -> {
            LOG.debug("consumer started, publish another record to be received by the consumer");
            publish(publishTestTopic, lastRecordKey, Buffer.buffer("testPayload"));
        }));
    });
    if (!ctx.awaitCompletion(9, TimeUnit.SECONDS)) {
        ctx.failNow(new IllegalStateException(String.format("timeout waiting for expected number of records (%d) to be received; received records: %d", expectedNumberOfRecords, receivedRecords.size())));
    }
}
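
For context: with the plain Kafka client, AUTO_OFFSET_RESET_CONFIG only takes effect when there is no valid offset to resume from, and "latest" then jumps to the log end, which in the scenario above would silently skip the still-available round-3 records. The test therefore appears to exercise out-of-range handling specific to HonoKafkaConsumer. A sketch of the plain-client setting involved, for comparison only:

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;

final class OffsetResetConfig {

    // Plain Kafka client semantics (not Hono's): when the committed offset is
    // out of range, "latest" resets to the log end and skips still-available
    // records, while "earliest" resets to the log start and re-reads them.
    static Properties plainClientProps(final String resetStrategy) {
        final Properties props = new Properties();
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, resetStrategy); // "latest" or "earliest"
        return props;
    }
}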

Example 35 with ConsumerConfig

Use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by eclipse.

From the class HonoKafkaConsumerIT, method testConsumerReadsLatestRecordsPublishedAfterTopicSubscriptionConfirmed.

/**
 * Verifies that a HonoKafkaConsumer configured with "latest" as offset reset strategy and a topic pattern
 * subscription only receives records published after the consumer <em>start()</em> method has completed.
 * <p>
 * Also verifies that all records published after the consumer <em>ensureTopicIsAmongSubscribedTopicPatternTopics()</em>
 * method has completed are received by the consumer, even if the topic was only created after the consumer
 * <em>start</em> method has completed.
 *
 * @param partitionAssignmentStrategy The partition assignment strategy to use for the consumer.
 * @param ctx The vert.x test context.
 * @throws InterruptedException if test execution gets interrupted.
 */
@ParameterizedTest
@MethodSource("partitionAssignmentStrategies")
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsLatestRecordsPublishedAfterTopicSubscriptionConfirmed(final String partitionAssignmentStrategy, final VertxTestContext ctx) throws InterruptedException {
    final String patternPrefix = "test_" + UUID.randomUUID() + "_";
    final int numTopics = 2;
    final Pattern topicPattern = Pattern.compile(Pattern.quote(patternPrefix) + ".*");
    final int numPartitions = 5;
    final int numTestRecordsPerTopic = 20;
    final Set<String> topics = IntStream.range(0, numTopics).mapToObj(i -> patternPrefix + i).collect(Collectors.toSet());
    final VertxTestContext setup = new VertxTestContext();
    createTopics(topics, numPartitions).compose(v -> publishRecords(numTestRecordsPerTopic, "key_", topics)).onComplete(setup.succeedingThenComplete());
    assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }
    LOG.debug("topics created and (to be ignored) test records published");
    // prepare consumer
    final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
    applyPartitionAssignmentStrategy(consumerConfig, partitionAssignmentStrategy);
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    final AtomicReference<Promise<Void>> nextRecordReceivedPromiseRef = new AtomicReference<>();
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        receivedRecords.add(record);
        Optional.ofNullable(nextRecordReceivedPromiseRef.get()).ifPresent(Promise::complete);
    };
    kafkaConsumer = new HonoKafkaConsumer(vertx, topicPattern, recordHandler, consumerConfig);
    // start consumer
    kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
        ctx.verify(() -> {
            assertThat(receivedRecords.size()).isEqualTo(0);
        });
        final Promise<Void> nextRecordReceivedPromise = Promise.promise();
        nextRecordReceivedPromiseRef.set(nextRecordReceivedPromise);
        LOG.debug("consumer started, create new topic implicitly by invoking ensureTopicIsAmongSubscribedTopicPatternTopics()");
        final String newTopic = patternPrefix + "new";
        final String recordKey = "addedAfterStartKey";
        kafkaConsumer.ensureTopicIsAmongSubscribedTopicPatternTopics(newTopic).onComplete(ctx.succeeding(v2 -> {
            LOG.debug("publish record to be received by the consumer");
            publish(newTopic, recordKey, Buffer.buffer("testPayload"));
        }));
        nextRecordReceivedPromise.future().onComplete(ar -> {
            ctx.verify(() -> {
                assertThat(receivedRecords.size()).isEqualTo(1);
                assertThat(receivedRecords.get(0).key()).isEqualTo(recordKey);
            });
            ctx.completeNow();
        });
    }));
}
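
The partitionAssignmentStrategies method source and the applyPartitionAssignmentStrategy helper are not shown on this page; judging from the CooperativeStickyAssignor import above, they presumably switch between the client default and the cooperative sticky assignor. A hypothetical sketch of such a helper - the real Hono code may differ:

import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.CooperativeStickyAssignor;

final class AssignmentStrategies {

    // Applies the given assignor class name; a null strategy keeps the
    // client default (the range assignor in older Kafka clients).
    static void applyPartitionAssignmentStrategy(final Map<String, String> consumerConfig,
            final String partitionAssignmentStrategy) {
        if (partitionAssignmentStrategy != null) {
            consumerConfig.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG,
                    partitionAssignmentStrategy);
        }
    }

    // Example: enable cooperative rebalancing, as one of the parameterized strategies
    static void useCooperativeSticky(final Map<String, String> consumerConfig) {
        applyPartitionAssignmentStrategy(consumerConfig, CooperativeStickyAssignor.class.getName());
    }
}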

Aggregations

ConsumerConfig (org.apache.kafka.clients.consumer.ConsumerConfig): 35
List (java.util.List): 31
Map (java.util.Map): 24
Set (java.util.Set): 24
Collectors (java.util.stream.Collectors): 24
Pattern (java.util.regex.Pattern): 23
Optional (java.util.Optional): 22
TimeUnit (java.util.concurrent.TimeUnit): 20
UUID (java.util.UUID): 19
Handler (io.vertx.core.Handler): 18
Vertx (io.vertx.core.Vertx): 18
Buffer (io.vertx.core.buffer.Buffer): 18
KafkaConsumerRecord (io.vertx.kafka.client.consumer.KafkaConsumerRecord): 18
Logger (org.slf4j.Logger): 18
LoggerFactory (org.slf4j.LoggerFactory): 18
Instant (java.time.Instant): 17
HashMap (java.util.HashMap): 17
Truth.assertThat (com.google.common.truth.Truth.assertThat): 16
Future (io.vertx.core.Future): 16
Promise (io.vertx.core.Promise): 16