Example 16 with KafkaConsumerRecord

Use of io.vertx.kafka.client.consumer.KafkaConsumerRecord in the Eclipse Hono project.

From class AsyncHandlingAutoCommitKafkaConsumerTest, method testConsumerRespectsMaxRecordsInProcessingLimit.

/**
 * Verifies that the maximum number of records concurrently being processed by the consumer
 * does not exceed the limit of 1.5 times the <em>max.poll.records</em> config value.
 *
 * @param ctx The vert.x test context.
 */
@Test
public void testConsumerRespectsMaxRecordsInProcessingLimit(final VertxTestContext ctx) {
    final int maxPollRecords = 10;
    final int throttlingThreshold = maxPollRecords * AsyncHandlingAutoCommitKafkaConsumer.THROTTLING_THRESHOLD_PERCENTAGE_OF_MAX_POLL_RECORDS / 100;
    final int maxRecordsInProcessing = maxPollRecords + Math.max(throttlingThreshold, 1);
    final int numTestBatches = 5;
    final int numRecordsPerBatch = maxPollRecords;
    final int numRecords = numTestBatches * numRecordsPerBatch;
    final AtomicInteger offsetCounter = new AtomicInteger();
    final Promise<Void> allRecordsReceivedPromise = Promise.promise();
    final List<Promise<Void>> uncompletedRecordHandlingPromises = new ArrayList<>();
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final AtomicInteger observedMaxRecordsInProcessing = new AtomicInteger();
    final AtomicInteger testBatchesToAdd = new AtomicInteger(numTestBatches);
    // let the record handler complete the record processing only when record fetching is already paused (or once all records have been received)
    final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> recordHandler = record -> {
        receivedRecords.add(record);
        final Promise<Void> recordHandlingCompleted = Promise.promise();
        uncompletedRecordHandlingPromises.add(recordHandlingCompleted);
        if (consumer.isRecordFetchingPaused() || receivedRecords.size() == numRecords) {
            if (uncompletedRecordHandlingPromises.size() > observedMaxRecordsInProcessing.get()) {
                observedMaxRecordsInProcessing.set(uncompletedRecordHandlingPromises.size());
            }
            if (receivedRecords.size() == numRecords) {
                LOG.trace("complete all remaining {} record handling promises", uncompletedRecordHandlingPromises.size());
                uncompletedRecordHandlingPromises.forEach(Promise::tryComplete);
                uncompletedRecordHandlingPromises.clear();
            } else {
                // complete record handling promises until consumer record fetching isn't paused anymore
                completeUntilConsumerRecordFetchingResumed(uncompletedRecordHandlingPromises.iterator());
            }
        }
        if (receivedRecords.size() == numRecords) {
            vertx.runOnContext(v -> allRecordsReceivedPromise.tryComplete());
        }
        return recordHandlingCompleted.future();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    consumerConfig.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, Integer.toString(maxPollRecords));
    consumer = new AsyncHandlingAutoCommitKafkaConsumer(vertx, Set.of(TOPIC), recordHandler, consumerConfig);
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
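    // prepare the mock consumer: a single partition starting at offset 0, assigned in the rebalance that follows the subscribe()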
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updatePartitions(TOPIC_PARTITION, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION));
    consumer.start().onComplete(ctx.succeeding(v2 -> {
        // schedule the poll tasks
        schedulePollTasksWithConsumerPausedCheck(offsetCounter, numRecordsPerBatch, testBatchesToAdd);
        final long timerId = vertx.setTimer(8000, tid -> {
            LOG.info("received records:\n{}", receivedRecords.stream().map(Object::toString).collect(Collectors.joining(",\n")));
            allRecordsReceivedPromise.tryFail(String.format("only received %d out of %d expected messages after 8s", receivedRecords.size(), numRecords));
        });
        allRecordsReceivedPromise.future().onComplete(ctx.succeeding(v -> {
            vertx.cancelTimer(timerId);
            ctx.verify(() -> {
                assertWithMessage("observed max no. of records in processing").that(observedMaxRecordsInProcessing.get()).isEqualTo(maxRecordsInProcessing);
            });
            ctx.completeNow();
        }));
    }));
}
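
For reference, the limit asserted above works out as follows. A minimal, self-contained sketch of the arithmetic, assuming the consumer's THROTTLING_THRESHOLD_PERCENTAGE_OF_MAX_POLL_RECORDS constant is 50 (an assumption implied by the documented "1.5 times max.poll.records" limit; the actual value lives in Hono's source):

// Sketch of the limit computation mirrored from the test above; the threshold
// percentage of 50 is an assumption, not read from Hono's source.
public class MaxRecordsInProcessingSketch {
    public static void main(final String[] args) {
        final int maxPollRecords = 10;
        final int assumedThresholdPercentage = 50; // hypothetical stand-in for the Hono constant
        final int throttlingThreshold = maxPollRecords * assumedThresholdPercentage / 100; // = 5
        final int maxRecordsInProcessing = maxPollRecords + Math.max(throttlingThreshold, 1); // = 15
        System.out.println("max records in processing: " + maxRecordsInProcessing); // 15 == 1.5 * 10
    }
}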

Example 17 with KafkaConsumerRecord

Use of io.vertx.kafka.client.consumer.KafkaConsumerRecord in the Eclipse Hono project.

From class AsyncHandlingAutoCommitKafkaConsumerTest, method testConsumerCommitsOffsetsOfSkippedExpiredRecords.

/**
 * Verifies that the consumer commits offsets for records whose ttl has expired.
 *
 * @param ctx The vert.x test context.
 * @throws InterruptedException if the test execution gets interrupted.
 */
@Test
public void testConsumerCommitsOffsetsOfSkippedExpiredRecords(final VertxTestContext ctx) throws InterruptedException {
    final int numNonExpiredTestRecords = 5;
    final VertxTestContext receivedRecordsCtx = new VertxTestContext();
    final Checkpoint expiredRecordCheckpoint = receivedRecordsCtx.checkpoint(1);
    final Checkpoint receivedRecordsCheckpoint = receivedRecordsCtx.checkpoint(numNonExpiredTestRecords);
    final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> handler = record -> {
        receivedRecordsCheckpoint.flag();
        return Future.succeededFuture();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    // periodic commit shall not play a role here
    consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "300000");
    consumer = new AsyncHandlingAutoCommitKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig) {

        @Override
        protected void onRecordHandlerSkippedForExpiredRecord(final KafkaConsumerRecord<String, Buffer> record) {
            super.onRecordHandlerSkippedForExpiredRecord(record);
            expiredRecordCheckpoint.flag();
        }
    };
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updatePartitions(TOPIC_PARTITION, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION));
    final Context consumerVertxContext = vertx.getOrCreateContext();
    consumerVertxContext.runOnContext(v -> {
        consumer.start().onComplete(ctx.succeeding(v2 -> {
            mockConsumer.schedulePollTask(() -> {
                // add record with elapsed ttl
                mockConsumer.addRecord(createRecordWithElapsedTtl());
                IntStream.range(1, numNonExpiredTestRecords + 1).forEach(offset -> {
                    mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer()));
                });
            });
        }));
    });
    assertWithMessage("records received in 5s").that(receivedRecordsCtx.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (receivedRecordsCtx.failed()) {
        ctx.failNow(receivedRecordsCtx.causeOfFailure());
        return;
    }
    final int numExpiredTestRecords = 1;
    final int latestFullyHandledOffset = numNonExpiredTestRecords + numExpiredTestRecords - 1;
    final VertxTestContext commitCheckContext = new VertxTestContext();
    final Checkpoint commitCheckpoint = commitCheckContext.checkpoint(1);
    consumer.setOnPartitionsAssignedHandler(partitions -> {
        final Map<TopicPartition, OffsetAndMetadata> committed = mockConsumer.committed(Set.of(TOPIC_PARTITION));
        ctx.verify(() -> {
            final OffsetAndMetadata offsetAndMetadata = committed.get(TOPIC_PARTITION);
            assertThat(offsetAndMetadata).isNotNull();
            assertThat(offsetAndMetadata.offset()).isEqualTo(latestFullyHandledOffset + 1L);
        });
        commitCheckpoint.flag();
    });
    // now force a rebalance, which should trigger the above onPartitionsAssignedHandler
    // (the rebalance is done as part of the poll() invocation; the vert.x consumer will schedule that
    // invocation via an action executed on the event loop thread; do the same here, so that the record
    // handler running on the event loop thread is finished once the rebalance gets triggered).
    final CountDownLatch latch = new CountDownLatch(1);
    consumerVertxContext.runOnContext(v -> latch.countDown());
    latch.await();
    mockConsumer.rebalance(List.of(TOPIC_PARTITION));
    assertWithMessage("partition assigned in 5s for checking of commits").that(commitCheckContext.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (commitCheckContext.failed()) {
        ctx.failNow(commitCheckContext.causeOfFailure());
        return;
    }
    ctx.completeNow();
}
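
The helper createRecordWithElapsedTtl() is not shown in this excerpt. A plausible sketch, assuming expiry is derived from a "ttl" header (time-to-live in seconds) combined with the record's creation timestamp; the header name, the values, and the 11-argument ConsumerRecord constructor (the shape used by recent kafka-clients versions) are assumptions, and TOPIC and PARTITION are the test's constants:

// Hedged sketch of the helper used above; header name and semantics are assumptions.
// The record's creation timestamp lies further in the past than its assumed ttl of
// 1 second, so the consumer should skip the record handler for it.
private ConsumerRecord<String, Buffer> createRecordWithElapsedTtl() {
    final Header ttlHeader = new RecordHeader("ttl", "1".getBytes()); // assumed: ttl of 1 second
    final long creationTime = Instant.now().minusSeconds(2).toEpochMilli(); // already older than the ttl
    return new ConsumerRecord<>(TOPIC, PARTITION, 0, creationTime, TimestampType.CREATE_TIME,
            -1, -1, "key_0", Buffer.buffer(),
            new RecordHeaders(new Header[] { ttlHeader }), Optional.empty());
}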

Example 18 with KafkaConsumerRecord

Use of io.vertx.kafka.client.consumer.KafkaConsumerRecord in the Eclipse Hono project.

From class AsyncHandlingAutoCommitKafkaConsumerTest, method testScenarioWithPartitionRevokedWhileHandlingIncomplete.

/**
 * Verifies that the consumer detects the scenario in which a partition is revoked and not
 * assigned again while there are still records that have not been fully handled.
 *
 * @param ctx The vert.x test context.
 * @throws InterruptedException if the test execution gets interrupted.
 */
@Test
public void testScenarioWithPartitionRevokedWhileHandlingIncomplete(final VertxTestContext ctx) throws InterruptedException {
    final int numTestRecords = 5;
    final VertxTestContext receivedRecordsCtx = new VertxTestContext();
    final Checkpoint receivedRecordsCheckpoint = receivedRecordsCtx.checkpoint(numTestRecords);
    final Map<Long, Promise<Void>> recordsHandlingPromiseMap = new HashMap<>();
    final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> handler = record -> {
        final Promise<Void> promise = Promise.promise();
        if (recordsHandlingPromiseMap.put(record.offset(), promise) != null) {
            receivedRecordsCtx.failNow(new IllegalStateException("received record with duplicate offset"));
        }
        receivedRecordsCheckpoint.flag();
        return promise.future();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    // periodic commit shall not play a role here
    consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "300000");
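    // a record completion timeout of 0: commits triggered by the rebalance won't wait for still-incomplete record handling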
    consumerConfig.put(AsyncHandlingAutoCommitKafkaConsumer.CONFIG_HONO_OFFSETS_COMMIT_RECORD_COMPLETION_TIMEOUT_MILLIS, "0");
    consumer = new AsyncHandlingAutoCommitKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig);
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updatePartitions(TOPIC_PARTITION, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION));
    consumer.start().onComplete(ctx.succeeding(v2 -> {
        mockConsumer.schedulePollTask(() -> {
            IntStream.range(0, numTestRecords).forEach(offset -> {
                mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer()));
            });
        });
    }));
    assertWithMessage("records received in 5s").that(receivedRecordsCtx.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (receivedRecordsCtx.failed()) {
        ctx.failNow(receivedRecordsCtx.causeOfFailure());
        return;
    }
    // records received, but their handling isn't completed yet
    // do a rebalance with the currently assigned partition not being assigned anymore after it
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC2_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC2_PARTITION, 0L));
    mockConsumer.rebalance(List.of(TOPIC2_PARTITION));
    // mark the handling of some records as completed
    recordsHandlingPromiseMap.get(0L).complete();
    recordsHandlingPromiseMap.get(1L).complete();
    recordsHandlingPromiseMap.get(2L).complete();
    final Checkpoint commitCheckDone = ctx.checkpoint(1);
    consumer.setOnPartitionsAssignedHandler(partitions -> {
        final Map<TopicPartition, OffsetAndMetadata> committed = mockConsumer.committed(Set.of(TOPIC_PARTITION));
        ctx.verify(() -> {
            // the last rebalance, in which TOPIC_PARTITION got revoked, should have just
            // triggered a commit of offset 0; the 3 records that only got completed
            // after the rebalance shouldn't have been taken into account in the commit
            final OffsetAndMetadata offsetAndMetadata = committed.get(TOPIC_PARTITION);
            assertThat(offsetAndMetadata).isNotNull();
            assertThat(offsetAndMetadata.offset()).isEqualTo(0);
        });
        commitCheckDone.flag();
    });
    // now force a rebalance which should trigger the above onPartitionsAssignedHandler
    mockConsumer.rebalance(List.of(TOPIC_PARTITION));
}
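
The asserted commit position follows the usual contiguity rule for out-of-order record completion: only a gap-free prefix of fully handled offsets is committable. An illustrative, self-contained sketch of that rule (not Hono's actual implementation):

import java.util.Set;
import java.util.TreeSet;

// Illustrative sketch, not Hono's implementation: the committable position is the
// end of the contiguous prefix of fully handled offsets, starting at the next
// expected offset.
public class CommitPositionSketch {

    static long committableOffset(final long nextExpected, final Set<Long> handled) {
        long offset = nextExpected;
        while (handled.contains(offset)) {
            offset++; // extend the gap-free prefix of handled offsets
        }
        return offset; // first not-yet-handled offset, i.e. the offset to commit
    }

    public static void main(final String[] args) {
        // In the test above no record was completed before the revoking rebalance,
        // so offset 0 gets committed; offsets 0-2 completed afterwards don't count.
        System.out.println(committableOffset(0L, new TreeSet<>())); // 0
        // Had offsets 0, 1 and 2 been completed in time, offset 3 would be committable:
        System.out.println(committableOffset(0L, Set.of(0L, 1L, 2L))); // 3
    }
}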

Example 19 with KafkaConsumerRecord

Use of io.vertx.kafka.client.consumer.KafkaConsumerRecord in the Eclipse Hono project.

From class AsyncHandlingAutoCommitKafkaConsumerTest, method testConsumerCommitsOffsetsPeriodically.

/**
 * Verifies that the consumer periodically commits the offsets of the last fully handled records.
 *
 * @param ctx The vert.x test context.
 */
@Test
public void testConsumerCommitsOffsetsPeriodically(final VertxTestContext ctx) {
    final Promise<Void> testRecordsReceived = Promise.promise();
    final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> handler = record -> {
        testRecordsReceived.complete();
        return Future.succeededFuture();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    // 1000ms commit interval - don't set the value too low,
    // otherwise the frequent commit task on the event loop thread would prevent the test's main thread from getting things done
    consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
    consumer = new AsyncHandlingAutoCommitKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig);
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updatePartitions(TOPIC_PARTITION, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION));
    consumer.start().onComplete(ctx.succeeding(v2 -> {
        mockConsumer.schedulePollTask(() -> {
            mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, 0, "key_0", Buffer.buffer()));
        });
    }));
    testRecordsReceived.future().onComplete(v -> {
        // there is no hook to tie into for checking the commit,
        // therefore do the check multiple times with some delay in between
        final AtomicInteger checkCount = new AtomicInteger(0);
        vertx.setPeriodic(200, tid -> {
            checkCount.incrementAndGet();
            // check offsets
            final Map<TopicPartition, OffsetAndMetadata> committed = mockConsumer.committed(Set.of(TOPIC_PARTITION));
            if (!committed.isEmpty()) {
                ctx.verify(() -> {
                    final OffsetAndMetadata offsetAndMetadata = committed.get(TOPIC_PARTITION);
                    assertThat(offsetAndMetadata).isNotNull();
                    assertThat(offsetAndMetadata.offset()).isEqualTo(1L);
                });
                ctx.completeNow();
                vertx.cancelTimer(tid);
            } else {
                if (checkCount.get() >= 10) {
                    vertx.cancelTimer(tid);
                    ctx.failNow(new AssertionError("offset should have been committed"));
                }
            }
        });
    });
}
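
The periodic commit this test waits for can be pictured as a timer on the vert.x event loop firing at the configured auto-commit interval. A minimal sketch of that pattern (illustrative only; Hono's actual commit logic additionally tracks the last fully handled offsets):

import io.vertx.core.Vertx;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerConfig;

// Sketch of the timer pattern exercised above; runs until the process is terminated.
public class PeriodicCommitSketch {
    public static void main(final String[] args) {
        final Vertx vertx = Vertx.vertx();
        final Map<String, String> consumerConfig =
                Map.of(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        final long intervalMillis = Long.parseLong(
                consumerConfig.get(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG));
        vertx.setPeriodic(intervalMillis, timerId -> {
            // a real consumer would commit the offsets of the last fully handled records here
            System.out.println("periodic commit tick");
        });
    }
}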

Example 20 with KafkaConsumerRecord

Use of io.vertx.kafka.client.consumer.KafkaConsumerRecord in the Eclipse Hono project.

From class HonoKafkaConsumerTest, method testConsumerInvokesHandlerOnReceivedRecords.

/**
 * Verifies that the HonoKafkaConsumer invokes the provided handler on received records.
 *
 * @param ctx The vert.x test context.
 */
@Test
public void testConsumerInvokesHandlerOnReceivedRecords(final VertxTestContext ctx) {
    final int numTestRecords = 5;
    final Checkpoint receivedRecordsCheckpoint = ctx.checkpoint(numTestRecords);
    final Handler<KafkaConsumerRecord<String, Buffer>> handler = record -> {
        receivedRecordsCheckpoint.flag();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    consumer = new HonoKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig);
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(topicPartition, 0L));
    mockConsumer.updateEndOffsets(Map.of(topicPartition, 0L));
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(topicPartition));
    consumer.start().onComplete(ctx.succeeding(v2 -> {
        mockConsumer.schedulePollTask(() -> {
            IntStream.range(0, numTestRecords).forEach(offset -> {
                mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer()));
            });
        });
    }));
}
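
Note the handler shape: in contrast to the AsyncHandlingAutoCommitKafkaConsumer examples above, which take a Function returning a Future to signal when record handling is complete, HonoKafkaConsumer takes a plain fire-and-forget Handler. A small sketch contrasting the two signatures as they appear in these examples:

import io.vertx.core.Future;
import io.vertx.core.Handler;
import io.vertx.core.buffer.Buffer;
import io.vertx.kafka.client.consumer.KafkaConsumerRecord;
import java.util.function.Function;

// Both types are taken directly from the examples above; only the class wrapper is added.
public class HandlerShapesSketch {
    // HonoKafkaConsumer (this example): the record counts as handled once the handler returns.
    static final Handler<KafkaConsumerRecord<String, Buffer>> SIMPLE =
            record -> { /* process record */ };

    // AsyncHandlingAutoCommitKafkaConsumer (Examples 16-19): the returned Future tells
    // the consumer when handling is complete, gating offset commits.
    static final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> ASYNC =
            record -> Future.succeededFuture();
}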
