
Example 6 with KafkaConsumerRecord

Use of io.vertx.kafka.client.consumer.KafkaConsumerRecord in the Eclipse Hono project.

From the class AsyncHandlingAutoCommitKafkaConsumerTest, method testConsumerCommitsOffsetsOnRebalance.

/**
 * Verifies that the consumer commits the last fully handled records on rebalance.
 *
 * @param ctx The vert.x test context.
 * @throws InterruptedException if the test execution gets interrupted.
 */
@Test
public void testConsumerCommitsOffsetsOnRebalance(final VertxTestContext ctx) throws InterruptedException {
    final int numTestRecords = 5;
    final VertxTestContext receivedRecordsCtx = new VertxTestContext();
    final Checkpoint receivedRecordsCheckpoint = receivedRecordsCtx.checkpoint(numTestRecords);
    final Map<Long, Promise<Void>> recordsHandlingPromiseMap = new HashMap<>();
    final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> handler = record -> {
        final Promise<Void> promise = Promise.promise();
        if (recordsHandlingPromiseMap.put(record.offset(), promise) != null) {
            receivedRecordsCtx.failNow(new IllegalStateException("received record with duplicate offset"));
        }
        receivedRecordsCheckpoint.flag();
        return promise.future();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    // periodic commit shall not play a role here
    consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "300000");
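    // record-completion timeout of 0: commits on rebalance don't wait for outstanding records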
    consumerConfig.put(AsyncHandlingAutoCommitKafkaConsumer.CONFIG_HONO_OFFSETS_COMMIT_RECORD_COMPLETION_TIMEOUT_MILLIS, "0");
    consumer = new AsyncHandlingAutoCommitKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig);
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updatePartitions(TOPIC_PARTITION, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION));
    consumer.start().onComplete(ctx.succeeding(v2 -> {
        mockConsumer.schedulePollTask(() -> {
            IntStream.range(0, numTestRecords).forEach(offset -> {
                mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer()));
            });
        });
    }));
    assertWithMessage("records received in 5s").that(receivedRecordsCtx.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (receivedRecordsCtx.failed()) {
        ctx.failNow(receivedRecordsCtx.causeOfFailure());
        return;
    }
    // records received, complete the handling of some of them
    recordsHandlingPromiseMap.get(0L).complete();
    recordsHandlingPromiseMap.get(1L).complete();
    // offsets 2 and 3 not completed yet, hence offset 1 is the latest in the row of fully handled records
    final AtomicInteger latestFullyHandledOffset = new AtomicInteger(1);
    recordsHandlingPromiseMap.get(4L).complete();
    // define VertxTestContexts for 3 checks (3x rebalance/commit)
    final AtomicInteger checkIndex = new AtomicInteger(0);
    final List<VertxTestContext> commitCheckContexts = IntStream.range(0, 3).mapToObj(i -> new VertxTestContext()).collect(Collectors.toList());
    final List<Checkpoint> commitCheckpoints = commitCheckContexts.stream().map(c -> c.checkpoint(1)).collect(Collectors.toList());
    final InterruptableSupplier<Boolean> waitForCurrentCommitCheckResult = () -> {
        assertWithMessage("partition assigned in 5s for checking of commits").that(commitCheckContexts.get(checkIndex.get()).awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
        if (commitCheckContexts.get(checkIndex.get()).failed()) {
            ctx.failNow(commitCheckContexts.get(checkIndex.get()).causeOfFailure());
            return false;
        }
        return true;
    };
    consumer.setOnPartitionsAssignedHandler(partitions -> {
        final Map<TopicPartition, OffsetAndMetadata> committed = mockConsumer.committed(Set.of(TOPIC_PARTITION));
        ctx.verify(() -> {
            final OffsetAndMetadata offsetAndMetadata = committed.get(TOPIC_PARTITION);
            assertThat(offsetAndMetadata).isNotNull();
            assertThat(offsetAndMetadata.offset()).isEqualTo(latestFullyHandledOffset.get() + 1L);
        });
        commitCheckpoints.get(checkIndex.get()).flag();
    });
    // now force a rebalance which should trigger the above onPartitionsAssignedHandler
    mockConsumer.rebalance(List.of(TOPIC_PARTITION));
    if (!waitForCurrentCommitCheckResult.get()) {
        return;
    }
    checkIndex.incrementAndGet();
    // now another rebalance (i.e. another commit trigger) - no change in offsets
    mockConsumer.rebalance(List.of(TOPIC_PARTITION));
    if (!waitForCurrentCommitCheckResult.get()) {
        return;
    }
    checkIndex.incrementAndGet();
    // now complete some more promises
    recordsHandlingPromiseMap.get(2L).complete();
    recordsHandlingPromiseMap.get(3L).complete();
    // offset 4 already complete
    latestFullyHandledOffset.set(4);
    // again rebalance/commit
    mockConsumer.rebalance(List.of(TOPIC_PARTITION));
    if (waitForCurrentCommitCheckResult.get()) {
        ctx.completeNow();
    }
}
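
The offset asserted in the onPartitionsAssignedHandler follows Kafka's convention: the committed value is the offset of the next record to consume, i.e. one past the latest offset up to which every record has been fully handled. The following is a minimal, hypothetical sketch of that rule; the class and method names are illustrative and not part of Hono or the Kafka API.

import java.util.Map;
import java.util.Optional;

/**
 * Illustrative helper: computes the offset to commit from per-record completion state.
 */
final class ContiguousOffsetTracker {

    private ContiguousOffsetTracker() {
    }

    /**
     * @param completedByOffset Completion flag per record offset.
     * @param firstOffset The first offset fetched for the partition.
     * @return The offset to commit (one past the last contiguously completed offset),
     *         or an empty Optional if no record may be committed yet.
     */
    static Optional<Long> offsetToCommit(final Map<Long, Boolean> completedByOffset, final long firstOffset) {
        long next = firstOffset;
        // advance as long as records are completed without a gap
        while (Boolean.TRUE.equals(completedByOffset.get(next))) {
            next++;
        }
        // Kafka commits the offset of the next record to consume
        return next > firstOffset ? Optional.of(next) : Optional.empty();
    }
}

With offsets 0, 1 and 4 completed and 2 and 3 still pending, as in the first part of the test, offsetToCommit(completedByOffset, 0) yields 2, matching latestFullyHandledOffset (1) plus one; once 2 and 3 are completed as well, it yields 5.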

Example 7 with KafkaConsumerRecord

Use of io.vertx.kafka.client.consumer.KafkaConsumerRecord in the Eclipse Hono project.

From the class AsyncHandlingAutoCommitKafkaConsumerTest, method testConsumerCommitsOffsetsOnRebalanceAfterWaitingForRecordCompletion.

/**
 * Verifies that the consumer commits record offsets on rebalance, having waited some time for record
 * handling to be completed.
 *
 * @param ctx The vert.x test context.
 * @throws InterruptedException if the test execution gets interrupted.
 */
@Test
public void testConsumerCommitsOffsetsOnRebalanceAfterWaitingForRecordCompletion(final VertxTestContext ctx) throws InterruptedException {
    final int numTestRecords = 5;
    final VertxTestContext receivedRecordsCtx = new VertxTestContext();
    final Checkpoint receivedRecordsCheckpoint = receivedRecordsCtx.checkpoint(numTestRecords);
    final Map<Long, Promise<Void>> recordsHandlingPromiseMap = new HashMap<>();
    final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> handler = record -> {
        final Promise<Void> promise = Promise.promise();
        if (recordsHandlingPromiseMap.put(record.offset(), promise) != null) {
            receivedRecordsCtx.failNow(new IllegalStateException("received record with duplicate offset"));
        }
        receivedRecordsCheckpoint.flag();
        return promise.future();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    // periodic commit shall not play a role here
    consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "300000");
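    // generous record-completion timeout: the commit on rebalance shall wait for outstanding records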
    consumerConfig.put(AsyncHandlingAutoCommitKafkaConsumer.CONFIG_HONO_OFFSETS_COMMIT_RECORD_COMPLETION_TIMEOUT_MILLIS, "21000");
    final AtomicReference<Handler<Void>> onNextPartitionsRevokedBlockingHandlerRef = new AtomicReference<>();
    consumer = new AsyncHandlingAutoCommitKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig) {

        @Override
        protected void onPartitionsRevokedBlocking(final Set<io.vertx.kafka.client.common.TopicPartition> partitionsSet) {
            Optional.ofNullable(onNextPartitionsRevokedBlockingHandlerRef.get()).ifPresent(handler -> handler.handle(null));
            onNextPartitionsRevokedBlockingHandlerRef.set(null);
            super.onPartitionsRevokedBlocking(partitionsSet);
        }
    };
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updatePartitions(TOPIC_PARTITION, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION));
    final Context consumerVertxContext = vertx.getOrCreateContext();
    consumerVertxContext.runOnContext(v -> {
        consumer.start().onComplete(ctx.succeeding(v2 -> {
            mockConsumer.schedulePollTask(() -> {
                IntStream.range(0, numTestRecords).forEach(offset -> {
                    mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer()));
                });
            });
        }));
    });
    assertWithMessage("records received in 5s").that(receivedRecordsCtx.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (receivedRecordsCtx.failed()) {
        ctx.failNow(receivedRecordsCtx.causeOfFailure());
        return;
    }
    // records received, complete the handling of all except the first 2 records
    LongStream.range(2, numTestRecords).forEach(offset -> recordsHandlingPromiseMap.get(offset).complete());
    ctx.verify(() -> assertThat(recordsHandlingPromiseMap.get(1L).future().isComplete()).isFalse());
    // partitions revoked handler shall get called after the blocking partitions-revoked handling has waited for the records to be marked as completed
    consumer.setOnPartitionsRevokedHandler(s -> {
        ctx.verify(() -> assertThat(recordsHandlingPromiseMap.get(1L).future().isComplete()).isTrue());
    });
    final Checkpoint commitCheckDone = ctx.checkpoint(1);
    consumer.setOnPartitionsAssignedHandler(partitions -> {
        final Map<TopicPartition, OffsetAndMetadata> committed = mockConsumer.committed(Set.of(TOPIC_PARTITION));
        ctx.verify(() -> {
            final OffsetAndMetadata offsetAndMetadata = committed.get(TOPIC_PARTITION);
            assertThat(offsetAndMetadata).isNotNull();
            assertThat(offsetAndMetadata.offset()).isEqualTo(numTestRecords);
        });
        commitCheckDone.flag();
    });
    // trigger a rebalance where the currently assigned partition is revoked
    // (and then assigned again - otherwise its offset wouldn't be returned by mockConsumer.committed())
    // the remaining 2 records are to be marked as completed with some delay
    onNextPartitionsRevokedBlockingHandlerRef.set(v -> {
        consumerVertxContext.runOnContext(v2 -> {
            recordsHandlingPromiseMap.get(0L).complete();
            recordsHandlingPromiseMap.get(1L).complete();
        });
    });
    mockConsumer.setRevokeAllOnRebalance(true);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC2_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC2_PARTITION, 0L));
    mockConsumer.setNextPollRebalancePartitionAssignment(List.of(TOPIC_PARTITION, TOPIC2_PARTITION));
}
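
What this test exercises is a bounded wait: with CONFIG_HONO_OFFSETS_COMMIT_RECORD_COMPLETION_TIMEOUT_MILLIS set to 21000, the blocking partitions-revoked handling waits for the still-outstanding records (offsets 0 and 1) to be completed before committing, so that all five offsets end up committed. A minimal sketch of such a bounded wait, using plain java.util.concurrent types instead of Hono's vert.x-based internals (the class is illustrative, not Hono API):

import java.util.Collection;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

/**
 * Illustrative helper: waits a bounded time for outstanding record handling to finish.
 */
final class RecordCompletionAwaiter {

    private RecordCompletionAwaiter() {
    }

    /**
     * @return {@code true} if all outstanding records completed within the timeout.
     */
    static boolean awaitCompletion(final Collection<CompletableFuture<Void>> outstanding, final long timeoutMillis) {
        try {
            CompletableFuture.allOf(outstanding.toArray(new CompletableFuture[0]))
                    .get(timeoutMillis, TimeUnit.MILLISECONDS);
            return true;
        } catch (final TimeoutException e) {
            // timed out: only the contiguously completed records may be committed
            return false;
        } catch (final InterruptedException e) {
            Thread.currentThread().interrupt();
            return false;
        } catch (final ExecutionException e) {
            // a record handler failed; how this affects the commit is up to the caller
            return true;
        }
    }
}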

Example 8 with KafkaConsumerRecord

Use of io.vertx.kafka.client.consumer.KafkaConsumerRecord in the Eclipse Hono project.

From the class AsyncHandlingAutoCommitKafkaConsumerTest, method testConsumerCommitsInitialOffset.

/**
 * Verifies that the consumer commits the initial partition offset on the first offset commit after
 * the partition got assigned to the consumer.
 *
 * @param ctx The vert.x test context.
 * @throws InterruptedException if the test execution gets interrupted.
 */
@Test
public void testConsumerCommitsInitialOffset(final VertxTestContext ctx) throws InterruptedException {
    final Promise<Void> testRecordsReceived = Promise.promise();
    final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> handler = record -> {
        testRecordsReceived.complete();
        return Future.succeededFuture();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    // 1000ms commit interval - don't choose this value too low, otherwise the frequent commit
    // task on the event loop thread will prevent the test main thread from getting things done
    consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
    consumer = new AsyncHandlingAutoCommitKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig);
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updatePartitions(TOPIC_PARTITION, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION));
    final VertxTestContext consumerStartedCtx = new VertxTestContext();
    final Checkpoint consumerStartedCheckpoint = consumerStartedCtx.checkpoint(2);
    consumer.setOnRebalanceDoneHandler(s -> consumerStartedCheckpoint.flag());
    vertx.getOrCreateContext().runOnContext(v -> {
        consumer.start().onSuccess(v2 -> consumerStartedCheckpoint.flag());
    });
    assertWithMessage("consumer started in 5s").that(consumerStartedCtx.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (consumerStartedCtx.failed()) {
        ctx.failNow(consumerStartedCtx.causeOfFailure());
        return;
    }
    final List<Map<TopicPartition, OffsetAndMetadata>> reportedCommits = new ArrayList<>();
    mockConsumer.addCommitListener(reportedCommits::add);
    final CountDownLatch rebalance1Done = new CountDownLatch(1);
    consumer.setOnPartitionsAssignedHandler(partitions -> {
        final Map<TopicPartition, OffsetAndMetadata> committed = mockConsumer.committed(Set.of(TOPIC_PARTITION, TOPIC2_PARTITION));
        ctx.verify(() -> {
            // the rebalance where topicPartition got revoked should have triggered a commit of offset 0 for topicPartition
            assertThat(reportedCommits.size()).isEqualTo(1);
            final OffsetAndMetadata offsetAndMetadata = committed.get(TOPIC_PARTITION);
            assertThat(offsetAndMetadata).isNotNull();
            assertThat(offsetAndMetadata.offset()).isEqualTo(0);
        });
    });
    consumer.setOnRebalanceDoneHandler(s -> rebalance1Done.countDown());
    // now force a rebalance which should trigger the above onPartitionsAssignedHandler
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC2_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC2_PARTITION, 0L));
    mockConsumer.rebalance(List.of(TOPIC2_PARTITION));
    if (!rebalance1Done.await(5, TimeUnit.SECONDS)) {
        ctx.failNow(new IllegalStateException("partitionsAssigned handler not invoked"));
        return;
    }
    final CountDownLatch rebalance2Done = new CountDownLatch(1);
    consumer.setOnPartitionsAssignedHandler(partitions -> {
        final Map<TopicPartition, OffsetAndMetadata> committed = mockConsumer.committed(Set.of(TOPIC_PARTITION, TOPIC2_PARTITION));
        ctx.verify(() -> {
            // the 2nd rebalance where topic2Partition got revoked and topicPartition got assigned
            // should have triggered a commit of offset 0 for topic2Partition
            assertThat(reportedCommits.size()).isEqualTo(2);
            final OffsetAndMetadata offsetAndMetadata = committed.get(TOPIC2_PARTITION);
            assertThat(offsetAndMetadata).isNotNull();
            assertThat(offsetAndMetadata.offset()).isEqualTo(0);
        });
    });
    consumer.setOnRebalanceDoneHandler(s -> rebalance2Done.countDown());
    // now again force a rebalance which should trigger the above onPartitionsAssignedHandler
    // - this time again with the first partition
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.rebalance(List.of(TOPIC_PARTITION));
    if (!rebalance2Done.await(5, TimeUnit.SECONDS)) {
        ctx.failNow(new IllegalStateException("partitionsAssigned handler not invoked"));
        return;
    }
    consumer.setOnPartitionsAssignedHandler(partitions -> {
        ctx.verify(() -> {
            // the 3rd rebalance where all partitions got revoked should have triggered no new commits
            assertThat(reportedCommits.size()).isEqualTo(2);
        });
        ctx.completeNow();
    });
    // now force a 3rd rebalance, assigning no partition
    mockConsumer.rebalance(List.of());
}
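
The test relies on the consumer committing the current position of a freshly assigned partition even before any record has been handled for it, so the committed offset equals the partition's beginning offset (0 here). Reduced to the plain Kafka consumer API, a hypothetical sketch of committing such initial positions (not Hono's actual implementation):

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

/**
 * Illustrative helper: commits the current position of newly assigned partitions.
 */
final class InitialOffsetCommitter {

    private InitialOffsetCommitter() {
    }

    static void commitInitialOffsets(final Consumer<?, ?> consumer, final Collection<TopicPartition> assigned) {
        final Map<TopicPartition, OffsetAndMetadata> toCommit = new HashMap<>();
        for (final TopicPartition partition : assigned) {
            // position() returns the offset of the next record to fetch; right after
            // assignment with no prior commit this is the partition's beginning offset
            toCommit.put(partition, new OffsetAndMetadata(consumer.position(partition)));
        }
        consumer.commitSync(toCommit);
    }
}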

Example 9 with KafkaConsumerRecord

Use of io.vertx.kafka.client.consumer.KafkaConsumerRecord in the Eclipse Hono project.

From the class AsyncHandlingAutoCommitKafkaConsumerTest, method testConsumerCommitsOffsetsOnStop.

/**
 * Verifies that the consumer commits the last fully handled records when it is stopped.
 *
 * @param ctx The vert.x test context.
 * @throws InterruptedException if the test execution gets interrupted.
 */
@Test
public void testConsumerCommitsOffsetsOnStop(final VertxTestContext ctx) throws InterruptedException {
    final int numTestRecords = 5;
    final VertxTestContext receivedRecordsCtx = new VertxTestContext();
    final Checkpoint receivedRecordsCheckpoint = receivedRecordsCtx.checkpoint(numTestRecords);
    final Map<Long, Promise<Void>> recordsHandlingPromiseMap = new HashMap<>();
    final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> handler = record -> {
        final Promise<Void> promise = Promise.promise();
        if (recordsHandlingPromiseMap.put(record.offset(), promise) != null) {
            receivedRecordsCtx.failNow(new IllegalStateException("received record with duplicate offset"));
        }
        receivedRecordsCheckpoint.flag();
        return promise.future();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    // periodic commit shall not play a role here
    consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "300000");
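    // record-completion timeout of 0: the commit on stop doesn't wait for outstanding records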
    consumerConfig.put(AsyncHandlingAutoCommitKafkaConsumer.CONFIG_HONO_OFFSETS_COMMIT_RECORD_COMPLETION_TIMEOUT_MILLIS, "0");
    consumer = new AsyncHandlingAutoCommitKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig);
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updatePartitions(TOPIC_PARTITION, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION));
    consumer.start().onComplete(ctx.succeeding(v2 -> {
        mockConsumer.schedulePollTask(() -> {
            IntStream.range(0, numTestRecords).forEach(offset -> {
                mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer()));
            });
        });
    }));
    assertWithMessage("records received in 5s").that(receivedRecordsCtx.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (receivedRecordsCtx.failed()) {
        ctx.failNow(receivedRecordsCtx.causeOfFailure());
        return;
    }
    // records received, complete the handling of some of them
    recordsHandlingPromiseMap.get(0L).complete();
    recordsHandlingPromiseMap.get(1L).complete();
    // offsets 2 and 3 not completed yet, hence offset 1 is the latest in the row of fully handled records
    final int latestFullyHandledOffset = 1;
    recordsHandlingPromiseMap.get(4L).complete();
    // keep the mock usable after close() - otherwise mockConsumer.committed() couldn't be queried below
    mockConsumer.setSkipSettingClosedFlagOnNextClose();
    // now close the consumer
    consumer.stop().onComplete(v -> {
        final Map<TopicPartition, OffsetAndMetadata> committed = mockConsumer.committed(Set.of(TOPIC_PARTITION));
        ctx.verify(() -> {
            final OffsetAndMetadata offsetAndMetadata = committed.get(TOPIC_PARTITION);
            assertThat(offsetAndMetadata).isNotNull();
            assertThat(offsetAndMetadata.offset()).isEqualTo(latestFullyHandledOffset + 1L);
        });
        ctx.completeNow();
    });
}
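
The stop sequence verified here mirrors the rebalance case: before closing the Kafka consumer, the wrapper commits one past the latest fully handled offset, so a restarted consumer would resume at offset 2. Stripped down to the plain consumer API, a hypothetical sketch of that shutdown step:

import java.util.Map;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

/**
 * Illustrative helper: commit the last fully handled offset, then close the consumer.
 */
final class CommittingCloser {

    private CommittingCloser() {
    }

    static void commitAndClose(final Consumer<?, ?> consumer, final TopicPartition partition,
            final long latestFullyHandledOffset) {
        // commit the offset of the next record to consume, then release the consumer
        consumer.commitSync(Map.of(partition, new OffsetAndMetadata(latestFullyHandledOffset + 1)));
        consumer.close();
    }
}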

Example 10 with KafkaConsumerRecord

Use of io.vertx.kafka.client.consumer.KafkaConsumerRecord in the Eclipse Hono project.

From the class KafkaCommandProcessingQueue, method remove.

/**
 * Removes the command represented by the given command context.
 * <p>
 * To be used for commands for which processing resulted in an error
 * and {@link #applySendCommandAction(KafkaBasedCommandContext, Supplier)}
 * will not be invoked.
 *
 * @param commandContext The context containing the command to remove.
 * @return {@code true} if the command was removed.
 */
public boolean remove(final KafkaBasedCommandContext commandContext) {
    Objects.requireNonNull(commandContext);
    final KafkaConsumerRecord<String, Buffer> record = commandContext.getCommand().getRecord();
    final TopicPartition topicPartition = new TopicPartition(record.topic(), record.partition());
    return Optional.ofNullable(commandQueues.get(topicPartition))
            .map(commandQueue -> commandQueue.remove(commandContext))
            .orElse(false);
}
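
remove() resolves the queue for the record's topic partition and delegates the removal, returning false if no queue exists for that partition. A self-contained sketch of this per-partition queue pattern, with a generic element type instead of Hono's KafkaBasedCommandContext (the class is illustrative, not Hono's implementation):

import java.util.Deque;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import org.apache.kafka.common.TopicPartition;

/**
 * Illustrative sketch: queues keyed by topic partition. Assumed to be accessed from a
 * single thread (e.g. a vert.x context), hence no synchronization.
 */
final class PerPartitionQueues<T> {

    private final Map<TopicPartition, Deque<T>> queues = new HashMap<>();

    void add(final TopicPartition partition, final T element) {
        queues.computeIfAbsent(partition, p -> new LinkedList<>()).add(element);
    }

    /**
     * @return {@code true} if the element was present in its partition's queue and got removed.
     */
    boolean remove(final TopicPartition partition, final T element) {
        Objects.requireNonNull(element);
        return Optional.ofNullable(queues.get(partition))
                .map(queue -> queue.remove(element))
                .orElse(false);
    }
}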
