Example 16 with ConsumerConfig

Use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by eclipse.

From class AsyncHandlingAutoCommitKafkaConsumerTest, method testConsumerCommitsOffsetsOnRebalanceAfterWaitingForRecordCompletion.

/**
 * Verifies that the consumer commits record offsets on rebalance, having waited some time for record
 * handling to be completed.
 *
 * @param ctx The vert.x test context.
 * @throws InterruptedException if the test execution gets interrupted.
 */
@Test
public void testConsumerCommitsOffsetsOnRebalanceAfterWaitingForRecordCompletion(final VertxTestContext ctx) throws InterruptedException {
    final int numTestRecords = 5;
    final VertxTestContext receivedRecordsCtx = new VertxTestContext();
    final Checkpoint receivedRecordsCheckpoint = receivedRecordsCtx.checkpoint(numTestRecords);
    final Map<Long, Promise<Void>> recordsHandlingPromiseMap = new HashMap<>();
    final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> handler = record -> {
        final Promise<Void> promise = Promise.promise();
        if (recordsHandlingPromiseMap.put(record.offset(), promise) != null) {
            receivedRecordsCtx.failNow(new IllegalStateException("received record with duplicate offset"));
        }
        receivedRecordsCheckpoint.flag();
        return promise.future();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    // periodic commit shall not play a role here
    consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "300000");
    consumerConfig.put(AsyncHandlingAutoCommitKafkaConsumer.CONFIG_HONO_OFFSETS_COMMIT_RECORD_COMPLETION_TIMEOUT_MILLIS, "21000");
    final AtomicReference<Handler<Void>> onNextPartitionsRevokedBlockingHandlerRef = new AtomicReference<>();
    consumer = new AsyncHandlingAutoCommitKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig) {

        @Override
        protected void onPartitionsRevokedBlocking(final Set<io.vertx.kafka.client.common.TopicPartition> partitionsSet) {
            Optional.ofNullable(onNextPartitionsRevokedBlockingHandlerRef.get()).ifPresent(handler -> handler.handle(null));
            onNextPartitionsRevokedBlockingHandlerRef.set(null);
            super.onPartitionsRevokedBlocking(partitionsSet);
        }
    };
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updatePartitions(TOPIC_PARTITION, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION));
    final Context consumerVertxContext = vertx.getOrCreateContext();
    consumerVertxContext.runOnContext(v -> {
        consumer.start().onComplete(ctx.succeeding(v2 -> {
            mockConsumer.schedulePollTask(() -> {
                IntStream.range(0, numTestRecords).forEach(offset -> {
                    mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer()));
                });
            });
        }));
    });
    assertWithMessage("records received in 5s").that(receivedRecordsCtx.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (receivedRecordsCtx.failed()) {
        ctx.failNow(receivedRecordsCtx.causeOfFailure());
        return;
    }
    // records received, complete the handling of all except the first 2 records
    LongStream.range(2, numTestRecords).forEach(offset -> recordsHandlingPromiseMap.get(offset).complete());
    ctx.verify(() -> assertThat(recordsHandlingPromiseMap.get(1L).future().isComplete()).isFalse());
    // partitions revoked handler shall get called after the blocking partitions-revoked handling has waited for the records to be marked as completed
    consumer.setOnPartitionsRevokedHandler(s -> {
        ctx.verify(() -> assertThat(recordsHandlingPromiseMap.get(1L).future().isComplete()).isTrue());
    });
    final Checkpoint commitCheckDone = ctx.checkpoint(1);
    consumer.setOnPartitionsAssignedHandler(partitions -> {
        final Map<TopicPartition, OffsetAndMetadata> committed = mockConsumer.committed(Set.of(TOPIC_PARTITION));
        ctx.verify(() -> {
            final OffsetAndMetadata offsetAndMetadata = committed.get(TOPIC_PARTITION);
            assertThat(offsetAndMetadata).isNotNull();
            assertThat(offsetAndMetadata.offset()).isEqualTo(numTestRecords);
        });
        commitCheckDone.flag();
    });
    // trigger a rebalance where the currently assigned partition is revoked
    // (and then assigned again - otherwise its offset wouldn't be returned by mockConsumer.committed())
    // the remaining 2 records are to be marked as completed with some delay
    onNextPartitionsRevokedBlockingHandlerRef.set(v -> {
        consumerVertxContext.runOnContext(v2 -> {
            recordsHandlingPromiseMap.get(0L).complete();
            recordsHandlingPromiseMap.get(1L).complete();
        });
    });
    mockConsumer.setRevokeAllOnRebalance(true);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC2_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC2_PARTITION, 0L));
    mockConsumer.setNextPollRebalancePartitionAssignment(List.of(TOPIC_PARTITION, TOPIC2_PARTITION));
}
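
The Hono-specific KafkaMockConsumer used above wraps Kafka's own MockConsumer. For readers unfamiliar with that base class, here is a minimal, self-contained sketch of the underlying mechanics this test relies on: feeding a record into a poll and checking the committed offset afterwards. The topic name, record contents and the String value type are illustrative assumptions, not taken from the test.

import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

public class MockConsumerCommitSketch {

    public static void main(final String[] args) {
        final TopicPartition tp = new TopicPartition("test", 0);
        final MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        consumer.assign(List.of(tp));
        // the EARLIEST reset strategy makes the first poll start at the beginning offset
        consumer.updateBeginningOffsets(Map.of(tp, 0L));
        consumer.addRecord(new ConsumerRecord<>("test", 0, 0L, "key_0", "value_0"));
        consumer.poll(Duration.ofMillis(10));
        // commit the position *after* the handled record, as the consumer under test does on rebalance
        consumer.commitSync(Map.of(tp, new OffsetAndMetadata(1L)));
        final Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(Set.of(tp));
        System.out.println(committed.get(tp).offset()); // prints 1
    }
}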

Example 17 with ConsumerConfig

Use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by eclipse.

From class AsyncHandlingAutoCommitKafkaConsumerTest, method testConsumerCommitsInitialOffset.

/**
 * Verifies that the consumer commits the initial partition offset on the first offset commit after
 * the partition got assigned to the consumer.
 *
 * @param ctx The vert.x test context.
 * @throws InterruptedException if the test execution gets interrupted.
 */
@Test
public void testConsumerCommitsInitialOffset(final VertxTestContext ctx) throws InterruptedException {
    final Promise<Void> testRecordsReceived = Promise.promise();
    final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> handler = record -> {
        testRecordsReceived.complete();
        return Future.succeededFuture();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    // 1000ms commit interval - don't set the value too low,
    // otherwise the frequent commit task on the event loop thread will prevent the test main thread from getting things done
    consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
    consumer = new AsyncHandlingAutoCommitKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig);
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updatePartitions(TOPIC_PARTITION, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION));
    final VertxTestContext consumerStartedCtx = new VertxTestContext();
    final Checkpoint consumerStartedCheckpoint = consumerStartedCtx.checkpoint(2);
    consumer.setOnRebalanceDoneHandler(s -> consumerStartedCheckpoint.flag());
    vertx.getOrCreateContext().runOnContext(v -> {
        consumer.start().onSuccess(v2 -> consumerStartedCheckpoint.flag());
    });
    assertWithMessage("consumer started in 5s").that(consumerStartedCtx.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (consumerStartedCtx.failed()) {
        ctx.failNow(consumerStartedCtx.causeOfFailure());
        return;
    }
    final List<Map<TopicPartition, OffsetAndMetadata>> reportedCommits = new ArrayList<>();
    mockConsumer.addCommitListener(reportedCommits::add);
    final CountDownLatch rebalance1Done = new CountDownLatch(1);
    consumer.setOnPartitionsAssignedHandler(partitions -> {
        final Map<TopicPartition, OffsetAndMetadata> committed = mockConsumer.committed(Set.of(TOPIC_PARTITION, TOPIC2_PARTITION));
        ctx.verify(() -> {
            // the rebalance where TOPIC_PARTITION got revoked should have triggered a commit of offset 0 for TOPIC_PARTITION
            assertThat(reportedCommits.size()).isEqualTo(1);
            final OffsetAndMetadata offsetAndMetadata = committed.get(TOPIC_PARTITION);
            assertThat(offsetAndMetadata).isNotNull();
            assertThat(offsetAndMetadata.offset()).isEqualTo(0);
        });
    });
    consumer.setOnRebalanceDoneHandler(s -> rebalance1Done.countDown());
    // now force a rebalance which should trigger the above onPartitionsAssignedHandler
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC2_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC2_PARTITION, 0L));
    mockConsumer.rebalance(List.of(TOPIC2_PARTITION));
    if (!rebalance1Done.await(5, TimeUnit.SECONDS)) {
        ctx.failNow(new IllegalStateException("partitionsAssigned handler not invoked"));
    }
    final CountDownLatch rebalance2Done = new CountDownLatch(1);
    consumer.setOnPartitionsAssignedHandler(partitions -> {
        final Map<TopicPartition, OffsetAndMetadata> committed = mockConsumer.committed(Set.of(TOPIC_PARTITION, TOPIC2_PARTITION));
        ctx.verify(() -> {
            // the 2nd rebalance, where TOPIC2_PARTITION got revoked and TOPIC_PARTITION got assigned,
            // should have triggered a commit of offset 0 for TOPIC2_PARTITION
            assertThat(reportedCommits.size()).isEqualTo(2);
            final OffsetAndMetadata offsetAndMetadata = committed.get(TOPIC2_PARTITION);
            assertThat(offsetAndMetadata).isNotNull();
            assertThat(offsetAndMetadata.offset()).isEqualTo(0);
        });
    });
    consumer.setOnRebalanceDoneHandler(s -> rebalance2Done.countDown());
    // now again force a rebalance which should trigger the above onPartitionsAssignedHandler
    // - this time again with the first partition
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.rebalance(List.of(TOPIC_PARTITION));
    if (!rebalance2Done.await(5, TimeUnit.SECONDS)) {
        ctx.failNow(new IllegalStateException("partitionsAssigned handler not invoked"));
    }
    consumer.setOnPartitionsAssignedHandler(partitions -> {
        ctx.verify(() -> {
            // the 3rd rebalance where all partitions got revoked should have triggered no new commits
            assertThat(reportedCommits.size()).isEqualTo(2);
        });
        ctx.completeNow();
    });
    // now force a 3rd rebalance, assigning no partition
    mockConsumer.rebalance(List.of());
}
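
The behavior verified here - committing a partition's initial offset right after assignment so that a later rebalance can report it - can also be expressed with the plain Kafka consumer API. The listener below is an illustrative approximation of that idea, not Hono's actual implementation:

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

// Sketch: commit the initial fetch position of newly assigned partitions,
// so a commit exists for them even before any record has been handled.
class InitialOffsetCommittingListener implements ConsumerRebalanceListener {

    private final Consumer<?, ?> consumer;

    InitialOffsetCommittingListener(final Consumer<?, ?> consumer) {
        this.consumer = consumer;
    }

    @Override
    public void onPartitionsAssigned(final Collection<TopicPartition> partitions) {
        final Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
        // position() is the offset of the next record to fetch - 0 for a fresh partition
        partitions.forEach(tp -> offsets.put(tp, new OffsetAndMetadata(consumer.position(tp))));
        consumer.commitSync(offsets);
    }

    @Override
    public void onPartitionsRevoked(final Collection<TopicPartition> partitions) {
        // nothing to do in this sketch
    }
}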

Example 18 with ConsumerConfig

Use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by eclipse.

From class AsyncHandlingAutoCommitKafkaConsumerTest, method testConsumerCommitsOffsetsOnStop.

/**
 * Verifies that the consumer commits the offsets of the last fully handled records when it is stopped.
 *
 * @param ctx The vert.x test context.
 * @throws InterruptedException if the test execution gets interrupted.
 */
@Test
public void testConsumerCommitsOffsetsOnStop(final VertxTestContext ctx) throws InterruptedException {
    final int numTestRecords = 5;
    final VertxTestContext receivedRecordsCtx = new VertxTestContext();
    final Checkpoint receivedRecordsCheckpoint = receivedRecordsCtx.checkpoint(numTestRecords);
    final Map<Long, Promise<Void>> recordsHandlingPromiseMap = new HashMap<>();
    final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> handler = record -> {
        final Promise<Void> promise = Promise.promise();
        if (recordsHandlingPromiseMap.put(record.offset(), promise) != null) {
            receivedRecordsCtx.failNow(new IllegalStateException("received record with duplicate offset"));
        }
        receivedRecordsCheckpoint.flag();
        return promise.future();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    // periodic commit shall not play a role here
    consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "300000");
    consumerConfig.put(AsyncHandlingAutoCommitKafkaConsumer.CONFIG_HONO_OFFSETS_COMMIT_RECORD_COMPLETION_TIMEOUT_MILLIS, "0");
    consumer = new AsyncHandlingAutoCommitKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig);
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updatePartitions(TOPIC_PARTITION, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION));
    consumer.start().onComplete(ctx.succeeding(v2 -> {
        mockConsumer.schedulePollTask(() -> {
            IntStream.range(0, numTestRecords).forEach(offset -> {
                mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer()));
            });
        });
    }));
    assertWithMessage("records received in 5s").that(receivedRecordsCtx.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (receivedRecordsCtx.failed()) {
        ctx.failNow(receivedRecordsCtx.causeOfFailure());
        return;
    }
    // records received, complete the handling of some of them
    recordsHandlingPromiseMap.get(0L).complete();
    recordsHandlingPromiseMap.get(1L).complete();
    // offset 2 not completed yet, hence offset 1 is the latest in the row of fully handled records
    final int latestFullyHandledOffset = 1;
    recordsHandlingPromiseMap.get(4L).complete();
    // keep the mock usable after close() - otherwise mockConsumer.committed() couldn't be called below
    mockConsumer.setSkipSettingClosedFlagOnNextClose();
    // now stop the consumer
    consumer.stop().onComplete(v -> {
        final Map<TopicPartition, OffsetAndMetadata> committed = mockConsumer.committed(Set.of(TOPIC_PARTITION));
        ctx.verify(() -> {
            final OffsetAndMetadata offsetAndMetadata = committed.get(TOPIC_PARTITION);
            assertThat(offsetAndMetadata).isNotNull();
            assertThat(offsetAndMetadata.offset()).isEqualTo(latestFullyHandledOffset + 1L);
        });
        ctx.completeNow();
    });
}
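
The expected commit of latestFullyHandledOffset + 1 follows Kafka's commit semantics: the committed offset is the position of the next record to consume. A small sketch (not Hono's code) of how the latest contiguous completed offset translates into a committable position:

import java.util.OptionalLong;
import java.util.Set;

final class CommittableOffsetSketch {

    // Given the offsets whose handling completed, walk forward from the first
    // offset while the completed range is contiguous; the first gap is the
    // position to commit (last fully handled offset + 1).
    static OptionalLong committablePosition(final Set<Long> completed, final long firstOffset) {
        long next = firstOffset;
        while (completed.contains(next)) {
            next++;
        }
        return next > firstOffset ? OptionalLong.of(next) : OptionalLong.empty();
    }

    public static void main(final String[] args) {
        // offsets 0, 1 and 4 completed, 2 and 3 still pending - commit position 2
        System.out.println(committablePosition(Set.of(0L, 1L, 4L), 0L));
    }
}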

Example 19 with ConsumerConfig

Use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by eclipse.

From class HonoKafkaConsumer, method start.

@Override
public Future<Void> start() {
    context = vertx.getOrCreateContext();
    final Promise<Void> startPromise = Promise.promise();
    runOnContext(v -> {
        // create KafkaConsumer here so that it is created in the Vert.x context of the start() method (KafkaConsumer uses vertx.getOrCreateContext())
        Optional.ofNullable(kafkaConsumerSupplier).map(supplier -> Future.succeededFuture(KafkaConsumer.create(vertx, supplier.get()))).orElseGet(() -> {
            final KafkaClientFactory kafkaClientFactory = new KafkaClientFactory(vertx);
            return kafkaClientFactory.createKafkaConsumerWithRetries(consumerConfig, String.class, Buffer.class, consumerCreationRetriesTimeout);
        }).onFailure(thr -> {
            log.error("error creating consumer [client-id: {}]", getClientId(), thr);
            startPromise.fail(thr);
        }).onSuccess(consumer -> {
            kafkaConsumer = consumer;
            Optional.ofNullable(metricsSupport).ifPresent(ms -> ms.registerKafkaConsumer(kafkaConsumer.unwrap()));
            kafkaConsumer.handler(record -> {
                if (!startPromise.future().isComplete()) {
                    log.debug("postponing record handling until start() is completed [topic: {}, partition: {}, offset: {}]", record.topic(), record.partition(), record.offset());
                }
                startPromise.future().onSuccess(v2 -> {
                    if (respectTtl && KafkaRecordHelper.isTtlElapsed(record.headers())) {
                        onRecordHandlerSkippedForExpiredRecord(record);
                    } else {
                        try {
                            recordHandler.handle(record);
                        } catch (final Exception e) {
                            log.warn("error handling record [topic: {}, partition: {}, offset: {}, headers: {}]", record.topic(), record.partition(), record.offset(), record.headers(), e);
                        }
                    }
                });
            });
            kafkaConsumer.batchHandler(this::onBatchOfRecordsReceived);
            kafkaConsumer.exceptionHandler(error -> log.error("consumer error occurred [client-id: {}]", getClientId(), error));
            installRebalanceListeners();
            // subscribe and wait for rebalance to make sure that when start() completes,
            // the consumer is actually ready to receive records already
            // let polls finish quickly until start() is completed
            kafkaConsumer.asStream().pollTimeout(Duration.ofMillis(10));
            subscribeAndWaitForRebalance().onSuccess(v2 -> {
                kafkaConsumer.asStream().pollTimeout(pollTimeout);
                logSubscribedTopicsOnStartComplete();
            }).onComplete(startPromise);
        });
    });
    return startPromise.future();
}
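
The essentials of this start sequence can be reproduced with the plain Vert.x Kafka client. The following minimal sketch assumes a local broker, an illustrative topic and group id, and a one-second steady-state poll timeout; error handling is trimmed:

import java.time.Duration;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

import org.apache.kafka.clients.consumer.ConsumerConfig;

import io.vertx.core.Vertx;
import io.vertx.core.buffer.Buffer;
import io.vertx.kafka.client.consumer.KafkaConsumer;

public class VertxConsumerStartSketch {

    static KafkaConsumer<String, Buffer> startConsumer(final Vertx vertx) {
        final Map<String, String> cfg = new HashMap<>();
        cfg.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumption
        cfg.put(ConsumerConfig.GROUP_ID_CONFIG, "my-group"); // assumption
        cfg.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        cfg.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "io.vertx.kafka.client.serialization.BufferDeserializer");
        final KafkaConsumer<String, Buffer> consumer = KafkaConsumer.create(vertx, cfg);
        consumer.handler(record -> System.out.println("got record at offset " + record.offset()));
        consumer.exceptionHandler(t -> System.err.println("consumer error: " + t));
        // keep polls short until the subscription is established, as start() above does
        consumer.asStream().pollTimeout(Duration.ofMillis(10));
        consumer.subscribe(Set.of("my-topic"))
                .onSuccess(v -> consumer.asStream().pollTimeout(Duration.ofSeconds(1)));
        return consumer;
    }
}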

Example 20 with ConsumerConfig

Use of org.apache.kafka.clients.consumer.ConsumerConfig in project kafka-streams-examples by confluentinc.

From class UserCountsPerRegionLambdaIntegrationTest, method shouldCountUsersPerRegion.

@Test
public void shouldCountUsersPerRegion() throws Exception {
    // Input: Region per user (multiple records allowed per user).
    List<KeyValue<String, String>> userRegionRecords = Arrays.asList(
        // This first record for Alice tells us that she is currently in Asia.
        new KeyValue<>("alice", "asia"),
        // First record for Bob.
        new KeyValue<>("bob", "europe"),
        // This second record for Alice tells us that she moved
        // from Asia to Europe; in other words, it's a location update for Alice.
        new KeyValue<>("alice", "europe"),
        // Second record for Bob, who moved from Europe to Asia (i.e. the opposite direction of Alice).
        new KeyValue<>("bob", "asia"));
    List<KeyValue<String, Long>> expectedUsersPerRegion = Arrays.asList(
        // in the end, Alice is in europe
        new KeyValue<>("europe", 1L),
        // in the end, Bob is in asia
        new KeyValue<>("asia", 1L));
    // 
    // Step 1: Configure and start the processor topology.
    // 
    final Serde<String> stringSerde = Serdes.String();
    final Serde<Long> longSerde = Serdes.Long();
    Properties streamsConfiguration = new Properties();
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "user-regions-lambda-integration-test");
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    // The commit interval for flushing records to state stores and downstream must be lower than
    // this integration test's timeout (30 secs) to ensure we observe the expected processing results.
    streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 10 * 1000);
    streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    // Use a temporary directory for storing state, which will be automatically removed after the test.
    streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath());
    StreamsBuilder builder = new StreamsBuilder();
    KTable<String, String> userRegionsTable = builder.table(inputTopic);
    KTable<String, Long> usersPerRegionTable = userRegionsTable.groupBy((userId, region) -> KeyValue.pair(region, region)).count();
    usersPerRegionTable.toStream().to(outputTopic, Produced.with(stringSerde, longSerde));
    KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration);
    streams.start();
    // 
    // Step 2: Publish user-region information.
    // 
    Properties userRegionsProducerConfig = new Properties();
    userRegionsProducerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    userRegionsProducerConfig.put(ProducerConfig.ACKS_CONFIG, "all");
    userRegionsProducerConfig.put(ProducerConfig.RETRIES_CONFIG, 0);
    userRegionsProducerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    userRegionsProducerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    IntegrationTestUtils.produceKeyValuesSynchronously(inputTopic, userRegionRecords, userRegionsProducerConfig);
    // 
    // Step 3: Verify the application's output data.
    // 
    Properties consumerConfig = new Properties();
    consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "user-regions-lambda-integration-test-standard-consumer");
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class);
    List<KeyValue<String, Long>> actualUsersPerRegion = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, outputTopic, expectedUsersPerRegion.size());
    streams.close();
    assertThat(actualUsersPerRegion).containsExactlyElementsOf(expectedUsersPerRegion);
}
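
IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived hides an ordinary poll loop. As a rough, hypothetical stand-in (the method name, 30-second deadline and poll interval are assumptions, not the helper's actual implementation), it could look like this:

import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.streams.KeyValue;

final class DrainRecordsSketch {

    static List<KeyValue<String, Long>> drainRecords(final Properties consumerConfig,
            final String topic, final int expected) {
        final List<KeyValue<String, Long>> results = new ArrayList<>();
        try (KafkaConsumer<String, Long> consumer = new KafkaConsumer<>(consumerConfig)) {
            consumer.subscribe(Collections.singletonList(topic));
            final long deadline = System.currentTimeMillis() + 30_000;
            // poll until enough records arrived or the deadline passes
            while (results.size() < expected && System.currentTimeMillis() < deadline) {
                for (final ConsumerRecord<String, Long> record : consumer.poll(Duration.ofMillis(100))) {
                    results.add(new KeyValue<>(record.key(), record.value()));
                }
            }
        }
        return results;
    }
}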
