
Example 26 with ConsumerConfig

Use of org.apache.kafka.clients.consumer.ConsumerConfig in project kafka by apache.

The class NamedTopologyIntegrationTest, method shouldAddToEmptyInitialTopologyRemoveResetOffsetsThenAddSameNamedTopology.

@Test
public void shouldAddToEmptyInitialTopologyRemoveResetOffsetsThenAddSameNamedTopology() throws Exception {
    CLUSTER.createTopics(SUM_OUTPUT, COUNT_OUTPUT);
    // Build up named topology with two stateful subtopologies
    final KStream<String, Long> inputStream1 = topology1Builder.stream(INPUT_STREAM_1);
    inputStream1.groupByKey().count().toStream().to(COUNT_OUTPUT);
    inputStream1.groupByKey().reduce(Long::sum).toStream().to(SUM_OUTPUT);
    streams.start();
    final NamedTopology namedTopology = topology1Builder.build();
    streams.addNamedTopology(namedTopology).all().get();
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, COUNT_OUTPUT, 3), equalTo(COUNT_OUTPUT_DATA));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SUM_OUTPUT, 3), equalTo(SUM_OUTPUT_DATA));
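    // remove the topology; the "true" flag requests that the offsets of its source topics be reset as well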
    streams.removeNamedTopology("topology-1", true).all().get();
    streams.cleanUpNamedTopology("topology-1");
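    // also delete the changelog topics, so the re-added topology starts from a completely clean state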
    CLUSTER.getAllTopicsInCluster().stream().filter(t -> t.contains("changelog")).forEach(t -> {
        try {
            CLUSTER.deleteTopicAndWait(t);
        } catch (final InterruptedException e) {
            e.printStackTrace();
        }
    });
    final KStream<String, Long> inputStream = topology1BuilderDup.stream(INPUT_STREAM_1);
    inputStream.groupByKey().count().toStream().to(COUNT_OUTPUT);
    inputStream.groupByKey().reduce(Long::sum).toStream().to(SUM_OUTPUT);
    final NamedTopology namedTopologyDup = topology1BuilderDup.build();
    streams.addNamedTopology(namedTopologyDup).all().get();
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, COUNT_OUTPUT, 3), equalTo(COUNT_OUTPUT_DATA));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SUM_OUTPUT, 3), equalTo(SUM_OUTPUT_DATA));
    CLUSTER.deleteTopicsAndWait(SUM_OUTPUT, COUNT_OUTPUT);
}
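The consumerConfig passed to the waitUntilMinKeyValueRecordsReceived calls is built elsewhere in the test class and not shown on this page. A minimal sketch of such a verification-consumer config, assuming a hypothetical group id and String/Long deserializers matching the key and value types of the count/sum output topics:

final Properties consumerConfig = new Properties();
// connect the verification consumer to the embedded cluster
consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
// hypothetical group id; any unique id works for a throwaway verification consumer
consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "verification-consumer");
// read the output topics from the beginning
consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class.getName());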

Example 27 with ConsumerConfig

Use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by eclipse.

The class HonoKafkaConsumerIT, method testConsumerReadsLatestRecordsPublishedAfterStart.

/**
 * Verifies that a HonoKafkaConsumer configured with "latest" as offset reset strategy only receives
 * records published after the consumer <em>start()</em> method has completed.
 *
 * @param partitionAssignmentStrategy The partition assignment strategy to use for the consumer.
 * @param ctx The vert.x test context.
 * @throws InterruptedException if test execution gets interrupted.
 */
@ParameterizedTest
@MethodSource("partitionAssignmentStrategies")
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsLatestRecordsPublishedAfterStart(final String partitionAssignmentStrategy, final VertxTestContext ctx) throws InterruptedException {
    final int numTopics = 2;
    final int numPartitions = 5;
    final int numTestRecordsPerTopic = 20;
    final Set<String> topics = IntStream.range(0, numTopics).mapToObj(i -> "test_" + i + "_" + UUID.randomUUID()).collect(Collectors.toSet());
    final String publishTestTopic = topics.iterator().next();
    final VertxTestContext setup = new VertxTestContext();
    createTopics(topics, numPartitions).compose(v -> publishRecords(numTestRecordsPerTopic, "key_", topics)).onComplete(setup.succeedingThenComplete());
    assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }
    LOG.debug("topics created and (to be ignored) test records published");
    // prepare consumer
    final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
    applyPartitionAssignmentStrategy(consumerConfig, partitionAssignmentStrategy);
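    // "latest" ensures the consumer only receives records published after it has fetched its initial offsets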
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    final String publishedAfterStartRecordKey = "publishedAfterStartKey";
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        // verify received record
        ctx.verify(() -> assertThat(record.key()).isEqualTo(publishedAfterStartRecordKey));
        ctx.completeNow();
    };
    kafkaConsumer = new HonoKafkaConsumer(vertx, topics, recordHandler, consumerConfig);
    // start consumer
    kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
        LOG.debug("consumer started, publish record to be received by the consumer");
        publish(publishTestTopic, publishedAfterStartRecordKey, Buffer.buffer("testPayload"));
    }));
    if (!ctx.awaitCompletion(9, TimeUnit.SECONDS)) {
        ctx.failNow(new IllegalStateException("timeout waiting for record to be received"));
    }
}
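The applyPartitionAssignmentStrategy helper is part of the test class and not shown on this page. A plausible sketch, assuming it simply maps the (possibly null) strategy name onto the standard consumer property; a typical parameter value would be CooperativeStickyAssignor.class.getName():

private static void applyPartitionAssignmentStrategy(final Map<String, String> consumerConfig,
        final String partitionAssignmentStrategy) {
    // only set the property when a strategy was given; otherwise keep the client default
    if (partitionAssignmentStrategy != null) {
        consumerConfig.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, partitionAssignmentStrategy);
    }
}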

Example 28 with ConsumerConfig

Use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by eclipse.

The class HonoKafkaConsumerTest, method testConsumerInvokesHandlerOnReceivedRecords.

/**
 * Verifies that the HonoKafkaConsumer invokes the provided handler on received records.
 *
 * @param ctx The vert.x test context.
 */
@Test
public void testConsumerInvokesHandlerOnReceivedRecords(final VertxTestContext ctx) {
    final int numTestRecords = 5;
    final Checkpoint receivedRecordsCheckpoint = ctx.checkpoint(numTestRecords);
    final Handler<KafkaConsumerRecord<String, Buffer>> handler = record -> {
        receivedRecordsCheckpoint.flag();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    consumer = new HonoKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig);
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(topicPartition, 0L));
    mockConsumer.updateEndOffsets(Map.of(topicPartition, 0L));
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(topicPartition));
    consumer.start().onComplete(ctx.succeeding(v2 -> {
        mockConsumer.schedulePollTask(() -> {
            IntStream.range(0, numTestRecords).forEach(offset -> {
                mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer()));
            });
        });
    }));
}
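KafkaMockConsumer is a Hono test helper wrapping Kafka's own MockConsumer, and the calls above (updateBeginningOffsets, updateEndOffsets, addRecord) belong to the MockConsumer API. A minimal sketch of the same record-feeding pattern against the plain org.apache.kafka.clients.consumer.MockConsumer, with illustrative topic and key names:

final TopicPartition tp = new TopicPartition("test.topic", 0);
final MockConsumer<String, String> mock = new MockConsumer<>(OffsetResetStrategy.LATEST);
mock.updateBeginningOffsets(Map.of(tp, 0L));
mock.updateEndOffsets(Map.of(tp, 0L));
mock.subscribe(Set.of("test.topic"));
// simulates the rebalance that assigns the partition after subscribing
mock.rebalance(List.of(tp));
mock.addRecord(new ConsumerRecord<>("test.topic", 0, 0L, "key_0", "value_0"));
final ConsumerRecords<String, String> records = mock.poll(Duration.ofMillis(10));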

Example 29 with ConsumerConfig

Use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by eclipse.

The class HonoKafkaConsumerTest, method testConsumerSkipsHandlerInvocationOnReceivingExpiredRecords.

/**
 * Verifies that the HonoKafkaConsumer doesn't invoke the provided handler on received records whose ttl has expired.
 *
 * @param ctx The vert.x test context.
 */
@Test
public void testConsumerSkipsHandlerInvocationOnReceivingExpiredRecords(final VertxTestContext ctx) {
    final int numNonExpiredTestRecords = 5;
    final Checkpoint receivedRecordsCheckpoint = ctx.checkpoint(numNonExpiredTestRecords);
    final Handler<KafkaConsumerRecord<String, Buffer>> handler = record -> {
        receivedRecordsCheckpoint.flag();
    };
    final Checkpoint expiredRecordCheckpoint = ctx.checkpoint(1);
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    consumer = new HonoKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig) {

        @Override
        protected void onRecordHandlerSkippedForExpiredRecord(final KafkaConsumerRecord<String, Buffer> record) {
            expiredRecordCheckpoint.flag();
        }
    };
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(topicPartition, 0L));
    mockConsumer.updateEndOffsets(Map.of(topicPartition, 0L));
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(topicPartition));
    consumer.start().onComplete(ctx.succeeding(v2 -> {
        mockConsumer.schedulePollTask(() -> {
            // add record with elapsed ttl
            mockConsumer.addRecord(createRecordWithElapsedTtl());
            IntStream.range(1, numNonExpiredTestRecords + 1).forEach(offset -> {
                mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer()));
            });
        });
    }));
}
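The createRecordWithElapsedTtl helper belongs to the test class and is not shown on this page. A plausible sketch, assuming Hono transports the time-to-live as a Json-encoded "ttl" header (in seconds) alongside a Json-encoded "creation-time" header (epoch millis); both header names are assumptions here:

private ConsumerRecord<String, Buffer> createRecordWithElapsedTtl() {
    // a record created 10 seconds ago with a ttl of 1 second is already expired
    final ConsumerRecord<String, Buffer> expiredRecord =
            new ConsumerRecord<>(TOPIC, PARTITION, 0L, "expiredKey", Buffer.buffer());
    expiredRecord.headers().add("ttl", Json.encodeToBuffer(1L).getBytes());  // assumed header name
    expiredRecord.headers().add("creation-time",  // assumed header name
            Json.encodeToBuffer(Instant.now().minusSeconds(10).toEpochMilli()).getBytes());
    return expiredRecord;
}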

Example 30 with ConsumerConfig

Use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by eclipse.

The class AsyncHandlingAutoCommitKafkaConsumerTest, method testConsumerRespectsMaxRecordsInProcessingLimit.

/**
 * Verifies that the maximum number of records in processing by the consumer does not exceed
 * the limit of 1.5 times the <em>max.poll.records</em> config value.
 *
 * @param ctx The vert.x test context.
 */
@Test
public void testConsumerRespectsMaxRecordsInProcessingLimit(final VertxTestContext ctx) {
    final int maxPollRecords = 10;
    final int throttlingThreshold = maxPollRecords * AsyncHandlingAutoCommitKafkaConsumer.THROTTLING_THRESHOLD_PERCENTAGE_OF_MAX_POLL_RECORDS / 100;
    final int maxRecordsInProcessing = maxPollRecords + Math.max(throttlingThreshold, 1);
    final int numTestBatches = 5;
    final int numRecordsPerBatch = maxPollRecords;
    final int numRecords = numTestBatches * numRecordsPerBatch;
    final AtomicInteger offsetCounter = new AtomicInteger();
    final Promise<Void> allRecordsReceivedPromise = Promise.promise();
    final List<Promise<Void>> uncompletedRecordHandlingPromises = new ArrayList<>();
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final AtomicInteger observedMaxRecordsInProcessing = new AtomicInteger();
    final AtomicInteger testBatchesToAdd = new AtomicInteger(numTestBatches);
    // let the record handler complete the processing of a record only once record fetching has been paused (or once all records have been received)
    final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> recordHandler = record -> {
        receivedRecords.add(record);
        final Promise<Void> recordHandlingCompleted = Promise.promise();
        uncompletedRecordHandlingPromises.add(recordHandlingCompleted);
        if (consumer.isRecordFetchingPaused() || receivedRecords.size() == numRecords) {
            if (uncompletedRecordHandlingPromises.size() > observedMaxRecordsInProcessing.get()) {
                observedMaxRecordsInProcessing.set(uncompletedRecordHandlingPromises.size());
            }
            if (receivedRecords.size() == numRecords) {
                LOG.trace("complete all remaining {} record handling promises", uncompletedRecordHandlingPromises.size());
                uncompletedRecordHandlingPromises.forEach(Promise::tryComplete);
                uncompletedRecordHandlingPromises.clear();
            } else {
                // complete record handling promises until consumer record fetching isn't paused anymore
                completeUntilConsumerRecordFetchingResumed(uncompletedRecordHandlingPromises.iterator());
            }
        }
        if (receivedRecords.size() == numRecords) {
            vertx.runOnContext(v -> allRecordsReceivedPromise.tryComplete());
        }
        return recordHandlingCompleted.future();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    consumerConfig.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, Integer.toString(maxPollRecords));
    consumer = new AsyncHandlingAutoCommitKafkaConsumer(vertx, Set.of(TOPIC), recordHandler, consumerConfig);
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updatePartitions(TOPIC_PARTITION, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION));
    consumer.start().onComplete(ctx.succeeding(v2 -> {
        // schedule the poll tasks
        schedulePollTasksWithConsumerPausedCheck(offsetCounter, numRecordsPerBatch, testBatchesToAdd);
        final long timerId = vertx.setTimer(8000, tid -> {
            LOG.info("received records:\n{}", receivedRecords.stream().map(Object::toString).collect(Collectors.joining(",\n")));
            allRecordsReceivedPromise.tryFail(String.format("only received %d out of %d expected messages after 8s", uncompletedRecordHandlingPromises.size(), numRecords));
        });
        allRecordsReceivedPromise.future().onComplete(ctx.succeeding(v -> {
            vertx.cancelTimer(timerId);
            ctx.verify(() -> {
                assertWithMessage("observed max no. of records in processing").that(observedMaxRecordsInProcessing.get()).isEqualTo(maxRecordsInProcessing);
            });
            ctx.completeNow();
        }));
    }));
}
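With the values used above, the limit works out as follows; the factor of 1.5 quoted in the Javadoc implies that THROTTLING_THRESHOLD_PERCENTAGE_OF_MAX_POLL_RECORDS is 50, which is an inference, not shown on this page:

final int maxPollRecords = 10;
final int throttlingThresholdPercentage = 50;  // inferred from the 1.5x factor in the Javadoc
final int throttlingThreshold = maxPollRecords * throttlingThresholdPercentage / 100;  // = 5
// = 15, i.e. 1.5 * max.poll.records
final int maxRecordsInProcessing = maxPollRecords + Math.max(throttlingThreshold, 1);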

Aggregations

ConsumerConfig (org.apache.kafka.clients.consumer.ConsumerConfig): 35
List (java.util.List): 31
Map (java.util.Map): 24
Set (java.util.Set): 24
Collectors (java.util.stream.Collectors): 24
Pattern (java.util.regex.Pattern): 23
Optional (java.util.Optional): 22
TimeUnit (java.util.concurrent.TimeUnit): 20
UUID (java.util.UUID): 19
Handler (io.vertx.core.Handler): 18
Vertx (io.vertx.core.Vertx): 18
Buffer (io.vertx.core.buffer.Buffer): 18
KafkaConsumerRecord (io.vertx.kafka.client.consumer.KafkaConsumerRecord): 18
Logger (org.slf4j.Logger): 18
LoggerFactory (org.slf4j.LoggerFactory): 18
Instant (java.time.Instant): 17
HashMap (java.util.HashMap): 17
Truth.assertThat (com.google.common.truth.Truth.assertThat): 16
Future (io.vertx.core.Future): 16
Promise (io.vertx.core.Promise): 16