Example 21 with KafkaConsumerRecord

Use of io.vertx.kafka.client.consumer.KafkaConsumerRecord in project hono by eclipse.

From the class HonoKafkaConsumerTest, method testConsumerSkipsHandlerInvocationOnReceivingExpiredRecords.

/**
 * Verifies that the HonoKafkaConsumer does not invoke the provided handler for received records whose time-to-live (TTL) has expired.
 *
 * @param ctx The vert.x test context.
 */
@Test
public void testConsumerSkipsHandlerInvocationOnReceivingExpiredRecords(final VertxTestContext ctx) {
    final int numNonExpiredTestRecords = 5;
    final Checkpoint receivedRecordsCheckpoint = ctx.checkpoint(numNonExpiredTestRecords);
    final Handler<KafkaConsumerRecord<String, Buffer>> handler = record -> {
        receivedRecordsCheckpoint.flag();
    };
    final Checkpoint expiredRecordCheckpoint = ctx.checkpoint(1);
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    consumer = new HonoKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig) {

        @Override
        protected void onRecordHandlerSkippedForExpiredRecord(final KafkaConsumerRecord<String, Buffer> record) {
            expiredRecordCheckpoint.flag();
        }
    };
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(topicPartition, 0L));
    mockConsumer.updateEndOffsets(Map.of(topicPartition, 0L));
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(topicPartition));
    consumer.start().onComplete(ctx.succeeding(v2 -> {
        mockConsumer.schedulePollTask(() -> {
            // add record with elapsed ttl
            mockConsumer.addRecord(createRecordWithElapsedTtl());
            IntStream.range(1, numNonExpiredTestRecords + 1).forEach(offset -> {
                mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer()));
            });
        });
    }));
}
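The createRecordWithElapsedTtl() helper invoked above is not shown on this page. A minimal sketch of what such a helper could look like, assuming (based on the classes this test uses, such as Json, RecordHeader and Instant) that the expiry information travels in "ttl" and "creation-time" record headers, with the TTL given in seconds and the creation time as epoch milliseconds; the header names, units and encoding are assumptions, not confirmed by this excerpt:

private ConsumerRecord<String, Buffer> createRecordWithElapsedTtl() {
    // a record at offset 0, created two seconds ago ...
    final ConsumerRecord<String, Buffer> expiredRecord =
            new ConsumerRecord<>(TOPIC, PARTITION, 0, "expiredKey", Buffer.buffer());
    // ... with a time-to-live of one second, i.e. already expired when polled
    expiredRecord.headers().add(new RecordHeader("ttl", Json.encodeToBuffer(1).getBytes()));
    expiredRecord.headers().add(new RecordHeader("creation-time",
            Json.encodeToBuffer(Instant.now().minusSeconds(2).toEpochMilli()).getBytes()));
    return expiredRecord;
}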

Example 22 with KafkaConsumerRecord

Use of io.vertx.kafka.client.consumer.KafkaConsumerRecord in project hono by eclipse.

From the class KafkaBasedMappingAndDelegatingCommandHandler, method mapAndDelegateIncomingCommandMessage.

/**
 * Delegates an incoming command to the protocol adapter instance that the target
 * device is connected to.
 * <p>
 * Determines the target gateway (if applicable) and protocol adapter instance for an incoming command
 * and delegates the command to the resulting protocol adapter instance.
 *
 * @param consumerRecord The consumer record corresponding to the command.
 * @return A future indicating the outcome of the operation.
 * @throws NullPointerException if the consumer record is {@code null}.
 */
public Future<Void> mapAndDelegateIncomingCommandMessage(final KafkaConsumerRecord<String, Buffer> consumerRecord) {
    Objects.requireNonNull(consumerRecord);
    final Timer.Sample timer = getMetrics().startTimer();
    final KafkaBasedCommand command;
    try {
        command = KafkaBasedCommand.from(consumerRecord);
    } catch (final IllegalArgumentException exception) {
        log.debug("command record is invalid", exception);
        return Future.failedFuture("command record is invalid");
    }
    final SpanContext spanContext = KafkaTracingHelper.extractSpanContext(tracer, consumerRecord);
    final Span currentSpan = createSpan(command.getTenant(), command.getDeviceId(), spanContext);
    KafkaTracingHelper.setRecordTags(currentSpan, consumerRecord);
    final KafkaBasedCommandContext commandContext = new KafkaBasedCommandContext(command, kafkaBasedCommandResponseSender, currentSpan);
    command.logToSpan(currentSpan);
    if (!command.isValid()) {
        log.debug("received invalid command record [{}]", command);
        return tenantClient.get(command.getTenant(), currentSpan.context()).compose(tenantConfig -> {
            commandContext.put(CommandContext.KEY_TENANT_CONFIG, tenantConfig);
            return Future.failedFuture("command is invalid");
        }).onComplete(ar -> {
            commandContext.reject("malformed command message");
            reportInvalidCommand(commandContext, timer);
        }).mapEmpty();
    }
    log.trace("received valid command record [{}]", command);
    commandQueue.add(commandContext);
    final Promise<Void> resultPromise = Promise.promise();
    final long timerId = vertx.setTimer(PROCESSING_TIMEOUT.toMillis(), tid -> {
        if (commandQueue.remove(commandContext) || !commandContext.isCompleted()) {
            log.info("command processing timed out after {}s [{}]", PROCESSING_TIMEOUT.toSeconds(), commandContext.getCommand());
            TracingHelper.logError(commandContext.getTracingSpan(), String.format("command processing timed out after %ds", PROCESSING_TIMEOUT.toSeconds()));
            final ServerErrorException error = new ServerErrorException(HttpURLConnection.HTTP_UNAVAILABLE, "command processing timed out");
            commandContext.release(error);
            resultPromise.tryFail(error);
        }
    });
    mapAndDelegateIncomingCommand(commandContext, timer).onComplete(ar -> {
        vertx.cancelTimer(timerId);
        if (ar.failed()) {
            commandQueue.remove(commandContext);
        }
        Futures.tryHandleResult(resultPromise, ar);
    });
    return resultPromise.future();
}
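The timer logic above is an instance of a general Vert.x pattern: guard an asynchronous operation with vertx.setTimer(), cancel the timer on completion, and let the promise's tryComplete()/tryFail() resolve whichever path wins the race. A condensed, self-contained sketch of just that pattern, reusing types already visible in the snippet (Vertx, Future, Promise, ServerErrorException, HttpURLConnection); the helper name and the Runnable hook are illustrative, not Hono API:

static <T> Future<T> withProcessingTimeout(final Vertx vertx, final Future<T> operation,
        final Duration timeout, final Runnable onTimeout) {
    final Promise<T> result = Promise.promise();
    final long timerId = vertx.setTimer(timeout.toMillis(), tid -> {
        // the timer fired first: run the compensation logic and fail the result
        onTimeout.run();
        result.tryFail(new ServerErrorException(HttpURLConnection.HTTP_UNAVAILABLE, "processing timed out"));
    });
    operation.onComplete(ar -> {
        // the operation finished; if the timer already fired, these calls are no-ops
        vertx.cancelTimer(timerId);
        if (ar.succeeded()) {
            result.tryComplete(ar.result());
        } else {
            result.tryFail(ar.cause());
        }
    });
    return result.future();
}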

Example 23 with KafkaConsumerRecord

Use of io.vertx.kafka.client.consumer.KafkaConsumerRecord in project hono by eclipse.

From the class HonoKafkaConsumerIT, method testConsumerReadsLatestRecordsPublishedAfterOutOfRangeOffsetReset.

/**
 * Verifies that a HonoKafkaConsumer configured with "latest" as offset reset strategy will receive
 * all still available records after the committed offset position has gone out of range
 * (because records have been deleted according to the retention config) and the consumer is restarted.
 *
 * @param ctx The vert.x test context.
 * @throws InterruptedException if test execution gets interrupted.
 */
@Test
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsLatestRecordsPublishedAfterOutOfRangeOffsetReset(final VertxTestContext ctx) throws InterruptedException {
    final int numTopics = 1;
    final int numTestRecordsPerTopicPerRound = 20;
    // has to be 1 here because we expect partition 0 to contain *all* the records published for a topic
    final int numPartitions = 1;
    // prepare topics
    final Set<String> topics = IntStream.range(0, numTopics).mapToObj(i -> "test_" + i + "_" + UUID.randomUUID()).collect(Collectors.toSet());
    final String publishTestTopic = topics.iterator().next();
    final VertxTestContext setup = new VertxTestContext();
    final Map<String, String> topicsConfig = Map.of(TopicConfig.RETENTION_MS_CONFIG, "300", TopicConfig.SEGMENT_BYTES_CONFIG, SMALL_TOPIC_SEGMENT_SIZE_BYTES);
    createTopics(topics, numPartitions, topicsConfig).onComplete(setup.succeedingThenComplete());
    assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }
    // prepare consumer
    final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    final VertxTestContext firstConsumerInstanceStartedAndStopped = new VertxTestContext();
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        receivedRecords.add(record);
        if (receivedRecords.size() == numTestRecordsPerTopicPerRound * topics.size()) {
            LOG.trace("first round of records received; stop consumer; committed offset afterwards shall be {}", numTestRecordsPerTopicPerRound);
            kafkaConsumer.stop().onFailure(ctx::failNow).onSuccess(v2 -> {
                LOG.trace("publish 2nd round of records (shall be deleted before the to-be-restarted consumer is able to receive them)");
                publishRecords(numTestRecordsPerTopicPerRound, "round2_", topics).onFailure(ctx::failNow).onSuccess(v3 -> {
                    LOG.trace("wait until records of first two rounds have been deleted according to the retention policy (committed offset will be out-of-range then)");
                    final int beginningOffsetToWaitFor = numTestRecordsPerTopicPerRound * 2;
                    waitForLogDeletion(new TopicPartition(publishTestTopic, 0), beginningOffsetToWaitFor, Duration.ofSeconds(5)).onComplete(firstConsumerInstanceStartedAndStopped.succeedingThenComplete());
                });
            });
        }
    };
    kafkaConsumer = new HonoKafkaConsumer(vertx, topics, recordHandler, consumerConfig);
    // first start of consumer, letting it commit offsets
    kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
        LOG.trace("consumer started, publish first round of records to be received by the consumer (so that it has offsets to commit)");
        publishRecords(numTestRecordsPerTopicPerRound, "round1_", topics);
    }));
    assertThat(firstConsumerInstanceStartedAndStopped.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (firstConsumerInstanceStartedAndStopped.failed()) {
        ctx.failNow(firstConsumerInstanceStartedAndStopped.causeOfFailure());
        return;
    }
    // preparation done, now start same consumer again and verify it reads all still available records - even though committed offset is out-of-range now
    receivedRecords.clear();
    final String lastRecordKey = "lastKey";
    // restarted consumer is expected to receive 3rd round of records + one extra record published after consumer start
    final int expectedNumberOfRecords = (numTestRecordsPerTopicPerRound * topics.size()) + 1;
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler2 = record -> {
        receivedRecords.add(record);
        if (receivedRecords.size() == expectedNumberOfRecords) {
            ctx.verify(() -> {
                assertThat(receivedRecords.get(0).key()).startsWith("round3");
                assertThat(receivedRecords.get(receivedRecords.size() - 1).key()).isEqualTo(lastRecordKey);
            });
            ctx.completeNow();
        }
    };
    LOG.trace("publish 3nd round of records (shall be received by to-be-restarted consumer)");
    publishRecords(numTestRecordsPerTopicPerRound, "round3_", topics).onFailure(ctx::failNow).onSuccess(v -> {
        kafkaConsumer = new HonoKafkaConsumer(vertx, topics, recordHandler2, consumerConfig);
        kafkaConsumer.start().onComplete(ctx.succeeding(v2 -> {
            LOG.debug("consumer started, publish another record to be received by the consumer");
            publish(publishTestTopic, lastRecordKey, Buffer.buffer("testPayload"));
        }));
    });
    if (!ctx.awaitCompletion(9, TimeUnit.SECONDS)) {
        ctx.failNow(new IllegalStateException(String.format("timeout waiting for expected number of records (%d) to be received; received records: %d", expectedNumberOfRecords, receivedRecords.size())));
    }
}
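The createTopics() helper used in the setup phase is not shown on this page. A plausible sketch, assuming the Future-returning Vert.x KafkaAdminClient API; the adminClientConfig field, the replication factor of 1 and the config lookup are assumptions for illustration only:

private Future<Void> createTopics(final Collection<String> topicNames, final int numPartitions,
        final Map<String, String> topicConfig) {
    // adminClientConfig is a hypothetical field assumed to contain at least "bootstrap.servers"
    final KafkaAdminClient adminClient = KafkaAdminClient.create(vertx, adminClientConfig);
    final List<NewTopic> newTopics = topicNames.stream()
            // apply the per-topic settings, e.g. the short retention.ms used by this test
            .map(name -> new NewTopic(name, numPartitions, (short) 1).setConfig(topicConfig))
            .collect(Collectors.toList());
    return adminClient.createTopics(newTopics);
}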

Example 24 with KafkaConsumerRecord

Use of io.vertx.kafka.client.consumer.KafkaConsumerRecord in project hono by eclipse.

From the class HonoKafkaConsumerIT, method testConsumerReadsLatestRecordsPublishedAfterTopicSubscriptionConfirmed.

/**
 * Verifies that a HonoKafkaConsumer configured with "latest" as offset reset strategy and a topic pattern
 * subscription only receives records published after the consumer <em>start()</em> method has completed.
 * <p>
 * Also verifies that all records published after the consumer <em>ensureTopicIsAmongSubscribedTopicPatternTopics()</em>
 * method has completed are received by the consumer, even if the topic was only created after the consumer
 * <em>start</em> method has completed.
 *
 * @param partitionAssignmentStrategy The partition assignment strategy to use for the consumer.
 * @param ctx The vert.x test context.
 * @throws InterruptedException if test execution gets interrupted.
 */
@ParameterizedTest
@MethodSource("partitionAssignmentStrategies")
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsLatestRecordsPublishedAfterTopicSubscriptionConfirmed(final String partitionAssignmentStrategy, final VertxTestContext ctx) throws InterruptedException {
    final String patternPrefix = "test_" + UUID.randomUUID() + "_";
    final int numTopics = 2;
    final Pattern topicPattern = Pattern.compile(Pattern.quote(patternPrefix) + ".*");
    final int numPartitions = 5;
    final int numTestRecordsPerTopic = 20;
    final Set<String> topics = IntStream.range(0, numTopics).mapToObj(i -> patternPrefix + i).collect(Collectors.toSet());
    final VertxTestContext setup = new VertxTestContext();
    createTopics(topics, numPartitions).compose(v -> publishRecords(numTestRecordsPerTopic, "key_", topics)).onComplete(setup.succeedingThenComplete());
    assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }
    LOG.debug("topics created and (to be ignored) test records published");
    // prepare consumer
    final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
    applyPartitionAssignmentStrategy(consumerConfig, partitionAssignmentStrategy);
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    final AtomicReference<Promise<Void>> nextRecordReceivedPromiseRef = new AtomicReference<>();
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        receivedRecords.add(record);
        Optional.ofNullable(nextRecordReceivedPromiseRef.get()).ifPresent(Promise::complete);
    };
    kafkaConsumer = new HonoKafkaConsumer(vertx, topicPattern, recordHandler, consumerConfig);
    // start consumer
    kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
        ctx.verify(() -> {
            assertThat(receivedRecords).isEmpty();
        });
        final Promise<Void> nextRecordReceivedPromise = Promise.promise();
        nextRecordReceivedPromiseRef.set(nextRecordReceivedPromise);
        LOG.debug("consumer started, create new topic implicitly by invoking ensureTopicIsAmongSubscribedTopicPatternTopics()");
        final String newTopic = patternPrefix + "new";
        final String recordKey = "addedAfterStartKey";
        kafkaConsumer.ensureTopicIsAmongSubscribedTopicPatternTopics(newTopic).onComplete(ctx.succeeding(v2 -> {
            LOG.debug("publish record to be received by the consumer");
            publish(newTopic, recordKey, Buffer.buffer("testPayload"));
        }));
        nextRecordReceivedPromise.future().onComplete(ar -> {
            ctx.verify(() -> {
                assertThat(receivedRecords.size()).isEqualTo(1);
                assertThat(receivedRecords.get(0).key()).isEqualTo(recordKey);
            });
            ctx.completeNow();
        });
    }));
}
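The applyPartitionAssignmentStrategy() helper is likewise not shown here. Given the strategy parameter and the CooperativeStickyAssignor usage in these tests, a minimal sketch of what it presumably does (treating a null strategy as "keep the client default"):

private static void applyPartitionAssignmentStrategy(final Map<String, String> consumerConfig,
        final String partitionAssignmentStrategy) {
    // partitionAssignmentStrategy is e.g. CooperativeStickyAssignor.class.getName(), or null
    if (partitionAssignmentStrategy != null) {
        consumerConfig.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, partitionAssignmentStrategy);
    }
}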

Example 25 with KafkaConsumerRecord

Use of io.vertx.kafka.client.consumer.KafkaConsumerRecord in project hono by eclipse.

From the class HonoKafkaConsumerIT, method testConsumerReadsLatestRecordsPublishedAfterStart.

/**
 * Verifies that a HonoKafkaConsumer configured with "latest" as offset reset strategy only receives
 * records published after the consumer <em>start()</em> method has completed.
 *
 * @param partitionAssignmentStrategy The partition assignment strategy to use for the consumer.
 * @param ctx The vert.x test context.
 * @throws InterruptedException if test execution gets interrupted.
 */
@ParameterizedTest
@MethodSource("partitionAssignmentStrategies")
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsLatestRecordsPublishedAfterStart(final String partitionAssignmentStrategy, final VertxTestContext ctx) throws InterruptedException {
    final int numTopics = 2;
    final int numPartitions = 5;
    final int numTestRecordsPerTopic = 20;
    final Set<String> topics = IntStream.range(0, numTopics).mapToObj(i -> "test_" + i + "_" + UUID.randomUUID()).collect(Collectors.toSet());
    final String publishTestTopic = topics.iterator().next();
    final VertxTestContext setup = new VertxTestContext();
    createTopics(topics, numPartitions).compose(v -> publishRecords(numTestRecordsPerTopic, "key_", topics)).onComplete(setup.succeedingThenComplete());
    assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }
    LOG.debug("topics created and (to be ignored) test records published");
    // prepare consumer
    final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
    applyPartitionAssignmentStrategy(consumerConfig, partitionAssignmentStrategy);
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    final String publishedAfterStartRecordKey = "publishedAfterStartKey";
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        // verify received record
        ctx.verify(() -> assertThat(record.key()).isEqualTo(publishedAfterStartRecordKey));
        ctx.completeNow();
    };
    kafkaConsumer = new HonoKafkaConsumer(vertx, topics, recordHandler, consumerConfig);
    // start consumer
    kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
        LOG.debug("consumer started, publish record to be received by the consumer");
        publish(publishTestTopic, publishedAfterStartRecordKey, Buffer.buffer("testPayload"));
    }));
    if (!ctx.awaitCompletion(9, TimeUnit.SECONDS)) {
        ctx.failNow(new IllegalStateException("timeout waiting for record to be received"));
    }
}
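The partitionAssignmentStrategies() factory backing the @MethodSource annotations above is not part of this excerpt either. A plausible sketch is a stream covering the client default and the cooperative-sticky assignor; the exact set of strategies exercised is an assumption:

static Stream<String> partitionAssignmentStrategies() {
    // null keeps the consumer's default assignor; the second entry opts into incremental cooperative rebalancing
    return Stream.of(null, CooperativeStickyAssignor.class.getName());
}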
