Example 1 with TopicPartition

Use of io.vertx.kafka.client.common.TopicPartition in the Eclipse Hono project.

From the class HonoKafkaConsumerIT, the method testConsumerReadsLatestRecordsPublishedAfterOutOfRangeOffsetReset. The test lets a consumer commit offsets, waits until retention-based log deletion pushes the committed offset out of range, and then verifies that the restarted consumer still receives all records that remain available.

/**
 * Verifies that a HonoKafkaConsumer configured with "latest" as offset reset strategy will receive
 * all still available records after the committed offset position has gone out of range
 * (because records have been deleted according to the retention config) and the consumer is restarted.
 *
 * @param ctx The vert.x test context.
 * @throws InterruptedException if test execution gets interrupted.
 */
@Test
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsLatestRecordsPublishedAfterOutOfRangeOffsetReset(final VertxTestContext ctx) throws InterruptedException {
    final int numTopics = 1;
    final int numTestRecordsPerTopicPerRound = 20;
    // has to be 1 here because we expect partition 0 to contain *all* the records published for a topic
    final int numPartitions = 1;
    // prepare topics
    final Set<String> topics = IntStream.range(0, numTopics).mapToObj(i -> "test_" + i + "_" + UUID.randomUUID()).collect(Collectors.toSet());
    final String publishTestTopic = topics.iterator().next();
    final VertxTestContext setup = new VertxTestContext();
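    // very short retention time plus a small segment size make the broker roll and delete log segments quickly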
    final Map<String, String> topicsConfig = Map.of(TopicConfig.RETENTION_MS_CONFIG, "300", TopicConfig.SEGMENT_BYTES_CONFIG, SMALL_TOPIC_SEGMENT_SIZE_BYTES);
    createTopics(topics, numPartitions, topicsConfig).onComplete(setup.succeedingThenComplete());
    assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }
    // prepare consumer
    final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
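    // with a plain Kafka consumer, "latest" would make an out-of-range committed offset reset to the log end,
    // skipping the remaining records; per the javadoc above, the HonoKafkaConsumer under test is expected to still deliver them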
    final VertxTestContext firstConsumerInstanceStartedAndStopped = new VertxTestContext();
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        receivedRecords.add(record);
        if (receivedRecords.size() == numTestRecordsPerTopicPerRound * topics.size()) {
            LOG.trace("first round of records received; stop consumer; committed offset afterwards shall be {}", numTestRecordsPerTopicPerRound);
            kafkaConsumer.stop().onFailure(ctx::failNow).onSuccess(v2 -> {
                LOG.trace("publish 2nd round of records (shall be deleted before the to-be-restarted consumer is able to receive them)");
                publishRecords(numTestRecordsPerTopicPerRound, "round2_", topics).onFailure(ctx::failNow).onSuccess(v3 -> {
                    LOG.trace("wait until records of first two rounds have been deleted according to the retention policy (committed offset will be out-of-range then)");
                    final int beginningOffsetToWaitFor = numTestRecordsPerTopicPerRound * 2;
                    waitForLogDeletion(new TopicPartition(publishTestTopic, 0), beginningOffsetToWaitFor, Duration.ofSeconds(5)).onComplete(firstConsumerInstanceStartedAndStopped.succeedingThenComplete());
                });
            });
        }
    };
    kafkaConsumer = new HonoKafkaConsumer(vertx, topics, recordHandler, consumerConfig);
    // first start of consumer, letting it commit offsets
    kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
        LOG.trace("consumer started, publish first round of records to be received by the consumer (so that it has offsets to commit)");
        publishRecords(numTestRecordsPerTopicPerRound, "round1_", topics);
    }));
    assertThat(firstConsumerInstanceStartedAndStopped.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (firstConsumerInstanceStartedAndStopped.failed()) {
        ctx.failNow(firstConsumerInstanceStartedAndStopped.causeOfFailure());
        return;
    }
    // preparation done, now start same consumer again and verify it reads all still available records - even though committed offset is out-of-range now
    receivedRecords.clear();
    final String lastRecordKey = "lastKey";
    // restarted consumer is expected to receive 3rd round of records + one extra record published after consumer start
    final int expectedNumberOfRecords = (numTestRecordsPerTopicPerRound * topics.size()) + 1;
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler2 = record -> {
        receivedRecords.add(record);
        if (receivedRecords.size() == expectedNumberOfRecords) {
            ctx.verify(() -> {
                assertThat(receivedRecords.get(0).key()).startsWith("round3");
                assertThat(receivedRecords.get(receivedRecords.size() - 1).key()).isEqualTo(lastRecordKey);
            });
            ctx.completeNow();
        }
    };
    LOG.trace("publish 3nd round of records (shall be received by to-be-restarted consumer)");
    publishRecords(numTestRecordsPerTopicPerRound, "round3_", topics).onFailure(ctx::failNow).onSuccess(v -> {
        kafkaConsumer = new HonoKafkaConsumer(vertx, topics, recordHandler2, consumerConfig);
        kafkaConsumer.start().onComplete(ctx.succeeding(v2 -> {
            LOG.debug("consumer started, publish another record to be received by the consumer");
            publish(publishTestTopic, lastRecordKey, Buffer.buffer("testPayload"));
        }));
    });
    if (!ctx.awaitCompletion(9, TimeUnit.SECONDS)) {
        ctx.failNow(new IllegalStateException(String.format("timeout waiting for expected number of records (%d) to be received; received records: %d", expectedNumberOfRecords, receivedRecords.size())));
    }
}
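
The waitForLogDeletion helper invoked above is where TopicPartition actually comes into play: it polls the partition's beginning offset until retention-based deletion has advanced it past the given position. The helper itself is not part of this excerpt; the following is a minimal sketch of how it could be implemented with the Vert.x Kafka client, assuming the Future-returning beginningOffsets variant is available and reusing the test's consumer config. The name offsetChecker and the 100 ms polling interval are illustrative assumptions, not Hono's actual implementation.

private Future<Void> waitForLogDeletion(final TopicPartition topicPartition,
        final long expectedBeginningOffset, final Duration timeout) {
    // sketch only: poll the partition's beginning offset until log deletion has advanced it far enough
    final Promise<Void> result = Promise.promise();
    final KafkaConsumer<String, Buffer> offsetChecker = KafkaConsumer.create(vertx,
            IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("offsetChecker"));
    final long deadline = System.currentTimeMillis() + timeout.toMillis();
    vertx.setPeriodic(100, timerId -> {
        // beginningOffsets yields the first offset still present in the partition's log
        offsetChecker.beginningOffsets(topicPartition)
                .onSuccess(beginningOffset -> {
                    if (beginningOffset >= expectedBeginningOffset) {
                        vertx.cancelTimer(timerId);
                        offsetChecker.close();
                        result.complete();
                    } else if (System.currentTimeMillis() > deadline) {
                        vertx.cancelTimer(timerId);
                        offsetChecker.close();
                        result.fail(new IllegalStateException("timed out waiting for log deletion"));
                    }
                })
                .onFailure(result::tryFail);
    });
    return result.future();
}

Note that Kafka deletes records at segment granularity only, which is why the test combines the short retention.ms with a small segment.bytes: without frequent segment roll-over, the records would stay in the active segment and never be deleted within the test's timeout.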