Use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by Eclipse.
The class AsyncHandlingAutoCommitKafkaConsumerTest, method testConsumerCommitsOffsetsOfSkippedExpiredRecords.
/**
* Verifies that the consumer commits offsets for records whose ttl has expired.
*
* @param ctx The vert.x test context.
* @throws InterruptedException if the test execution gets interrupted.
*/
@Test
public void testConsumerCommitsOffsetsOfSkippedExpiredRecords(final VertxTestContext ctx) throws InterruptedException {
    final int numNonExpiredTestRecords = 5;
    final VertxTestContext receivedRecordsCtx = new VertxTestContext();
    final Checkpoint expiredRecordCheckpoint = receivedRecordsCtx.checkpoint(1);
    final Checkpoint receivedRecordsCheckpoint = receivedRecordsCtx.checkpoint(numNonExpiredTestRecords);
    final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> handler = record -> {
        receivedRecordsCheckpoint.flag();
        return Future.succeededFuture();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    // periodic commit shall not play a role here
    consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "300000");
    consumer = new AsyncHandlingAutoCommitKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig) {
        @Override
        protected void onRecordHandlerSkippedForExpiredRecord(final KafkaConsumerRecord<String, Buffer> record) {
            super.onRecordHandlerSkippedForExpiredRecord(record);
            expiredRecordCheckpoint.flag();
        }
    };
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updatePartitions(TOPIC_PARTITION, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION));
    final Context consumerVertxContext = vertx.getOrCreateContext();
    consumerVertxContext.runOnContext(v -> {
        consumer.start().onComplete(ctx.succeeding(v2 -> {
            mockConsumer.schedulePollTask(() -> {
                // add record with elapsed ttl
                mockConsumer.addRecord(createRecordWithElapsedTtl());
                IntStream.range(1, numNonExpiredTestRecords + 1).forEach(offset -> {
                    mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer()));
                });
            });
        }));
    });
    assertWithMessage("records received in 5s").that(receivedRecordsCtx.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (receivedRecordsCtx.failed()) {
        ctx.failNow(receivedRecordsCtx.causeOfFailure());
        return;
    }
    final int numExpiredTestRecords = 1;
    final int latestFullyHandledOffset = numNonExpiredTestRecords + numExpiredTestRecords - 1;
    final VertxTestContext commitCheckContext = new VertxTestContext();
    final Checkpoint commitCheckpoint = commitCheckContext.checkpoint(1);
    consumer.setOnPartitionsAssignedHandler(partitions -> {
        final Map<TopicPartition, OffsetAndMetadata> committed = mockConsumer.committed(Set.of(TOPIC_PARTITION));
        ctx.verify(() -> {
            final OffsetAndMetadata offsetAndMetadata = committed.get(TOPIC_PARTITION);
            assertThat(offsetAndMetadata).isNotNull();
            assertThat(offsetAndMetadata.offset()).isEqualTo(latestFullyHandledOffset + 1L);
        });
        commitCheckpoint.flag();
    });
    // now force a rebalance which should trigger the above onPartitionsAssignedHandler
    // (the rebalance is done as part of the poll() invocation; the vert.x consumer will schedule that invocation
    // via an action executed on the event loop thread; do the same here, meaning the record handler
    // running on the event loop thread will have finished once the rebalance gets triggered)
    final CountDownLatch latch = new CountDownLatch(1);
    consumerVertxContext.runOnContext(v -> latch.countDown());
    latch.await();
    mockConsumer.rebalance(List.of(TOPIC_PARTITION));
    assertWithMessage("partition assigned in 5s for checking of commits").that(commitCheckContext.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (commitCheckContext.failed()) {
        ctx.failNow(commitCheckContext.causeOfFailure());
        return;
    }
    ctx.completeNow();
}
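The helper createRecordWithElapsedTtl() used above is not part of this excerpt. Below is a minimal sketch of how such a record could be constructed, assuming the time-to-live is carried in the Kafka record headers "ttl" (in seconds) and "creation-time" (epoch milliseconds) and that io.vertx.core.json.Json, java.time.Instant and org.apache.kafka.common.header.internals.RecordHeader are imported; the header names, values and encoding are illustrative assumptions, not necessarily the project's actual implementation:

// illustrative sketch only - the real helper in the Hono test class may differ
private ConsumerRecord<String, Buffer> createRecordWithElapsedTtl() {
    // assumed header layout: "ttl" in seconds, "creation-time" in epoch millis
    final byte[] ttl = Json.encodeToBuffer(1L).getBytes();
    final byte[] creationTime = Json.encodeToBuffer(Instant.now().minusSeconds(60).toEpochMilli()).getBytes();
    final ConsumerRecord<String, Buffer> expiredRecord = new ConsumerRecord<>(TOPIC, PARTITION, 0, "expiredKey", Buffer.buffer());
    expiredRecord.headers().add(new RecordHeader("ttl", ttl));
    expiredRecord.headers().add(new RecordHeader("creation-time", creationTime));
    return expiredRecord;
}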
Use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by Eclipse.
The class AsyncHandlingAutoCommitKafkaConsumerTest, method testScenarioWithPartitionRevokedWhileHandlingIncomplete.
/**
* Verifies that the consumer detects the scenario in which a partition gets revoked and not assigned again
* while there are still records whose handling has not been completed yet.
*
* @param ctx The vert.x test context.
* @throws InterruptedException if the test execution gets interrupted.
*/
@Test
public void testScenarioWithPartitionRevokedWhileHandlingIncomplete(final VertxTestContext ctx) throws InterruptedException {
    final int numTestRecords = 5;
    final VertxTestContext receivedRecordsCtx = new VertxTestContext();
    final Checkpoint receivedRecordsCheckpoint = receivedRecordsCtx.checkpoint(numTestRecords);
    final Map<Long, Promise<Void>> recordsHandlingPromiseMap = new HashMap<>();
    final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> handler = record -> {
        final Promise<Void> promise = Promise.promise();
        if (recordsHandlingPromiseMap.put(record.offset(), promise) != null) {
            receivedRecordsCtx.failNow(new IllegalStateException("received record with duplicate offset"));
        }
        receivedRecordsCheckpoint.flag();
        return promise.future();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    // periodic commit shall not play a role here
    consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "300000");
    consumerConfig.put(AsyncHandlingAutoCommitKafkaConsumer.CONFIG_HONO_OFFSETS_COMMIT_RECORD_COMPLETION_TIMEOUT_MILLIS, "0");
    consumer = new AsyncHandlingAutoCommitKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig);
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updatePartitions(TOPIC_PARTITION, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION));
    consumer.start().onComplete(ctx.succeeding(v2 -> {
        mockConsumer.schedulePollTask(() -> {
            IntStream.range(0, numTestRecords).forEach(offset -> {
                mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer()));
            });
        });
    }));
    assertWithMessage("records received in 5s").that(receivedRecordsCtx.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (receivedRecordsCtx.failed()) {
        ctx.failNow(receivedRecordsCtx.causeOfFailure());
        return;
    }
    // records received, but their handling isn't completed yet
    // do a rebalance with the currently assigned partition not being assigned anymore after it
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC2_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC2_PARTITION, 0L));
    mockConsumer.rebalance(List.of(TOPIC2_PARTITION));
    // mark the handling of some records as completed
    recordsHandlingPromiseMap.get(0L).complete();
    recordsHandlingPromiseMap.get(1L).complete();
    recordsHandlingPromiseMap.get(2L).complete();
    final Checkpoint commitCheckDone = ctx.checkpoint(1);
    consumer.setOnPartitionsAssignedHandler(partitions -> {
        final Map<TopicPartition, OffsetAndMetadata> committed = mockConsumer.committed(Set.of(TOPIC_PARTITION));
        ctx.verify(() -> {
            // the last rebalance where topicPartition got revoked should have just
            // triggered a commit of offset 0; the 3 records that only got completed
            // after the rebalance shouldn't have been taken into account in the commit
            final OffsetAndMetadata offsetAndMetadata = committed.get(TOPIC_PARTITION);
            assertThat(offsetAndMetadata).isNotNull();
            assertThat(offsetAndMetadata.offset()).isEqualTo(0);
        });
        commitCheckDone.flag();
    });
    // now force a rebalance which should trigger the above onPartitionsAssignedHandler
    mockConsumer.rebalance(List.of(TOPIC_PARTITION));
}
Use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by Eclipse.
The class AsyncHandlingAutoCommitKafkaConsumerTest, method testConsumerCommitsOffsetsPeriodically.
/**
* Verifies that the consumer commits the last fully handled records periodically.
*
* @param ctx The vert.x test context.
*/
@Test
public void testConsumerCommitsOffsetsPeriodically(final VertxTestContext ctx) {
    final Promise<Void> testRecordsReceived = Promise.promise();
    final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> handler = record -> {
        testRecordsReceived.complete();
        return Future.succeededFuture();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    // 1000 ms commit interval - don't set the value too low,
    // otherwise the frequent commit task on the event loop thread would keep the test main thread from getting things done
    consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
    consumer = new AsyncHandlingAutoCommitKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig);
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updatePartitions(TOPIC_PARTITION, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION));
    consumer.start().onComplete(ctx.succeeding(v2 -> {
        mockConsumer.schedulePollTask(() -> {
            mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, 0, "key_0", Buffer.buffer()));
        });
    }));
    testRecordsReceived.future().onComplete(v -> {
        // we have no hook to integrate into for the commit check
        // therefore do the check multiple times with some delay in between
        final AtomicInteger checkCount = new AtomicInteger(0);
        vertx.setPeriodic(200, tid -> {
            checkCount.incrementAndGet();
            // check offsets
            final Map<TopicPartition, OffsetAndMetadata> committed = mockConsumer.committed(Set.of(TOPIC_PARTITION));
            if (!committed.isEmpty()) {
                ctx.verify(() -> {
                    final OffsetAndMetadata offsetAndMetadata = committed.get(TOPIC_PARTITION);
                    assertThat(offsetAndMetadata).isNotNull();
                    assertThat(offsetAndMetadata.offset()).isEqualTo(1L);
                });
                ctx.completeNow();
                vertx.cancelTimer(tid);
            } else {
                if (checkCount.get() >= 10) {
                    vertx.cancelTimer(tid);
                    ctx.failNow(new AssertionError("offset should have been committed"));
                }
            }
        });
    });
}
Use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by Eclipse.
The class HonoKafkaConsumerIT, method testConsumerReadsLatestRecordsPublishedAfterOutOfRangeOffsetReset.
/**
* Verifies that a HonoKafkaConsumer configured with "latest" as offset reset strategy will receive
* all still available records after the committed offset position has gone out of range
* (because records have been deleted according to the retention config) and the consumer is restarted.
*
* @param ctx The vert.x test context.
* @throws InterruptedException if test execution gets interrupted.
*/
@Test
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsLatestRecordsPublishedAfterOutOfRangeOffsetReset(final VertxTestContext ctx) throws InterruptedException {
    final int numTopics = 1;
    final int numTestRecordsPerTopicPerRound = 20;
    // has to be 1 here because we expect partition 0 to contain *all* the records published for a topic
    final int numPartitions = 1;
    // prepare topics
    final Set<String> topics = IntStream.range(0, numTopics).mapToObj(i -> "test_" + i + "_" + UUID.randomUUID()).collect(Collectors.toSet());
    final String publishTestTopic = topics.iterator().next();
    final VertxTestContext setup = new VertxTestContext();
    final Map<String, String> topicsConfig = Map.of(TopicConfig.RETENTION_MS_CONFIG, "300", TopicConfig.SEGMENT_BYTES_CONFIG, SMALL_TOPIC_SEGMENT_SIZE_BYTES);
    createTopics(topics, numPartitions, topicsConfig).onComplete(setup.succeedingThenComplete());
    assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }
    // prepare consumer
    final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    final VertxTestContext firstConsumerInstanceStartedAndStopped = new VertxTestContext();
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        receivedRecords.add(record);
        if (receivedRecords.size() == numTestRecordsPerTopicPerRound * topics.size()) {
            LOG.trace("first round of records received; stop consumer; committed offset afterwards shall be {}", numTestRecordsPerTopicPerRound);
            kafkaConsumer.stop().onFailure(ctx::failNow).onSuccess(v2 -> {
                LOG.trace("publish 2nd round of records (shall be deleted before the to-be-restarted consumer is able to receive them)");
                publishRecords(numTestRecordsPerTopicPerRound, "round2_", topics).onFailure(ctx::failNow).onSuccess(v3 -> {
                    LOG.trace("wait until records of first two rounds have been deleted according to the retention policy (committed offset will be out-of-range then)");
                    final int beginningOffsetToWaitFor = numTestRecordsPerTopicPerRound * 2;
                    waitForLogDeletion(new TopicPartition(publishTestTopic, 0), beginningOffsetToWaitFor, Duration.ofSeconds(5)).onComplete(firstConsumerInstanceStartedAndStopped.succeedingThenComplete());
                });
            });
        }
    };
    kafkaConsumer = new HonoKafkaConsumer(vertx, topics, recordHandler, consumerConfig);
    // first start of consumer, letting it commit offsets
    kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
        LOG.trace("consumer started, publish first round of records to be received by the consumer (so that it has offsets to commit)");
        publishRecords(numTestRecordsPerTopicPerRound, "round1_", topics);
    }));
    assertThat(firstConsumerInstanceStartedAndStopped.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (firstConsumerInstanceStartedAndStopped.failed()) {
        ctx.failNow(firstConsumerInstanceStartedAndStopped.causeOfFailure());
        return;
    }
    // preparation done, now start same consumer again and verify it reads all still available records - even though committed offset is out-of-range now
    receivedRecords.clear();
    final String lastRecordKey = "lastKey";
    // restarted consumer is expected to receive 3rd round of records + one extra record published after consumer start
    final int expectedNumberOfRecords = (numTestRecordsPerTopicPerRound * topics.size()) + 1;
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler2 = record -> {
        receivedRecords.add(record);
        if (receivedRecords.size() == expectedNumberOfRecords) {
            ctx.verify(() -> {
                assertThat(receivedRecords.get(0).key()).startsWith("round3");
                assertThat(receivedRecords.get(receivedRecords.size() - 1).key()).isEqualTo(lastRecordKey);
            });
            ctx.completeNow();
        }
    };
LOG.trace("publish 3nd round of records (shall be received by to-be-restarted consumer)");
publishRecords(numTestRecordsPerTopicPerRound, "round3_", topics).onFailure(ctx::failNow).onSuccess(v -> {
kafkaConsumer = new HonoKafkaConsumer(vertx, topics, recordHandler2, consumerConfig);
kafkaConsumer.start().onComplete(ctx.succeeding(v2 -> {
LOG.debug("consumer started, publish another record to be received by the consumer");
publish(publishTestTopic, lastRecordKey, Buffer.buffer("testPayload"));
}));
});
if (!ctx.awaitCompletion(9, TimeUnit.SECONDS)) {
ctx.failNow(new IllegalStateException(String.format("timeout waiting for expected number of records (%d) to be received; received records: %d", expectedNumberOfRecords, receivedRecords.size())));
}
}
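The publishRecords() and publish() helpers invoked above are defined elsewhere in HonoKafkaConsumerIT and not shown here. Below is a minimal sketch of what such helpers could look like when based on a Vert.x KafkaProducer; the kafkaProducer field and the payloads are illustrative assumptions, not the project's actual implementation:

// illustrative sketch only - not the actual helpers of HonoKafkaConsumerIT
private Future<Void> publish(final String topic, final String key, final Buffer payload) {
    // kafkaProducer is assumed to be an io.vertx.kafka.client.producer.KafkaProducer<String, Buffer>
    return kafkaProducer.send(KafkaProducerRecord.create(topic, key, payload)).mapEmpty();
}

private Future<Void> publishRecords(final int numRecords, final String keyPrefix, final Set<String> topics) {
    // publish numRecords records per topic, keyed keyPrefix + index
    final List<Future> sendFutures = new ArrayList<>();
    topics.forEach(topic -> IntStream.range(0, numRecords)
            .forEach(i -> sendFutures.add(publish(topic, keyPrefix + i, Buffer.buffer("payload_" + i)))));
    return CompositeFuture.all(sendFutures).mapEmpty();
}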
Use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by Eclipse.
The class HonoKafkaConsumerIT, method testConsumerReadsLatestRecordsPublishedAfterTopicSubscriptionConfirmed.
/**
* Verifies that a HonoKafkaConsumer configured with "latest" as offset reset strategy and a topic pattern
* subscription only receives records published after the consumer <em>start()</em> method has completed.
* <p>
* Also verifies that all records published after the consumer <em>ensureTopicIsAmongSubscribedTopicPatternTopics()</em>
* method has completed are received by the consumer, even if the topic was only created after the consumer
* <em>start</em> method has completed.
*
* @param partitionAssignmentStrategy The partition assignment strategy to use for the consumer.
* @param ctx The vert.x test context.
* @throws InterruptedException if test execution gets interrupted.
*/
@ParameterizedTest
@MethodSource("partitionAssignmentStrategies")
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsLatestRecordsPublishedAfterTopicSubscriptionConfirmed(final String partitionAssignmentStrategy, final VertxTestContext ctx) throws InterruptedException {
    final String patternPrefix = "test_" + UUID.randomUUID() + "_";
    final int numTopics = 2;
    final Pattern topicPattern = Pattern.compile(Pattern.quote(patternPrefix) + ".*");
    final int numPartitions = 5;
    final int numTestRecordsPerTopic = 20;
    final Set<String> topics = IntStream.range(0, numTopics).mapToObj(i -> patternPrefix + i).collect(Collectors.toSet());
    final VertxTestContext setup = new VertxTestContext();
    createTopics(topics, numPartitions).compose(v -> publishRecords(numTestRecordsPerTopic, "key_", topics)).onComplete(setup.succeedingThenComplete());
    assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }
    LOG.debug("topics created and (to be ignored) test records published");
    // prepare consumer
    final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
    applyPartitionAssignmentStrategy(consumerConfig, partitionAssignmentStrategy);
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    final AtomicReference<Promise<Void>> nextRecordReceivedPromiseRef = new AtomicReference<>();
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        receivedRecords.add(record);
        Optional.ofNullable(nextRecordReceivedPromiseRef.get()).ifPresent(Promise::complete);
    };
    kafkaConsumer = new HonoKafkaConsumer(vertx, topicPattern, recordHandler, consumerConfig);
    // start consumer
    kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
        ctx.verify(() -> {
            assertThat(receivedRecords.size()).isEqualTo(0);
        });
        final Promise<Void> nextRecordReceivedPromise = Promise.promise();
        nextRecordReceivedPromiseRef.set(nextRecordReceivedPromise);
        LOG.debug("consumer started, create new topic implicitly by invoking ensureTopicIsAmongSubscribedTopicPatternTopics()");
        final String newTopic = patternPrefix + "new";
        final String recordKey = "addedAfterStartKey";
        kafkaConsumer.ensureTopicIsAmongSubscribedTopicPatternTopics(newTopic).onComplete(ctx.succeeding(v2 -> {
            LOG.debug("publish record to be received by the consumer");
            publish(newTopic, recordKey, Buffer.buffer("testPayload"));
        }));
        nextRecordReceivedPromise.future().onComplete(ar -> {
            ctx.verify(() -> {
                assertThat(receivedRecords.size()).isEqualTo(1);
                assertThat(receivedRecords.get(0).key()).isEqualTo(recordKey);
            });
            ctx.completeNow();
        });
    }));
}
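The applyPartitionAssignmentStrategy() helper is not shown in this excerpt; conceptually it only maps the test parameter onto the corresponding ConsumerConfig entry. Below is a minimal sketch, assuming the parameter is either null (use the client default) or a fully qualified assignor class name:

// illustrative sketch only - the actual helper in HonoKafkaConsumerIT may differ
private static void applyPartitionAssignmentStrategy(final Map<String, String> consumerConfig,
        final String partitionAssignmentStrategy) {
    Optional.ofNullable(partitionAssignmentStrategy)
            .ifPresent(strategy -> consumerConfig.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, strategy));
}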