Use of io.vertx.kafka.client.consumer.KafkaConsumer in project hono by eclipse.
The class HonoKafkaConsumerIT, method testConsumerAutoCreatesTopicAndReadsLatestRecordsPublishedAfterStart.
/**
* Verifies that a HonoKafkaConsumer that subscribes to a topic which does not exist yet, and that is configured
* with "latest" as its offset reset strategy, only receives those records on the auto-created topic that are
* published after the consumer's <em>start()</em> method has completed.
*
* @param partitionAssignmentStrategy The partition assignment strategy to use for the consumer.
* @param ctx The vert.x test context.
*/
@ParameterizedTest
@MethodSource("partitionAssignmentStrategies")
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerAutoCreatesTopicAndReadsLatestRecordsPublishedAfterStart(
        final String partitionAssignmentStrategy, final VertxTestContext ctx) {
// prepare consumer
final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
applyPartitionAssignmentStrategy(consumerConfig, partitionAssignmentStrategy);
consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
final AtomicReference<Promise<Void>> nextRecordReceivedPromiseRef = new AtomicReference<>();
final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
receivedRecords.add(record);
Optional.ofNullable(nextRecordReceivedPromiseRef.get()).ifPresent(Promise::complete);
};
final String topic = "test_" + UUID.randomUUID();
topicsToDeleteAfterTests.add(topic);
kafkaConsumer = new HonoKafkaConsumer(vertx, Set.of(topic), recordHandler, consumerConfig);
// start consumer
kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
ctx.verify(() -> {
assertThat(receivedRecords.size()).isEqualTo(0);
});
final Promise<Void> nextRecordReceivedPromise = Promise.promise();
nextRecordReceivedPromiseRef.set(nextRecordReceivedPromise);
LOG.debug("consumer started, publish record to be received by the consumer");
final String recordKey = "addedAfterStartKey";
publish(topic, recordKey, Buffer.buffer("testPayload"));
nextRecordReceivedPromise.future().onComplete(ar -> {
ctx.verify(() -> {
assertThat(receivedRecords.size()).isEqualTo(1);
assertThat(receivedRecords.get(0).key()).isEqualTo(recordKey);
});
ctx.completeNow();
});
}));
}
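Neither the partitionAssignmentStrategies method source nor the applyPartitionAssignmentStrategy helper is part of this snippet. A minimal sketch of what they plausibly look like, assuming the test merely parameterizes the standard Kafka assignors (names and shape here are illustrative, not the verbatim Hono code):

import java.util.Map;
import java.util.stream.Stream;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.CooperativeStickyAssignor;

static Stream<String> partitionAssignmentStrategies() {
    // null means "use the client default (RangeAssignor)"
    return Stream.of(null, CooperativeStickyAssignor.class.getName());
}

static void applyPartitionAssignmentStrategy(final Map<String, String> consumerConfig,
        final String partitionAssignmentStrategy) {
    // only override the default if a strategy was given
    if (partitionAssignmentStrategy != null) {
        consumerConfig.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG,
                partitionAssignmentStrategy);
    }
}

Passing null lets one parameterized run exercise the client's default eager assignor while the other exercises cooperative rebalancing.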
Use of io.vertx.kafka.client.consumer.KafkaConsumer in project hono by eclipse.
The class HonoKafkaConsumerIT, method testConsumerReadsAllRecordsForDynamicallyCreatedTopics.
/**
* Verifies that a HonoKafkaConsumer configured with "latest" as its offset reset strategy and a topic pattern
* subscription receives the records published to dynamically created topics once the corresponding
* <em>ensureTopicIsAmongSubscribedTopicPatternTopics()</em> invocations have completed.
*
* @param partitionAssignmentStrategy The partition assignment strategy to use for the consumer.
* @param ctx The vert.x test context.
* @throws InterruptedException if test execution gets interrupted.
*/
@ParameterizedTest
@MethodSource("partitionAssignmentStrategies")
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsAllRecordsForDynamicallyCreatedTopics(
        final String partitionAssignmentStrategy, final VertxTestContext ctx) throws InterruptedException {
final String patternPrefix = "test_" + UUID.randomUUID() + "_";
final int numTopicsAndRecords = 3;
final Pattern topicPattern = Pattern.compile(Pattern.quote(patternPrefix) + ".*");
// prepare consumer
final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
applyPartitionAssignmentStrategy(consumerConfig, partitionAssignmentStrategy);
consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
final Promise<Void> allRecordsReceivedPromise = Promise.promise();
final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
receivedRecords.add(record);
if (receivedRecords.size() == numTopicsAndRecords) {
allRecordsReceivedPromise.complete();
}
};
kafkaConsumer = new HonoKafkaConsumer(vertx, topicPattern, recordHandler, consumerConfig);
// start consumer
kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
ctx.verify(() -> {
assertThat(receivedRecords.size()).isEqualTo(0);
});
LOG.debug("consumer started, create new topics implicitly by invoking ensureTopicIsAmongSubscribedTopicPatternTopics()");
final String recordKey = "addedAfterStartKey";
for (int i = 0; i < numTopicsAndRecords; i++) {
final String topic = patternPrefix + i;
kafkaConsumer.ensureTopicIsAmongSubscribedTopicPatternTopics(topic).onComplete(ctx.succeeding(v2 -> {
LOG.debug("publish record to be received by the consumer");
publish(topic, recordKey, Buffer.buffer("testPayload"));
}));
}
allRecordsReceivedPromise.future().onComplete(ar -> {
ctx.verify(() -> {
assertThat(receivedRecords.size()).isEqualTo(numTopicsAndRecords);
receivedRecords.forEach(record -> assertThat(record.key()).isEqualTo(recordKey));
});
ctx.completeNow();
});
}));
if (!ctx.awaitCompletion(9, TimeUnit.SECONDS)) {
ctx.failNow(new IllegalStateException(String.format(
        "timeout waiting for expected number of records (%d) to be received; received records: %d",
        numTopicsAndRecords, receivedRecords.size())));
}
}
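The publish helper invoked by these tests is also defined outside this excerpt. A sketch of the idea, assuming a vert.x KafkaProducer field named kafkaProducer (an assumption, not the verbatim test code):

import io.vertx.core.Future;
import io.vertx.core.buffer.Buffer;
import io.vertx.kafka.client.producer.KafkaProducer;
import io.vertx.kafka.client.producer.KafkaProducerRecord;
import io.vertx.kafka.client.producer.RecordMetadata;

Future<RecordMetadata> publish(final String topic, final String recordKey, final Buffer payload) {
    final KafkaProducerRecord<String, Buffer> record =
            KafkaProducerRecord.create(topic, recordKey, payload);
    // send() returns a future that completes once the broker has acknowledged the record
    return kafkaProducer.send(record);
}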
Use of io.vertx.kafka.client.consumer.KafkaConsumer in project hono by eclipse.
The class HonoKafkaConsumerIT, method testConsumerReadsAllRecordsAfterStart.
/**
* Verifies that a HonoKafkaConsumer configured with "earliest" as its offset reset strategy receives all
* records already published to its topics after the consumer's <em>start()</em> method has completed.
*
* @param ctx The vert.x test context.
* @throws InterruptedException if test execution gets interrupted.
*/
@Test
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsAllRecordsAfterStart(final VertxTestContext ctx) throws InterruptedException {
final int numTopics = 2;
final int numPartitions = 5;
final int numTestRecordsPerTopic = 20;
final Set<String> topics = IntStream.range(0, numTopics)
        .mapToObj(i -> "test_" + i + "_" + UUID.randomUUID())
        .collect(Collectors.toSet());
final VertxTestContext setup = new VertxTestContext();
createTopics(topics, numPartitions)
        .compose(v -> publishRecords(numTestRecordsPerTopic, "key_", topics))
        .onComplete(setup.succeedingThenComplete());
assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
if (setup.failed()) {
ctx.failNow(setup.causeOfFailure());
return;
}
LOG.debug("topics created and test records published");
// prepare consumer
final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
final Promise<Void> allRecordsReceivedPromise = Promise.promise();
final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
final int totalExpectedMessages = numTopics * numTestRecordsPerTopic;
final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
receivedRecords.add(record);
if (receivedRecords.size() == totalExpectedMessages) {
allRecordsReceivedPromise.complete();
}
};
kafkaConsumer = new HonoKafkaConsumer(vertx, topics, recordHandler, consumerConfig);
// start consumer
kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
ctx.verify(() -> {
assertThat(receivedRecords.size()).isEqualTo(0);
});
allRecordsReceivedPromise.future().onComplete(ar -> {
ctx.verify(() -> {
assertThat(receivedRecords.size()).isEqualTo(totalExpectedMessages);
});
ctx.completeNow();
});
}));
}
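createTopics and publishRecords are test fixtures defined elsewhere in HonoKafkaConsumerIT. One plausible shape, assuming a vert.x KafkaAdminClient field named adminClient and the publish helper sketched above (all names here are assumptions):

import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import io.vertx.core.CompositeFuture;
import io.vertx.core.Future;
import io.vertx.core.buffer.Buffer;
import io.vertx.kafka.admin.KafkaAdminClient;
import io.vertx.kafka.admin.NewTopic;

Future<Void> createTopics(final Set<String> topics, final int numPartitions) {
    final List<NewTopic> newTopics = topics.stream()
            .map(topic -> new NewTopic(topic, numPartitions, (short) 1))
            .collect(Collectors.toList());
    return adminClient.createTopics(newTopics);
}

Future<Void> publishRecords(final int numRecordsPerTopic, final String keyPrefix, final Set<String> topics) {
    // publish numRecordsPerTopic records with distinct keys to each of the given topics;
    // CompositeFuture.all expects a raw List<Future> in Vert.x 4, hence the explicit type witness
    final List<Future> sendFutures = topics.stream()
            .flatMap(topic -> IntStream.range(0, numRecordsPerTopic)
                    .mapToObj(i -> publish(topic, keyPrefix + i, Buffer.buffer("payload " + i))))
            .collect(Collectors.<Future>toList());
    return CompositeFuture.all(sendFutures).mapEmpty();
}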
Use of io.vertx.kafka.client.consumer.KafkaConsumer in project hono by eclipse.
The class HonoKafkaConsumer, method start.
@Override
public Future<Void> start() {
context = vertx.getOrCreateContext();
final Promise<Void> startPromise = Promise.promise();
runOnContext(v -> {
// create KafkaConsumer here so that it is created in the Vert.x context of the start() method (KafkaConsumer uses vertx.getOrCreateContext())
Optional.ofNullable(kafkaConsumerSupplier)
        .map(supplier -> Future.succeededFuture(KafkaConsumer.create(vertx, supplier.get())))
        .orElseGet(() -> {
            final KafkaClientFactory kafkaClientFactory = new KafkaClientFactory(vertx);
            return kafkaClientFactory.createKafkaConsumerWithRetries(
                    consumerConfig, String.class, Buffer.class, consumerCreationRetriesTimeout);
}).onFailure(thr -> {
log.error("error creating consumer [client-id: {}]", getClientId(), thr);
startPromise.fail(thr);
}).onSuccess(consumer -> {
kafkaConsumer = consumer;
Optional.ofNullable(metricsSupport).ifPresent(ms -> ms.registerKafkaConsumer(kafkaConsumer.unwrap()));
kafkaConsumer.handler(record -> {
if (!startPromise.future().isComplete()) {
log.debug("postponing record handling until start() is completed [topic: {}, partition: {}, offset: {}]", record.topic(), record.partition(), record.offset());
}
startPromise.future().onSuccess(v2 -> {
if (respectTtl && KafkaRecordHelper.isTtlElapsed(record.headers())) {
onRecordHandlerSkippedForExpiredRecord(record);
} else {
try {
recordHandler.handle(record);
} catch (final Exception e) {
log.warn("error handling record [topic: {}, partition: {}, offset: {}, headers: {}]", record.topic(), record.partition(), record.offset(), record.headers(), e);
}
}
});
});
kafkaConsumer.batchHandler(this::onBatchOfRecordsReceived);
kafkaConsumer.exceptionHandler(error -> log.error("consumer error occurred [client-id: {}]", getClientId(), error));
installRebalanceListeners();
// subscribe and wait for the rebalance so that the consumer is actually ready
// to receive records by the time start() completes
// let polls finish quickly until start() has completed
kafkaConsumer.asStream().pollTimeout(Duration.ofMillis(10));
subscribeAndWaitForRebalance().onSuccess(v2 -> {
kafkaConsumer.asStream().pollTimeout(pollTimeout);
logSubscribedTopicsOnStartComplete();
}).onComplete(startPromise);
});
});
return startPromise.future();
}
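The runOnContext helper used above is a small private method of HonoKafkaConsumer that is not part of this excerpt. A sketch of its likely behavior, assuming it simply dispatches the given handler on the context captured at the beginning of start():

import io.vertx.core.Context;
import io.vertx.core.Handler;
import io.vertx.core.Vertx;

private void runOnContext(final Handler<Void> codeToRun) {
    if (context != Vertx.currentContext()) {
        // not yet on the consumer's context: dispatch the handler there
        context.runOnContext(codeToRun);
    } else {
        // already on the right context: run directly
        codeToRun.handle(null);
    }
}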
Use of io.vertx.kafka.client.consumer.KafkaConsumer in project hono by eclipse.
The class HonoKafkaConsumer, method subscribeAndWaitForRebalance.
private Future<Void> subscribeAndWaitForRebalance() {
if (stopCalled.get()) {
return Future.failedFuture(new ServerErrorException(HttpURLConnection.HTTP_UNAVAILABLE, "already stopped"));
}
final Promise<Void> partitionAssignmentDone = Promise.promise();
final Promise<Void> subscriptionUpdated = Promise.promise();
final Pair<Promise<Void>, Promise<Void>> newPromisePair = Pair.of(subscriptionUpdated, partitionAssignmentDone);
final var promisePair = subscriptionUpdatedAndPartitionsAssignedPromiseRef.updateAndGet(promise -> promise == null ? newPromisePair : promise);
if (!promisePair.equals(newPromisePair)) {
log.debug("subscribeAndWaitForRebalance: will wait for ongoing invocation to complete");
return CompositeFuture.all(promisePair.one().future(), promisePair.two().future()).mapEmpty();
}
if (topicPattern != null) {
kafkaConsumer.subscribe(topicPattern, subscriptionUpdated);
} else {
// Trigger retrieval of metadata for each of the subscription topics if not already available locally;
// this will also trigger topic auto-creation if the topic doesn't exist yet.
// Doing so before the "subscribe" invocation ensures that these partitions are considered for
// partition assignment.
topics.forEach(topic -> HonoKafkaConsumerHelper.partitionsFor(kafkaConsumer, topic).onSuccess(partitions -> {
if (partitions.isEmpty()) {
log.info("subscription topic doesn't exist and didn't get auto-created: {} [client-id: {}]", topic, getClientId());
}
}));
kafkaConsumer.subscribe(topics, subscriptionUpdated);
}
// init kafkaConsumerWorker if needed; it has to be retrieved after the first "subscribe" invocation
if (kafkaConsumerWorker == null) {
kafkaConsumerWorker = getKafkaConsumerWorker(kafkaConsumer);
}
vertx.setTimer(WAIT_FOR_REBALANCE_TIMEOUT_MILLIS, ar -> {
if (!partitionAssignmentDone.future().isComplete()) {
subscriptionUpdatedAndPartitionsAssignedPromiseRef.compareAndSet(promisePair, null);
final String errorMsg = "timed out waiting for rebalance and update of subscribed topics";
log.warn(errorMsg);
partitionAssignmentDone.tryFail(new ServerErrorException(HttpURLConnection.HTTP_UNAVAILABLE, errorMsg));
}
});
subscriptionUpdated.future().onFailure(thr -> {
subscriptionUpdatedAndPartitionsAssignedPromiseRef.compareAndSet(promisePair, null);
});
return CompositeFuture.all(subscriptionUpdated.future(), partitionAssignmentDone.future()).mapEmpty();
}
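The partitionAssignmentDone promise above is completed by the rebalance listeners that installRebalanceListeners() registers. A sketch of that mechanism using the vert.x partitionsAssignedHandler (illustrative; the actual Hono implementation does additional bookkeeping):

private void installRebalanceListeners() {
    kafkaConsumer.partitionsAssignedHandler(partitions -> {
        log.debug("partitions assigned: {} [client-id: {}]", partitions, getClientId());
        // a rebalance has finished: complete the "partition assignment done" promise
        // (promisePair.two()) created in subscribeAndWaitForRebalance(), so that the
        // composite future returned there can complete
        Optional.ofNullable(subscriptionUpdatedAndPartitionsAssignedPromiseRef.getAndSet(null))
                .ifPresent(promisePair -> promisePair.two().tryComplete());
    });
    kafkaConsumer.partitionsRevokedHandler(partitions ->
            log.debug("partitions revoked: {} [client-id: {}]", partitions, getClientId()));
}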