use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by eclipse.
the class KafkaBasedCommandSender method subscribeForCommandResponse.
private Future<Void> subscribeForCommandResponse(final String tenantId, final Span span) {
    if (commandResponseConsumers.get(tenantId) != null) {
        LOGGER.debug("command response consumer already exists for tenant [{}]", tenantId);
        span.log("command response consumer already exists");
        return Future.succeededFuture();
    }
    final Map<String, String> consumerConfig = this.consumerConfig.getConsumerConfig(HonoTopic.Type.COMMAND_RESPONSE.toString());
    final String autoOffsetResetConfigValue = consumerConfig.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG);
    // Ensure that 'auto.offset.reset' is always set to 'latest'.
    if (autoOffsetResetConfigValue != null && !autoOffsetResetConfigValue.equals("latest")) {
        LOGGER.warn("[auto.offset.reset] value is set to other than [latest]. It will be ignored and internally set to [latest]");
    }
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    // Use a unique group-id so that all command responses for this tenant are received by this consumer.
    // Thereby the responses can be correlated with the command that has been sent.
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, tenantId + "-" + UUID.randomUUID());
    final String topic = new HonoTopic(HonoTopic.Type.COMMAND_RESPONSE, tenantId).toString();
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        getCommandResponseHandler(tenantId).handle(new KafkaDownstreamMessage(record));
    };
    final HonoKafkaConsumer consumer = new HonoKafkaConsumer(vertx, Set.of(topic), recordHandler, consumerConfig);
    consumer.setPollTimeout(Duration.ofMillis(this.consumerConfig.getPollTimeout()));
    Optional.ofNullable(kafkaConsumerSupplier).ifPresent(consumer::setKafkaConsumerSupplier);
    return consumer.start().recover(error -> {
        LOGGER.debug("error creating command response consumer for tenant [{}]", tenantId, error);
        TracingHelper.logError(span, "error creating command response consumer", error);
        return Future.failedFuture(error);
    }).onSuccess(v -> {
        LOGGER.debug("created command response consumer for tenant [{}]", tenantId);
        span.log("created command response consumer");
        commandResponseConsumers.put(tenantId, consumer);
    });
}
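The essential point of this method is the combination of a fresh, random group.id with auto.offset.reset set to "latest": the random group-id puts the consumer into its own consumer group, so it is assigned all partitions of the tenant's response topic and sees every response, while "latest" ensures it only reads responses that arrive after it has subscribed. A minimal sketch of just these two settings (hypothetical helper class, not Hono code):

import java.util.Map;
import java.util.UUID;

import org.apache.kafka.clients.consumer.ConsumerConfig;

public class CommandResponseConsumerConfigSketch {

    // hypothetical helper showing just the two settings the method above enforces
    static Map<String, String> commandResponseConsumerConfig(final String tenantId) {
        return Map.of(
                // only read responses that are published after the consumer has subscribed
                ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest",
                // a random group-id puts the consumer into its own consumer group, so it is
                // assigned all partitions of the response topic and sees every response
                ConsumerConfig.GROUP_ID_CONFIG, tenantId + "-" + UUID.randomUUID());
    }
}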
use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by eclipse.
the class HonoKafkaConsumerIT method testConsumerAutoCreatesTopicAndReadsLatestRecordsPublishedAfterStart.
/**
 * Verifies that a HonoKafkaConsumer subscribing to a not-yet-existing topic, configured with "latest"
 * as offset reset strategy, only receives records that are published to the auto-created topic after
 * the consumer's <em>start()</em> method has completed.
 *
 * @param partitionAssignmentStrategy The partition assignment strategy to use for the consumer.
 * @param ctx The vert.x test context.
 */
@ParameterizedTest
@MethodSource("partitionAssignmentStrategies")
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerAutoCreatesTopicAndReadsLatestRecordsPublishedAfterStart(
        final String partitionAssignmentStrategy, final VertxTestContext ctx) {
    // prepare consumer
    final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
    applyPartitionAssignmentStrategy(consumerConfig, partitionAssignmentStrategy);
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    final AtomicReference<Promise<Void>> nextRecordReceivedPromiseRef = new AtomicReference<>();
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        receivedRecords.add(record);
        Optional.ofNullable(nextRecordReceivedPromiseRef.get()).ifPresent(Promise::complete);
    };
    final String topic = "test_" + UUID.randomUUID();
    topicsToDeleteAfterTests.add(topic);
    kafkaConsumer = new HonoKafkaConsumer(vertx, Set.of(topic), recordHandler, consumerConfig);
    // start consumer
    kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
        ctx.verify(() -> {
            assertThat(receivedRecords.size()).isEqualTo(0);
        });
        final Promise<Void> nextRecordReceivedPromise = Promise.promise();
        nextRecordReceivedPromiseRef.set(nextRecordReceivedPromise);
        LOG.debug("consumer started, publish record to be received by the consumer");
        final String recordKey = "addedAfterStartKey";
        publish(topic, recordKey, Buffer.buffer("testPayload"));
        nextRecordReceivedPromise.future().onComplete(ar -> {
            ctx.verify(() -> {
                assertThat(receivedRecords.size()).isEqualTo(1);
                assertThat(receivedRecords.get(0).key()).isEqualTo(recordKey);
            });
            ctx.completeNow();
        });
    }));
}
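The applyPartitionAssignmentStrategy helper is not shown on this page. A plausible sketch, assuming it simply maps the (possibly null) strategy parameter onto Kafka's partition.assignment.strategy property:

import java.util.Map;
import java.util.Optional;

import org.apache.kafka.clients.consumer.ConsumerConfig;

public class PartitionAssignmentStrategySketch {

    // assumed shape of the helper; the actual Hono test support code may differ
    static void applyPartitionAssignmentStrategy(final Map<String, String> consumerConfig,
            final String partitionAssignmentStrategy) {
        Optional.ofNullable(partitionAssignmentStrategy)
                .ifPresent(strategy -> consumerConfig.put(
                        ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, strategy));
    }
}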
use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by eclipse.
the class HonoKafkaConsumerIT method testConsumerReadsAllRecordsForDynamicallyCreatedTopics.
/**
 * Verifies that a HonoKafkaConsumer configured with "latest" as offset reset strategy and a topic pattern
 * subscription receives records published after multiple <em>ensureTopicIsAmongSubscribedTopicPatternTopics()</em>
 * invocations have been completed.
 *
 * @param partitionAssignmentStrategy The partition assignment strategy to use for the consumer.
 * @param ctx The vert.x test context.
 * @throws InterruptedException if test execution gets interrupted.
 */
@ParameterizedTest
@MethodSource("partitionAssignmentStrategies")
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsAllRecordsForDynamicallyCreatedTopics(
        final String partitionAssignmentStrategy, final VertxTestContext ctx) throws InterruptedException {
    final String patternPrefix = "test_" + UUID.randomUUID() + "_";
    final int numTopicsAndRecords = 3;
    final Pattern topicPattern = Pattern.compile(Pattern.quote(patternPrefix) + ".*");
    // prepare consumer
    final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
    applyPartitionAssignmentStrategy(consumerConfig, partitionAssignmentStrategy);
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    final Promise<Void> allRecordsReceivedPromise = Promise.promise();
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        receivedRecords.add(record);
        if (receivedRecords.size() == numTopicsAndRecords) {
            allRecordsReceivedPromise.complete();
        }
    };
    kafkaConsumer = new HonoKafkaConsumer(vertx, topicPattern, recordHandler, consumerConfig);
    // start consumer
    kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
        ctx.verify(() -> {
            assertThat(receivedRecords.size()).isEqualTo(0);
        });
        LOG.debug("consumer started, create new topics implicitly by invoking ensureTopicIsAmongSubscribedTopicPatternTopics()");
        final String recordKey = "addedAfterStartKey";
        for (int i = 0; i < numTopicsAndRecords; i++) {
            final String topic = patternPrefix + i;
            kafkaConsumer.ensureTopicIsAmongSubscribedTopicPatternTopics(topic).onComplete(ctx.succeeding(v2 -> {
                LOG.debug("publish record to be received by the consumer");
                publish(topic, recordKey, Buffer.buffer("testPayload"));
            }));
        }
        allRecordsReceivedPromise.future().onComplete(ar -> {
            ctx.verify(() -> {
                assertThat(receivedRecords.size()).isEqualTo(numTopicsAndRecords);
                receivedRecords.forEach(record -> assertThat(record.key()).isEqualTo(recordKey));
            });
            ctx.completeNow();
        });
    }));
    if (!ctx.awaitCompletion(9, TimeUnit.SECONDS)) {
        ctx.failNow(new IllegalStateException(String.format(
                "timeout waiting for expected number of records (%d) to be received; received records: %d",
                numTopicsAndRecords, receivedRecords.size())));
    }
}
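The Pattern.quote(patternPrefix) + ".*" construction is what makes the dynamically created topics match the subscription: the prefix is treated as a literal (quoting guards against any regex metacharacters in it), and ".*" accepts any suffix. A small standalone check of the matching behavior (illustration only, not part of the test):

import java.util.UUID;
import java.util.regex.Pattern;

public class TopicPatternSketch {

    public static void main(final String[] args) {
        final String patternPrefix = "test_" + UUID.randomUUID() + "_";
        final Pattern topicPattern = Pattern.compile(Pattern.quote(patternPrefix) + ".*");
        // matches() anchors at both ends, so only topics starting with the prefix match
        System.out.println(topicPattern.matcher(patternPrefix + "0").matches());             // true
        System.out.println(topicPattern.matcher("other_" + patternPrefix + "0").matches()); // false
    }
}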
use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by eclipse.
the class HonoKafkaConsumerIT method testConsumerReadsAllRecordsAfterStart.
/**
 * Verifies that a HonoKafkaConsumer configured with "earliest" as offset reset strategy receives all
 * current records after the consumer <em>start()</em> method has completed.
 *
 * @param ctx The vert.x test context.
 * @throws InterruptedException if test execution gets interrupted.
 */
@Test
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsAllRecordsAfterStart(final VertxTestContext ctx) throws InterruptedException {
    final int numTopics = 2;
    final int numPartitions = 5;
    final int numTestRecordsPerTopic = 20;
    final Set<String> topics = IntStream.range(0, numTopics)
            .mapToObj(i -> "test_" + i + "_" + UUID.randomUUID())
            .collect(Collectors.toSet());
    final VertxTestContext setup = new VertxTestContext();
    createTopics(topics, numPartitions)
            .compose(v -> publishRecords(numTestRecordsPerTopic, "key_", topics))
            .onComplete(setup.succeedingThenComplete());
    assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }
    LOG.debug("topics created and test records published");
    // prepare consumer
    final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    final Promise<Void> allRecordsReceivedPromise = Promise.promise();
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final int totalExpectedMessages = numTopics * numTestRecordsPerTopic;
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        receivedRecords.add(record);
        if (receivedRecords.size() == totalExpectedMessages) {
            allRecordsReceivedPromise.complete();
        }
    };
    kafkaConsumer = new HonoKafkaConsumer(vertx, topics, recordHandler, consumerConfig);
    // start consumer
    kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
        ctx.verify(() -> {
            assertThat(receivedRecords.size()).isEqualTo(0);
        });
        allRecordsReceivedPromise.future().onComplete(ar -> {
            ctx.verify(() -> {
                assertThat(receivedRecords.size()).isEqualTo(totalExpectedMessages);
            });
            ctx.completeNow();
        });
    }));
}
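The createTopics and publishRecords helpers come from the test's support code and are not shown here. For orientation, a sketch of what topic creation might look like with the plain Kafka AdminClient (the actual Hono helper may well use the Vert.x Kafka admin client and return a Future instead of blocking):

import java.util.Set;
import java.util.stream.Collectors;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewTopic;

public class CreateTopicsSketch {

    // sketch only: create each topic with the given partition count and replication factor 1,
    // blocking until the broker has acknowledged all of them
    static void createTopicsBlocking(final AdminClient adminClient, final Set<String> topics,
            final int numPartitions) throws Exception {
        adminClient.createTopics(topics.stream()
                .map(topic -> new NewTopic(topic, numPartitions, (short) 1))
                .collect(Collectors.toList()))
            .all()
            .get();
    }
}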
use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by eclipse.
the class AsyncHandlingAutoCommitKafkaConsumerTest method testConsumerCommitsOffsetsOnRebalance.
/**
 * Verifies that the consumer commits the last fully handled records on rebalance.
 *
 * @param ctx The vert.x test context.
 * @throws InterruptedException if the test execution gets interrupted.
 */
@Test
public void testConsumerCommitsOffsetsOnRebalance(final VertxTestContext ctx) throws InterruptedException {
    final int numTestRecords = 5;
    final VertxTestContext receivedRecordsCtx = new VertxTestContext();
    final Checkpoint receivedRecordsCheckpoint = receivedRecordsCtx.checkpoint(numTestRecords);
    final Map<Long, Promise<Void>> recordsHandlingPromiseMap = new HashMap<>();
    final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> handler = record -> {
        final Promise<Void> promise = Promise.promise();
        if (recordsHandlingPromiseMap.put(record.offset(), promise) != null) {
            receivedRecordsCtx.failNow(new IllegalStateException("received record with duplicate offset"));
        }
        receivedRecordsCheckpoint.flag();
        return promise.future();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    // periodic commit shall not play a role here
    consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "300000");
    consumerConfig.put(AsyncHandlingAutoCommitKafkaConsumer.CONFIG_HONO_OFFSETS_COMMIT_RECORD_COMPLETION_TIMEOUT_MILLIS, "0");
    consumer = new AsyncHandlingAutoCommitKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig);
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updatePartitions(TOPIC_PARTITION, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION));
    consumer.start().onComplete(ctx.succeeding(v2 -> {
        mockConsumer.schedulePollTask(() -> {
            IntStream.range(0, numTestRecords).forEach(offset -> {
                mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer()));
            });
        });
    }));
    assertWithMessage("records received in 5s")
            .that(receivedRecordsCtx.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (receivedRecordsCtx.failed()) {
        ctx.failNow(receivedRecordsCtx.causeOfFailure());
        return;
    }
    // records received, complete the handling of some of them
    recordsHandlingPromiseMap.get(0L).complete();
    recordsHandlingPromiseMap.get(1L).complete();
    // offsets 2 and 3 are not completed yet, hence offset 1 is the latest in the row of fully handled records
    final AtomicInteger latestFullyHandledOffset = new AtomicInteger(1);
    recordsHandlingPromiseMap.get(4L).complete();
    // define VertxTestContexts for 3 checks (3x rebalance/commit)
    final AtomicInteger checkIndex = new AtomicInteger(0);
    final List<VertxTestContext> commitCheckContexts = IntStream.range(0, 3)
            .mapToObj(i -> new VertxTestContext()).collect(Collectors.toList());
    final List<Checkpoint> commitCheckpoints = commitCheckContexts.stream()
            .map(c -> c.checkpoint(1)).collect(Collectors.toList());
    final InterruptableSupplier<Boolean> waitForCurrentCommitCheckResult = () -> {
        assertWithMessage("partition assigned in 5s for checking of commits")
                .that(commitCheckContexts.get(checkIndex.get()).awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
        if (commitCheckContexts.get(checkIndex.get()).failed()) {
            ctx.failNow(commitCheckContexts.get(checkIndex.get()).causeOfFailure());
            return false;
        }
        return true;
    };
    consumer.setOnPartitionsAssignedHandler(partitions -> {
        final Map<TopicPartition, OffsetAndMetadata> committed = mockConsumer.committed(Set.of(TOPIC_PARTITION));
        ctx.verify(() -> {
            final OffsetAndMetadata offsetAndMetadata = committed.get(TOPIC_PARTITION);
            assertThat(offsetAndMetadata).isNotNull();
            assertThat(offsetAndMetadata.offset()).isEqualTo(latestFullyHandledOffset.get() + 1L);
        });
        commitCheckpoints.get(checkIndex.get()).flag();
    });
    // now force a rebalance which should trigger the above onPartitionsAssignedHandler
    mockConsumer.rebalance(List.of(TOPIC_PARTITION));
    if (!waitForCurrentCommitCheckResult.get()) {
        return;
    }
    checkIndex.incrementAndGet();
    // now another rebalance (i.e. commit trigger) - no change in offsets
    mockConsumer.rebalance(List.of(TOPIC_PARTITION));
    if (!waitForCurrentCommitCheckResult.get()) {
        return;
    }
    checkIndex.incrementAndGet();
    // now complete some more promises
    recordsHandlingPromiseMap.get(2L).complete();
    recordsHandlingPromiseMap.get(3L).complete();
    // offset 4 already complete
    latestFullyHandledOffset.set(4);
    // again rebalance/commit
    mockConsumer.rebalance(List.of(TOPIC_PARTITION));
    if (waitForCurrentCommitCheckResult.get()) {
        ctx.completeNow();
    }
}
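The + 1L in the assertion reflects Kafka's commit convention: a committed offset denotes the next record to be consumed, not the last one handled. So after records up to offset N have been fully handled, the consumer commits N + 1. A minimal illustration (not Hono code):

import java.util.Map;

import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class CommitOffsetSketch {

    public static void main(final String[] args) {
        // after fully handling records 0 and 1, the offset to commit is 2: the
        // position from which a consumer (re)joining the group would resume reading
        final long latestFullyHandledOffset = 1L;
        final Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = Map.of(
                new TopicPartition("test", 0),
                new OffsetAndMetadata(latestFullyHandledOffset + 1L));
        System.out.println(offsetsToCommit);
    }
}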