Use of org.apache.kafka.clients.consumer.ConsumerConfig in project kafka by apache.
Class NamedTopologyIntegrationTest, method shouldAddToEmptyInitialTopologyRemoveResetOffsetsThenAddSameNamedTopology.
@Test
public void shouldAddToEmptyInitialTopologyRemoveResetOffsetsThenAddSameNamedTopology() throws Exception {
    CLUSTER.createTopics(SUM_OUTPUT, COUNT_OUTPUT);
    // Build up named topology with two stateful subtopologies
    final KStream<String, Long> inputStream1 = topology1Builder.stream(INPUT_STREAM_1);
    inputStream1.groupByKey().count().toStream().to(COUNT_OUTPUT);
    inputStream1.groupByKey().reduce(Long::sum).toStream().to(SUM_OUTPUT);
    streams.start();
    final NamedTopology namedTopology = topology1Builder.build();
    streams.addNamedTopology(namedTopology).all().get();
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, COUNT_OUTPUT, 3), equalTo(COUNT_OUTPUT_DATA));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SUM_OUTPUT, 3), equalTo(SUM_OUTPUT_DATA));
    streams.removeNamedTopology("topology-1", true).all().get();
    streams.cleanUpNamedTopology("topology-1");
    CLUSTER.getAllTopicsInCluster().stream().filter(t -> t.contains("changelog")).forEach(t -> {
        try {
            CLUSTER.deleteTopicAndWait(t);
        } catch (final InterruptedException e) {
            e.printStackTrace();
        }
    });
    final KStream<String, Long> inputStream = topology1BuilderDup.stream(INPUT_STREAM_1);
    inputStream.groupByKey().count().toStream().to(COUNT_OUTPUT);
    inputStream.groupByKey().reduce(Long::sum).toStream().to(SUM_OUTPUT);
    final NamedTopology namedTopologyDup = topology1BuilderDup.build();
    streams.addNamedTopology(namedTopologyDup).all().get();
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, COUNT_OUTPUT, 3), equalTo(COUNT_OUTPUT_DATA));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SUM_OUTPUT, 3), equalTo(SUM_OUTPUT_DATA));
    CLUSTER.deleteTopicsAndWait(SUM_OUTPUT, COUNT_OUTPUT);
}
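The consumerConfig passed to waitUntilMinKeyValueRecordsReceived above is a test fixture that is not part of this snippet. As a rough sketch of how such a verification consumer is typically configured with ConsumerConfig keys (the group id, bootstrap address and deserializer choices below are illustrative assumptions, not the test's actual fixture):

import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;

// Hypothetical helper building the properties used by waitUntilMinKeyValueRecordsReceived(...)
private static Properties buildVerificationConsumerConfig(final String bootstrapServers) {
    final Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "named-topology-verification");
    // start from the earliest offset so all records written to the output topics are read
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class.getName());
    return props;
}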
Use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by eclipse.
Class HonoKafkaConsumerIT, method testConsumerReadsLatestRecordsPublishedAfterStart.
/**
 * Verifies that a HonoKafkaConsumer configured with "latest" as offset reset strategy only receives
 * records published after the consumer <em>start()</em> method has completed.
 *
 * @param partitionAssignmentStrategy The partition assignment strategy to use for the consumer.
 * @param ctx The vert.x test context.
 * @throws InterruptedException if test execution gets interrupted.
 */
@ParameterizedTest
@MethodSource("partitionAssignmentStrategies")
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsLatestRecordsPublishedAfterStart(
        final String partitionAssignmentStrategy, final VertxTestContext ctx) throws InterruptedException {
    final int numTopics = 2;
    final int numPartitions = 5;
    final int numTestRecordsPerTopic = 20;

    final Set<String> topics = IntStream.range(0, numTopics)
            .mapToObj(i -> "test_" + i + "_" + UUID.randomUUID())
            .collect(Collectors.toSet());
    final String publishTestTopic = topics.iterator().next();

    final VertxTestContext setup = new VertxTestContext();
    createTopics(topics, numPartitions)
            .compose(v -> publishRecords(numTestRecordsPerTopic, "key_", topics))
            .onComplete(setup.succeedingThenComplete());
    assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }
    LOG.debug("topics created and (to be ignored) test records published");

    // prepare consumer
    final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
    applyPartitionAssignmentStrategy(consumerConfig, partitionAssignmentStrategy);
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");

    final String publishedAfterStartRecordKey = "publishedAfterStartKey";
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        // verify received record
        ctx.verify(() -> assertThat(record.key()).isEqualTo(publishedAfterStartRecordKey));
        ctx.completeNow();
    };
    kafkaConsumer = new HonoKafkaConsumer(vertx, topics, recordHandler, consumerConfig);

    // start consumer
    kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
        LOG.debug("consumer started, publish record to be received by the consumer");
        publish(publishTestTopic, publishedAfterStartRecordKey, Buffer.buffer("testPayload"));
    }));

    if (!ctx.awaitCompletion(9, TimeUnit.SECONDS)) {
        ctx.failNow(new IllegalStateException("timeout waiting for record to be received"));
    }
}
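The applyPartitionAssignmentStrategy helper is not shown in this snippet. A minimal sketch, assuming it does nothing more than map the test parameter onto the standard ConsumerConfig property:

// Sketch only: sets the partition.assignment.strategy consumer property if a strategy was given
private static void applyPartitionAssignmentStrategy(final Map<String, String> consumerConfig,
        final String partitionAssignmentStrategy) {
    if (partitionAssignmentStrategy != null) {
        consumerConfig.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, partitionAssignmentStrategy);
    }
}

With the cooperative-sticky strategy, for example, the parameter would carry the class name of org.apache.kafka.clients.consumer.CooperativeStickyAssignor.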
Use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by eclipse.
Class HonoKafkaConsumerTest, method testConsumerInvokesHandlerOnReceivedRecords.
/**
 * Verifies that the HonoKafkaConsumer invokes the provided handler on received records.
 *
 * @param ctx The vert.x test context.
 */
@Test
public void testConsumerInvokesHandlerOnReceivedRecords(final VertxTestContext ctx) {
    final int numTestRecords = 5;
    final Checkpoint receivedRecordsCheckpoint = ctx.checkpoint(numTestRecords);
    final Handler<KafkaConsumerRecord<String, Buffer>> handler = record -> {
        receivedRecordsCheckpoint.flag();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());

    consumer = new HonoKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig);
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(topicPartition, 0L));
    mockConsumer.updateEndOffsets(Map.of(topicPartition, 0L));
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(topicPartition));

    consumer.start().onComplete(ctx.succeeding(v2 -> {
        mockConsumer.schedulePollTask(() -> {
            IntStream.range(0, numTestRecords).forEach(offset -> {
                mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer()));
            });
        });
    }));
}
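The fields TOPIC, PARTITION, topicPartition and mockConsumer are class-level fixtures that are not part of this snippet; the setRebalancePartitionAssignmentAfterSubscribe call appears to come from Hono's own mock consumer helper (KafkaMockConsumer, referenced in the last example below) rather than from Kafka's plain MockConsumer. A rough sketch of such a fixture in terms of the plain MockConsumer, with all names and values below being illustrative assumptions:

import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

import io.vertx.core.buffer.Buffer;

// Hypothetical fixture declarations
private static final String TOPIC = "test.topic";
private static final int PARTITION = 0;

private final TopicPartition topicPartition = new TopicPartition(TOPIC, PARTITION);
// a MockConsumer lets the test feed records via addRecord(...) and schedulePollTask(...)
private final MockConsumer<String, Buffer> mockConsumer = new MockConsumer<>(OffsetResetStrategy.LATEST);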
Use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by eclipse.
Class HonoKafkaConsumerTest, method testConsumerSkipsHandlerInvocationOnReceivingExpiredRecords.
/**
 * Verifies that the HonoKafkaConsumer doesn't invoke the provided handler on received records whose ttl has expired.
 *
 * @param ctx The vert.x test context.
 */
@Test
public void testConsumerSkipsHandlerInvocationOnReceivingExpiredRecords(final VertxTestContext ctx) {
    final int numNonExpiredTestRecords = 5;
    final Checkpoint receivedRecordsCheckpoint = ctx.checkpoint(numNonExpiredTestRecords);
    final Handler<KafkaConsumerRecord<String, Buffer>> handler = record -> {
        receivedRecordsCheckpoint.flag();
    };
    final Checkpoint expiredRecordCheckpoint = ctx.checkpoint(1);
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());

    consumer = new HonoKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig) {
        @Override
        protected void onRecordHandlerSkippedForExpiredRecord(final KafkaConsumerRecord<String, Buffer> record) {
            expiredRecordCheckpoint.flag();
        }
    };
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(topicPartition, 0L));
    mockConsumer.updateEndOffsets(Map.of(topicPartition, 0L));
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(topicPartition));

    consumer.start().onComplete(ctx.succeeding(v2 -> {
        mockConsumer.schedulePollTask(() -> {
            // add record with elapsed ttl
            mockConsumer.addRecord(createRecordWithElapsedTtl());
            IntStream.range(1, numNonExpiredTestRecords + 1).forEach(offset -> {
                mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer()));
            });
        });
    }));
}
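The createRecordWithElapsedTtl helper is not included in this snippet. A hedged sketch, assuming the ttl is conveyed via Kafka record headers; the header names ("ttl", "creation-time"), their units and their encoding are assumptions for illustration:

import java.nio.charset.StandardCharsets;
import java.time.Instant;

import org.apache.kafka.clients.consumer.ConsumerRecord;

import io.vertx.core.buffer.Buffer;

// Hypothetical sketch: a record whose assumed "ttl" header (1 second) elapsed long before it is polled
private ConsumerRecord<String, Buffer> createRecordWithElapsedTtl() {
    final ConsumerRecord<String, Buffer> record =
            new ConsumerRecord<>(TOPIC, PARTITION, 0L, "expiredKey", Buffer.buffer());
    record.headers().add("ttl", "1".getBytes(StandardCharsets.UTF_8));
    // pretend the record was created 10 seconds ago, so a 1 second ttl has long expired
    record.headers().add("creation-time",
            Long.toString(Instant.now().minusSeconds(10).toEpochMilli()).getBytes(StandardCharsets.UTF_8));
    return record;
}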
Use of org.apache.kafka.clients.consumer.ConsumerConfig in project hono by eclipse.
Class AsyncHandlingAutoCommitKafkaConsumerTest, method testConsumerRespectsMaxRecordsInProcessingLimit.
/**
 * Verifies that the maximum number of records in processing by the consumer does not exceed
 * the limit of 1.5 times the <em>max.poll.records</em> config value.
 *
 * @param ctx The vert.x test context.
 */
@Test
public void testConsumerRespectsMaxRecordsInProcessingLimit(final VertxTestContext ctx) {
    final int maxPollRecords = 10;
    final int throttlingThreshold = maxPollRecords * AsyncHandlingAutoCommitKafkaConsumer.THROTTLING_THRESHOLD_PERCENTAGE_OF_MAX_POLL_RECORDS / 100;
    final int maxRecordsInProcessing = maxPollRecords + Math.max(throttlingThreshold, 1);
    final int numTestBatches = 5;
    final int numRecordsPerBatch = maxPollRecords;
    final int numRecords = numTestBatches * numRecordsPerBatch;
    final AtomicInteger offsetCounter = new AtomicInteger();
    final Promise<Void> allRecordsReceivedPromise = Promise.promise();
    final List<Promise<Void>> uncompletedRecordHandlingPromises = new ArrayList<>();
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final AtomicInteger observedMaxRecordsInProcessing = new AtomicInteger();
    final AtomicInteger testBatchesToAdd = new AtomicInteger(numTestBatches);

    // let the consumer record handler only complete the record processing when consumer record fetching
    // is already paused (or if all records have been received)
    final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> recordHandler = record -> {
        receivedRecords.add(record);
        final Promise<Void> recordHandlingCompleted = Promise.promise();
        uncompletedRecordHandlingPromises.add(recordHandlingCompleted);
        if (consumer.isRecordFetchingPaused() || receivedRecords.size() == numRecords) {
            if (uncompletedRecordHandlingPromises.size() > observedMaxRecordsInProcessing.get()) {
                observedMaxRecordsInProcessing.set(uncompletedRecordHandlingPromises.size());
            }
            if (receivedRecords.size() == numRecords) {
                LOG.trace("complete all remaining {} record handling promises", uncompletedRecordHandlingPromises.size());
                uncompletedRecordHandlingPromises.forEach(Promise::tryComplete);
                uncompletedRecordHandlingPromises.clear();
            } else {
                // complete record handling promises until consumer record fetching isn't paused anymore
                completeUntilConsumerRecordFetchingResumed(uncompletedRecordHandlingPromises.iterator());
            }
        }
        if (receivedRecords.size() == numRecords) {
            vertx.runOnContext(v -> allRecordsReceivedPromise.tryComplete());
        }
        return recordHandlingCompleted.future();
    };

    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    consumerConfig.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, Integer.toString(maxPollRecords));

    consumer = new AsyncHandlingAutoCommitKafkaConsumer(vertx, Set.of(TOPIC), recordHandler, consumerConfig);
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updatePartitions(TOPIC_PARTITION, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION));

    consumer.start().onComplete(ctx.succeeding(v2 -> {
        // schedule the poll tasks
        schedulePollTasksWithConsumerPausedCheck(offsetCounter, numRecordsPerBatch, testBatchesToAdd);
        final long timerId = vertx.setTimer(8000, tid -> {
            LOG.info("received records:\n{}", receivedRecords.stream().map(Object::toString).collect(Collectors.joining(",\n")));
            allRecordsReceivedPromise.tryFail(String.format("only received %d out of %d expected messages after 8s",
                    uncompletedRecordHandlingPromises.size(), numRecords));
        });
        allRecordsReceivedPromise.future().onComplete(ctx.succeeding(v -> {
            vertx.cancelTimer(timerId);
            ctx.verify(() -> {
                assertWithMessage("observed max no. of records in processing")
                        .that(observedMaxRecordsInProcessing.get()).isEqualTo(maxRecordsInProcessing);
            });
            ctx.completeNow();
        }));
    }));
}
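The completeUntilConsumerRecordFetchingResumed helper is likewise not part of this snippet. One possible sketch, assuming it simply completes queued record handling promises until the consumer reports that record fetching is no longer paused:

import java.util.Iterator;

import io.vertx.core.Promise;

// Sketch only: frees up in-flight records one by one until the consumer resumes record fetching
private void completeUntilConsumerRecordFetchingResumed(final Iterator<Promise<Void>> promiseIterator) {
    while (consumer.isRecordFetchingPaused() && promiseIterator.hasNext()) {
        promiseIterator.next().tryComplete();
        promiseIterator.remove();
    }
}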