Usage of io.smallrye.reactive.messaging.kafka.KafkaConnectorIncomingConfiguration in the project smallrye-reactive-messaging by smallrye.
From class DeprecatedCommitStrategiesTest, method testThrottledStrategy.
/**
 * Verifies the deprecated "throttled" commit strategy on a single partition:
 * the handler commits only up to the highest contiguous acknowledged offset,
 * even when records are acknowledged out of order.
 */
@Test
void testThrottledStrategy() {
    MapBasedConfig cfg = commonConfiguration()
            .with("commit-strategy", "throttled")
            .with("auto.commit.interval.ms", 100);
    String groupId = UUID.randomUUID().toString();
    source = new KafkaSource<>(vertx, groupId, new KafkaConnectorIncomingConfiguration(cfg),
            getConsumerRebalanceListeners(), CountKafkaCdiEvents.noCdiEvents,
            getDeserializationFailureHandlers(), -1);
    injectMockConsumer(source, consumer);

    List<Message<?>> received = new ArrayList<>();
    source.getStream().subscribe().with(received::add);

    TopicPartition partition = new TopicPartition(TOPIC, 0);
    consumer.updateBeginningOffsets(Collections.singletonMap(partition, 0L));
    consumer.schedulePollTask(() -> {
        source.getCommitHandler().partitionsAssigned(Collections.singleton(partition));
        consumer.rebalance(Collections.singletonList(new TopicPartition(TOPIC, 0)));
        consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 0, "k", "v0"));
    });

    await().until(() -> received.size() == 1);
    assertThat(received).hasSize(1);

    // Acknowledging the record at offset 0 moves the commit position to 1
    // (the committed offset is the next offset to read).
    received.get(0).ack().toCompletableFuture().join();
    await().untilAsserted(() -> {
        Map<TopicPartition, OffsetAndMetadata> committedOffsets = consumer.committed(Collections.singleton(partition));
        assertThat(committedOffsets.get(partition)).isNotNull();
        assertThat(committedOffsets.get(partition).offset()).isEqualTo(1);
    });

    consumer.schedulePollTask(() -> {
        consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 1, "k", "v1"));
        consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 2, "k", "v2"));
        consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 3, "k", "v3"));
    });
    await().until(() -> received.size() == 4);

    // Out-of-order acks: offset 2 then offset 1. With 0..2 all acknowledged,
    // the commit position advances to 3 (offset 3 is still pending).
    received.get(2).ack().toCompletableFuture().join();
    received.get(1).ack().toCompletableFuture().join();
    await().untilAsserted(() -> {
        Map<TopicPartition, OffsetAndMetadata> committedOffsets = consumer.committed(Collections.singleton(partition));
        assertThat(committedOffsets.get(partition)).isNotNull();
        assertThat(committedOffsets.get(partition).offset()).isEqualTo(3);
    });

    // Acknowledging the last record moves the commit position to 4.
    received.get(3).ack().toCompletableFuture().join();
    await().untilAsserted(() -> {
        Map<TopicPartition, OffsetAndMetadata> committedOffsets = consumer.committed(Collections.singleton(partition));
        assertThat(committedOffsets.get(partition)).isNotNull();
        assertThat(committedOffsets.get(partition).offset()).isEqualTo(4);
    });
}
Usage of io.smallrye.reactive.messaging.kafka.KafkaConnectorIncomingConfiguration in the project smallrye-reactive-messaging by smallrye.
From class DeprecatedCommitStrategiesTest, method testWithRebalanceListenerMatchGivenName.
/**
 * Checks that a {@code consumer-rebalance-listener.name} attribute matching a
 * registered {@code NamedRebalanceListener} bean lets the source keep consuming
 * records across successive partition re-assignments.
 */
@Test
public void testWithRebalanceListenerMatchGivenName() {
    addBeans(NamedRebalanceListener.class);
    MapBasedConfig cfg = commonConfiguration();
    cfg.with("consumer-rebalance-listener.name", "mine")
            .with("client.id", UUID.randomUUID().toString());
    String groupId = UUID.randomUUID().toString();
    source = new KafkaSource<>(vertx, groupId, new KafkaConnectorIncomingConfiguration(cfg),
            getConsumerRebalanceListeners(), CountKafkaCdiEvents.noCdiEvents,
            getDeserializationFailureHandlers(), -1);
    injectMockConsumer(source, consumer);

    List<Message<?>> received = new ArrayList<>();
    source.getStream().subscribe().with(received::add);

    Map<TopicPartition, Long> beginningOffsets = new HashMap<>();
    beginningOffsets.put(new TopicPartition(TOPIC, 0), 0L);
    beginningOffsets.put(new TopicPartition(TOPIC, 1), 0L);
    consumer.updateBeginningOffsets(beginningOffsets);

    // First assignment: only partition 0, which receives one record.
    consumer.schedulePollTask(() -> {
        consumer.rebalance(Collections.singletonList(new TopicPartition(TOPIC, 0)));
        consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 0, "k", "v"));
    });
    await().until(() -> received.size() == 1);
    assertThat(received).hasSize(1);

    // Re-assignment to partition 1: consumption continues after the rebalance.
    consumer.schedulePollTask(() -> {
        consumer.rebalance(Collections.singletonList(new TopicPartition(TOPIC, 1)));
        ConsumerRecord<String, String> next = new ConsumerRecord<>(TOPIC, 1, 0, "k", "v");
        consumer.addRecord(next);
    });
    await().until(() -> received.size() == 2);
    assertThat(received).hasSize(2);
}
Usage of io.smallrye.reactive.messaging.kafka.KafkaConnectorIncomingConfiguration in the project smallrye-reactive-messaging by smallrye.
From class KafkaCommitHandlerTest, method testSourceWithAutoCommitEnabled.
/**
 * With {@code enable.auto.commit=true} the Kafka client commits offsets itself:
 * after consuming 10 records, the committed offset for the partition eventually
 * reaches 10 without any manual commit from the connector.
 */
@Test
public void testSourceWithAutoCommitEnabled() throws ExecutionException, TimeoutException, InterruptedException {
    MapBasedConfig cfg = newCommonConfigForSource()
            .with("group.id", "test-source-with-auto-commit-enabled")
            .with(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true")
            .with(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 500)
            .with("value.deserializer", IntegerDeserializer.class.getName());
    KafkaConnectorIncomingConfiguration ic = new KafkaConnectorIncomingConfiguration(cfg);
    source = new KafkaSource<>(vertx, "test-source-with-auto-commit-enabled", ic,
            UnsatisfiedInstance.instance(), CountKafkaCdiEvents.noCdiEvents,
            UnsatisfiedInstance.instance(), -1);

    List<Message<?>> received = Collections.synchronizedList(new ArrayList<>());
    source.getStream().subscribe().with(received::add);

    companion.produceIntegers().usingGenerator(i -> new ProducerRecord<>(topic, i), 10);
    await().atMost(10, TimeUnit.SECONDS).until(() -> received.size() >= 10);
    assertThat(received.stream()
            .map(m -> ((KafkaRecord<String, Integer>) m).getPayload())
            .collect(Collectors.toList()))
            .containsExactly(0, 1, 2, 3, 4, 5, 6, 7, 8, 9);

    // Acknowledge the first message; with auto-commit enabled this does not
    // trigger a commit by itself.
    Optional<IncomingKafkaRecord<String, Integer>> first = received.stream()
            .map(m -> (IncomingKafkaRecord<String, Integer>) m)
            .findFirst();
    assertTrue(first.isPresent());
    CompletableFuture<Void> acked = new CompletableFuture<>();
    first.get().ack().whenComplete((a, t) -> acked.complete(null));
    acked.get(10, TimeUnit.SECONDS);

    // The client's periodic auto-commit eventually records offset 10.
    await().atMost(2, TimeUnit.MINUTES).ignoreExceptions().untilAsserted(() -> {
        TopicPartition topicPartition = new TopicPartition(topic, 0);
        OffsetAndMetadata offset = companion.consumerGroups()
                .offsets("test-source-with-auto-commit-enabled", topicPartition);
        assertNotNull(offset);
        assertEquals(10L, offset.offset());
    });
}
Usage of io.smallrye.reactive.messaging.kafka.KafkaConnectorIncomingConfiguration in the project smallrye-reactive-messaging by smallrye.
From class KafkaCommitHandlerTest, method testSourceWithThrottledAndRebalance.
/**
 * Two consumers in the same group using the "throttled" commit strategy: after
 * a second member joins and the two partitions are rebalanced (one partition
 * per member), all 20000 produced records are consumed and the sum of the
 * committed offsets on both partitions equals the total record count. The
 * first source must also still report itself as alive at the end.
 */
@Test
public void testSourceWithThrottledAndRebalance() {
    companion.topics().createAndWait(topic, 2);
    MapBasedConfig config1 = newCommonConfigForSource()
            .with("client.id", UUID.randomUUID().toString())
            .with("group.id", "test-source-with-throttled-latest-processed-commit")
            .with("value.deserializer", IntegerDeserializer.class.getName())
            .with("commit-strategy", "throttled")
            .with("throttled.unprocessed-record-max-age.ms", 1);
    MapBasedConfig config2 = newCommonConfigForSource()
            .with("client.id", UUID.randomUUID().toString())
            .with("group.id", "test-source-with-throttled-latest-processed-commit")
            .with("value.deserializer", IntegerDeserializer.class.getName())
            .with("commit-strategy", "throttled")
            .with("throttled.unprocessed-record-max-age.ms", 1);
    KafkaConnectorIncomingConfiguration ic1 = new KafkaConnectorIncomingConfiguration(config1);
    KafkaConnectorIncomingConfiguration ic2 = new KafkaConnectorIncomingConfiguration(config2);
    source = new KafkaSource<>(vertx, "test-source-with-throttled-latest-processed-commit", ic1,
            UnsatisfiedInstance.instance(), CountKafkaCdiEvents.noCdiEvents,
            UnsatisfiedInstance.instance(), -1);
    KafkaSource<String, Integer> source2 = new KafkaSource<>(vertx,
            "test-source-with-throttled-latest-processed-commit", ic2,
            UnsatisfiedInstance.instance(), CountKafkaCdiEvents.noCdiEvents,
            UnsatisfiedInstance.instance(), -1);

    List<Message<?>> messages1 = Collections.synchronizedList(new ArrayList<>());
    source.getStream().subscribe().with(m -> {
        m.ack();
        messages1.add(m);
    });
    // The single member initially owns both partitions.
    await().until(() -> source.getConsumer().getAssignments().await().indefinitely().size() == 2);

    // Keys "0"/"1" spread the records over the two partitions.
    companion.produceIntegers().usingGenerator(i -> new ProducerRecord<>(topic, Integer.toString(i % 2), i), 10000);
    await().atMost(2, TimeUnit.MINUTES).until(() -> messages1.size() >= 10);

    List<Message<?>> messages2 = Collections.synchronizedList(new ArrayList<>());
    source2.getStream().subscribe().with(m -> {
        m.ack();
        messages2.add(m);
    });
    // After the rebalance each member owns exactly one partition.
    await().until(() -> source2.getConsumer().getAssignments().await().indefinitely().size() == 1
            && source.getConsumer().getAssignments().await().indefinitely().size() == 1);

    companion.produceIntegers().usingGenerator(i -> new ProducerRecord<>(topic, Integer.toString(i % 2), i), 10000);
    await().atMost(2, TimeUnit.MINUTES).until(() -> messages1.size() + messages2.size() >= 10000);

    await().atMost(2, TimeUnit.MINUTES).untilAsserted(() -> {
        TopicPartition tp1 = new TopicPartition(topic, 0);
        TopicPartition tp2 = new TopicPartition(topic, 1);
        Map<TopicPartition, OffsetAndMetadata> result = companion.consumerGroups()
                .offsets("test-source-with-throttled-latest-processed-commit", Arrays.asList(tp1, tp2));
        assertNotNull(result.get(tp1));
        assertNotNull(result.get(tp2));
        // Fixed argument order: assertEquals(expected, actual) — the expected
        // value (total record count) comes first so failure messages are correct.
        assertEquals(20000L, result.get(tp1).offset() + result.get(tp2).offset());
    });

    await().atMost(2, TimeUnit.MINUTES).untilAsserted(() -> {
        HealthReport.HealthReportBuilder healthReportBuilder = HealthReport.builder();
        source.isAlive(healthReportBuilder);
        HealthReport build = healthReportBuilder.build();
        boolean ok = build.isOk();
        if (!ok) {
            // Dump per-channel diagnostics to ease debugging a failed liveness check.
            build.getChannels().forEach(ci -> System.out.println(ci.getChannel() + " - " + ci.getMessage()));
        }
        assertTrue(ok);
    });

    source2.closeQuietly();
}
Usage of io.smallrye.reactive.messaging.kafka.KafkaConnectorIncomingConfiguration in the project smallrye-reactive-messaging by smallrye.
From class DeprecatedCommitStrategiesTest, method testThrottledStrategyWithManyRecords.
/**
 * Exercises the "throttled" commit strategy over two partitions and thousands
 * of records. Partition 1 begins at offset 5, so its first five records are
 * skipped; after acknowledging everything, both partitions must end up with
 * their offsets fully committed and every expected payload must have arrived.
 */
@Test
void testThrottledStrategyWithManyRecords() {
    MapBasedConfig cfg = commonConfiguration()
            .with("client.id", UUID.randomUUID().toString())
            .with("commit-strategy", "throttled")
            .with("auto.offset.reset", "earliest")
            .with("auto.commit.interval.ms", 100);
    String groupId = UUID.randomUUID().toString();
    source = new KafkaSource<>(vertx, groupId, new KafkaConnectorIncomingConfiguration(cfg),
            getConsumerRebalanceListeners(), CountKafkaCdiEvents.noCdiEvents,
            getDeserializationFailureHandlers(), -1);
    injectMockConsumer(source, consumer);

    List<Message<?>> received = new ArrayList<>();
    source.getStream().subscribe().with(received::add);

    TopicPartition partition0 = new TopicPartition(TOPIC, 0);
    TopicPartition partition1 = new TopicPartition(TOPIC, 1);
    Map<TopicPartition, Long> startOffsets = new HashMap<>();
    startOffsets.put(partition0, 0L);
    startOffsets.put(partition1, 5L); // partition 1 starts past its first 5 records
    consumer.updateBeginningOffsets(startOffsets);

    consumer.schedulePollTask(() -> {
        consumer.rebalance(startOffsets.keySet());
        source.getCommitHandler().partitionsAssigned(startOffsets.keySet());
        for (int i = 0; i < 500; i++) {
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, i, "k", "v0-" + i));
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, i, "r", "v1-" + i));
        }
    });

    // Expected number of messages: 500 messages in each partition minus the [0..5) messages from p1
    int expectedFirstBatch = 500 * 2 - 5;
    await().until(() -> received.size() == expectedFirstBatch);
    assertThat(received).hasSize(expectedFirstBatch);

    // Acknowledge everything: both partitions should commit up to offset 500.
    received.forEach(m -> m.ack().toCompletableFuture().join());
    await().untilAsserted(() -> {
        Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(startOffsets.keySet());
        assertThat(committed.get(partition0)).isNotNull();
        assertThat(committed.get(partition0).offset()).isEqualTo(500);
        assertThat(committed.get(partition1)).isNotNull();
        assertThat(committed.get(partition1).offset()).isEqualTo(500);
    });

    // A second, larger batch on both partitions.
    consumer.schedulePollTask(() -> {
        for (int i = 0; i < 1000; i++) {
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 500 + i, "k", "v0-" + (500 + i)));
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 500 + i, "k", "v1-" + (500 + i)));
        }
    });
    int expectedTotal = expectedFirstBatch + 1000 * 2;
    await().until(() -> received.size() == expectedTotal);

    received.forEach(m -> m.ack().toCompletableFuture().join());
    await().untilAsserted(() -> {
        Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(startOffsets.keySet());
        assertThat(committed.get(partition0)).isNotNull();
        assertThat(committed.get(partition0).offset()).isEqualTo(1500);
        assertThat(committed.get(partition1)).isNotNull();
        assertThat(committed.get(partition1).offset()).isEqualTo(1500);
    });

    // Every payload must be present: v0-0..v0-1499 and v1-5..v1-1499
    // (partition 1's first five offsets were never delivered).
    List<String> payloads = received.stream().map(m -> (String) m.getPayload()).collect(Collectors.toList());
    for (int i = 0; i < 1500; i++) {
        assertThat(payloads).contains("v0-" + i);
    }
    for (int i = 5; i < 1500; i++) {
        assertThat(payloads).contains("v1-" + i);
    }
}
Aggregations