Use of io.smallrye.reactive.messaging.kafka.KafkaConnectorIncomingConfiguration in the project smallrye-reactive-messaging by smallrye.
Example from the class DeprecatedCommitStrategiesTest, method testThrottledStrategyWithTooManyUnackedMessages.
// Verifies that, with the throttled commit strategy, records left un-acknowledged for
// longer than throttled.unprocessed-record-max-age.ms (1 second here) cause the
// health/liveness check to report the channel as not OK.
@Test
void testThrottledStrategyWithTooManyUnackedMessages() throws Exception {
MapBasedConfig config = commonConfiguration().with("client.id", UUID.randomUUID().toString()).with("commit-strategy", "throttled").with("auto.offset.reset", "earliest").with("health-enabled", true).with("throttled.unprocessed-record-max-age.ms", 1000).with("auto.commit.interval.ms", 100);
String group = UUID.randomUUID().toString();
source = new KafkaSource<>(vertx, group, new KafkaConnectorIncomingConfiguration(config), getConsumerRebalanceListeners(), CountKafkaCdiEvents.noCdiEvents, getDeserializationFailureHandlers(), -1);
// Swap in the mock consumer so records can be injected without a broker.
injectMockConsumer(source, consumer);
List<Message<?>> list = new ArrayList<>();
source.getStream().subscribe().with(list::add);
TopicPartition p0 = new TopicPartition(TOPIC, 0);
TopicPartition p1 = new TopicPartition(TOPIC, 1);
// p0 begins at offset 0; p1 begins at offset 5, so p1's first 5 records are never delivered.
Map<TopicPartition, Long> offsets = new HashMap<>();
offsets.put(p0, 0L);
offsets.put(p1, 5L);
consumer.updateBeginningOffsets(offsets);
consumer.schedulePollTask(() -> {
consumer.rebalance(offsets.keySet());
source.getCommitHandler().partitionsAssigned(offsets.keySet());
for (int i = 0; i < 500; i++) {
consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, i, "k", "v0-" + i));
consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, i, "r", "v1-" + i));
}
});
// Expected number of messages: 500 messages in each partition minus the [0..5) messages from p1
int expected = 500 * 2 - 5;
await().until(() -> list.size() == expected);
assertThat(list).hasSize(expected);
// Ack every message from partition 0, but only the first 5 from partition 1;
// the remaining p1 messages stay un-acked and will exceed the max age.
// (An earlier comment said "3 first items", but count < 5 acks five.)
int count = 0;
for (Message<?> message : list) {
IncomingKafkaRecordMetadata<?, ?> metadata = message.getMetadata(IncomingKafkaRecordMetadata.class).orElseThrow(() -> new Exception("metadata expected"));
if (metadata.getPartition() == 0) {
message.ack().toCompletableFuture().join();
} else {
if (count < 5) {
message.ack().toCompletableFuture().join();
count = count + 1;
}
}
LegacyMetadataTestUtils.tempCompareLegacyAndApiMetadata(metadata, message);
}
// Poll the health report until the aged un-acked p1 records make the liveness check fail.
AtomicReference<HealthReport> report = new AtomicReference<>();
await().until(() -> {
HealthReport.HealthReportBuilder builder = HealthReport.builder();
source.isAlive(builder);
HealthReport r = builder.build();
report.set(r);
return !r.isOk();
});
HealthReport r = report.get();
// The failure message names the stuck topic-partition ("my-topic-1" — presumably
// TOPIC is "my-topic" and the stuck partition is 1) and an un-acked offset.
String message = r.getChannels().get(0).getMessage();
assertThat(message).contains("my-topic-1", "9");
}
Use of io.smallrye.reactive.messaging.kafka.KafkaConnectorIncomingConfiguration in the project smallrye-reactive-messaging by smallrye.
Example from the class DeprecatedCommitStrategiesTest, method testLatestCommitStrategy.
// Verifies the "latest" commit strategy: acking a record immediately commits position
// record-offset + 1 for its partition, regardless of whether earlier records were acked,
// and acking an older record never moves the committed position backwards.
@Test
void testLatestCommitStrategy() {
MapBasedConfig config = commonConfiguration().with("commit-strategy", "latest").with("client.id", UUID.randomUUID().toString());
String group = UUID.randomUUID().toString();
source = new KafkaSource<>(vertx, group, new KafkaConnectorIncomingConfiguration(config), getConsumerRebalanceListeners(), CountKafkaCdiEvents.noCdiEvents, getDeserializationFailureHandlers(), -1);
// Swap in the mock consumer so records can be injected without a broker.
injectMockConsumer(source, consumer);
List<Message<?>> list = new ArrayList<>();
source.getStream().subscribe().with(list::add);
TopicPartition tp0 = new TopicPartition(TOPIC, 0);
TopicPartition tp1 = new TopicPartition(TOPIC, 1);
TopicPartition tp2 = new TopicPartition(TOPIC, 2);
Map<TopicPartition, Long> beginning = new HashMap<>();
beginning.put(tp0, 0L);
beginning.put(tp1, 0L);
beginning.put(tp2, 0L);
consumer.updateBeginningOffsets(beginning);
consumer.schedulePollTask(() -> {
consumer.rebalance(Arrays.asList(tp0, tp1, tp2));
consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 0, "k", "v0"));
});
await().until(() -> list.size() == 1);
assertThat(list).hasSize(1);
// Acking the record at offset 0 commits position 1 on tp0.
list.get(0).ack().toCompletableFuture().join();
await().untilAsserted(() -> {
Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(Collections.singleton(tp0));
assertThat(committed.get(tp0).offset()).isEqualTo(1);
});
consumer.schedulePollTask(() -> {
consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 1, "k", "v1"));
consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 2, "k", "v2"));
consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 3, "k", "v3"));
});
await().until(() -> list.size() == 4);
// Acking offset 1 commits position 2.
Message<?> message = list.get(1);
message.ack().toCompletableFuture().join();
await().untilAsserted(() -> {
Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(Collections.singleton(tp0));
assertThat(committed.get(tp0).offset()).isEqualTo(2);
});
// latest commit strategy: offset 2 is still un-acked, but acking offset 3 commits position 4 anyway.
// (An earlier comment said "3 is not acked"; it is index/offset 2 that is skipped here.)
list.get(3).ack().toCompletableFuture().join();
await().untilAsserted(() -> {
Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(Collections.singleton(tp0));
assertThat(committed.get(tp0).offset()).isEqualTo(4);
});
// Acking the older offset 2 does not change anything: the committed position stays at 4.
list.get(2).ack().toCompletableFuture().join();
await().untilAsserted(() -> {
Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(Collections.singleton(tp0));
assertThat(committed.get(tp0).offset()).isEqualTo(4);
});
// Re-acking offset 1 does not change anything either.
list.get(1).ack().toCompletableFuture().join();
await().untilAsserted(() -> {
Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(Collections.singleton(tp0));
assertThat(committed.get(tp0).offset()).isEqualTo(4);
});
// Records on the other two partitions: commits are tracked per partition.
consumer.schedulePollTask(() -> {
consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 0, "k", "v4"));
consumer.addRecord(new ConsumerRecord<>(TOPIC, 2, 0, "k", "v5"));
consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 1, "k", "v6"));
});
await().until(() -> list.size() == 7);
// Acking v6 (tp1, offset 1) commits position 2 on tp1; tp2 has no commit yet.
Message<?> v6 = list.stream().filter(m -> m.getPayload().equals("v6")).findFirst().orElse(null);
assertThat(v6).isNotNull();
v6.ack().toCompletableFuture().join();
await().untilAsserted(() -> {
Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(new HashSet<>(Arrays.asList(tp0, tp1, tp2)));
assertThat(committed.get(tp0).offset()).isEqualTo(4);
assertThat(committed.get(tp1).offset()).isEqualTo(2);
assertThat(committed.get(tp2)).isNull();
});
// Acking v5 (tp2, offset 0) commits position 1 on tp2 without touching the others.
Message<?> v5 = list.stream().filter(m -> m.getPayload().equals("v5")).findFirst().orElse(null);
assertThat(v5).isNotNull();
v5.ack().toCompletableFuture().join();
await().untilAsserted(() -> {
Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(new HashSet<>(Arrays.asList(tp0, tp1, tp2)));
assertThat(committed.get(tp0).offset()).isEqualTo(4);
assertThat(committed.get(tp1).offset()).isEqualTo(2);
assertThat(committed.get(tp2).offset()).isEqualTo(1);
});
}
Use of io.smallrye.reactive.messaging.kafka.KafkaConnectorIncomingConfiguration in the project smallrye-reactive-messaging by smallrye.
Example from the class DeprecatedCommitStrategiesTest, method testFailureWhenMultipleRebalanceListenerMatchGivenName.
// Verifies that source creation fails with a DeploymentException naming the ambiguous
// listener when two rebalance-listener beans are registered under the same name.
@Test
public void testFailureWhenMultipleRebalanceListenerMatchGivenName() {
    // Two listener beans share the name "mine", so the lookup is ambiguous.
    addBeans(NamedRebalanceListener.class, SameNameRebalanceListener.class);
    MapBasedConfig configuration = commonConfiguration()
            .with("consumer-rebalance-listener.name", "mine")
            .with("client.id", UUID.randomUUID().toString());
    String groupId = UUID.randomUUID().toString();
    assertThatThrownBy(() -> new KafkaSource<>(vertx, groupId,
            new KafkaConnectorIncomingConfiguration(configuration),
            getConsumerRebalanceListeners(),
            CountKafkaCdiEvents.noCdiEvents,
            getDeserializationFailureHandlers(), -1))
                    .isInstanceOf(DeploymentException.class)
                    .hasMessageContaining("mine");
}
Use of io.smallrye.reactive.messaging.kafka.KafkaConnectorIncomingConfiguration in the project smallrye-reactive-messaging by smallrye.
Example from the class KafkaCommitHandlerTest, method testSourceWithThrottledAndRebalanceWithPartitionsConfig.
// Verifies the throttled commit strategy across a rebalance: two consumers in the same
// group, each configured with "partitions" = 2, share the topic's 4 partitions; after the
// second consumer leaves, all 20000 produced records end up consumed between the two.
@Test
void testSourceWithThrottledAndRebalanceWithPartitionsConfig() {
    String groupId = "test-source-with-throttled-latest-processed-commit";
    companion.topics().createAndWait(topic, 4);
    companion.produceIntegers().usingGenerator(i -> new ProducerRecord<>(topic, Integer.toString(i % 2), i), 10000);
    // Both consumers use the same configuration (fresh client.id each) — built once in a helper
    // instead of two duplicated 7-call chains.
    KafkaConnectorIncomingConfiguration ic1 = new KafkaConnectorIncomingConfiguration(throttledConfigWithTwoPartitions(groupId));
    KafkaConnectorIncomingConfiguration ic2 = new KafkaConnectorIncomingConfiguration(throttledConfigWithTwoPartitions(groupId));
    source = new KafkaSource<>(vertx, groupId, ic1, UnsatisfiedInstance.instance(), CountKafkaCdiEvents.noCdiEvents, UnsatisfiedInstance.instance(), -1);
    KafkaSource<String, Integer> source2 = new KafkaSource<>(vertx, groupId, ic2, UnsatisfiedInstance.instance(), CountKafkaCdiEvents.noCdiEvents, UnsatisfiedInstance.instance(), -1);
    // start source1
    List<Message<?>> messages1 = Collections.synchronizedList(new ArrayList<>());
    source.getStream().subscribe().with(m -> {
        m.ack();
        messages1.add(m);
    });
    // wait for initial assignment
    await().atMost(1, TimeUnit.MINUTES).until(() -> source.getConsumer().getAssignments().await().indefinitely().size() >= 2);
    // source1 starts receiving messages
    await().atMost(2, TimeUnit.MINUTES).until(() -> messages1.size() >= 10);
    // start source2 — this triggers a rebalance
    List<Message<?>> messages2 = Collections.synchronizedList(new ArrayList<>());
    source2.getStream().subscribe().with(m -> {
        m.ack();
        messages2.add(m);
    });
    // wait for the rebalance: each consumer holds at least one partition, 4 in total
    await().until(() -> {
        int sourceAssignments = source.getConsumer().getAssignments().await().indefinitely().size();
        int source2Assignments = source2.getConsumer().getAssignments().await().indefinitely().size();
        return sourceAssignments >= 1 && source2Assignments >= 1 && sourceAssignments + source2Assignments == 4;
    });
    companion.produceIntegers().usingGenerator(i -> new ProducerRecord<>(topic, Integer.toString(i % 2), i), 10000);
    // source2 starts receiving messages
    await().atMost(2, TimeUnit.MINUTES).until(() -> messages2.size() >= 4000);
    // source2's partitions must have committed offsets recorded for the group
    Set<TopicPartition> source2Partitions = source2.getConsumer().getAssignments().await().indefinitely();
    await().untilAsserted(() -> {
        Map<TopicPartition, OffsetAndMetadata> offsets = companion.consumerGroups().offsets(groupId, new ArrayList<>(source2Partitions));
        assertThat(offsets).isNotNull();
    });
    // quit source2; source1 takes over and all 20000 records are consumed overall
    source2.closeQuietly();
    await().atMost(2, TimeUnit.MINUTES).until(() -> messages1.size() + messages2.size() >= 20000);
}

/**
 * Builds the incoming-channel configuration shared by both consumers of
 * {@link #testSourceWithThrottledAndRebalanceWithPartitionsConfig()}: throttled commit
 * strategy, {@code partitions} = 2, and a fresh random {@code client.id} per call.
 */
private MapBasedConfig throttledConfigWithTwoPartitions(String groupId) {
    return newCommonConfigForSource()
            .with("client.id", UUID.randomUUID().toString())
            .with("group.id", groupId)
            .with("value.deserializer", IntegerDeserializer.class.getName())
            .with("partitions", 2)
            .with("commit-strategy", "throttled")
            .with(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 100)
            .with("throttled.unprocessed-record-max-age.ms", 1000);
}
Use of io.smallrye.reactive.messaging.kafka.KafkaConnectorIncomingConfiguration in the project smallrye-reactive-messaging by smallrye.
Example from the class KafkaCommitHandlerTest, method testSourceWithThrottledLatestProcessedCommitEnabledWithoutAck.
// Verifies that the throttled commit strategy flips the health report from OK to not-OK
// when received records are never acknowledged and exceed the configured max age (100ms).
@Test
public void testSourceWithThrottledLatestProcessedCommitEnabledWithoutAck() {
    MapBasedConfig config = newCommonConfigForSource()
            .with("client.id", UUID.randomUUID().toString())
            .with("group.id", "test-source-with-throttled-latest-processed-commit-without-acking")
            .with("value.deserializer", IntegerDeserializer.class.getName())
            .with("commit-strategy", "throttled")
            .with("max.poll.records", 16)
            .with("throttled.unprocessed-record-max-age.ms", 100);
    source = new KafkaSource<>(vertx, "test-source-with-throttled-latest-processed-commit-without-acking",
            new KafkaConnectorIncomingConfiguration(config), UnsatisfiedInstance.instance(),
            CountKafkaCdiEvents.noCdiEvents, UnsatisfiedInstance.instance(), -1);
    // Collect messages WITHOUT acknowledging any of them.
    List<Message<?>> received = Collections.synchronizedList(new ArrayList<>());
    source.getStream().subscribe().with(received::add);
    companion.produceIntegers().usingGenerator(i -> new ProducerRecord<>(topic, i), 10);
    await().atMost(2, TimeUnit.MINUTES).until(() -> received.size() >= 10);
    List<Integer> payloads = received.stream()
            .map(m -> ((KafkaRecord<String, Integer>) m).getPayload())
            .collect(Collectors.toList());
    assertThat(payloads).containsExactly(0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
    // The health check is still OK at this point.
    await().atMost(2, TimeUnit.MINUTES).untilAsserted(() -> {
        HealthReport.HealthReportBuilder builder = HealthReport.builder();
        source.isAlive(builder);
        assertTrue(builder.build().isOk());
    });
    companion.produceIntegers().usingGenerator(i -> new ProducerRecord<>(topic, i), 30);
    await().atMost(2, TimeUnit.MINUTES).until(() -> received.size() >= 30);
    // With no acks, the liveness check eventually reports a failure.
    await().atMost(2, TimeUnit.MINUTES).untilAsserted(() -> {
        HealthReport.HealthReportBuilder builder = HealthReport.builder();
        source.isAlive(builder);
        assertFalse(builder.build().isOk());
    });
}
Aggregations