Use of io.smallrye.reactive.messaging.test.common.config.MapBasedConfig in project smallrye-reactive-messaging by smallrye.
The class ReactiveKafkaConsumerTest, method testOffsetResetLatest.
@Test
public void testOffsetResetLatest() throws Exception {
    int count = 10;
    sendMessages(0, count);
    String groupId = UUID.randomUUID().toString();
    MapBasedConfig config = createConsumerConfig(groupId)
            .with("topic", topic)
            .with(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    SingletonInstance<KafkaConsumerRebalanceListener> listeners = new SingletonInstance<>(groupId,
            getKafkaConsumerRebalanceListenerAwaitingAssignation());
    source = new KafkaSource<>(vertx, groupId, new KafkaConnectorIncomingConfiguration(config), listeners,
            CountKafkaCdiEvents.noCdiEvents, UnsatisfiedInstance.instance(), 0);
    AssertSubscriber<IncomingKafkaRecord<Integer, String>> subscriber = source.getStream()
            .invoke(this::onReceive)
            .subscribe().withSubscriber(AssertSubscriber.create(10));
    await().until(() -> {
        Map<TopicPartition, Long> map = source.getConsumer().getPositions().await().indefinitely();
        return map.values().stream().mapToLong(l -> l).sum() == 10;
    });
    subscriber.assertSubscribed().assertHasNotReceivedAnyItem();
    sendMessages(count, count);
    await().untilAsserted(() -> assertThat(subscriber.getItems()).hasSize(count));
    await().until(() -> {
        Map<TopicPartition, Long> map = source.getConsumer().getPositions().await().indefinitely();
        return map.values().stream().mapToLong(l -> l).sum() == 20;
    });
    subscriber.cancel();
    checkConsumedMessages(count, count);
}
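The createConsumerConfig(groupId) helper used above is defined in ClientTestBase and is not reproduced on this page. A minimal sketch of what it could look like, assuming the usual consumer settings (bootstrap servers, group id, client id, Integer/String deserializers) and a channel name for the connector configuration, is:

// Hypothetical sketch only; the real ClientTestBase helper may set different keys.
// Assumed imports: org.apache.kafka.clients.consumer.ConsumerConfig,
// org.apache.kafka.common.serialization.IntegerDeserializer and StringDeserializer.
private MapBasedConfig createConsumerConfig(String groupId) {
    return new MapBasedConfig()
            .with("channel-name", "data") // channel name for the connector configuration (assumed value)
            .with(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") // assumed broker address
            .with(ConsumerConfig.GROUP_ID_CONFIG, groupId)
            .with(ConsumerConfig.CLIENT_ID_CONFIG, "consumer-" + groupId)
            .with("key.deserializer", IntegerDeserializer.class.getName())
            .with("value.deserializer", StringDeserializer.class.getName());
}

The tests then extend or override these defaults, for instance with .with("topic", topic) or .with(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest") as above.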
Use of io.smallrye.reactive.messaging.test.common.config.MapBasedConfig in project smallrye-reactive-messaging by smallrye.
The class ReactiveKafkaConsumerTest, method testRebalanceWhilePausedAndPendingCommit.
@Test
public void testRebalanceWhilePausedAndPendingCommit() throws Exception {
    String groupId = UUID.randomUUID().toString();
    MapBasedConfig config = createConsumerConfig(groupId).put("topic", topic);
    MapBasedConfig config2 = createConsumerConfig(groupId)
            .with(ConsumerConfig.CLIENT_ID_CONFIG, "consumer-" + groupId + "-2")
            .with("topic", topic);
    List<IncomingKafkaRecord<Integer, String>> list = new CopyOnWriteArrayList<>();
    List<IncomingKafkaRecord<Integer, String>> list2 = new CopyOnWriteArrayList<>();
    // The first source does not commit for now.
    Multi<IncomingKafkaRecord<Integer, String>> stream = createSource(config, groupId).getStream().invoke(list::add);
    AssertSubscriber<IncomingKafkaRecord<Integer, String>> subscriber = stream.subscribe()
            .withSubscriber(AssertSubscriber.create(0));
    waitForPartitionAssignment();
    sendMessages(0, 100);
    // Request 50 messages and wait for them to be received. They are not acknowledged.
    subscriber.request(50);
    await().until(() -> list.size() == 50);
    // Create the second source, which acknowledges the messages.
    // The rebalance will split the partitions between the 2 sources, but both will restart from offset 0,
    // as nothing has been acked.
    KafkaSource<Integer, String> source2 = new KafkaSource<>(vertx, groupId,
            new KafkaConnectorIncomingConfiguration(config2), UnsatisfiedInstance.instance(),
            CountKafkaCdiEvents.noCdiEvents, UnsatisfiedInstance.instance(), 3);
    source2.getStream().invoke(i -> {
        list2.add(i);
        i.ack();
    }).subscribe().withSubscriber(AssertSubscriber.create(100));
    await().until(() -> !source2.getConsumer().getAssignments().await().indefinitely().isEmpty());
    // Verify the rebalance.
    await().until(() -> Uni.combine().all()
            .unis(source.getConsumer().getAssignments(), source2.getConsumer().getAssignments())
            .combinedWith((tp1, tp2) -> tp1.size() + tp2.size())
            .await().indefinitely() == partitions);
    Set<TopicPartition> assignedToSource2 = source2.getConsumer().getAssignments().await().indefinitely();
    subscriber.request(100);
    await().until(() -> list.size() >= 100);
    await().until(() -> list2.size() == assignedToSource2.size() * 25);
    // Acknowledge the messages, even those received before the rebalance.
    list.forEach(IncomingKafkaRecord::ack);
    // Verify that the 100 messages have been received.
    await().untilAsserted(() -> {
        List<String> receivedByFirstSource = list.stream()
                .map(i -> i.getPartition() + "/" + i.getOffset())
                .collect(Collectors.toList());
        List<String> receivedBySecondSource = list2.stream()
                .map(i -> i.getPartition() + "/" + i.getOffset())
                .collect(Collectors.toList());
        Set<String> set = new HashSet<>(receivedByFirstSource);
        set.addAll(receivedBySecondSource);
        assertThat(set).hasSize(100);
    });
    source2.closeQuietly();
}
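waitForPartitionAssignment() is another ClientTestBase helper that is not shown on this page. Judging from the getAssignments() checks performed elsewhere in these tests, a minimal sketch could simply poll the consumer assignments until all expected partitions are assigned; the 10-second timeout and the exact condition are assumptions.

// Sketch mirroring the getAssignments() checks used above; 'source' and 'partitions'
// are fields the tests already reference, the timeout value is an assumption.
private void waitForPartitionAssignment() {
    await().atMost(Duration.ofSeconds(10))
            .until(() -> source.getConsumer().getAssignments().await().indefinitely().size() == partitions);
}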
Use of io.smallrye.reactive.messaging.test.common.config.MapBasedConfig in project smallrye-reactive-messaging by smallrye.
The class BrokerRestartTest, method testPausingWhileBrokerIsDown.
@Test
public void testPausingWhileBrokerIsDown() throws Exception {
    try (StrimziKafkaContainer kafka = KafkaBrokerExtension.createKafkaContainer()) {
        kafka.start();
        await().until(kafka::isRunning);
        Integer port = kafka.getMappedPort(KAFKA_PORT);
        sendMessages(0, 10, kafka.getBootstrapServers());
        String groupId = UUID.randomUUID().toString();
        MapBasedConfig config = createConsumerConfig(groupId)
                .with("topic", topic)
                .with(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers())
                .with(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 6000);
        createSource(config, groupId);
        Multi<IncomingKafkaRecord<Integer, String>> stream = source.getStream();
        AssertSubscriber<IncomingKafkaRecord<Integer, String>> subscriber = stream.onItem()
                .invoke(item -> CompletableFuture.runAsync(item::ack))
                .subscribe().withSubscriber(AssertSubscriber.create(0));
        waitForPartitionAssignment();
        await().untilAsserted(() -> subscriber.assertSubscribed().assertHasNotReceivedAnyItem());
        subscriber.request(1);
        await().until(() -> subscriber.getItems().size() == 1);
        await().until(() -> !source.getConsumer().paused().await().indefinitely().isEmpty());
        sendMessages(0, 10, kafka.getBootstrapServers());
        kafka.stop();
        await().until(() -> !kafka.isRunning());
        await().until(() -> !source.getConsumer().paused().await().indefinitely().isEmpty());
        subscriber.request(3);
        await().until(() -> subscriber.getItems().size() == 4);
        subscriber.request(10);
        AtomicInteger last = new AtomicInteger(subscriber.getItems().size());
        // Make sure we can't poll anymore.
        await().pollDelay(Duration.ofMillis(1000)).until(() -> {
            return last.get() == last.getAndSet(subscriber.getItems().size());
        });
        try (StrimziKafkaContainer restarted = KafkaBrokerExtension.startKafkaBroker(port)) {
            await().until(restarted::isRunning);
            subscriber.request(100);
            await().until(() -> source.getConsumer().paused().await().indefinitely().isEmpty());
            sendMessages(10, 45, restarted.getBootstrapServers());
            await().until(() -> subscriber.getItems().size() == 55);
        }
    }
}
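The sendMessages(...) helper is also part of ClientTestBase. Below is a plausible sketch of the bootstrap-servers overload used in this test; the key/value types match the IncomingKafkaRecord<Integer, String> streams above, while the payload format and producer settings are assumptions.

// Hypothetical sketch; the real helper may use different partitioning, payloads or settings.
// Assumed imports: org.apache.kafka.clients.producer.KafkaProducer, ProducerConfig, ProducerRecord,
// org.apache.kafka.common.serialization.IntegerSerializer and StringSerializer.
private void sendMessages(int start, int count, String bootstrapServers) {
    Map<String, Object> props = new HashMap<>();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    try (KafkaProducer<Integer, String> producer = new KafkaProducer<>(props)) {
        for (int i = start; i < start + count; i++) {
            // The value content is an assumption; the tests only check counts and partition/offset pairs.
            producer.send(new ProducerRecord<>(topic, i, "Message " + i));
        }
        producer.flush();
    }
}

The two-argument sendMessages(start, count) variant used in the other tests presumably delegates to this one with the default bootstrap servers.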
Use of io.smallrye.reactive.messaging.test.common.config.MapBasedConfig in project smallrye-reactive-messaging by smallrye.
The class BrokerRestartTest, method testWithBrokerRestart.
@Test
public void testWithBrokerRestart() throws Exception {
    int sendBatchSize = 10;
    try (StrimziKafkaContainer kafka = KafkaBrokerExtension.createKafkaContainer()) {
        kafka.start();
        String groupId = UUID.randomUUID().toString();
        MapBasedConfig config = createConsumerConfig(groupId).put("topic", topic);
        KafkaSource<Integer, String> source = createSource(config, groupId);
        CountDownLatch receiveLatch = new CountDownLatch(sendBatchSize * 2);
        subscribe(source.getStream(), receiveLatch);
        sendMessages(0, sendBatchSize);
        try (StrimziKafkaContainer ignored = restart(kafka, 5)) {
            sendMessages(sendBatchSize, sendBatchSize);
            waitForMessages(receiveLatch);
            checkConsumedMessages();
        }
    }
}
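subscribe(stream, latch) and waitForMessages(latch), used only in this test, are further ClientTestBase helpers. A minimal sketch, assuming each record is acknowledged and counted down on the latch (the real helpers likely also record the messages so that checkConsumedMessages() can verify them), is:

// Sketch only; the timeout and the bookkeeping are assumptions.
private void subscribe(Multi<IncomingKafkaRecord<Integer, String>> stream, CountDownLatch latch) {
    stream.onItem().invoke(record -> {
        record.ack();        // acknowledge so the offset can be committed
        latch.countDown();   // count every received record
    }).subscribe().with(record -> {
        // items are already handled in the invoke() stage above
    });
}

private void waitForMessages(CountDownLatch latch) throws InterruptedException {
    assertThat(latch.await(1, TimeUnit.MINUTES)).isTrue();
}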
Use of io.smallrye.reactive.messaging.test.common.config.MapBasedConfig in project smallrye-reactive-messaging by smallrye.
The class ClientTestBase, method createSource.
public KafkaSource<Integer, String> createSource() {
    String groupId = UUID.randomUUID().toString();
    MapBasedConfig config = createConsumerConfig(groupId).put("topic", topic);
    return createSource(config, groupId);
}
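The two-argument createSource(config, groupId) overload it delegates to is not listed on this page. Modeled on the KafkaSource constructor calls shown in the tests above, a sketch could look as follows; storing the instance in the 'source' field (so that tests such as testPausingWhileBrokerIsDown can use it) and the final consumer index of 0 are assumptions.

// Sketch based on the KafkaSource constructors used in the tests above; the real
// version may pass different CDI instances or a different consumer index.
public KafkaSource<Integer, String> createSource(MapBasedConfig config, String groupId) {
    source = new KafkaSource<>(vertx, groupId, new KafkaConnectorIncomingConfiguration(config),
            UnsatisfiedInstance.instance(), CountKafkaCdiEvents.noCdiEvents,
            UnsatisfiedInstance.instance(), 0);
    return source;
}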