Use of com.rabbitmq.stream.Consumer in project rabbitmq-stream-java-client by rabbitmq.
From the class StreamConsumerTest, method duplicatesWhenResubscribeAfterDisconnectionWithLongFlushInterval:
@Test
@DisabledIfRabbitMqCtlNotSet
void duplicatesWhenResubscribeAfterDisconnectionWithLongFlushInterval() throws Exception {
  AtomicInteger receivedMessages = new AtomicInteger(0);
  int storeEvery = 10_000;
  String reference = "ref-1";
  AtomicBoolean receivedPoison = new AtomicBoolean(false);
  environment.consumerBuilder()
      .name(reference)
      .stream(stream)
      .offset(OffsetSpecification.first())
      .messageHandler(
          (context, message) -> {
            receivedMessages.incrementAndGet();
            if ("poison".equals(new String(message.getBodyAsBinary()))) {
              receivedPoison.set(true);
            }
          })
      .autoTrackingStrategy()
      .flushInterval(Duration.ofMinutes(60)) // long flush interval
      .messageCountBeforeStorage(storeEvery)
      .builder()
      .build();
  AtomicInteger publishedMessages = new AtomicInteger(0);
  Producer producer = environment.producerBuilder().stream(stream).build();
  IntConsumer publish =
      messagesToPublish -> {
        publishedMessages.addAndGet(messagesToPublish);
        IntStream.range(0, messagesToPublish)
            .forEach(
                i ->
                    producer.send(
                        producer.messageBuilder().addData("".getBytes()).build(),
                        confirmationStatus -> {}));
      };
  publish.accept(storeEvery * 2 - 100);
  waitAtMost(5, () -> receivedMessages.get() == publishedMessages.get());

  Host.killConnection("rabbitmq-stream-consumer-0");

  publish.accept(storeEvery * 2);
  waitAtMost(
      () -> {
        producer.send(
            producer.messageBuilder().addData("poison".getBytes()).build(),
            confirmationStatus -> {});
        publishedMessages.incrementAndGet();
        return receivedPoison.get();
      });
  // we have duplicates because the last stored value is behind and the re-subscription uses it
  assertThat(receivedMessages).hasValueGreaterThan(publishedMessages.get());
}
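The test above shows that a very long flush interval combined with a large messageCountBeforeStorage lets the stored offset lag far behind, so a re-subscription after a connection loss replays many already-seen messages. Below is a minimal sketch of a named consumer with tighter auto-tracking settings; the environment, stream name, and tracking values are illustrative assumptions, not values from the test above.

    // assumption: an Environment already connected to the broker and a stream named "my-stream"
    Consumer consumer =
        environment.consumerBuilder()
            .name("my-app")                       // consumer name, used as the offset tracking reference
            .stream("my-stream")
            .offset(OffsetSpecification.first())  // used only when no offset is stored yet
            .messageHandler(
                (context, message) -> {
                  // process the message; duplicates remain possible, so processing should be idempotent
                })
            .autoTrackingStrategy()
            .messageCountBeforeStorage(1_000)     // store the offset every 1,000 messages
            .flushInterval(Duration.ofSeconds(5)) // and at least every 5 seconds
            .builder()
            .build();

With settings like these the stored offset trails the last dispatched message by at most roughly messageCountBeforeStorage messages or one flushInterval of traffic, which bounds the redelivery window after a reconnection.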
Use of com.rabbitmq.stream.Consumer in project rabbitmq-stream-java-client by rabbitmq.
From the class StreamConsumerTest, method consumerShouldReUseInitialOffsetSpecificationAfterDisruptionIfNoMessagesReceived:
@Test
@DisabledIfRabbitMqCtlNotSet
void consumerShouldReUseInitialOffsetSpecificationAfterDisruptionIfNoMessagesReceived()
    throws Exception {
  int messageCountFirstWave = 10_000;
  Producer producer = environment.producerBuilder().stream(stream).build();
  // send a first wave of messages, they should not be consumed by the "next" consumer below
  CountDownLatch publishLatch = new CountDownLatch(messageCountFirstWave);
  IntStream.range(0, messageCountFirstWave)
      .forEach(
          i ->
              producer.send(
                  producer.messageBuilder().addData("first wave".getBytes()).build(),
                  confirmationStatus -> publishLatch.countDown()));
  latchAssert(publishLatch).completes();
  // set up the consumer with offset spec "next": it should consume only messages of the second wave
  AtomicInteger consumedCount = new AtomicInteger(0);
  CountDownLatch consumeLatch = new CountDownLatch(1);
  Set<String> bodies = ConcurrentHashMap.newKeySet(10);
  environment.consumerBuilder()
      .stream(stream)
      .offset(OffsetSpecification.next())
      .messageHandler(
          (context, message) -> {
            String body = new String(message.getBodyAsBinary());
            bodies.add(body);
            if (body.contains("second wave")) {
              consumeLatch.countDown();
            }
          })
      .build();
  // kill the consumer connection to trigger an internal restart
  Host.killConnection("rabbitmq-stream-consumer-0");
  // no messages should have been received
  assertThat(consumedCount.get()).isZero();
  // start the second wave, sending a message every 100 ms
  AtomicBoolean keepPublishing = new AtomicBoolean(true);
  new Thread(
          () -> {
            while (keepPublishing.get()) {
              producer.send(
                  producer.messageBuilder().addData("second wave".getBytes()).build(),
                  confirmationStatus -> publishLatch.countDown());
              waitMs(100);
            }
          })
      .start();
  // the consumer should restart consuming with its initial offset spec, "next"
  try {
    latchAssert(consumeLatch).completes(recoveryInitialDelay.multipliedBy(2));
    assertThat(bodies).hasSize(1).contains("second wave");
  } finally {
    keepPublishing.set(false);
  }
}
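This test relies on one recovery rule: because the consumer received no messages before its connection was killed, it has no delivered offset to resume from, so the client re-subscribes with the initial offset specification, here "next". For reference, a short sketch of the offset specifications the builder accepts; consumerBuilder stands for environment.consumerBuilder() and the concrete offset and timestamp values are only examples:

    consumerBuilder.offset(OffsetSpecification.first());   // start from the beginning of the stream
    consumerBuilder.offset(OffsetSpecification.last());     // start from the last chunk of messages
    consumerBuilder.offset(OffsetSpecification.next());     // receive only messages published from now on
    consumerBuilder.offset(OffsetSpecification.offset(42)); // start from an absolute offset in the stream
    consumerBuilder.offset(
        OffsetSpecification.timestamp(                      // start from messages stored after a point in time
            System.currentTimeMillis() - Duration.ofHours(1).toMillis()));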
Use of com.rabbitmq.stream.Consumer in project rabbitmq-stream-java-client by rabbitmq.
From the class StreamConsumerTest, method consumerShouldKeepConsumingAfterDisruption:
@ParameterizedTest
@MethodSource
@TestUtils.DisabledIfRabbitMqCtlNotSet
void consumerShouldKeepConsumingAfterDisruption(
    java.util.function.Consumer<Object> disruption, TestInfo info) throws Exception {
  String s = streamName(info);
  environment.streamCreator().stream(s).create();
  try {
    int messageCount = 10_000;
    CountDownLatch publishLatch = new CountDownLatch(messageCount);
    Producer producer = environment.producerBuilder().stream(s).build();
    IntStream.range(0, messageCount)
        .forEach(
            i ->
                producer.send(
                    producer.messageBuilder().addData("".getBytes()).build(),
                    confirmationStatus -> publishLatch.countDown()));
    assertThat(publishLatch.await(10, TimeUnit.SECONDS)).isTrue();
    producer.close();

    AtomicInteger receivedMessageCount = new AtomicInteger(0);
    CountDownLatch consumeLatch = new CountDownLatch(messageCount);
    CountDownLatch consumeLatchSecondWave = new CountDownLatch(messageCount * 2);
    StreamConsumer consumer =
        (StreamConsumer)
            environment.consumerBuilder()
                .stream(s)
                .offset(OffsetSpecification.first())
                .messageHandler(
                    (offset, message) -> {
                      receivedMessageCount.incrementAndGet();
                      consumeLatch.countDown();
                      consumeLatchSecondWave.countDown();
                    })
                .build();
    assertThat(consumeLatch.await(10, TimeUnit.SECONDS)).isTrue();
    assertThat(consumer.isOpen()).isTrue();

    disruption.accept(s);

    // wait until the stream has a leader or at least one replica again
    Client client = cf.get();
    TestUtils.waitAtMost(
        10,
        () -> {
          Client.StreamMetadata metadata = client.metadata(s).get(s);
          return metadata.getLeader() != null || !metadata.getReplicas().isEmpty();
        });

    CountDownLatch publishLatchSecondWave = new CountDownLatch(messageCount);
    Producer producerSecondWave = environment.producerBuilder().stream(s).build();
    IntStream.range(0, messageCount)
        .forEach(
            i ->
                producerSecondWave.send(
                    producerSecondWave.messageBuilder().addData("".getBytes()).build(),
                    confirmationStatus -> publishLatchSecondWave.countDown()));
    assertThat(publishLatchSecondWave.await(10, TimeUnit.SECONDS)).isTrue();
    producerSecondWave.close();

    assertThat(consumeLatchSecondWave.await(10, TimeUnit.SECONDS)).isTrue();
    assertThat(receivedMessageCount.get())
        .isBetween(messageCount * 2, messageCount * 2 + 1); // there can be a duplicate
    assertThat(consumer.isOpen()).isTrue();
    consumer.close();
  } finally {
    environment.deleteStream(s);
  }
}
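The wait loop in the middle of the test polls the stream metadata until the stream has a leader or at least one replica again, i.e. until it has recovered from the disruption. A rough sketch of that check using the low-level com.rabbitmq.stream.impl.Client, an internal class the test suite uses directly; the host, port, and stream name are assumptions:

    // assumption: a broker with the stream plugin listening on localhost:5552
    Client client = new Client(new Client.ClientParameters().host("localhost").port(5552));
    try {
      Client.StreamMetadata metadata = client.metadata("my-stream").get("my-stream");
      boolean available = metadata.getLeader() != null || !metadata.getReplicas().isEmpty();
    } finally {
      client.close();
    }

Applications normally do not need such a check: the Environment re-assigns producers and consumers when stream members move or restart, which is exactly the behavior this test verifies.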
Use of com.rabbitmq.stream.Consumer in project rabbitmq-stream-java-client by rabbitmq.
From the class StreamEnvironmentTest, method createPublishConsumeDelete:
@ParameterizedTest
@ValueSource(booleans = {false, true})
void createPublishConsumeDelete(boolean lazyInit, TestInfo info) {
  try (Environment env = environmentBuilder.lazyInitialization(lazyInit).build()) {
    String s = streamName(info);
    env.streamCreator().stream(s).create();
    int messageCount = 50_000;
    CountDownLatch confirmLatch = new CountDownLatch(messageCount);
    CountDownLatch consumeLatch = new CountDownLatch(messageCount);

    Producer producer = env.producerBuilder().stream(s).build();
    ConfirmationHandler confirmationHandler = confirmationStatus -> confirmLatch.countDown();
    IntStream.range(0, messageCount)
        .forEach(
            i -> {
              Message message =
                  producer.messageBuilder().addData("".getBytes(StandardCharsets.UTF_8)).build();
              producer.send(message, confirmationHandler);
            });
    latchAssert(confirmLatch).completes();

    Consumer consumer =
        env.consumerBuilder()
            .stream(s)
            .offset(OffsetSpecification.first())
            .messageHandler((context, message) -> consumeLatch.countDown())
            .build();
    latchAssert(consumeLatch).completes();

    producer.close();
    consumer.close();
    env.deleteStream(s);
  }
}
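The lazyInit parameter exercises the EnvironmentBuilder.lazyInitialization(boolean) option: when it is true, the environment does not open a connection at build time but only when the first stream, producer, or consumer operation needs one. A minimal sketch, assuming a local broker with the stream plugin on the default port:

    // assumption: broker reachable at rabbitmq-stream://localhost:5552
    try (Environment env =
        Environment.builder()
            .uri("rabbitmq-stream://localhost:5552")
            .lazyInitialization(true) // connect on first use instead of at build time
            .build()) {
      env.streamCreator().stream("my-stream").create(); // first operation triggers the connection
      env.deleteStream("my-stream");
    }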
Use of com.rabbitmq.stream.Consumer in project rabbitmq-stream-java-client by rabbitmq.
From the class StreamEnvironmentTest, method growShrinkResourcesWhenProducersConsumersAreOpenedAndClosed:
@Test
void growShrinkResourcesWhenProducersConsumersAreOpenedAndClosed(TestInfo info) throws Exception {
  int messageCount = 100;
  int streamCount = 20;
  int producersCount = ProducersCoordinator.MAX_PRODUCERS_PER_CLIENT * 3 + 10;
  int consumersCount = ConsumersCoordinator.MAX_SUBSCRIPTIONS_PER_CLIENT * 2 + 10;
  try (Environment environment = environmentBuilder.build()) {
    List<String> streams =
        IntStream.range(0, streamCount)
            .mapToObj(i -> streamName(info))
            .map(
                s -> {
                  environment.streamCreator().stream(s).create();
                  return s;
                })
            .collect(Collectors.toCollection(CopyOnWriteArrayList::new));

    CountDownLatch confirmLatch = new CountDownLatch(messageCount * producersCount);
    CountDownLatch consumeLatch = new CountDownLatch(messageCount * producersCount);

    List<Producer> producers =
        IntStream.range(0, producersCount)
            .mapToObj(
                i -> {
                  String s = streams.get(i % streams.size());
                  return environment.producerBuilder().stream(s).build();
                })
            .collect(Collectors.toList());
    List<Consumer> consumers =
        IntStream.range(0, consumersCount)
            .mapToObj(
                i -> {
                  String s = streams.get(new Random().nextInt(streams.size()));
                  return environment.consumerBuilder()
                      .stream(s)
                      .messageHandler((offset, message) -> consumeLatch.countDown())
                      .build();
                })
            .collect(Collectors.toList());

    producers.stream()
        .parallel()
        .forEach(
            producer ->
                IntStream.range(0, messageCount)
                    .forEach(
                        messageIndex ->
                            producer.send(
                                producer.messageBuilder().addData("".getBytes()).build(),
                                confirmationStatus -> {
                                  if (confirmationStatus.isConfirmed()) {
                                    confirmLatch.countDown();
                                  }
                                })));
    assertThat(confirmLatch.await(10, SECONDS)).isTrue();
    assertThat(consumeLatch.await(10, SECONDS)).isTrue();

    EnvironmentInfo environmentInfo = MonitoringTestUtils.extract(environment);
    assertThat(environmentInfo.getProducers()).hasSize(1);
    int producerManagerCount = environmentInfo.getProducers().get(0).getClients().size();
    assertThat(producerManagerCount).isPositive();
    assertThat(environmentInfo.getConsumers()).hasSize(1);
    int consumerManagerCount = environmentInfo.getConsumers().get(0).getClients().size();
    assertThat(consumerManagerCount).isPositive();

    java.util.function.Consumer<AutoCloseable> closing =
        agent -> {
          try {
            agent.close();
          } catch (Exception e) {
            throw new RuntimeException(e);
          }
        };
    // close enough producers and consumers to make the pool of client connections shrink
    Collections.reverse(producers);
    List<Producer> subProducers =
        producers.subList(0, ProducersCoordinator.MAX_PRODUCERS_PER_CLIENT);
    subProducers.forEach(closing);
    Collections.reverse(consumers);
    List<Consumer> subConsumers =
        consumers.subList(0, ConsumersCoordinator.MAX_SUBSCRIPTIONS_PER_CLIENT);
    subConsumers.forEach(closing);
    producers.removeAll(subProducers);
    consumers.removeAll(subConsumers);

    environmentInfo = MonitoringTestUtils.extract(environment);
    assertThat(environmentInfo.getProducers()).hasSize(1);
    assertThat(environmentInfo.getProducers().get(0).getClients())
        .hasSizeLessThan(producerManagerCount);
    assertThat(environmentInfo.getConsumers()).hasSize(1);
    assertThat(environmentInfo.getConsumers().get(0).getClients())
        .hasSizeLessThan(consumerManagerCount);

    producers.forEach(closing);
    consumers.forEach(closing);
    streams.forEach(environment::deleteStream);
  }
}
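The test illustrates how the environment multiplexes many producers and consumers over a small pool of client connections (bounded by ProducersCoordinator.MAX_PRODUCERS_PER_CLIENT and ConsumersCoordinator.MAX_SUBSCRIPTIONS_PER_CLIENT) and shrinks that pool as they are closed. In application code the corresponding clean-up is usually much simpler; a minimal sketch, with the stream name as an assumption:

    // closing the environment also closes any producers and consumers still open,
    // but closing them explicitly, consumers first, keeps shutdown deterministic
    consumer.close();
    producer.close();
    environment.deleteStream("my-stream"); // only if the application owns the stream
    environment.close();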