Use of com.rabbitmq.stream.Consumer in project rabbitmq-stream-java-client by rabbitmq.
Class StreamEnvironmentTest, method environmentPublishersConsumersShouldCloseSuccessfullyWhenBrokerIsDown.
@Test
@TestUtils.DisabledIfRabbitMqCtlNotSet
void environmentPublishersConsumersShouldCloseSuccessfullyWhenBrokerIsDown() throws Exception {
  Environment environment =
      environmentBuilder
          .recoveryBackOffDelayPolicy(BackOffDelayPolicy.fixed(Duration.ofSeconds(10)))
          .build();
  CountDownLatch consumeLatch = new CountDownLatch(2);
  Consumer consumer =
      environment.consumerBuilder().stream(stream)
          .messageHandler((context, message) -> consumeLatch.countDown())
          .build();
  // will be closed by the environment
  environment.consumerBuilder().stream(stream)
      .messageHandler((context, message) -> consumeLatch.countDown())
      .build();
  Producer producer = environment.producerBuilder().stream(stream).build();
  // will be closed by the environment
  environment.producerBuilder().stream(stream).build();
  producer.send(
      producer.messageBuilder().addData("".getBytes(StandardCharsets.UTF_8)).build(),
      confirmationStatus -> {});
  latchAssert(consumeLatch).completes();
  try {
    // stop the broker node, then make sure explicit close() calls still succeed
    Host.rabbitmqctl("stop_app");
    producer.close();
    consumer.close();
    environment.close();
  } finally {
    Host.rabbitmqctl("start_app");
  }
  // wait until the stream is available again after the broker restarts
  waitAtMost(
      30,
      () -> {
        Client client = cf.get();
        Map<String, StreamMetadata> metadata = client.metadata(stream);
        return metadata.containsKey(stream) && metadata.get(stream).isResponseOk();
      });
}
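These StreamEnvironmentTest snippets use fields that are never declared locally: environmentBuilder, stream, and cf are injected by the project's JUnit test infrastructure. A minimal sketch of what that fixture could look like, assuming a per-test stream and a locally running broker; only the field names come from the snippets, the extension name and wiring are assumptions:

// Hypothetical fixture sketch: only the field names environmentBuilder, stream, and cf
// come from the snippets on this page; the extension name and wiring are assumptions.
@ExtendWith(TestUtils.StreamTestInfrastructureExtension.class) // extension name assumed
public class StreamEnvironmentTest {

  EnvironmentBuilder environmentBuilder; // builder pre-configured for the test broker
  String stream;                         // stream created (and deleted) per test
  TestUtils.ClientFactory cf;            // low-level Client factory, used as cf.get()

  // ... test methods like the ones shown on this page ...
}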
Use of com.rabbitmq.stream.Consumer in project rabbitmq-stream-java-client by rabbitmq.
Class StreamEnvironmentTest, method producersAndConsumersShouldBeClosedWhenEnvironmentIsClosed.
@ParameterizedTest
@ValueSource(booleans = {false, true})
void producersAndConsumersShouldBeClosedWhenEnvironmentIsClosed(boolean lazyInit) {
  Environment environment = environmentBuilder.lazyInitialization(lazyInit).build();
  Collection<Producer> producers =
      IntStream.range(0, 2)
          .mapToObj(i -> environment.producerBuilder().stream(stream).build())
          .collect(Collectors.toList());
  Collection<Consumer> consumers =
      IntStream.range(0, 2)
          .mapToObj(i -> environment.consumerBuilder().stream(stream)
              .name(UUID.randomUUID().toString())
              .messageHandler((offset, message) -> {})
              .build())
          .collect(Collectors.toList());
  producers.forEach(producer -> assertThat(((StreamProducer) producer).isOpen()).isTrue());
  consumers.forEach(consumer -> assertThat(((StreamConsumer) consumer).isOpen()).isTrue());
  EnvironmentInfo environmentInfo = MonitoringTestUtils.extract(environment);
  assertThat(environmentInfo.getLocator()).isNotNull();
  // the 2 producers and the offset tracking of the 2 named consumers share one client
  // in the producer pool; the 2 consumers share one client in the consumer pool
  assertThat(environmentInfo.getProducers())
      .hasSize(1).element(0).extracting(pool -> pool.getClients()).asList().hasSize(1);
  assertThat(environmentInfo.getProducers().get(0).getClients().get(0).getProducerCount()).isEqualTo(2);
  assertThat(environmentInfo.getProducers().get(0).getClients().get(0).getTrackingConsumerCount()).isEqualTo(2);
  assertThat(environmentInfo.getConsumers())
      .hasSize(1).element(0).extracting(pool -> pool.getClients()).asList().hasSize(1);
  assertThat(environmentInfo.getConsumers().get(0).getClients().get(0).getConsumerCount()).isEqualTo(2);
  environment.close();
  // closing the environment closes every producer and consumer it created
  producers.forEach(producer -> assertThat(((StreamProducer) producer).isOpen()).isFalse());
  consumers.forEach(consumer -> assertThat(((StreamConsumer) consumer).isOpen()).isFalse());
  environmentInfo = MonitoringTestUtils.extract(environment);
  assertThat(environmentInfo.getLocator()).isNull();
  assertThat(environmentInfo.getProducers()).isEmpty();
  assertThat(environmentInfo.getConsumers()).isEmpty();
}
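The lazyInitialization flag exercised above controls when the environment opens its locator connection: eagerly in build(), or only once the first stream operation, producer, or consumer needs it. A minimal sketch of the lazy case, assuming a reachable broker with default settings and a placeholder stream name:

// With lazy initialization, build() opens no connection to the broker yet;
// the locator connection is established on first use.
Environment lazyEnvironment =
    Environment.builder()
        .lazyInitialization(true)
        .build(); // nothing connected at this point
Producer firstProducer =
    lazyEnvironment.producerBuilder().stream("some-stream").build(); // connects now
firstProducer.close();
lazyEnvironment.close();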
Use of com.rabbitmq.stream.Consumer in project rabbitmq-stream-java-client by rabbitmq.
Class StreamEnvironmentTest, method locatorShouldReconnectIfConnectionIsLost.
@Test
@TestUtils.DisabledIfRabbitMqCtlNotSet
void locatorShouldReconnectIfConnectionIsLost(TestInfo info) throws Exception {
  try (Environment environment =
      environmentBuilder
          .recoveryBackOffDelayPolicy(BackOffDelayPolicy.fixed(Duration.ofSeconds(1)))
          .build()) {
    String s = streamName(info);
    environment.streamCreator().stream(s).create();
    environment.deleteStream(s);
    // kill the locator connection; the environment should transparently re-open it
    Host.killConnection("rabbitmq-stream-locator-0");
    environment.streamCreator().stream(s).create();
    try {
      Producer producer = environment.producerBuilder().stream(s).build();
      Consumer consumer = environment.consumerBuilder().stream(s).build();
      producer.close();
      consumer.close();
    } finally {
      environment.deleteStream(s);
    }
  }
}
Use of com.rabbitmq.stream.Consumer in project rabbitmq-stream-java-client by rabbitmq.
Class SuperStreamConsumerTest, method consumeAllMessagesFromAllPartitions.
@Test
void consumeAllMessagesFromAllPartitions() throws Exception {
  declareSuperStreamTopology(connection, superStream, partitionCount);
  Client client = cf.get();
  List<String> partitions = client.partitions(superStream);
  int messageCount = 10000 * partitionCount;
  publishToPartitions(cf, partitions, messageCount);
  ConcurrentMap<String, AtomicInteger> messagesReceived = new ConcurrentHashMap<>(partitionCount);
  partitions.forEach(p -> messagesReceived.put(p, new AtomicInteger(0)));
  CountDownLatch consumeLatch = new CountDownLatch(messageCount);
  Consumer consumer =
      environment.consumerBuilder()
          .superStream(superStream)
          .offset(OffsetSpecification.first())
          .messageHandler(
              (context, message) -> {
                // the message body contains the name of the partition it was published to
                String partition = new String(message.getBodyAsBinary());
                messagesReceived.get(partition).incrementAndGet();
                consumeLatch.countDown();
              })
          .build();
  latchAssert(consumeLatch).completes();
  assertThat(messagesReceived).hasSize(partitionCount);
  partitions.forEach(
      p -> {
        assertThat(messagesReceived).containsKey(p);
        assertThat(messagesReceived.get(p).get()).isEqualTo(messageCount / partitionCount);
      });
  consumer.close();
}
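The publishToPartitions helper is not shown on this page. Judging by the handler above, which recovers the partition name from the message body, the helper spreads messageCount messages evenly across the partitions and sets each body to the name of its partition. Below is a minimal sketch of such a helper; it uses the high-level Producer API for brevity, whereas the real helper takes the low-level Client factory cf, so treat the signature and implementation as assumptions:

// Hypothetical sketch: publish messageCount messages, split evenly across the partitions,
// each message body set to the name of the partition it is published to.
static void publishToPartitions(Environment environment, List<String> partitions, int messageCount)
    throws InterruptedException {
  int messagesPerPartition = messageCount / partitions.size();
  CountDownLatch confirmLatch = new CountDownLatch(messageCount);
  List<Producer> producers = new ArrayList<>();
  for (String partition : partitions) {
    Producer producer = environment.producerBuilder().stream(partition).build();
    producers.add(producer);
    for (int i = 0; i < messagesPerPartition; i++) {
      producer.send(
          producer.messageBuilder()
              .addData(partition.getBytes(StandardCharsets.UTF_8))
              .build(),
          confirmationStatus -> confirmLatch.countDown());
    }
  }
  // wait for all publish confirms before closing the producers
  confirmLatch.await(10, TimeUnit.SECONDS);
  producers.forEach(Producer::close);
}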
Use of com.rabbitmq.stream.Consumer in project rabbitmq-stream-java-client by rabbitmq.
Class SuperStreamConsumerTest, method autoOffsetTrackingShouldStoreOnAllPartitions.
@Test
void autoOffsetTrackingShouldStoreOnAllPartitions() throws Exception {
  declareSuperStreamTopology(connection, superStream, partitionCount);
  Client client = cf.get();
  List<String> partitions = client.partitions(superStream);
  int messageCount = 10000 * partitionCount;
  publishToPartitions(cf, partitions, messageCount);
  ConcurrentMap<String, AtomicInteger> messagesReceived = new ConcurrentHashMap<>(partitionCount);
  ConcurrentMap<String, Long> lastOffsets = new ConcurrentHashMap<>(partitionCount);
  partitions.forEach(p -> messagesReceived.put(p, new AtomicInteger(0)));
  CountDownLatch consumeLatch = new CountDownLatch(messageCount);
  String consumerName = "my-app";
  AtomicInteger totalCount = new AtomicInteger();
  Consumer consumer =
      environment.consumerBuilder()
          .superStream(superStream)
          .offset(OffsetSpecification.first())
          .name(consumerName)
          .autoTrackingStrategy()
          .messageCountBeforeStorage(messageCount / partitionCount / 50)
          .builder()
          .messageHandler(
              (context, message) -> {
                String partition = new String(message.getBodyAsBinary());
                messagesReceived.get(partition).incrementAndGet();
                lastOffsets.put(partition, context.offset());
                totalCount.incrementAndGet();
                if (totalCount.get() % 50 == 0) {
                  context.storeOffset();
                }
                consumeLatch.countDown();
              })
          .build();
  latchAssert(consumeLatch).completes();
  assertThat(messagesReceived).hasSize(partitionCount);
  partitions.forEach(
      p -> {
        assertThat(messagesReceived).containsKey(p);
        assertThat(messagesReceived.get(p).get()).isEqualTo(messageCount / partitionCount);
      });
  // check that the stored offsets are big enough: near the end of each partition
  // (the per-partition message count minus a few messages)
  long almostLastOffset = messageCount / partitionCount - messageCount / (partitionCount * 10);
  partitions.forEach(
      p -> assertThat(client.queryOffset(consumerName, p).getOffset()).isGreaterThan(almostLastOffset));
  consumer.close();
}
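Because the offsets are stored per partition under the consumer name, a consumer created later with the same name and the auto-tracking strategy should resume after the stored offsets instead of re-reading each partition from the first offset. A minimal sketch of such a restart, reusing consumerName and superStream from the test above:

// Restarted consumer: with the same name, the stored per-partition offsets are used
// as the starting point, so already-processed messages are not re-delivered.
Consumer resumedConsumer =
    environment.consumerBuilder()
        .superStream(superStream)
        .name(consumerName)
        .autoTrackingStrategy()
        .builder()
        .messageHandler((context, message) -> {
          // only messages after the stored offsets arrive here
        })
        .build();
// ... later ...
resumedConsumer.close();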