Use of com.rabbitmq.stream.Consumer in project rabbitmq-stream-java-client by rabbitmq.
The class SuperStreamConsumerTest, method manualOffsetTrackingShouldStoreOnAllPartitions.
@Test
void manualOffsetTrackingShouldStoreOnAllPartitions() throws Exception {
  declareSuperStreamTopology(connection, superStream, partitionCount);
  Client client = cf.get();
  List<String> partitions = client.partitions(superStream);
  int messageCount = 10000 * partitionCount;
  publishToPartitions(cf, partitions, messageCount);
  ConcurrentMap<String, AtomicInteger> messagesReceived = new ConcurrentHashMap<>(partitionCount);
  ConcurrentMap<String, Long> lastOffsets = new ConcurrentHashMap<>(partitionCount);
  partitions.forEach(p -> messagesReceived.put(p, new AtomicInteger(0)));
  CountDownLatch consumeLatch = new CountDownLatch(messageCount);
  String consumerName = "my-app";
  AtomicInteger totalCount = new AtomicInteger();
  Consumer consumer =
      environment.consumerBuilder()
          .superStream(superStream)
          .offset(OffsetSpecification.first())
          .name(consumerName)
          .manualTrackingStrategy()
          .builder()
          .messageHandler((context, message) -> {
            String partition = new String(message.getBodyAsBinary());
            messagesReceived.get(partition).incrementAndGet();
            lastOffsets.put(partition, context.offset());
            totalCount.incrementAndGet();
            if (totalCount.get() % 50 == 0) {
              context.storeOffset();
            }
            consumeLatch.countDown();
          })
          .build();
  latchAssert(consumeLatch).completes();
  assertThat(messagesReceived).hasSize(partitionCount);
  partitions.forEach(p -> {
    assertThat(messagesReceived).containsKey(p);
    assertThat(messagesReceived.get(p).get()).isEqualTo(messageCount / partitionCount);
  });
  // check that the stored offsets are big enough: close to the end of each partition
  // (the message count per partition minus a few messages)
  long almostLastOffset = messageCount / partitionCount - messageCount / (partitionCount * 10);
  partitions.forEach(p ->
      assertThat(client.queryOffset(consumerName, p).getOffset()).isGreaterThan(almostLastOffset));
  consumer.close();
}
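To show the same API outside the test harness, here is a minimal sketch of manual offset tracking on a super stream. The URI, the super stream name ("invoices"), the consumer name ("my-app"), and the store-every-1000-messages policy are illustrative assumptions, not values taken from the test above.

import com.rabbitmq.stream.Consumer;
import com.rabbitmq.stream.Environment;
import com.rabbitmq.stream.OffsetSpecification;

public class SuperStreamManualTrackingSketch {
  public static void main(String[] args) throws Exception {
    try (Environment environment =
        Environment.builder().uri("rabbitmq-stream://localhost:5552").build()) {
      Consumer consumer =
          environment.consumerBuilder()
              .superStream("invoices")          // consume from all partitions of the super stream
              .offset(OffsetSpecification.first())
              .name("my-app")                   // tracking reference used to store offsets
              .manualTrackingStrategy()
              .builder()
              .messageHandler((context, message) -> {
                // process the message, then store the offset from time to time;
                // as the test shows, the offset is stored per partition under the consumer name
                if (context.offset() % 1000 == 0) {
                  context.storeOffset();
                }
              })
              .build();
      Thread.sleep(10_000);   // let the consumer run for a while in this demo
      consumer.close();
    }
  }
}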
Use of com.rabbitmq.stream.Consumer in project rabbitmq-stream-java-client by rabbitmq.
The class StreamConsumerTest, method manualTrackingConsumerShouldRestartWhereItLeftOff.
@Test
void manualTrackingConsumerShouldRestartWhereItLeftOff() throws Exception {
  Producer producer = environment.producerBuilder().stream(stream).build();
  int messageCountFirstWave = 10_000;
  int messageCountSecondWave = 5_000;
  int messageCount = messageCountFirstWave + messageCountSecondWave;
  CountDownLatch latchConfirmFirstWave = new CountDownLatch(messageCountFirstWave);
  CountDownLatch latchConfirmSecondWave = new CountDownLatch(messageCount);
  ConfirmationHandler confirmationHandler = confirmationStatus -> {
    latchConfirmFirstWave.countDown();
    latchConfirmSecondWave.countDown();
  };
  AtomicLong messageIdSequence = new AtomicLong();
  java.util.function.Consumer<Integer> messageSending = messageCountToSend ->
      IntStream.range(0, messageCountToSend)
          .forEach(i ->
              producer.send(
                  producer.messageBuilder()
                      .addData("".getBytes())
                      .properties()
                      .messageId(messageIdSequence.getAndIncrement())
                      .messageBuilder()
                      .build(),
                  confirmationHandler));
  messageSending.accept(messageCountFirstWave);
  latchAssert(latchConfirmFirstWave).completes();
  int storeEvery = 100;
  AtomicInteger consumedMessageCount = new AtomicInteger();
  AtomicReference<Consumer> consumerReference = new AtomicReference<>();
  AtomicLong lastStoredOffset = new AtomicLong(0);
  AtomicLong lastProcessedMessage = new AtomicLong(0);
  AtomicInteger storeCount = new AtomicInteger(0);
  Consumer consumer =
      environment.consumerBuilder()
          .stream(stream)
          .offset(OffsetSpecification.first())
          .name("application-1")
          .manualTrackingStrategy()
          .checkInterval(Duration.ZERO)
          .builder()
          .messageHandler((context, message) -> {
            consumedMessageCount.incrementAndGet();
            lastProcessedMessage.set(message.getProperties().getMessageIdAsLong());
            if (consumedMessageCount.get() % storeEvery == 0) {
              context.storeOffset();
              lastStoredOffset.set(context.offset());
              storeCount.incrementAndGet();
            }
          })
          .build();
  ConsumerInfo consumerInfo = MonitoringTestUtils.extract(consumer);
  assertThat(consumerInfo.getId()).isGreaterThanOrEqualTo(0);
  assertThat(consumerInfo.getStream()).isEqualTo(stream);
  assertThat(consumerInfo.getSubscriptionClient()).contains(" -> localhost:5552");
  assertThat(consumerInfo.getTrackingClient()).contains(" -> localhost:5552");
  consumerReference.set(consumer);
  waitAtMost(10, () -> consumedMessageCount.get() == messageCountFirstWave);
  assertThat(lastStoredOffset.get()).isPositive();
  consumer.close();
  messageSending.accept(messageCountSecondWave);
  latchAssert(latchConfirmSecondWave).completes();
  AtomicLong firstOffset = new AtomicLong(0);
  consumer =
      environment.consumerBuilder()
          .stream(stream)
          .name("application-1")
          .manualTrackingStrategy()
          .checkInterval(Duration.ZERO)
          .builder()
          .messageHandler((context, message) -> {
            firstOffset.compareAndSet(0, context.offset());
            if (message.getProperties().getMessageIdAsLong() > lastProcessedMessage.get()) {
              consumedMessageCount.incrementAndGet();
            }
          })
          .build();
  waitAtMost(3, () -> consumedMessageCount.get() == messageCount,
      () -> "Expected " + consumedMessageCount.get() + " to reach " + messageCount);
  // tracking records are written after the first wave of messages, so message offsets
  // are not contiguous and this is not an exact match
  assertThat(firstOffset.get()).isGreaterThanOrEqualTo(lastStoredOffset.get());
  consumer.close();
}
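The test relies on two behaviors worth restating: a consumer created with the same name resumes after its last stored offset, and the messages received since the last store may be redelivered, so the handler deduplicates with the message id. A hedged sketch of that restart pattern follows; the stream name, the consumer name, and the application-side lastProcessedId are assumptions for illustration.

import com.rabbitmq.stream.Consumer;
import com.rabbitmq.stream.Environment;
import java.util.concurrent.atomic.AtomicLong;

class ResumeAfterRestartSketch {
  // lastProcessedId would be restored from the application's own state
  static Consumer resume(Environment environment, AtomicLong lastProcessedId) {
    return environment.consumerBuilder()
        .stream("my-stream")              // assumed stream name
        .name("application-1")            // same name => resumes after the last stored offset
        .manualTrackingStrategy()
        .builder()
        .messageHandler((context, message) -> {
          long id = message.getProperties().getMessageIdAsLong();
          if (id > lastProcessedId.get()) { // skip redeliveries since the last store
            // ... process the message here ...
            lastProcessedId.set(id);
          }
          context.storeOffset();            // or batch the stores, as the test does
        })
        .build();
  }
}

Storing on every message, as in this sketch, is the simplest form; the test batches the stores (every 100 messages), trading a few redeliveries on restart for fewer writes to the broker.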
Use of com.rabbitmq.stream.Consumer in project rabbitmq-stream-java-client by rabbitmq.
The class StreamConsumerTest, method consumerShouldBeClosedWhenStreamGetsDeleted.
@Test
void consumerShouldBeClosedWhenStreamGetsDeleted(TestInfo info) throws Exception {
  String s = streamName(info);
  environment.streamCreator().stream(s).create();
  int messageCount = 10_000;
  CountDownLatch publishLatch = new CountDownLatch(messageCount);
  Producer producer = environment.producerBuilder().stream(s).build();
  IntStream.range(0, messageCount)
      .forEach(i ->
          producer.send(
              producer.messageBuilder().addData("".getBytes()).build(),
              confirmationStatus -> publishLatch.countDown()));
  assertThat(publishLatch.await(10, TimeUnit.SECONDS)).isTrue();
  CountDownLatch consumeLatch = new CountDownLatch(messageCount);
  StreamConsumer consumer =
      (StreamConsumer) environment.consumerBuilder()
          .stream(s)
          .offset(OffsetSpecification.first())
          .messageHandler((offset, message) -> consumeLatch.countDown())
          .build();
  assertThat(consumeLatch.await(10, TimeUnit.SECONDS)).isTrue();
  assertThat(consumer.isOpen()).isTrue();
  environment.deleteStream(s);
  TestUtils.waitAtMost(10, () -> !consumer.isOpen());
  assertThat(consumer.isOpen()).isFalse();
}
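For reference, the stream lifecycle calls this test exercises look like the following outside JUnit; the URI and the stream name ("transient-stream") are placeholders. As the assertions above show, deleting the stream is enough for the client to close the consumer attached to it.

import com.rabbitmq.stream.Consumer;
import com.rabbitmq.stream.Environment;
import com.rabbitmq.stream.OffsetSpecification;

public class StreamLifecycleSketch {
  public static void main(String[] args) throws Exception {
    try (Environment environment =
        Environment.builder().uri("rabbitmq-stream://localhost:5552").build()) {
      environment.streamCreator().stream("transient-stream").create();
      Consumer consumer =
          environment.consumerBuilder()
              .stream("transient-stream")
              .offset(OffsetSpecification.first())
              .messageHandler((context, message) -> {
                // process the message
              })
              .build();
      // ... publish and consume ...
      // deleting the stream causes the client to close the consumer on its own,
      // which is what the test above observes through StreamConsumer#isOpen()
      environment.deleteStream("transient-stream");
    }
  }
}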
Use of com.rabbitmq.stream.Consumer in project rabbitmq-stream-java-client by rabbitmq.
The class StreamConsumerTest, method useSubscriptionListenerToRestartExactlyWhereDesired.
@Test
@DisabledIfRabbitMqCtlNotSet
void useSubscriptionListenerToRestartExactlyWhereDesired() throws Exception {
  AtomicInteger subscriptionListenerCallCount = new AtomicInteger(0);
  AtomicInteger receivedMessages = new AtomicInteger(0);
  AtomicLong offsetTracking = new AtomicLong(0);
  AtomicBoolean started = new AtomicBoolean(false);
  int storeEvery = 10_000;
  String reference = "ref-1";
  CountDownLatch poisonLatch = new CountDownLatch(1);
  environment.consumerBuilder()
      .name(reference)
      .stream(stream)
      .offset(OffsetSpecification.first())
      .subscriptionListener(subscriptionContext -> {
        subscriptionListenerCallCount.getAndIncrement();
        OffsetSpecification offsetSpecification =
            started.get()
                ? OffsetSpecification.offset(offsetTracking.get() + 1)
                : subscriptionContext.offsetSpecification();
        subscriptionContext.offsetSpecification(offsetSpecification);
      })
      .messageHandler((context, message) -> {
        receivedMessages.incrementAndGet();
        offsetTracking.set(context.offset());
        started.set(true);
        if ("poison".equals(new String(message.getBodyAsBinary()))) {
          poisonLatch.countDown();
        }
      })
      .autoTrackingStrategy()
      .flushInterval(Duration.ofMinutes(60)) // long flush interval
      .messageCountBeforeStorage(storeEvery)
      .builder()
      .build();
  AtomicInteger publishedMessages = new AtomicInteger(0);
  Producer producer = environment.producerBuilder().stream(stream).build();
  IntConsumer publish = messagesToPublish -> {
    publishedMessages.addAndGet(messagesToPublish);
    IntStream.range(0, messagesToPublish)
        .forEach(i ->
            producer.send(
                producer.messageBuilder().addData("".getBytes()).build(),
                confirmationStatus -> {}));
  };
  publish.accept(storeEvery * 2 - 100);
  waitAtMost(5, () -> receivedMessages.get() == publishedMessages.get());
  Host.killConnection("rabbitmq-stream-consumer-0");
  publish.accept(storeEvery * 2);
  producer.send(
      producer.messageBuilder().addData("poison".getBytes()).build(), confirmationStatus -> {});
  latchAssert(poisonLatch).completes();
  // no duplicates because the custom offset tracking overrides the stored offset in the
  // subscription listener
  assertThat(receivedMessages).hasValue(publishedMessages.get() + 1);
}
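The subscription listener is the interesting part here: it runs on every subscription, including re-subscriptions after a connection failure, and can override the offset specification with an offset the application tracks itself, instead of the server-side stored offset that a long flush interval may leave behind. A minimal sketch of that pattern follows, assuming a stream named "my-stream" and a consumer reference "ref-1".

import com.rabbitmq.stream.Consumer;
import com.rabbitmq.stream.Environment;
import com.rabbitmq.stream.OffsetSpecification;
import java.util.concurrent.atomic.AtomicLong;

class SubscriptionListenerSketch {
  static Consumer consumer(Environment environment) {
    AtomicLong lastSeenOffset = new AtomicLong(-1);   // application-side offset tracking
    return environment.consumerBuilder()
        .stream("my-stream")                          // assumed stream name
        .name("ref-1")                                // server-side tracking still enabled
        .subscriptionListener(subscriptionContext -> {
          if (lastSeenOffset.get() >= 0) {
            // on re-subscription (e.g. after a connection failure), restart exactly
            // after the last message this instance saw, ignoring the stored offset
            subscriptionContext.offsetSpecification(
                OffsetSpecification.offset(lastSeenOffset.get() + 1));
          }
        })
        .messageHandler((context, message) -> {
          lastSeenOffset.set(context.offset());
          // ... process the message ...
        })
        .autoTrackingStrategy()
        .builder()
        .build();
  }
}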
Use of com.rabbitmq.stream.Consumer in project rabbitmq-stream-java-client by rabbitmq.
The class StreamConsumerTest, method consume.
@Test
void consume() throws Exception {
  int messageCount = 100_000;
  CountDownLatch publishLatch = new CountDownLatch(messageCount);
  Client client =
      cf.get(new Client.ClientParameters()
          .publishConfirmListener((publisherId, publishingId) -> publishLatch.countDown()));
  client.declarePublisher(b(1), null, stream);
  IntStream.range(0, messageCount)
      .forEach(i ->
          client.publish(
              b(1),
              Collections.singletonList(client.messageBuilder().addData("".getBytes()).build())));
  assertThat(publishLatch.await(10, TimeUnit.SECONDS)).isTrue();
  CountDownLatch consumeLatch = new CountDownLatch(messageCount);
  AtomicLong chunkTimestamp = new AtomicLong();
  Consumer consumer =
      environment.consumerBuilder()
          .stream(stream)
          .offset(OffsetSpecification.first())
          .messageHandler((context, message) -> {
            chunkTimestamp.set(context.timestamp());
            consumeLatch.countDown();
          })
          .build();
  assertThat(consumeLatch.await(10, TimeUnit.SECONDS)).isTrue();
  assertThat(chunkTimestamp.get()).isNotZero();
  consumer.close();
}
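The consuming side of this test uses the smallest possible consumer configuration. A self-contained sketch of the same pattern follows, assuming a broker on localhost:5552 and a stream named "my-stream"; the printf output and the 10-second sleep are only for the demo.

import com.rabbitmq.stream.Consumer;
import com.rabbitmq.stream.Environment;
import com.rabbitmq.stream.OffsetSpecification;

public class BasicConsumeSketch {
  public static void main(String[] args) throws Exception {
    try (Environment environment =
        Environment.builder().uri("rabbitmq-stream://localhost:5552").build()) {
      Consumer consumer =
          environment.consumerBuilder()
              .stream("my-stream")                     // assumed stream name
              .offset(OffsetSpecification.first())     // start from the beginning of the stream
              .messageHandler((context, message) ->
                  System.out.printf("offset %d, chunk timestamp %d, body %s%n",
                      context.offset(), context.timestamp(),
                      new String(message.getBodyAsBinary())))
              .build();
      Thread.sleep(10_000);   // let the handler run for a while in this demo
      consumer.close();
    }
  }
}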