Use of com.rabbitmq.stream.impl.Client.MessageListener in the project rabbitmq-stream-java-client by rabbitmq.
From the class OffsetTrackingTest, method consumeAndStore.
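MessageListener is the callback invoked for every message delivered on a subscription; both examples below implement it as a lambda. As a minimal sketch of the shape the callback takes (the handler body here is illustrative, not part of the tests):

// Hypothetical handler: log each delivery. The four-parameter shape matches
// the lambdas used in the tests below.
MessageListener listener =
    (subscriptionId, offset, chunkTimestamp, message) ->
        System.out.printf(
            "subscription %d, offset %d: %d bytes%n",
            subscriptionId, offset, message.getBodyAsBinary().length);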
@ParameterizedTest
@MethodSource
void consumeAndStore(BiConsumer<String, Client> streamCreator, TestInfo info) throws Exception {
  String s = streamName(info);
  int batchSize = 100;
  int batchCount = 1_000;
  int messageCount = batchSize * batchCount;
  CountDownLatch publishLatch = new CountDownLatch(messageCount);
  Client publisher = cf.get(new ClientParameters()
      .publishConfirmListener((publisherId, publishingId) -> publishLatch.countDown()));
  ExecutorService executorService = Executors.newCachedThreadPool();
  try {
    streamCreator.accept(s, publisher);
    byte[] body = new byte[100];
    AtomicInteger messageIdSequence = new AtomicInteger();
    // publishing a bunch of messages
    AtomicLong lastMessageId = new AtomicLong();
    publisher.declarePublisher(b(0), null, s);
    IntStream.range(0, batchCount).forEach(batchIndex ->
        publisher.publish(b(0), IntStream.range(0, batchSize)
            .map(i -> messageIdSequence.incrementAndGet())
            .mapToObj(messageId -> {
              lastMessageId.set(messageId);
              return publisher.messageBuilder().addData(body)
                  .properties().messageId(messageId)
                  .messageBuilder().build();
            })
            .collect(Collectors.toList())));
    boolean done = publishLatch.await(2, TimeUnit.SECONDS);
    assertThat(done).isTrue();
    Stream<Tuple3<Integer, Integer, String>> testConfigurations = Stream.of(
        // { storeEvery, consumeCountFirst, reference }
        Tuple.of(100, messageCount / 10, "ref-1"),
        Tuple.of(50, messageCount / 20, "ref-2"),
        Tuple.of(10, messageCount / 100, "ref-3"));
    Function<Tuple3<Integer, Integer, String>, Callable<Void>> testConfigurationToTask =
        testConfiguration -> () -> {
          int storeEvery = testConfiguration._1;
          int consumeCountFirst = testConfiguration._2;
          String reference = testConfiguration._3;
          AtomicInteger consumeCount = new AtomicInteger();
          AtomicLong lastStoredOffset = new AtomicLong();
          AtomicLong lastConsumedMessageId = new AtomicLong();
          AtomicReference<Client> consumerReference = new AtomicReference<>();
          Set<Long> messageIdsSet = ConcurrentHashMap.newKeySet(messageCount);
          Collection<Long> messageIdsCollection = new ConcurrentLinkedQueue<>();
          CountDownLatch consumeLatch = new CountDownLatch(1);
          // first wave: consume the first consumeCountFirst messages,
          // storing the offset every storeEvery messages
          MessageListener messageListener = (subscriptionId, offset, chunkTimestamp, message) -> {
            if (consumeCount.get() <= consumeCountFirst) {
              consumeCount.incrementAndGet();
              long messageId = message.getProperties().getMessageIdAsLong();
              messageIdsSet.add(messageId);
              messageIdsCollection.add(messageId);
              lastConsumedMessageId.set(messageId);
              if (consumeCount.get() % storeEvery == 0) {
                consumerReference.get().storeOffset(reference, s, offset);
                lastStoredOffset.set(offset);
              }
            } else {
              consumeLatch.countDown();
            }
          };
          Client consumer = cf.get(new ClientParameters()
              .creditNotification((subscriptionId, responseCode) ->
                  LOGGER.debug("Received notification for subscription {}: {}",
                      subscriptionId, responseCode))
              .chunkListener((client, subscriptionId, offset, messageCount1, dataSize) ->
                  client.credit(subscriptionId, 1))
              .messageListener(messageListener));
          consumerReference.set(consumer);
          consumer.subscribe(b(0), s, OffsetSpecification.offset(0), 1);
          assertThat(consumeLatch.await(10, TimeUnit.SECONDS)).isTrue();
          Response response = consumer.unsubscribe(b(0));
          assertThat(response.isOk()).isTrue();
          assertThat(lastStoredOffset.get()).isPositive();
          // offset storage is asynchronous, so poll until the server reports
          // the expected offset for the reference
          waitAtMost(5,
              () -> lastStoredOffset.get()
                  == consumerReference.get().queryOffset(reference, s).getOffset(),
              () -> "expecting last stored offset to be " + lastStoredOffset
                  + ", but got " + consumerReference.get().queryOffset(reference, s));
          consumer.close();
          // second wave: a new consumer resumes from the stored offset
          // and consumes the remaining messages
          CountDownLatch consumeLatchSecondWave = new CountDownLatch(1);
          AtomicLong firstOffset = new AtomicLong(-1);
          messageListener = (subscriptionId, offset, chunkTimestamp, message) -> {
            firstOffset.compareAndSet(-1, offset);
            long messageId = message.getProperties().getMessageIdAsLong();
            if (lastConsumedMessageId.get() < messageId) {
              messageIdsSet.add(messageId);
              messageIdsCollection.add(messageId);
              consumeCount.incrementAndGet();
              if (message.getProperties().getMessageIdAsLong() == lastMessageId.get()) {
                consumeLatchSecondWave.countDown();
              }
            }
          };
          consumer = cf.get(new ClientParameters()
              .chunkListener((client, subscriptionId, offset, messageCount1, dataSize) ->
                  client.credit(subscriptionId, 1))
              .messageListener(messageListener));
          long offsetToStartFrom = consumer.queryOffset(reference, s).getOffset() + 1;
          consumer.subscribe(b(0), s, OffsetSpecification.offset(offsetToStartFrom), 1);
          assertThat(consumeLatchSecondWave.await(10, TimeUnit.SECONDS)).isTrue();
          // there can be a non-message entry that is skipped and makes
          // the first received message offset higher
          assertThat(firstOffset.get()).isGreaterThanOrEqualTo(offsetToStartFrom);
          response = consumer.unsubscribe(b(0));
          assertThat(response.isOk()).isTrue();
          assertThat(consumeCount.get()).as("check received all messages").isEqualTo(messageCount);
          assertThat(messageIdsCollection)
              .as("check there are no duplicates")
              .hasSameSizeAs(messageIdsSet);
          return null;
        };
    List<Future<Void>> futures = testConfigurations
        .map(testConfigurationToTask)
        .map(task -> executorService.submit(task))
        .collect(Collectors.toList());
    forEach(futures, (i, task) ->
        assertThatNoException()
            .as("task " + i + " failed")
            .isThrownBy(() -> task.get(10, TimeUnit.SECONDS)));
  } finally {
    publisher.delete(s);
    executorService.shutdownNow();
  }
}
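The test above exercises the complete tracking round trip: store the offset under a reference while consuming, query it back, then resume from the next offset. Condensed into a sketch, assuming a connected Client named client, an existing stream streamName, a configured messageListener, and illustrative values:

String reference = "my-app";       // tracking reference chosen by the application
long lastProcessedOffset = 4_242L; // offset of the last message the application handled

// persist the consumer position on the server under the reference
client.storeOffset(reference, streamName, lastProcessedOffset);

// later, e.g. after a restart: read the stored position back...
long storedOffset = client.queryOffset(reference, streamName).getOffset();

// ...and resume consumption just after it
client.subscribe((byte) 0, streamName, OffsetSpecification.offset(storedOffset + 1), 1);

Note that storeOffset is asynchronous, which is why the test polls with waitAtMost before trusting queryOffset; a consumer that queries immediately after storing should expect the reported offset to lag briefly.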
Use of com.rabbitmq.stream.impl.Client.MessageListener in the project rabbitmq-stream-java-client by rabbitmq.
From the class OffsetTrackingTest, method storeOffsetAndThenAttachByTimestampShouldWork.
@Test
void storeOffsetAndThenAttachByTimestampShouldWork() throws Exception {
  // this test performs a timestamp-based index search within a segment that contains
  // a lot of non-user entries (chunks that contain tracking info, not messages)
  int messageCount = 50_000;
  AtomicReference<CountDownLatch> confirmLatch =
      new AtomicReference<>(new CountDownLatch(messageCount));
  AtomicInteger consumed = new AtomicInteger();
  Client client = cf.get(new ClientParameters()
      .publishConfirmListener((publisherId, publishingId) -> confirmLatch.get().countDown())
      .chunkListener((client1, subscriptionId, offset, messageCount1, dataSize) ->
          client1.credit(subscriptionId, 1))
      .messageListener((subscriptionId, offset, chunkTimestamp, message) ->
          consumed.incrementAndGet()));
  assertThat(client.declarePublisher((byte) 0, null, stream).isOk()).isTrue();
  Runnable publishAction = () -> {
    IntStream.range(0, messageCount).forEach(i ->
        client.publish((byte) 0, Collections.singletonList(
            client.codec().messageBuilder()
                .addData("hello world".getBytes(StandardCharsets.UTF_8))
                .build())));
  };
  // first wave of messages
  publishAction.run();
  assertThat(latchAssert(confirmLatch)).completes();
  // store an offset per message to fill the segment with non-user (tracking) entries
  IntStream.range(0, messageCount).forEach(i -> client.storeOffset("some reference", stream, i));
  waitAtMost(() -> client.queryOffset("some reference", stream).getOffset() == messageCount - 1);
  // second wave of messages, published after a timestamp taken between the two waves
  confirmLatch.set(new CountDownLatch(messageCount));
  long betweenTwoWaves = System.currentTimeMillis();
  publishAction.run();
  assertThat(latchAssert(confirmLatch)).completes();
  assertThat(consumed.get()).isZero();
  // attaching by that timestamp should deliver exactly the second wave
  client.subscribe((byte) 0, stream, OffsetSpecification.timestamp(betweenTwoWaves), 10);
  waitAtMost(() -> consumed.get() == messageCount);
}
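Attaching by timestamp is the alternative to offset-based tracking: the subscription starts at the chunk matching the requested point in time, with no stored reference needed. A minimal sketch, again assuming a connected Client client with a messageListener configured and a stream streamName:

// start reading from roughly five minutes ago; timestamps are milliseconds
// since the epoch, as with System.currentTimeMillis() in the test above
long fiveMinutesAgo = System.currentTimeMillis() - 5 * 60 * 1_000;
client.subscribe((byte) 0, streamName, OffsetSpecification.timestamp(fiveMinutesAgo), 10);

Because the timestamp resolves at chunk granularity, a few messages published just before the requested time may also be delivered, so consumers that care should be prepared to filter them out.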