Use of com.rabbitmq.stream.compression.CompressionCodecFactory in the rabbitmq-stream-java-client project by rabbitmq.
Class SubEntryBatchingTest, method publishConsumeCompressedMessages: publishes sub-entry batches with every supported compression, consumes them back, and verifies that compressed chunks transfer fewer bytes than uncompressed ones.
@ParameterizedTest
@MethodSource("compressionCodecFactories")
void publishConsumeCompressedMessages(
    CompressionCodecFactory compressionCodecFactory, TestInfo info) {
  Map<Compression, Integer> compressionToReadBytes = new HashMap<>();
  for (Compression compression : Compression.values()) {
    int batchCount = 100;
    int messagesInBatch = 30;
    int messageCount = batchCount * messagesInBatch;
    CountDownLatch publishLatch = new CountDownLatch(batchCount);
    Client publisher =
        cf.get(
            new ClientParameters()
                .compressionCodecFactory(compressionCodecFactory)
                .publishConfirmListener(
                    (publisherId, publishingId) -> publishLatch.countDown()));
    String s = TestUtils.streamName(info) + "_" + compression.name();
    try {
      Response response = publisher.create(s);
      assertThat(response.isOk()).isTrue();
      response = publisher.declarePublisher(b(0), null, s);
      assertThat(response.isOk()).isTrue();
      Set<String> publishedBodies = ConcurrentHashMap.newKeySet(messageCount);
      IntStream.range(0, batchCount)
          .forEach(
              batchIndex -> {
                // each batch becomes one compressed sub-entry in the stream
                MessageBatch messageBatch = new MessageBatch(compression);
                IntStream.range(0, messagesInBatch)
                    .forEach(
                        messageIndex -> {
                          String body = "batch " + batchIndex + " message " + messageIndex;
                          messageBatch.add(
                              publisher.messageBuilder().addData(body.getBytes(UTF8)).build());
                          publishedBodies.add(body);
                        });
                publisher.publishBatches(b(0), Collections.singletonList(messageBatch));
              });
      assertThat(latchAssert(publishLatch)).completes();
      Set<String> consumedBodies = ConcurrentHashMap.newKeySet(messageCount);
      CountDownLatch consumeLatch = new CountDownLatch(messageCount);
      CountMetricsCollector metricsCollector = new CountMetricsCollector();
      Client consumer =
          cf.get(
              new ClientParameters()
                  .compressionCodecFactory(compressionCodecFactory)
                  .chunkListener(
                      (client, subscriptionId, offset, messageCount1, dataSize) ->
                          client.credit(subscriptionId, 1))
                  .messageListener(
                      (subscriptionId, offset, chunkTimestamp, message) -> {
                        consumedBodies.add(new String(message.getBodyAsBinary(), UTF8));
                        consumeLatch.countDown();
                      })
                  .metricsCollector(metricsCollector));
      response = consumer.subscribe(b(1), s, OffsetSpecification.first(), 2);
      assertThat(response.isOk()).isTrue();
      assertThat(latchAssert(consumeLatch)).completes();
      assertThat(consumedBodies).hasSize(messageCount).hasSameSizeAs(publishedBodies);
      publishedBodies.forEach(
          publishedBody -> assertThat(consumedBodies.contains(publishedBody)).isTrue());
      // record how many bytes the consumer read for this compression
      compressionToReadBytes.put(compression, metricsCollector.readBytes.get());
    } finally {
      Response response = publisher.delete(s);
      assertThat(response.isOk()).isTrue();
    }
  }
  // every compressed variant should transfer fewer bytes than the uncompressed one
  int plainReadBytes = compressionToReadBytes.get(Compression.NONE);
  Arrays.stream(Compression.values())
      .filter(comp -> comp != Compression.NONE)
      .forEach(
          compression ->
              assertThat(compressionToReadBytes.get(compression)).isLessThan(plainReadBytes));
}
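This test drives the low-level Client API directly. In application code, the same CompressionCodecFactory plumbing is usually configured once on the Environment, and sub-entry batching with compression is enabled per producer. The following is a minimal sketch, assuming the Environment builder exposes compressionCodecFactory(...) like the ClientParameters above; the URI, stream name, and sizes are illustrative, not taken from the tests.

import com.rabbitmq.stream.Environment;
import com.rabbitmq.stream.Producer;
import com.rabbitmq.stream.compression.Compression;
import com.rabbitmq.stream.compression.DefaultCompressionCodecFactory;

public class CompressedProducerSketch {
  public static void main(String[] args) throws Exception {
    try (Environment environment =
        Environment.builder()
            .uri("rabbitmq-stream://localhost:5552") // assumed local broker
            .compressionCodecFactory(new DefaultCompressionCodecFactory())
            .build()) {
      Producer producer =
          environment.producerBuilder()
              .stream("compressed-stream") // hypothetical stream name
              .subEntrySize(30)            // messages per compressed sub-entry
              .compression(Compression.ZSTD)
              .build();
      for (int i = 0; i < 3_000; i++) {
        producer.send(
            producer.messageBuilder().addData(("message " + i).getBytes()).build(),
            confirmationStatus -> {}); // no-op confirmation callback
      }
      producer.close();
    }
  }
}

With subEntrySize(30), the producer accumulates up to 30 messages into one compressed sub-entry before sending, which is the higher-level counterpart of the MessageBatch objects built by hand in the test.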
Use of com.rabbitmq.stream.compression.CompressionCodecFactory in the rabbitmq-stream-java-client project by rabbitmq.
Class SubEntryBatchingTest, method subEntriesCompressedWithDifferentCompressionsShouldBeReadCorrectly: publishes batches with every compression through every codec factory into one stream, then verifies that each codec factory can read back all of them.
@Test
void subEntriesCompressedWithDifferentCompressionsShouldBeReadCorrectly() {
  List<CompressionCodecFactory> compressionCodecFactories =
      compressionCodecFactories().collect(Collectors.toList());
  int batchCount = compressionCodecFactories.size() * Compression.values().length;
  int messagesInBatch = 30;
  int messageCount = batchCount * messagesInBatch;
  AtomicInteger messageIndex = new AtomicInteger(0);
  CountDownLatch publishLatch = new CountDownLatch(batchCount);
  Set<String> publishedBodies = ConcurrentHashMap.newKeySet(messageCount);
  // publish one batch per (codec factory, compression) combination to the same stream
  compressionCodecFactories.forEach(
      compressionCodecFactory -> {
        Client publisher =
            cf.get(
                new ClientParameters()
                    .compressionCodecFactory(compressionCodecFactory)
                    .publishConfirmListener(
                        (publisherId, publishingId) -> publishLatch.countDown()));
        Response response = publisher.declarePublisher(b(0), null, stream);
        assertThat(response.isOk()).isTrue();
        for (Compression compression : Compression.values()) {
          MessageBatch messageBatch = new MessageBatch(compression);
          IntStream.range(0, messagesInBatch)
              .forEach(
                  i -> {
                    String body =
                        "compression "
                            + compression.name()
                            + " message "
                            + messageIndex.getAndIncrement();
                    messageBatch.add(
                        publisher.messageBuilder().addData(body.getBytes(UTF8)).build());
                    publishedBodies.add(body);
                  });
          publisher.publishBatches(b(0), Collections.singletonList(messageBatch));
        }
      });
  assertThat(latchAssert(publishLatch)).completes();
  // every codec factory must read back all sub-entries, whatever compression
  // they were published with
  compressionCodecFactories.forEach(
      compressionCodecFactory -> {
        CountDownLatch consumeLatch = new CountDownLatch(messageCount);
        Set<String> consumedBodies = ConcurrentHashMap.newKeySet(messageCount);
        Client consumer =
            cf.get(
                new ClientParameters()
                    .compressionCodecFactory(compressionCodecFactory)
                    .chunkListener(
                        (client, subscriptionId, offset, messageCount1, dataSize) ->
                            client.credit(subscriptionId, 1))
                    .messageListener(
                        (subscriptionId, offset, chunkTimestamp, message) -> {
                          consumedBodies.add(new String(message.getBodyAsBinary(), UTF8));
                          consumeLatch.countDown();
                        }));
        Response response = consumer.subscribe(b(1), stream, OffsetSpecification.first(), 2);
        assertThat(response.isOk()).isTrue();
        assertThat(latchAssert(consumeLatch)).completes();
        assertThat(consumedBodies).hasSize(messageCount).hasSameSizeAs(publishedBodies);
        publishedBodies.forEach(
            publishedBody -> assertThat(consumedBodies.contains(publishedBody)).isTrue());
      });
}
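The compressionCodecFactories() helper backing the @MethodSource is not shown in these snippets. A plausible reconstruction, assuming it returns the two codec factory implementations shipped with the client (DefaultCompressionCodecFactory, backed by dedicated compression libraries, and CommonsCompressCompressionCodecFactory, backed by Apache Commons Compress); the actual test helper may differ:

import com.rabbitmq.stream.compression.CommonsCompressCompressionCodecFactory;
import com.rabbitmq.stream.compression.CompressionCodecFactory;
import com.rabbitmq.stream.compression.DefaultCompressionCodecFactory;
import java.util.stream.Stream;

// Hypothetical sketch of the @MethodSource provider.
static Stream<CompressionCodecFactory> compressionCodecFactories() {
  return Stream.of(
      new DefaultCompressionCodecFactory(),          // e.g. zstd-jni, lz4-java, Snappy, JDK GZIP
      new CommonsCompressCompressionCodecFactory()); // Apache Commons Compress implementations
}

Running both tests against more than one factory is what makes the second test meaningful: it checks that sub-entries compressed by one implementation can be decompressed by the other.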