Use of com.rabbitmq.stream.Producer in project rabbitmq-stream-java-client by rabbitmq: class StreamProducerTest, method sendToNonExistingStreamShouldReturnUnconfirmedStatus.
@Test
void sendToNonExistingStreamShouldReturnUnconfirmedStatus() throws Exception {
  Client client = cf.get();
  String s = UUID.randomUUID().toString();
  Client.Response response = client.create(s);
  assertThat(response.isOk()).isTrue();
  Producer producer = environment.producerBuilder().stream(s).build();
  response = client.delete(s);
  assertThat(response.isOk()).isTrue();
  // the producer must detect the stream deletion and close itself
  waitAtMost(10, () -> !((StreamProducer) producer).isOpen());
  CountDownLatch confirmationLatch = new CountDownLatch(1);
  AtomicReference<ConfirmationStatus> confirmationStatusReference = new AtomicReference<>();
  producer.send(
      producer.messageBuilder().addData("".getBytes()).build(),
      confirmationStatus -> {
        confirmationStatusReference.set(confirmationStatus);
        confirmationLatch.countDown();
      });
  assertThat(confirmationLatch.await(10, TimeUnit.SECONDS)).isTrue();
  assertThat(confirmationStatusReference.get()).isNotNull();
  assertThat(confirmationStatusReference.get().isConfirmed()).isFalse();
  assertThat(confirmationStatusReference.get().getCode()).isEqualTo(Constants.CODE_PRODUCER_CLOSED);
}
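Because send is asynchronous, the confirmation callback is the only place where this kind of failure surfaces. A minimal sketch of the same pattern outside a test, assuming a reachable broker and an existing stream named "my-stream" (both hypothetical here):

// minimal sketch, not taken from the test; stream name is hypothetical
Environment environment = Environment.builder().build(); // connects to localhost:5552 by default
Producer producer = environment.producerBuilder().stream("my-stream").build();
producer.send(
    producer.messageBuilder().addData("hello".getBytes()).build(),
    confirmationStatus -> {
      if (!confirmationStatus.isConfirmed()) {
        // the status code explains the failure, e.g. the producer was closed in the meantime
        System.out.println("Not confirmed, code: " + confirmationStatus.getCode());
      }
    });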
Use of com.rabbitmq.stream.Producer in project rabbitmq-stream-java-client by rabbitmq: class StreamProducerTest, method messagesShouldBeDeDuplicatedWhenUsingNameAndPublishingId.
@ParameterizedTest
@ValueSource(ints = { 1, 7 })
void messagesShouldBeDeDuplicatedWhenUsingNameAndPublishingId(int subEntrySize) throws Exception {
  int lineCount = 50_000;
  int firstWaveLineCount = lineCount / 5;
  int backwardCount = firstWaveLineCount / 10;
  SortedSet<Integer> document = new TreeSet<>();
  IntStream.range(0, lineCount).forEach(i -> document.add(i));
  Producer producer =
      environment.producerBuilder()
          .name("producer-1")
          .stream(stream)
          .subEntrySize(subEntrySize)
          .build();
  AtomicReference<CountDownLatch> latch =
      new AtomicReference<>(new CountDownLatch(firstWaveLineCount));
  ConfirmationHandler confirmationHandler = confirmationStatus -> latch.get().countDown();
  Consumer<Integer> publishMessage =
      i -> producer.send(
          producer.messageBuilder()
              .publishingId(i)
              .addData(String.valueOf(i).getBytes())
              .build(),
          confirmationHandler);
  document.headSet(firstWaveLineCount).forEach(publishMessage);
  assertThat(latch.get().await(10, TimeUnit.SECONDS)).isTrue();
  latch.set(new CountDownLatch(lineCount - firstWaveLineCount + backwardCount));
  document.tailSet(firstWaveLineCount - backwardCount).forEach(publishMessage);
  assertThat(latch.get().await(5, TimeUnit.SECONDS)).isTrue();
  CountDownLatch consumeLatch = new CountDownLatch(lineCount);
  AtomicInteger consumed = new AtomicInteger();
  environment.consumerBuilder()
      .stream(stream)
      .offset(OffsetSpecification.first())
      .messageHandler((offset, message) -> {
        consumed.incrementAndGet();
        consumeLatch.countDown();
      })
      .build();
  assertThat(consumeLatch.await(10, TimeUnit.SECONDS)).isTrue();
  Thread.sleep(1000);
  // if we are using sub-entries, we cannot avoid duplicates.
  // here, a sub-entry in the second wave, right at the end of the re-submitted
  // values will contain those duplicates, because its publishing ID will be
  // the one of its last message, so the server will accept the whole sub-entry,
  // including the duplicates.
  assertThat(consumed.get()).isEqualTo(lineCount + (backwardCount % subEntrySize));
}
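With subEntrySize = 1 every message carries its own publishing ID, so all 1,000 re-submitted messages are filtered and exactly lineCount messages are consumed; with subEntrySize = 7 the sub-entry batch that straddles the end of the re-submitted range is accepted as a whole, letting backwardCount % subEntrySize = 1,000 % 7 = 6 duplicates through, which is what the final assertion tolerates. A minimal sketch of the deduplication pattern the test exercises, assuming an existing stream named "my-stream" and an application-tracked sequence (both hypothetical):

// minimal sketch, not part of the test; producer name and stream name are hypothetical
Producer dedupProducer = environment.producerBuilder()
    .name("invoice-importer")      // a stable, unique producer name enables deduplication
    .stream("my-stream")
    .build();
long publishingId = 42;            // e.g. a line number or database sequence the application tracks
dedupProducer.send(
    dedupProducer.messageBuilder()
        .publishingId(publishingId) // re-sending an already-confirmed ID is filtered by the broker
        .addData("payload".getBytes())
        .build(),
    confirmationStatus -> { /* confirmation arrives whether the message was new or deduplicated */ });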
Use of com.rabbitmq.stream.Producer in project rabbitmq-stream-java-client by rabbitmq: class SuperStreamTest, method allMessagesSentWithHashRoutingShouldBeThenConsumed.
@Test
void allMessagesSentWithHashRoutingShouldBeThenConsumed() throws Exception {
  int messageCount = 10_000 * partitions;
  declareSuperStreamTopology(connection, superStream, partitions);
  Producer producer =
      environment.producerBuilder()
          .stream(superStream)
          .routing(message -> message.getProperties().getMessageIdAsString())
          .producerBuilder()
          .build();
  CountDownLatch publishLatch = new CountDownLatch(messageCount);
  IntStream.range(0, messageCount)
      .forEach(i -> producer.send(
          producer.messageBuilder()
              .properties().messageId(UUID.randomUUID().toString()).messageBuilder()
              .build(),
          confirmationStatus -> publishLatch.countDown()));
  assertThat(latchAssert(publishLatch)).completes(5);
  AtomicInteger totalCount = new AtomicInteger(0);
  CountDownLatch consumeLatch = new CountDownLatch(messageCount);
  environment.consumerBuilder()
      .superStream(superStream)
      .offset(OffsetSpecification.first())
      .messageHandler((context, message) -> {
        totalCount.incrementAndGet();
        consumeLatch.countDown();
      })
      .build();
  latchAssert(consumeLatch).completes();
  assertThat(totalCount.get()).isEqualTo(messageCount);
}
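The routing callback extracts a value from each outbound message, and by default the client hashes that value to pick the target partition, which is what spreads the messages across the super stream partitions here. A minimal sketch mirroring the builder chain above, assuming a super stream named "invoices" whose partitions already exist (hypothetical name):

// minimal sketch, assuming the "invoices" super stream was declared beforehand
Producer hashingProducer = environment.producerBuilder()
    .stream("invoices")
    .routing(msg -> msg.getProperties().getMessageIdAsString()) // value hashed for partition selection
    .producerBuilder()
    .build();
hashingProducer.send(
    hashingProducer.messageBuilder()
        .properties().messageId(UUID.randomUUID().toString()).messageBuilder()
        .build(),
    confirmationStatus -> { /* one confirmation per message, whichever partition it lands in */ });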
Use of com.rabbitmq.stream.Producer in project rabbitmq-stream-java-client by rabbitmq: class TlsTest, method environmentPublisherConsumer.
@Test
void environmentPublisherConsumer() throws Exception {
  try (Environment env = Environment.builder()
          .uri("rabbitmq-stream+tls://localhost")
          .addressResolver(addr -> new Address("localhost", Client.DEFAULT_TLS_PORT))
          .tls()
          .sslContext(SslContextBuilder.forClient().trustManager(caCertificate()).build())
          .environmentBuilder()
          .build()) {
    int messageCount = 10_000;
    CountDownLatch latchConfirm = new CountDownLatch(messageCount);
    Producer producer = env.producerBuilder().stream(this.stream).build();
    ConfirmationHandler confirmationHandler = confirmationStatus -> latchConfirm.countDown();
    IntStream.range(0, messageCount)
        .forEach(i -> producer.send(
            producer.messageBuilder().addData("".getBytes(StandardCharsets.UTF_8)).build(),
            confirmationHandler));
    assertThat(latchAssert(latchConfirm)).completes();
    CountDownLatch latchConsume = new CountDownLatch(messageCount);
    env.consumerBuilder()
        .stream(this.stream)
        .offset(OffsetSpecification.first())
        .messageHandler((context, message) -> latchConsume.countDown())
        .build();
    assertThat(latchAssert(latchConsume)).completes();
  }
}
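The test builds its own SslContext from the test CA certificate. For a quick local check against a TLS listener, the environment configuration can (under the assumption that certificate verification is deliberately skipped) be reduced to the sketch below; 5551 is the default TLS port of the stream protocol. This is development-only configuration, not a production setup.

// minimal sketch for local development only; trustEverything() disables certificate
// verification and must not be used in production; "my-stream" is hypothetical
Environment env = Environment.builder()
    .uri("rabbitmq-stream+tls://localhost:5551")
    .tls().trustEverything().environmentBuilder()
    .build();
Producer producer = env.producerBuilder().stream("my-stream").build();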
Use of com.rabbitmq.stream.Producer in project rabbitmq-stream-java-client by rabbitmq: class SuperStreamProducerTest, method allMessagesSentToSuperStreamWithRoutingKeyRoutingShouldBeThenConsumed.
@Test
void allMessagesSentToSuperStreamWithRoutingKeyRoutingShouldBeThenConsumed() throws Exception {
  int messageCount = 10_000;
  routingKeys = new String[] { "amer", "emea", "apac" };
  declareSuperStreamTopology(connection, superStream, routingKeys);
  Producer producer =
      environment.producerBuilder()
          .stream(superStream)
          .routing(message -> message.getApplicationProperties().get("region").toString())
          .key()
          .producerBuilder()
          .build();
  CountDownLatch publishLatch = new CountDownLatch(messageCount);
  IntStream.range(0, messageCount)
      .forEach(i -> producer.send(
          producer.messageBuilder()
              .applicationProperties().entry("region", routingKeys[i % routingKeys.length])
              .messageBuilder()
              .build(),
          confirmationStatus -> publishLatch.countDown()));
  assertThat(latchAssert(publishLatch)).completes(5);
  Map<String, AtomicLong> counts = new ConcurrentHashMap<>();
  AtomicLong totalCount = new AtomicLong(0);
  for (String routingKey : routingKeys) {
    String stream = superStream + "-" + routingKey;
    AtomicLong streamCount = new AtomicLong(0);
    counts.put(stream, streamCount);
    environment.consumerBuilder()
        .stream(stream)
        .offset(OffsetSpecification.first())
        .messageHandler((context, message) -> {
          streamCount.incrementAndGet();
          totalCount.incrementAndGet();
        })
        .build();
  }
  waitAtMost(10, () -> totalCount.get() == messageCount);
  assertThat(counts.values().stream().map(AtomicLong::get))
      .hasSameSizeAs(routingKeys)
      .doesNotContain(0L);
  assertThat(counts.values().stream().map(AtomicLong::get).reduce(0L, Long::sum))
      .isEqualTo(messageCount);
}
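With .key(), the value returned by the routing callback is used as a routing key against the super stream's bindings instead of being hashed, so every message whose "region" is "emea" goes to the partition named superStream + "-emea", which is what the per-partition counters verify. A minimal sketch of the same producer setup, assuming a super stream named "orders" declared with the routing keys used above (hypothetical names):

// minimal sketch, assuming an "orders" super stream declared with keys "amer", "emea", "apac"
Producer keyRoutingProducer = environment.producerBuilder()
    .stream("orders")
    .routing(msg -> msg.getApplicationProperties().get("region").toString())
    .key() // use the extracted value as a routing key instead of hashing it
    .producerBuilder()
    .build();
keyRoutingProducer.send(
    keyRoutingProducer.messageBuilder()
        .applicationProperties().entry("region", "emea").messageBuilder()
        .build(),
    confirmationStatus -> { /* routed to the "orders-emea" partition */ });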