Example use of io.vertx.mutiny.core.Vertx in the project smallrye-reactive-messaging (by smallrye): the class AbstractMediator, method invokeBlocking.
@SuppressWarnings("unchecked")
/**
 * Invokes the mediator method on a worker thread and exposes the result as a {@link Uni}.
 *
 * <p>The Vert.x context is taken from the message's {@code LocalContextMetadata} when present,
 * otherwise from the current Vert.x context, so downstream processing stays on the right context.
 *
 * @param message the incoming message (may be {@code null}; then the current context is used)
 * @param args    the arguments to pass to the method invoker
 * @return a Uni emitting the invocation result (unwrapping a returned {@link CompletionStage})
 */
protected <T> Uni<T> invokeBlocking(Message<?> message, Object... args) {
    try {
        Optional<LocalContextMetadata> metadata = message != null
                ? message.getMetadata().get(LocalContextMetadata.class)
                : Optional.empty();
        Context currentContext = metadata.map(m -> Context.newInstance(m.context()))
                .orElseGet(Vertx::currentContext);
        return workerPoolRegistry.executeWork(currentContext, Uni.createFrom().emitter(emitter -> {
            try {
                Object result = this.invoker.invoke(args);
                if (result instanceof CompletionStage) {
                    // Fix: propagate exceptional completion as well. The previous
                    // thenAccept-only wiring dropped failures, leaving the emitter
                    // (and thus the Uni) hanging forever on a failed stage.
                    ((CompletionStage<?>) result).whenComplete((x, failure) -> {
                        if (failure != null) {
                            log.methodException(configuration().methodAsString(), failure);
                            emitter.fail(failure);
                        } else {
                            emitter.complete((T) x);
                        }
                    });
                } else {
                    emitter.complete((T) result);
                }
            } catch (RuntimeException e) {
                // Synchronous invocation failure: log and fail the emitter.
                log.methodException(configuration().methodAsString(), e);
                emitter.fail(e);
            }
        }), configuration.getWorkerPoolName(), configuration.isBlockingExecutionOrdered());
    } catch (RuntimeException e) {
        // Failure before the work was scheduled (e.g. context resolution) — rethrow after logging.
        log.methodException(configuration().methodAsString(), e);
        throw e;
    }
}
Example use of io.vertx.mutiny.core.Vertx in the project smallrye-reactive-messaging (by smallrye): the class RabbitMQClientHelper, method createClient.
/**
 * Creates a RabbitMQ client for the given connector configuration.
 *
 * <p>When a client-options bean name is configured, the client is built from that CDI bean;
 * otherwise it is built directly from the connector configuration. The created client is
 * registered with the connector for lifecycle management.
 *
 * @param connector                   the RabbitMQ connector owning the client
 * @param config                      the common connector configuration
 * @param optionsInstances            CDI instances of {@link RabbitMQOptions}
 * @param credentialsProviderInstances CDI instances of {@link CredentialsProvider}
 * @return the created and registered client
 */
static RabbitMQClient createClient(RabbitMQConnector connector, RabbitMQConnectorCommonConfiguration config, Instance<RabbitMQOptions> optionsInstances, Instance<CredentialsProvider> credentialsProviderInstances) {
    Vertx vertx = connector.getVertx();
    RabbitMQClient client = config.getClientOptionsName()
            .map(optionsName -> createClientFromClientOptionsBean(vertx, optionsInstances, optionsName))
            .orElseGet(() -> getClient(vertx, config, credentialsProviderInstances));
    connector.addClient(client);
    return client;
}
Example use of io.vertx.mutiny.core.Vertx in the project smallrye-reactive-messaging (by smallrye): the class MqttServerSourceTest, method testMultiple.
@Test
// Verifies that several MQTT messages published to the embedded server are received in
// publication order and that each one can be acknowledged.
// NOTE(review): vertx and testContext are injected by the JUnit 5 Vert.x extension — confirm.
void testMultiple(io.vertx.core.Vertx vertx, VertxTestContext testContext) {
final Map<String, String> configMap = new HashMap<>();
// Port 0 asks the server for an ephemeral port; the actual port is read back below.
configMap.put("port", "0");
// Wrap the injected bare vert.x instance in the Mutiny Vertx facade expected by the source.
final MqttServerSource source = new MqttServerSource(new Vertx(vertx), new MqttServerConnectorIncomingConfiguration(TestUtils.config(configMap)));
final PublisherBuilder<MqttMessage> mqttMessagePublisherBuilder = source.source();
// Three messages covering all three MQTT QoS levels, including a retained one.
final List<TestMqttMessage> testMessages = new CopyOnWriteArrayList<>();
testMessages.add(new TestMqttMessage("hello/topic", 1, "Hello world!", EXACTLY_ONCE.value(), false));
testMessages.add(new TestMqttMessage("foo/bar", 2, "dkufhdspkjfosdjfs;", AT_LEAST_ONCE.value(), true));
testMessages.add(new TestMqttMessage("foo/bar", -1, "Hello world!", AT_MOST_ONCE.value(), false));
// One checkpoint flag per message for reception, and one per message for acknowledgement;
// the test only completes when every flag has been raised.
final Checkpoint messageReceived = testContext.checkpoint(testMessages.size());
final Checkpoint messageAcknowledged = testContext.checkpoint(testMessages.size());
final AtomicInteger index = new AtomicInteger(0);
mqttMessagePublisherBuilder.forEach(mqttMessage -> {
// Messages are expected in the same order they were sent.
testContext.verify(() -> TestUtils.assertMqttEquals(testMessages.get(index.getAndIncrement()), mqttMessage));
messageReceived.flag();
// Acknowledge asynchronously and flag completion once the ack future resolves.
mqttMessage.ack().thenApply(aVoid -> {
messageAcknowledged.flag();
return aVoid;
});
}).run();
// Publish the messages once the server has bound to its (ephemeral) port.
TestUtils.sendMqttMessages(testMessages, CompletableFuture.supplyAsync(() -> {
await().until(source::port, port -> port != 0);
return source.port();
}), testContext);
}
Example use of io.vertx.mutiny.core.Vertx in the project smallrye-reactive-messaging (by smallrye): the class BatchCommitStrategiesTest, method testThrottledStrategyWithTooManyUnackedMessages.
@Test
// Verifies that the throttled commit strategy reports the channel as not alive when
// records stay un-acknowledged longer than throttled.unprocessed-record-max-age.ms (1s here),
// and that the health report names the affected topic-partitions.
void testThrottledStrategyWithTooManyUnackedMessages() {
// Throttled commit strategy with a 1-second max age for unprocessed records and health enabled.
MapBasedConfig config = commonConfiguration().with("client.id", UUID.randomUUID().toString()).with("commit-strategy", "throttled").with("auto.offset.reset", "earliest").with("health-enabled", true).with("throttled.unprocessed-record-max-age.ms", 1000).with("auto.commit.interval.ms", 100);
String group = UUID.randomUUID().toString();
source = new KafkaSource<>(vertx, group, new KafkaConnectorIncomingConfiguration(config), getConsumerRebalanceListeners(), CountKafkaCdiEvents.noCdiEvents, getDeserializationFailureHandlers(), -1);
// Replace the real Kafka consumer with the MockConsumer driven below.
injectMockConsumer(source, consumer);
List<Message<?>> list = new CopyOnWriteArrayList<>();
source.getBatchStream().subscribe().with(list::add);
TopicPartition p0 = new TopicPartition(TOPIC, 0);
TopicPartition p1 = new TopicPartition(TOPIC, 1);
// p1 starts at offset 5, so its first five records are never delivered.
Map<TopicPartition, Long> offsets = new HashMap<>();
offsets.put(p0, 0L);
offsets.put(p1, 5L);
consumer.updateBeginningOffsets(offsets);
consumer.schedulePollTask(() -> {
consumer.rebalance(offsets.keySet());
source.getCommitHandler().partitionsAssigned(offsets.keySet());
});
// Feed 500 records into each partition, one pair per poll.
for (int i = 0; i < 500; i++) {
int j = i;
consumer.schedulePollTask(() -> {
consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, j, "k", "v0-" + j));
consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, j, "r", "v1-" + j));
});
}
// Expected number of messages: 500 messages in each partition minus the [0..5) messages from p1
int expected = 500 * 2 - 5;
await().until(() -> list.stream().map(IncomingKafkaRecordBatch.class::cast).mapToLong(r -> r.getRecords().size()).sum() == expected);
// Ack first 10 records
int count = 0;
for (Message<?> message : list) {
assertThat(message.getMetadata(IncomingKafkaRecordBatchMetadata.class)).isPresent();
if (count < 10) {
message.ack().toCompletableFuture().join();
count++;
}
}
// wait until health check is not ok
// The remaining un-acked records exceed the 1s max age, so liveness must eventually fail.
await().until(() -> {
HealthReport.HealthReportBuilder builder = HealthReport.builder();
source.isAlive(builder);
HealthReport r = builder.build();
return !r.isOk();
});
// build the health check again and get the report message
HealthReport.HealthReportBuilder builder = HealthReport.builder();
source.isAlive(builder);
String message = builder.build().getChannels().get(0).getMessage();
// The failure message should point at one of the stalled topic-partitions.
assertThat(message).containsAnyOf("my-topic-0", "my-topic-1");
}
Example use of io.vertx.mutiny.core.Vertx in the project smallrye-reactive-messaging (by smallrye): the class CommitStrategiesTest, method testThrottledStrategyWithManyRecords.
@RepeatedTest(10)
// Verifies that the throttled commit strategy eventually commits the highest acked offset
// on every partition, across two successive batches of records. Repeated to catch
// timing-dependent flakiness in the asynchronous commit path.
void testThrottledStrategyWithManyRecords() {
MapBasedConfig config = commonConfiguration().with("client.id", UUID.randomUUID().toString()).with("commit-strategy", "throttled").with("auto.offset.reset", "earliest").with("auto.commit.interval.ms", 100);
String group = UUID.randomUUID().toString();
source = new KafkaSource<>(vertx, group, new KafkaConnectorIncomingConfiguration(config), getConsumerRebalanceListeners(), CountKafkaCdiEvents.noCdiEvents, getDeserializationFailureHandlers(), -1);
// Replace the real Kafka consumer with the MockConsumer driven below.
injectMockConsumer(source, consumer);
List<Message<?>> list = new ArrayList<>();
source.getStream().subscribe().with(list::add);
TopicPartition p0 = new TopicPartition(TOPIC, 0);
TopicPartition p1 = new TopicPartition(TOPIC, 1);
// p1 starts at offset 5, so its first five records are never delivered.
Map<TopicPartition, Long> offsets = new HashMap<>();
offsets.put(p0, 0L);
offsets.put(p1, 5L);
consumer.updateBeginningOffsets(offsets);
// Assign the partitions and feed 500 records into each within a single poll task.
consumer.schedulePollTask(() -> {
consumer.rebalance(offsets.keySet());
source.getCommitHandler().partitionsAssigned(offsets.keySet());
for (int i = 0; i < 500; i++) {
consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, i, "k", "v0-" + i));
consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, i, "r", "v1-" + i));
}
});
// Expected number of messages: 500 messages in each partition minus the [0..5) messages from p1
int expected = 500 * 2 - 5;
await().until(() -> list.size() == expected);
assertThat(list).hasSize(expected);
// Ack everything; the throttled strategy should then commit offset 500 on both partitions.
list.forEach(m -> m.ack().toCompletableFuture().join());
await().untilAsserted(() -> {
Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(offsets.keySet());
assertThat(committed.get(p0)).isNotNull();
assertThat(committed.get(p0).offset()).isEqualTo(500);
assertThat(committed.get(p1)).isNotNull();
assertThat(committed.get(p1).offset()).isEqualTo(500);
});
// Second batch: 1000 more records per partition, starting at offset 500.
consumer.schedulePollTask(() -> {
for (int i = 0; i < 1000; i++) {
consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 500 + i, "k", "v0-" + (500 + i)));
consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 500 + i, "k", "v1-" + (500 + i)));
}
});
int expected2 = expected + 1000 * 2;
await().until(() -> list.size() == expected2);
list.forEach(m -> m.ack().toCompletableFuture().join());
// After acking the second batch, the committed offset must advance to 1500 on both partitions.
await().atMost(Duration.ofMinutes(1)).untilAsserted(() -> {
Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(offsets.keySet());
assertThat(committed.get(p0)).isNotNull();
assertThat(committed.get(p0).offset()).isEqualTo(1500);
assertThat(committed.get(p1)).isNotNull();
assertThat(committed.get(p1).offset()).isEqualTo(1500);
});
// Finally verify no payload was lost: p0 delivered v0-0..v0-1499, p1 delivered v1-5..v1-1499.
List<String> payloads = list.stream().map(m -> (String) m.getPayload()).collect(Collectors.toList());
for (int i = 0; i < 1500; i++) {
assertThat(payloads).contains("v0-" + i);
}
for (int i = 5; i < 1500; i++) {
assertThat(payloads).contains("v1-" + i);
}
}
Aggregations