Search in sources:

Example 11 with Vertx

use of io.vertx.mutiny.core.Vertx in project quarkus-config-extensions by quarkiverse.

In the class VertxConsulConfigGateway, the method createVertxInstance:

private Vertx createVertxInstance() {
    // The async DNS resolver can misbehave while resolving the config server,
    // and vert.x wires up its resolver during (synchronous) initialization.
    // So we flip DISABLE_DNS_RESOLVER_PROP_NAME to "true" just around the
    // Vert.x creation call, then restore whatever value was set before.
    String previous = System.getProperty(DISABLE_DNS_RESOLVER_PROP_NAME);
    try {
        System.setProperty(DISABLE_DNS_RESOLVER_PROP_NAME, "true");
        return Vertx.vertx(new VertxOptions());
    } finally {
        // Put the property back exactly as we found it.
        if (previous != null) {
            System.setProperty(DISABLE_DNS_RESOLVER_PROP_NAME, previous);
        } else {
            System.clearProperty(DISABLE_DNS_RESOLVER_PROP_NAME);
        }
    }
}
Also used : Vertx(io.vertx.mutiny.core.Vertx) VertxOptions(io.vertx.core.VertxOptions)

Example 12 with Vertx

use of io.vertx.mutiny.core.Vertx in project smallrye-reactive-messaging by smallrye.

In the class DeprecatedCommitStrategiesTest, the method testThrottledStrategyWithManyRecords:

@Test
void testThrottledStrategyWithManyRecords() {
    // Exercise the throttled commit strategy over two partitions, where
    // partition 1 begins at offset 5 (so its first 5 records are skipped).
    MapBasedConfig config = commonConfiguration()
            .with("client.id", UUID.randomUUID().toString())
            .with("commit-strategy", "throttled")
            .with("auto.offset.reset", "earliest")
            .with("auto.commit.interval.ms", 100);
    String group = UUID.randomUUID().toString();
    source = new KafkaSource<>(vertx, group, new KafkaConnectorIncomingConfiguration(config),
            getConsumerRebalanceListeners(), CountKafkaCdiEvents.noCdiEvents,
            getDeserializationFailureHandlers(), -1);
    injectMockConsumer(source, consumer);

    List<Message<?>> received = new ArrayList<>();
    source.getStream().subscribe().with(received::add);

    TopicPartition partition0 = new TopicPartition(TOPIC, 0);
    TopicPartition partition1 = new TopicPartition(TOPIC, 1);
    Map<TopicPartition, Long> startOffsets = new HashMap<>();
    startOffsets.put(partition0, 0L);
    startOffsets.put(partition1, 5L);
    consumer.updateBeginningOffsets(startOffsets);

    // First batch: 500 records on each partition, delivered in one poll task.
    consumer.schedulePollTask(() -> {
        consumer.rebalance(startOffsets.keySet());
        source.getCommitHandler().partitionsAssigned(startOffsets.keySet());
        for (int offset = 0; offset < 500; offset++) {
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, offset, "k", "v0-" + offset));
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, offset, "r", "v1-" + offset));
        }
    });

    // 500 records per partition, minus the [0..5) records of p1 below its start offset.
    int firstBatchTotal = 500 * 2 - 5;
    await().until(() -> received.size() == firstBatchTotal);
    assertThat(received).hasSize(firstBatchTotal);
    received.forEach(m -> m.ack().toCompletableFuture().join());
    // After acknowledging everything, both partitions should be committed at 500.
    await().untilAsserted(() -> {
        Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(startOffsets.keySet());
        assertThat(committed.get(partition0)).isNotNull();
        assertThat(committed.get(partition0).offset()).isEqualTo(500);
        assertThat(committed.get(partition1)).isNotNull();
        assertThat(committed.get(partition1).offset()).isEqualTo(500);
    });

    // Second batch: 1000 more records per partition, starting at offset 500.
    consumer.schedulePollTask(() -> {
        for (int offset = 0; offset < 1000; offset++) {
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 500 + offset, "k", "v0-" + (500 + offset)));
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 500 + offset, "k", "v1-" + (500 + offset)));
        }
    });
    int secondBatchTotal = firstBatchTotal + 1000 * 2;
    await().until(() -> received.size() == secondBatchTotal);
    received.forEach(m -> m.ack().toCompletableFuture().join());
    await().untilAsserted(() -> {
        Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(startOffsets.keySet());
        assertThat(committed.get(partition0)).isNotNull();
        assertThat(committed.get(partition0).offset()).isEqualTo(1500);
        assertThat(committed.get(partition1)).isNotNull();
        assertThat(committed.get(partition1).offset()).isEqualTo(1500);
    });

    // Every produced payload must have been received (p1 skips offsets 0..4).
    List<String> payloads = received.stream().map(m -> (String) m.getPayload()).collect(Collectors.toList());
    for (int offset = 0; offset < 1500; offset++) {
        assertThat(payloads).contains("v0-" + offset);
    }
    for (int offset = 5; offset < 1500; offset++) {
        assertThat(payloads).contains("v1-" + offset);
    }
}
Also used : BeforeEach(org.junit.jupiter.api.BeforeEach) Arrays(java.util.Arrays) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) DeserializationFailureHandler(io.smallrye.reactive.messaging.kafka.DeserializationFailureHandler) Assertions.assertThat(org.assertj.core.api.Assertions.assertThat) UnsatisfiedResolutionException(javax.enterprise.inject.UnsatisfiedResolutionException) HealthReport(io.smallrye.reactive.messaging.health.HealthReport) HashMap(java.util.HashMap) OffsetResetStrategy(org.apache.kafka.clients.consumer.OffsetResetStrategy) AtomicReference(java.util.concurrent.atomic.AtomicReference) IncomingKafkaRecordMetadata(io.smallrye.reactive.messaging.kafka.api.IncomingKafkaRecordMetadata) MapBasedConfig(io.smallrye.reactive.messaging.test.common.config.MapBasedConfig) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Assertions.assertThatThrownBy(org.assertj.core.api.Assertions.assertThatThrownBy) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) MockKafkaUtils.injectMockConsumer(io.smallrye.reactive.messaging.kafka.base.MockKafkaUtils.injectMockConsumer) TypeLiteral(javax.enterprise.util.TypeLiteral) Map(java.util.Map) CountKafkaCdiEvents(io.smallrye.reactive.messaging.kafka.CountKafkaCdiEvents) KafkaConnector(io.smallrye.reactive.messaging.kafka.KafkaConnector) WeldTestBase(io.smallrye.reactive.messaging.kafka.base.WeldTestBase) Named(javax.inject.Named) Instance(javax.enterprise.inject.Instance) Consumer(org.apache.kafka.clients.consumer.Consumer) TopicPartition(org.apache.kafka.common.TopicPartition) Awaitility.await(org.awaitility.Awaitility.await) Collection(java.util.Collection) DeploymentException(javax.enterprise.inject.spi.DeploymentException) UUID(java.util.UUID) Collectors(java.util.stream.Collectors) GlobalOpenTelemetry(io.opentelemetry.api.GlobalOpenTelemetry) Test(org.junit.jupiter.api.Test) List(java.util.List) Message(org.eclipse.microprofile.reactive.messaging.Message) 
AfterEach(org.junit.jupiter.api.AfterEach) KafkaConsumerRebalanceListener(io.smallrye.reactive.messaging.kafka.KafkaConsumerRebalanceListener) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Vertx(io.vertx.mutiny.core.Vertx) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) LegacyMetadataTestUtils(io.smallrye.reactive.messaging.kafka.LegacyMetadataTestUtils) ApplicationScoped(javax.enterprise.context.ApplicationScoped) KafkaConnectorIncomingConfiguration(io.smallrye.reactive.messaging.kafka.KafkaConnectorIncomingConfiguration) KafkaSource(io.smallrye.reactive.messaging.kafka.impl.KafkaSource) Collections(java.util.Collections) Message(org.eclipse.microprofile.reactive.messaging.Message) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) TopicPartition(org.apache.kafka.common.TopicPartition) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) KafkaConnectorIncomingConfiguration(io.smallrye.reactive.messaging.kafka.KafkaConnectorIncomingConfiguration) MapBasedConfig(io.smallrye.reactive.messaging.test.common.config.MapBasedConfig) Test(org.junit.jupiter.api.Test)

Example 13 with Vertx

use of io.vertx.mutiny.core.Vertx in project smallrye-reactive-messaging by smallrye.

In the class BatchCommitStrategiesTest, the method testThrottledStrategyWithManyRecords:

@RepeatedTest(10)
void testThrottledStrategyWithManyRecords() {
    // Batch-stream variant of the throttled commit strategy test: two
    // partitions, partition 1 starting at offset 5, records delivered one
    // pair per poll task.
    MapBasedConfig config = commonConfiguration()
            .with("client.id", UUID.randomUUID().toString())
            .with("commit-strategy", "throttled")
            .with("auto.offset.reset", "earliest")
            .with("auto.commit.interval.ms", 100);
    String group = UUID.randomUUID().toString();
    source = new KafkaSource<>(vertx, group, new KafkaConnectorIncomingConfiguration(config),
            getConsumerRebalanceListeners(), CountKafkaCdiEvents.noCdiEvents,
            getDeserializationFailureHandlers(), -1);
    injectMockConsumer(source, consumer);

    // The batch stream is consumed concurrently, hence the thread-safe list.
    List<Message<?>> batches = new CopyOnWriteArrayList<>();
    source.getBatchStream().subscribe().with(batches::add);

    TopicPartition partition0 = new TopicPartition(TOPIC, 0);
    TopicPartition partition1 = new TopicPartition(TOPIC, 1);
    Map<TopicPartition, Long> startOffsets = new HashMap<>();
    startOffsets.put(partition0, 0L);
    startOffsets.put(partition1, 5L);
    consumer.updateBeginningOffsets(startOffsets);

    consumer.schedulePollTask(() -> {
        consumer.rebalance(startOffsets.keySet());
        source.getCommitHandler().partitionsAssigned(startOffsets.keySet());
    });
    // First wave: one record per partition per poll, 500 polls.
    for (int i = 0; i < 500; i++) {
        final int offset = i;
        consumer.schedulePollTask(() -> {
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, offset, "k", "v0-" + offset));
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, offset, "r", "v1-" + offset));
        });
    }

    // 500 records per partition, minus the [0..5) records of p1 below its start offset.
    int firstWaveTotal = 500 * 2 - 5;
    await().until(() -> batches.stream()
            .map(IncomingKafkaRecordBatch.class::cast)
            .mapToLong(b -> b.getRecords().size())
            .sum() == firstWaveTotal);
    batches.forEach(m -> m.ack().toCompletableFuture().join());
    await().untilAsserted(() -> {
        Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(startOffsets.keySet());
        assertThat(committed.get(partition0)).isNotNull();
        assertThat(committed.get(partition0).offset()).isEqualTo(500);
        assertThat(committed.get(partition1)).isNotNull();
        assertThat(committed.get(partition1).offset()).isEqualTo(500);
    });

    // Second wave: 1000 more polls, offsets continuing from 500.
    for (int i = 0; i < 1000; i++) {
        final int offset = i;
        consumer.schedulePollTask(() -> {
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 500 + offset, "k", "v0-" + (500 + offset)));
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 500 + offset, "k", "v1-" + (500 + offset)));
        });
    }
    int secondWaveTotal = firstWaveTotal + 1000 * 2;
    await().until(() -> batches.stream()
            .map(IncomingKafkaRecordBatch.class::cast)
            .mapToLong(b -> b.getRecords().size())
            .sum() == secondWaveTotal);
    batches.forEach(m -> m.ack().toCompletableFuture().join());
    await().atMost(Duration.ofMinutes(1)).untilAsserted(() -> {
        Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(startOffsets.keySet());
        assertThat(committed.get(partition0)).isNotNull();
        assertThat(committed.get(partition0).offset()).isEqualTo(1500);
        assertThat(committed.get(partition1)).isNotNull();
        assertThat(committed.get(partition1).offset()).isEqualTo(1500);
    });

    // Flatten the batch payloads and check every produced value was received
    // (p1 skips offsets 0..4).
    @SuppressWarnings("unchecked")
    List<String> payloads = batches.stream()
            .map(m -> (List<String>) m.getPayload())
            .flatMap(Collection::stream)
            .collect(Collectors.toList());
    for (int offset = 0; offset < 1500; offset++) {
        assertThat(payloads).contains("v0-" + offset);
    }
    for (int offset = 5; offset < 1500; offset++) {
        assertThat(payloads).contains("v1-" + offset);
    }
}
Also used : org.apache.kafka.clients.consumer(org.apache.kafka.clients.consumer) BeforeEach(org.junit.jupiter.api.BeforeEach) java.util(java.util) RepeatedTest(org.junit.jupiter.api.RepeatedTest) Assertions.assertThat(org.assertj.core.api.Assertions.assertThat) HealthReport(io.smallrye.reactive.messaging.health.HealthReport) io.smallrye.reactive.messaging.kafka(io.smallrye.reactive.messaging.kafka) MapBasedConfig(io.smallrye.reactive.messaging.test.common.config.MapBasedConfig) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) MockKafkaUtils.injectMockConsumer(io.smallrye.reactive.messaging.kafka.base.MockKafkaUtils.injectMockConsumer) TypeLiteral(javax.enterprise.util.TypeLiteral) Duration(java.time.Duration) WeldTestBase(io.smallrye.reactive.messaging.kafka.base.WeldTestBase) Instance(javax.enterprise.inject.Instance) TopicPartition(org.apache.kafka.common.TopicPartition) Awaitility.await(org.awaitility.Awaitility.await) Collectors(java.util.stream.Collectors) Test(org.junit.jupiter.api.Test) Message(org.eclipse.microprofile.reactive.messaging.Message) AfterEach(org.junit.jupiter.api.AfterEach) Vertx(io.vertx.mutiny.core.Vertx) ApplicationScoped(javax.enterprise.context.ApplicationScoped) KafkaSource(io.smallrye.reactive.messaging.kafka.impl.KafkaSource) IncomingKafkaRecordBatchMetadata(io.smallrye.reactive.messaging.kafka.api.IncomingKafkaRecordBatchMetadata) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) Identifier(io.smallrye.common.annotation.Identifier) Message(org.eclipse.microprofile.reactive.messaging.Message) TopicPartition(org.apache.kafka.common.TopicPartition) MapBasedConfig(io.smallrye.reactive.messaging.test.common.config.MapBasedConfig) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) RepeatedTest(org.junit.jupiter.api.RepeatedTest)

Example 14 with Vertx

use of io.vertx.mutiny.core.Vertx in project smallrye-reactive-messaging by smallrye.

In the class MqttServerSourceTest, the method testSingle:

@Test
void testSingle(io.vertx.core.Vertx vertx, VertxTestContext testContext) {
    // Bind the MQTT server source to an ephemeral port (port 0) and verify a
    // single published message is both received and acknowledged.
    Map<String, String> settings = new HashMap<>();
    settings.put("port", "0");
    MqttServerSource source = new MqttServerSource(new Vertx(vertx),
            new MqttServerConnectorIncomingConfiguration(TestUtils.config(settings)));
    PublisherBuilder<MqttMessage> publisher = source.source();
    TestMqttMessage expected = new TestMqttMessage("hello/topic", 1, "Hello world!", EXACTLY_ONCE.value(), false);
    Checkpoint messageReceived = testContext.checkpoint();
    Checkpoint messageAcknowledged = testContext.checkpoint();
    publisher.forEach(message -> {
        testContext.verify(() -> TestUtils.assertMqttEquals(expected, message));
        messageReceived.flag();
        message.ack().thenApply(v -> {
            messageAcknowledged.flag();
            return v;
        });
    }).run();
    // Publish only once the server reports the port it actually bound to.
    TestUtils.sendMqttMessages(Collections.singletonList(expected), CompletableFuture.supplyAsync(() -> {
        await().until(source::port, port -> port != 0);
        return source.port();
    }), testContext);
}
Also used : VertxTestContext(io.vertx.junit5.VertxTestContext) MqttQoS(io.netty.handler.codec.mqtt.MqttQoS) Awaitility.await(org.awaitility.Awaitility.await) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) VertxExtension(io.vertx.junit5.VertxExtension) Test(org.junit.jupiter.api.Test) List(java.util.List) ExtendWith(org.junit.jupiter.api.extension.ExtendWith) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Map(java.util.Map) Vertx(io.vertx.mutiny.core.Vertx) Checkpoint(io.vertx.junit5.Checkpoint) Collections(java.util.Collections) PublisherBuilder(org.eclipse.microprofile.reactive.streams.operators.PublisherBuilder) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) Checkpoint(io.vertx.junit5.Checkpoint) HashMap(java.util.HashMap) Vertx(io.vertx.mutiny.core.Vertx) Test(org.junit.jupiter.api.Test)

Aggregations

Vertx (io.vertx.mutiny.core.Vertx)14 Awaitility.await (org.awaitility.Awaitility.await)7 KafkaSource (io.smallrye.reactive.messaging.kafka.impl.KafkaSource)6 MapBasedConfig (io.smallrye.reactive.messaging.test.common.config.MapBasedConfig)6 Instance (javax.enterprise.inject.Instance)6 StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer)6 Test (org.junit.jupiter.api.Test)6 MockKafkaUtils.injectMockConsumer (io.smallrye.reactive.messaging.kafka.base.MockKafkaUtils.injectMockConsumer)5 WeldTestBase (io.smallrye.reactive.messaging.kafka.base.WeldTestBase)5 Duration (java.time.Duration)5 CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList)5 TypeLiteral (javax.enterprise.util.TypeLiteral)5 TopicPartition (org.apache.kafka.common.TopicPartition)5 Message (org.eclipse.microprofile.reactive.messaging.Message)5 AfterEach (org.junit.jupiter.api.AfterEach)5 HealthReport (io.smallrye.reactive.messaging.health.HealthReport)4 io.smallrye.reactive.messaging.kafka (io.smallrye.reactive.messaging.kafka)4 java.util (java.util)4 HashMap (java.util.HashMap)4 Map (java.util.Map)4