
Example 76 with Timeout

use of io.vertx.junit5.Timeout in project hono by eclipse.

From the class AmqpUploadTestBase, method testAdapterRejectsBadInboundMessage:

/**
 * Verifies that a message containing a payload which has the <em>empty notification</em>
 * content type is rejected by the adapter.
 *
 * @param context The Vert.x context for running asynchronous tests.
 * @throws InterruptedException if the test is interrupted while running.
 */
@Test
@Timeout(timeUnit = TimeUnit.SECONDS, value = 10)
public void testAdapterRejectsBadInboundMessage(final VertxTestContext context) throws InterruptedException {
    final String tenantId = helper.getRandomTenantId();
    final String deviceId = helper.getRandomDeviceId(tenantId);
    final VertxTestContext setup = new VertxTestContext();
    setupProtocolAdapter(tenantId, new Tenant(), deviceId, ProtonQoS.AT_LEAST_ONCE).map(s -> {
        setup.verify(() -> {
            final UnsignedLong maxMessageSize = s.getRemoteMaxMessageSize();
            assertWithMessage("max-message-size included in adapter's attach frame").that(maxMessageSize).isNotNull();
            assertWithMessage("max-message-size").that(maxMessageSize.longValue()).isGreaterThan(0);
        });
        sender = s;
        return s;
    }).onComplete(setup.succeedingThenComplete());
    assertThat(setup.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        context.failNow(setup.causeOfFailure());
        return;
    }
    final Message msg = ProtonHelper.message("some payload");
    msg.setContentType(EventConstants.CONTENT_TYPE_EMPTY_NOTIFICATION);
    msg.setAddress(getEndpointName());
    sender.send(msg, delivery -> {
        context.verify(() -> {
            assertThat(delivery.getRemoteState()).isInstanceOf(Rejected.class);
            final Rejected rejected = (Rejected) delivery.getRemoteState();
            final ErrorCondition error = rejected.getError();
            assertThat((Object) error.getCondition()).isEqualTo(Constants.AMQP_BAD_REQUEST);
        });
        context.completeNow();
    });
}
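For orientation, here is a minimal, self-contained sketch (not taken from Hono) of the two vertx-junit5 idioms the examples on this page rely on: the @Timeout annotation bounding an asynchronous test, and a dedicated setup VertxTestContext that is awaited synchronously before the actual test steps run. The asyncSetup() helper is a hypothetical stand-in for the real preparation step.

import java.util.concurrent.TimeUnit;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;

import io.vertx.core.Future;
import io.vertx.core.Vertx;
import io.vertx.junit5.Timeout;
import io.vertx.junit5.VertxExtension;
import io.vertx.junit5.VertxTestContext;

@ExtendWith(VertxExtension.class)
class TimeoutUsageSketch {

    // hypothetical asynchronous preparation step
    private Future<Void> asyncSetup(final Vertx vertx) {
        return Future.future(promise -> vertx.setTimer(50, id -> promise.complete()));
    }

    @Test
    @Timeout(timeUnit = TimeUnit.SECONDS, value = 5)
    void testWithGatedSetup(final Vertx vertx, final VertxTestContext ctx) throws InterruptedException {
        // gate the test body on a separate test context completed by the setup future
        final VertxTestContext setup = new VertxTestContext();
        asyncSetup(vertx).onComplete(setup.succeedingThenComplete());
        Assertions.assertTrue(setup.awaitCompletion(2, TimeUnit.SECONDS));
        if (setup.failed()) {
            ctx.failNow(setup.causeOfFailure());
            return;
        }
        // actual asynchronous test steps go here; the test fails if completeNow()
        // is not called before the 5 second @Timeout elapses
        vertx.setTimer(100, id -> ctx.completeNow());
    }
}

The Hono tests above follow the same shape, with helpers such as setupProtocolAdapter() taking the place of asyncSetup().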

Example 77 with Timeout

use of io.vertx.junit5.Timeout in project hono by eclipse.

From the class AmqpUploadTestBase, method testAutoProvisioningViaGateway:

/**
 * Verifies that an edge device is auto-provisioned if it connects via a gateway equipped with the corresponding
 * authority.
 *
 * @param ctx The Vert.x test context.
 */
@Test
@Timeout(timeUnit = TimeUnit.SECONDS, value = 10)
public void testAutoProvisioningViaGateway(final VertxTestContext ctx) {
    final String tenantId = helper.getRandomTenantId();
    final String gatewayId = helper.getRandomDeviceId(tenantId);
    final Device gateway = new Device().setAuthorities(Collections.singleton(RegistryManagementConstants.AUTHORITY_AUTO_PROVISIONING_ENABLED));
    final String username = IntegrationTestSupport.getUsername(gatewayId, tenantId);
    final String edgeDeviceId = helper.getRandomDeviceId(tenantId);
    final Promise<Void> provisioningNotificationReceived = Promise.promise();
    helper.createAutoProvisioningMessageConsumers(ctx, provisioningNotificationReceived, tenantId, edgeDeviceId)
    .compose(ok -> helper.registry.addDeviceForTenant(tenantId, new Tenant(), gatewayId, gateway, DEVICE_PASSWORD))
    .compose(ok -> connectToAdapter(username, DEVICE_PASSWORD))
    .compose(con -> createProducer(null, ProtonQoS.AT_LEAST_ONCE))
    .compose(sender -> {
        final Message msg = ProtonHelper.message("apFoobar");
        msg.setContentType("text/plain");
        msg.setAddress(String.format("%s/%s/%s", getEndpointName(), tenantId, edgeDeviceId));
        final Promise<Void> result = Promise.promise();
        sender.send(msg, delivery -> {
            ctx.verify(() -> assertThat(delivery.getRemoteState()).isInstanceOf(Accepted.class));
            result.complete();
        });
        return result.future();
    }).compose(ok -> provisioningNotificationReceived.future())
    .compose(ok -> helper.registry.getRegistrationInfo(tenantId, edgeDeviceId))
    .onComplete(ctx.succeeding(registrationResult -> {
        ctx.verify(() -> {
            final var info = registrationResult.bodyAsJsonObject();
            IntegrationTestSupport.assertDeviceStatusProperties(info.getJsonObject(RegistryManagementConstants.FIELD_STATUS), true);
        });
        ctx.completeNow();
    }));
}

Example 78 with Timeout

use of io.vertx.junit5.Timeout in project hono by eclipse.

From the class HonoKafkaConsumerIT, method testConsumerReadsLatestRecordsPublishedAfterOutOfRangeOffsetReset:

/**
 * Verifies that a HonoKafkaConsumer configured with "latest" as offset reset strategy will receive
 * all still available records after the committed offset position has gone out of range
 * (because records have been deleted according to the retention config) and the consumer is restarted.
 *
 * @param ctx The Vert.x test context.
 * @throws InterruptedException if test execution gets interrupted.
 */
@Test
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsLatestRecordsPublishedAfterOutOfRangeOffsetReset(final VertxTestContext ctx) throws InterruptedException {
    final int numTopics = 1;
    final int numTestRecordsPerTopicPerRound = 20;
    // has to be 1 here because we expect partition 0 to contain *all* the records published for a topic
    final int numPartitions = 1;
    // prepare topics
    final Set<String> topics = IntStream.range(0, numTopics).mapToObj(i -> "test_" + i + "_" + UUID.randomUUID()).collect(Collectors.toSet());
    final String publishTestTopic = topics.iterator().next();
    final VertxTestContext setup = new VertxTestContext();
    final Map<String, String> topicsConfig = Map.of(TopicConfig.RETENTION_MS_CONFIG, "300", TopicConfig.SEGMENT_BYTES_CONFIG, SMALL_TOPIC_SEGMENT_SIZE_BYTES);
    createTopics(topics, numPartitions, topicsConfig).onComplete(setup.succeedingThenComplete());
    assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }
    // prepare consumer
    final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    final VertxTestContext firstConsumerInstanceStartedAndStopped = new VertxTestContext();
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        receivedRecords.add(record);
        if (receivedRecords.size() == numTestRecordsPerTopicPerRound * topics.size()) {
            LOG.trace("first round of records received; stop consumer; committed offset afterwards shall be {}", numTestRecordsPerTopicPerRound);
            kafkaConsumer.stop().onFailure(ctx::failNow).onSuccess(v2 -> {
                LOG.trace("publish 2nd round of records (shall be deleted before the to-be-restarted consumer is able to receive them)");
                publishRecords(numTestRecordsPerTopicPerRound, "round2_", topics).onFailure(ctx::failNow).onSuccess(v3 -> {
                    LOG.trace("wait until records of first two rounds have been deleted according to the retention policy (committed offset will be out-of-range then)");
                    final int beginningOffsetToWaitFor = numTestRecordsPerTopicPerRound * 2;
                    waitForLogDeletion(new TopicPartition(publishTestTopic, 0), beginningOffsetToWaitFor, Duration.ofSeconds(5)).onComplete(firstConsumerInstanceStartedAndStopped.succeedingThenComplete());
                });
            });
        }
    };
    kafkaConsumer = new HonoKafkaConsumer(vertx, topics, recordHandler, consumerConfig);
    // first start of consumer, letting it commit offsets
    kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
        LOG.trace("consumer started, publish first round of records to be received by the consumer (so that it has offsets to commit)");
        publishRecords(numTestRecordsPerTopicPerRound, "round1_", topics);
    }));
    assertThat(firstConsumerInstanceStartedAndStopped.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (firstConsumerInstanceStartedAndStopped.failed()) {
        ctx.failNow(firstConsumerInstanceStartedAndStopped.causeOfFailure());
        return;
    }
    // preparation done, now start the same consumer again and verify that it reads all still-available records, even though the committed offset is out of range now
    receivedRecords.clear();
    final String lastRecordKey = "lastKey";
    // restarted consumer is expected to receive 3rd round of records + one extra record published after consumer start
    final int expectedNumberOfRecords = (numTestRecordsPerTopicPerRound * topics.size()) + 1;
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler2 = record -> {
        receivedRecords.add(record);
        if (receivedRecords.size() == expectedNumberOfRecords) {
            ctx.verify(() -> {
                assertThat(receivedRecords.get(0).key()).startsWith("round3");
                assertThat(receivedRecords.get(receivedRecords.size() - 1).key()).isEqualTo(lastRecordKey);
            });
            ctx.completeNow();
        }
    };
    LOG.trace("publish 3nd round of records (shall be received by to-be-restarted consumer)");
    publishRecords(numTestRecordsPerTopicPerRound, "round3_", topics).onFailure(ctx::failNow).onSuccess(v -> {
        kafkaConsumer = new HonoKafkaConsumer(vertx, topics, recordHandler2, consumerConfig);
        kafkaConsumer.start().onComplete(ctx.succeeding(v2 -> {
            LOG.debug("consumer started, publish another record to be received by the consumer");
            publish(publishTestTopic, lastRecordKey, Buffer.buffer("testPayload"));
        }));
    });
    if (!ctx.awaitCompletion(9, TimeUnit.SECONDS)) {
        ctx.failNow(new IllegalStateException(String.format("timeout waiting for expected number of records (%d) to be received; received records: %d", expectedNumberOfRecords, receivedRecords.size())));
    }
}
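The behaviour under test hinges on the standard Kafka consumer property auto.offset.reset=latest. As a rough illustration only (the broker address, group id and deserializers below are placeholder assumptions, not Hono's actual configuration), such a consumer could be created with the plain Vert.x Kafka client like this:

import java.util.HashMap;
import java.util.Map;

import io.vertx.core.Vertx;
import io.vertx.core.buffer.Buffer;
import io.vertx.kafka.client.consumer.KafkaConsumer;

public class LatestOffsetResetSketch {

    public static KafkaConsumer<String, Buffer> createConsumer(final Vertx vertx) {
        final Map<String, String> config = new HashMap<>();
        config.put("bootstrap.servers", "localhost:9092"); // placeholder broker address
        config.put("group.id", "example-group"); // placeholder group id
        config.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        config.put("value.deserializer", "io.vertx.kafka.client.serialization.BufferDeserializer");
        // if the committed offset is missing or out of range, resume from the latest
        // offset instead of failing or re-reading the whole topic from the beginning
        config.put("auto.offset.reset", "latest");
        return KafkaConsumer.create(vertx, config);
    }
}

In the test above the same property is set on the config obtained from IntegrationTestSupport.getKafkaConsumerConfig() and handed to HonoKafkaConsumer.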

Example 79 with Timeout

use of io.vertx.junit5.Timeout in project hono by eclipse.

From the class HonoKafkaConsumerIT, method testConsumerReadsLatestRecordsPublishedAfterTopicSubscriptionConfirmed:

/**
 * Verifies that a HonoKafkaConsumer configured with "latest" as offset reset strategy and a topic pattern
 * subscription only receives records published after the consumer <em>start()</em> method has completed.
 * <p>
 * Also verifies that all records published after the consumer's <em>ensureTopicIsAmongSubscribedTopicPatternTopics()</em>
 * method has completed are received by the consumer, even if the topic was only created after the consumer's
 * <em>start</em> method has completed.
 *
 * @param partitionAssignmentStrategy The partition assignment strategy to use for the consumer.
 * @param ctx The Vert.x test context.
 * @throws InterruptedException if test execution gets interrupted.
 */
@ParameterizedTest
@MethodSource("partitionAssignmentStrategies")
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsLatestRecordsPublishedAfterTopicSubscriptionConfirmed(final String partitionAssignmentStrategy, final VertxTestContext ctx) throws InterruptedException {
    final String patternPrefix = "test_" + UUID.randomUUID() + "_";
    final int numTopics = 2;
    final Pattern topicPattern = Pattern.compile(Pattern.quote(patternPrefix) + ".*");
    final int numPartitions = 5;
    final int numTestRecordsPerTopic = 20;
    final Set<String> topics = IntStream.range(0, numTopics).mapToObj(i -> patternPrefix + i).collect(Collectors.toSet());
    final VertxTestContext setup = new VertxTestContext();
    createTopics(topics, numPartitions).compose(v -> publishRecords(numTestRecordsPerTopic, "key_", topics)).onComplete(setup.succeedingThenComplete());
    assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }
    LOG.debug("topics created and (to be ignored) test records published");
    // prepare consumer
    final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
    applyPartitionAssignmentStrategy(consumerConfig, partitionAssignmentStrategy);
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    final AtomicReference<Promise<Void>> nextRecordReceivedPromiseRef = new AtomicReference<>();
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        receivedRecords.add(record);
        Optional.ofNullable(nextRecordReceivedPromiseRef.get()).ifPresent(Promise::complete);
    };
    kafkaConsumer = new HonoKafkaConsumer(vertx, topicPattern, recordHandler, consumerConfig);
    // start consumer
    kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
        ctx.verify(() -> {
            assertThat(receivedRecords.size()).isEqualTo(0);
        });
        final Promise<Void> nextRecordReceivedPromise = Promise.promise();
        nextRecordReceivedPromiseRef.set(nextRecordReceivedPromise);
        LOG.debug("consumer started, create new topic implicitly by invoking ensureTopicIsAmongSubscribedTopicPatternTopics()");
        final String newTopic = patternPrefix + "new";
        final String recordKey = "addedAfterStartKey";
        kafkaConsumer.ensureTopicIsAmongSubscribedTopicPatternTopics(newTopic).onComplete(ctx.succeeding(v2 -> {
            LOG.debug("publish record to be received by the consumer");
            publish(newTopic, recordKey, Buffer.buffer("testPayload"));
        }));
        nextRecordReceivedPromise.future().onComplete(ar -> {
            ctx.verify(() -> {
                assertThat(receivedRecords.size()).isEqualTo(1);
                assertThat(receivedRecords.get(0).key()).isEqualTo(recordKey);
            });
            ctx.completeNow();
        });
    }));
}

Aggregations

Timeout (io.vertx.junit5.Timeout): 73
VertxTestContext (io.vertx.junit5.VertxTestContext): 73
Test (org.junit.jupiter.api.Test): 71
TimeUnit (java.util.concurrent.TimeUnit): 67
Truth.assertThat (com.google.common.truth.Truth.assertThat): 65
Future (io.vertx.core.Future): 54
HttpURLConnection (java.net.HttpURLConnection): 52
IntegrationTestSupport (org.eclipse.hono.tests.IntegrationTestSupport): 49
Handler (io.vertx.core.Handler): 47
Buffer (io.vertx.core.buffer.Buffer): 46
BeforeEach (org.junit.jupiter.api.BeforeEach): 46
Promise (io.vertx.core.Promise): 45
Vertx (io.vertx.core.Vertx): 37
JsonObject (io.vertx.core.json.JsonObject): 37
VertxExtension (io.vertx.junit5.VertxExtension): 37
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 37
ExtendWith (org.junit.jupiter.api.extension.ExtendWith): 37
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 37
Tenant (org.eclipse.hono.service.management.tenant.Tenant): 36
MethodSource (org.junit.jupiter.params.provider.MethodSource): 35