
Example 6 with TIMEOUT

use of io.vertx.junit5.Timeout in project hono by eclipse.

From the class CommandAndControlAmqpIT, the method testSendCommandFailsWhenNoCredit:

/**
 * Verifies that the adapter immediately forwards the <em>released</em> disposition
 * if there is no credit left for sending the command to the device.
 * <p>
 * If Kafka is used, this means a corresponding error command response is published.
 *
 * @param endpointConfig The endpoints to use for sending/receiving commands.
 * @param ctx The vert.x test context.
 * @throws InterruptedException if not all commands and responses are exchanged in time.
 */
@ParameterizedTest(name = IntegrationTestSupport.PARAMETERIZED_TEST_NAME_PATTERN)
@MethodSource("allCombinations")
@Timeout(timeUnit = TimeUnit.SECONDS, value = 10)
public void testSendCommandFailsWhenNoCredit(final AmqpCommandEndpointConfiguration endpointConfig, final VertxTestContext ctx) throws InterruptedException {
    final String commandTargetDeviceId = endpointConfig.isSubscribeAsGateway() ? helper.setupGatewayDeviceBlocking(tenantId, deviceId, 5) : deviceId;
    final String firstCommandSubject = "firstCommandSubject";
    final Promise<Void> firstCommandReceived = Promise.promise();
    final VertxTestContext setup = new VertxTestContext();
    final Checkpoint setupDone = setup.checkpoint();
    final Checkpoint preconditions = setup.checkpoint(1);
    connectToAdapter(tenantId, deviceId, password, () -> createEventConsumer(tenantId, msg -> {
        // expect empty notification with TTD -1
        setup.verify(() -> assertThat(msg.getContentType()).isEqualTo(EventConstants.CONTENT_TYPE_EMPTY_NOTIFICATION));
        final TimeUntilDisconnectNotification notification = msg.getTimeUntilDisconnectNotification().orElse(null);
        log.info("received notification [{}]", notification);
        setup.verify(() -> assertThat(notification).isNotNull());
        if (notification.getTtd() == -1) {
            preconditions.flag();
        }
    })).compose(con -> subscribeToCommands(endpointConfig, tenantId, commandTargetDeviceId)).onSuccess(recv -> {
        recv.handler((delivery, msg) -> {
            log.info("received command [name: {}, reply-to: {}, correlation-id: {}]", msg.getSubject(), msg.getReplyTo(), msg.getCorrelationId());
            ctx.verify(() -> {
                assertThat(msg.getSubject()).isEqualTo(firstCommandSubject);
            });
            firstCommandReceived.complete();
            ProtonHelper.accepted(delivery, true);
            // don't send credits
        });
        // just give 1 initial credit
        recv.flow(1);
    }).onComplete(setup.succeeding(v -> setupDone.flag()));
    assertWithMessage("setup of adapter finished within %s seconds", IntegrationTestSupport.getTestSetupTimeout()).that(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }
    // send first command
    helper.sendOneWayCommand(tenantId, commandTargetDeviceId, firstCommandSubject, "text/plain", Buffer.buffer("cmd"), helper.getSendCommandTimeout(true)).onFailure(ctx::failNow).compose(ok -> {
        log.info("sent first command [subject: {}]", firstCommandSubject);
        return firstCommandReceived.future();
    }).compose(ok -> helper.sendCommand(tenantId, commandTargetDeviceId, "secondCommandSubject", "text/plain", Buffer.buffer("cmd"), helper.getSendCommandTimeout(false))).onComplete(ctx.failing(t -> {
        ctx.verify(() -> {
            assertThat(t).isInstanceOf(ServerErrorException.class);
            assertThat(((ServerErrorException) t).getErrorCode()).isEqualTo(HttpURLConnection.HTTP_UNAVAILABLE);
            // with no explicit credit check, the AMQP adapter would just run into the
            // "waiting for delivery update" timeout (after 1s) and the error here would be caused
            // by a request timeout in the sendOneWayCommand() method above
            assertThat(t).isNotInstanceOf(SendMessageTimeoutException.class);
            assertThat(t.getMessage()).doesNotContain("timed out");
        });
        ctx.completeNow();
    }));
}
Also used : HttpURLConnection(java.net.HttpURLConnection) ProtonConnection(io.vertx.proton.ProtonConnection) ProtonReceiver(io.vertx.proton.ProtonReceiver) BeforeEach(org.junit.jupiter.api.BeforeEach) DownstreamMessage(org.eclipse.hono.application.client.DownstreamMessage) BiFunction(java.util.function.BiFunction) Tenant(org.eclipse.hono.service.management.tenant.Tenant) HonoProtonHelper(org.eclipse.hono.util.HonoProtonHelper) Timeout(io.vertx.junit5.Timeout) MessagingType(org.eclipse.hono.util.MessagingType) IntegrationTestSupport(org.eclipse.hono.tests.IntegrationTestSupport) ExtendWith(org.junit.jupiter.api.extension.ExtendWith) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ProtonMessageHandler(io.vertx.proton.ProtonMessageHandler) Duration(java.time.Duration) Map(java.util.Map) AmqpError(org.apache.qpid.proton.amqp.transport.AmqpError) MethodSource(org.junit.jupiter.params.provider.MethodSource) ResourceLimits(org.eclipse.hono.util.ResourceLimits) MessageContext(org.eclipse.hono.application.client.MessageContext) SubscriberRole(org.eclipse.hono.tests.CommandEndpointConfiguration.SubscriberRole) Truth.assertWithMessage(com.google.common.truth.Truth.assertWithMessage) DownstreamMessageAssertions(org.eclipse.hono.tests.DownstreamMessageAssertions) ProtonQoS(io.vertx.proton.ProtonQoS) MessageHelper(org.eclipse.hono.util.MessageHelper) VertxExtension(io.vertx.junit5.VertxExtension) EventConstants(org.eclipse.hono.util.EventConstants) Future(io.vertx.core.Future) Test(org.junit.jupiter.api.Test) CountDownLatch(java.util.concurrent.CountDownLatch) ErrorCondition(org.apache.qpid.proton.amqp.transport.ErrorCondition) Stream(java.util.stream.Stream) Buffer(io.vertx.core.buffer.Buffer) Optional(java.util.Optional) ProtonSender(io.vertx.proton.ProtonSender) Checkpoint(io.vertx.junit5.Checkpoint) Accepted(org.apache.qpid.proton.amqp.messaging.Accepted) VertxTestContext(io.vertx.junit5.VertxTestContext) GenericKafkaSender(org.eclipse.hono.tests.GenericKafkaSender) Rejected(org.apache.qpid.proton.amqp.messaging.Rejected) ClientErrorException(org.eclipse.hono.client.ClientErrorException) AtomicReference(java.util.concurrent.atomic.AtomicReference) Function(java.util.function.Function) Supplier(java.util.function.Supplier) Constants(org.eclipse.hono.util.Constants) TimeUntilDisconnectNotification(org.eclipse.hono.util.TimeUntilDisconnectNotification) Message(org.apache.qpid.proton.message.Message) Promise(io.vertx.core.Promise) ServerErrorException(org.eclipse.hono.client.ServerErrorException) ProtonHelper(io.vertx.proton.ProtonHelper) KafkaRecordHelper(org.eclipse.hono.client.kafka.KafkaRecordHelper) Truth.assertThat(com.google.common.truth.Truth.assertThat) AssumeMessagingSystem(org.eclipse.hono.tests.AssumeMessagingSystem) TimeUnit(java.util.concurrent.TimeUnit) AtomicLong(java.util.concurrent.atomic.AtomicLong) HonoTopic(org.eclipse.hono.client.kafka.HonoTopic) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) MessageConsumer(org.eclipse.hono.application.client.MessageConsumer) SendMessageTimeoutException(org.eclipse.hono.client.SendMessageTimeoutException) NoopSpan(io.opentracing.noop.NoopSpan) GenericSenderLink(org.eclipse.hono.client.amqp.GenericSenderLink) Handler(io.vertx.core.Handler) Checkpoint(io.vertx.junit5.Checkpoint) VertxTestContext(io.vertx.junit5.VertxTestContext) TimeUntilDisconnectNotification(org.eclipse.hono.util.TimeUntilDisconnectNotification) ServerErrorException(org.eclipse.hono.client.ServerErrorException) 
SendMessageTimeoutException(org.eclipse.hono.client.SendMessageTimeoutException) Timeout(io.vertx.junit5.Timeout) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) MethodSource(org.junit.jupiter.params.provider.MethodSource)
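
The decisive detail in this test is the manual credit handling on the command receiver: a single credit is granted up front and no further credit afterwards, so the adapter runs out of credit for the second command. The following is a minimal sketch of that pattern with plain vertx-proton, assuming an already opened ProtonConnection named con and a hypothetical address commandAddress (neither taken from the test above):

final ProtonReceiver receiver = con.createReceiver(commandAddress);
receiver.setAutoAccept(false); // settle deliveries manually instead of auto-accepting them
receiver.setPrefetch(0);       // disable automatic credit replenishment by the client
receiver.handler((delivery, msg) -> {
    // process the command and settle the delivery; deliberately no receiver.flow() call here,
    // so the sender gets no credit beyond the single one granted below
    ProtonHelper.accepted(delivery, true);
});
receiver.open();
receiver.flow(1); // grant exactly one credit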

Example 7 with TIMEOUT

use of io.vertx.junit5.Timeout in project hono by eclipse.

From the class HonoKafkaConsumerIT, the method testConsumerAutoCreatesTopicAndReadsLatestRecordsPublishedAfterStart:

/**
 * Verifies that a HonoKafkaConsumer that uses a not yet existing topic and is configured with
 * "latest" as offset reset strategy only receives records published on the auto-created topic after
 * the consumer <em>start()</em> method has completed.
 *
 * @param partitionAssignmentStrategy The partition assignment strategy to use for the consumer.
 * @param ctx The vert.x test context.
 */
@ParameterizedTest
@MethodSource("partitionAssignmentStrategies")
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerAutoCreatesTopicAndReadsLatestRecordsPublishedAfterStart(final String partitionAssignmentStrategy, final VertxTestContext ctx) {
    // prepare consumer
    final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
    applyPartitionAssignmentStrategy(consumerConfig, partitionAssignmentStrategy);
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    final AtomicReference<Promise<Void>> nextRecordReceivedPromiseRef = new AtomicReference<>();
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        receivedRecords.add(record);
        Optional.ofNullable(nextRecordReceivedPromiseRef.get()).ifPresent(Promise::complete);
    };
    final String topic = "test_" + UUID.randomUUID();
    topicsToDeleteAfterTests.add(topic);
    kafkaConsumer = new HonoKafkaConsumer(vertx, Set.of(topic), recordHandler, consumerConfig);
    // start consumer
    kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
        ctx.verify(() -> {
            assertThat(receivedRecords.size()).isEqualTo(0);
        });
        final Promise<Void> nextRecordReceivedPromise = Promise.promise();
        nextRecordReceivedPromiseRef.set(nextRecordReceivedPromise);
        LOG.debug("consumer started, publish record to be received by the consumer");
        final String recordKey = "addedAfterStartKey";
        publish(topic, recordKey, Buffer.buffer("testPayload"));
        nextRecordReceivedPromise.future().onComplete(ar -> {
            ctx.verify(() -> {
                assertThat(receivedRecords.size()).isEqualTo(1);
                assertThat(receivedRecords.get(0).key()).isEqualTo(recordKey);
            });
            ctx.completeNow();
        });
    }));
}
Also used : LoggerFactory(org.slf4j.LoggerFactory) KafkaProducer(io.vertx.kafka.client.producer.KafkaProducer) Timeout(io.vertx.junit5.Timeout) AfterAll(org.junit.jupiter.api.AfterAll) MessagingType(org.eclipse.hono.util.MessagingType) IntegrationTestSupport(org.eclipse.hono.tests.IntegrationTestSupport) ExtendWith(org.junit.jupiter.api.extension.ExtendWith) BeforeAll(org.junit.jupiter.api.BeforeAll) Duration(java.time.Duration) Map(java.util.Map) TopicConfig(org.apache.kafka.common.config.TopicConfig) MethodSource(org.junit.jupiter.params.provider.MethodSource) Collection(java.util.Collection) Set(java.util.Set) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) UUID(java.util.UUID) Instant(java.time.Instant) VertxExtension(io.vertx.junit5.VertxExtension) Collectors(java.util.stream.Collectors) Future(io.vertx.core.Future) TopicPartition(io.vertx.kafka.client.common.TopicPartition) Test(org.junit.jupiter.api.Test) List(java.util.List) Stream(java.util.stream.Stream) Buffer(io.vertx.core.buffer.Buffer) KafkaConsumerRecord(io.vertx.kafka.client.consumer.KafkaConsumerRecord) Optional(java.util.Optional) KafkaConsumer(io.vertx.kafka.client.consumer.KafkaConsumer) Pattern(java.util.regex.Pattern) IntStream(java.util.stream.IntStream) VertxTestContext(io.vertx.junit5.VertxTestContext) HonoKafkaConsumer(org.eclipse.hono.client.kafka.consumer.HonoKafkaConsumer) RecordMetadata(io.vertx.kafka.client.producer.RecordMetadata) AtomicReference(java.util.concurrent.atomic.AtomicReference) ArrayList(java.util.ArrayList) CompositeFuture(io.vertx.core.CompositeFuture) ProducerConfig(org.apache.kafka.clients.producer.ProducerConfig) NewTopic(io.vertx.kafka.admin.NewTopic) Logger(org.slf4j.Logger) Promise(io.vertx.core.Promise) Vertx(io.vertx.core.Vertx) Truth.assertThat(com.google.common.truth.Truth.assertThat) AssumeMessagingSystem(org.eclipse.hono.tests.AssumeMessagingSystem) TimeUnit(java.util.concurrent.TimeUnit) AfterEach(org.junit.jupiter.api.AfterEach) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) CooperativeStickyAssignor(org.apache.kafka.clients.consumer.CooperativeStickyAssignor) Handler(io.vertx.core.Handler) KafkaProducerRecord(io.vertx.kafka.client.producer.KafkaProducerRecord) KafkaAdminClient(io.vertx.kafka.admin.KafkaAdminClient) Promise(io.vertx.core.Promise) ArrayList(java.util.ArrayList) HonoKafkaConsumer(org.eclipse.hono.client.kafka.consumer.HonoKafkaConsumer) KafkaConsumerRecord(io.vertx.kafka.client.consumer.KafkaConsumerRecord) AtomicReference(java.util.concurrent.atomic.AtomicReference) Timeout(io.vertx.junit5.Timeout) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) MethodSource(org.junit.jupiter.params.provider.MethodSource)
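
The key point of this test is the "latest" offset reset strategy: for a consumer group without committed offsets, only records published after the consumer has been assigned its partitions are delivered. The same behaviour can be reproduced with the plain vert.x KafkaConsumer (io.vertx.kafka.client.consumer.KafkaConsumer); the following sketch uses an assumed broker address, group id and topic name rather than values from the test:

final Map<String, String> config = Map.of(
        "bootstrap.servers", "localhost:9092",
        "group.id", "latest-offset-demo",
        "auto.offset.reset", "latest", // no committed offsets: start at the end of each partition
        "key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer",
        "value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
final KafkaConsumer<String, String> consumer = KafkaConsumer.create(vertx, config);
consumer.handler(record -> System.out.println("received " + record.topic() + "/" + record.key()));
// records published before the subscription is established are not delivered
consumer.subscribe("demo-topic")
        .onSuccess(v -> System.out.println("subscribed, records published from now on will be received"))
        .onFailure(Throwable::printStackTrace);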

Example 8 with TIMEOUT

use of io.vertx.junit5.Timeout in project hono by eclipse.

From the class HonoKafkaConsumerIT, the method testConsumerReadsAllRecordsForDynamicallyCreatedTopics:

/**
 * Verifies that a HonoKafkaConsumer configured with "latest" as offset reset strategy and a topic pattern
 * subscription receives records published after multiple <em>ensureTopicIsAmongSubscribedTopicPatternTopics()</em>
 * invocations have been completed.
 *
 * @param partitionAssignmentStrategy The partition assignment strategy to use for the consumer.
 * @param ctx The vert.x test context.
 * @throws InterruptedException if test execution gets interrupted.
 */
@ParameterizedTest
@MethodSource("partitionAssignmentStrategies")
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsAllRecordsForDynamicallyCreatedTopics(final String partitionAssignmentStrategy, final VertxTestContext ctx) throws InterruptedException {
    final String patternPrefix = "test_" + UUID.randomUUID() + "_";
    final int numTopicsAndRecords = 3;
    final Pattern topicPattern = Pattern.compile(Pattern.quote(patternPrefix) + ".*");
    // prepare consumer
    final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
    applyPartitionAssignmentStrategy(consumerConfig, partitionAssignmentStrategy);
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    final Promise<Void> allRecordsReceivedPromise = Promise.promise();
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        receivedRecords.add(record);
        if (receivedRecords.size() == numTopicsAndRecords) {
            allRecordsReceivedPromise.complete();
        }
    };
    kafkaConsumer = new HonoKafkaConsumer(vertx, topicPattern, recordHandler, consumerConfig);
    // start consumer
    kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
        ctx.verify(() -> {
            assertThat(receivedRecords.size()).isEqualTo(0);
        });
        LOG.debug("consumer started, create new topics implicitly by invoking ensureTopicIsAmongSubscribedTopicPatternTopics()");
        final String recordKey = "addedAfterStartKey";
        for (int i = 0; i < numTopicsAndRecords; i++) {
            final String topic = patternPrefix + i;
            kafkaConsumer.ensureTopicIsAmongSubscribedTopicPatternTopics(topic).onComplete(ctx.succeeding(v2 -> {
                LOG.debug("publish record to be received by the consumer");
                publish(topic, recordKey, Buffer.buffer("testPayload"));
            }));
        }
        allRecordsReceivedPromise.future().onComplete(ar -> {
            ctx.verify(() -> {
                assertThat(receivedRecords.size()).isEqualTo(numTopicsAndRecords);
                receivedRecords.forEach(record -> assertThat(record.key()).isEqualTo(recordKey));
            });
            ctx.completeNow();
        });
    }));
    if (!ctx.awaitCompletion(9, TimeUnit.SECONDS)) {
        ctx.failNow(new IllegalStateException(String.format("timeout waiting for expected number of records (%d) to be received; received records: %d", numTopicsAndRecords, receivedRecords.size())));
    }
}
Also used : LoggerFactory(org.slf4j.LoggerFactory) KafkaProducer(io.vertx.kafka.client.producer.KafkaProducer) Timeout(io.vertx.junit5.Timeout) AfterAll(org.junit.jupiter.api.AfterAll) MessagingType(org.eclipse.hono.util.MessagingType) IntegrationTestSupport(org.eclipse.hono.tests.IntegrationTestSupport) ExtendWith(org.junit.jupiter.api.extension.ExtendWith) BeforeAll(org.junit.jupiter.api.BeforeAll) Duration(java.time.Duration) Map(java.util.Map) TopicConfig(org.apache.kafka.common.config.TopicConfig) MethodSource(org.junit.jupiter.params.provider.MethodSource) Collection(java.util.Collection) Set(java.util.Set) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) UUID(java.util.UUID) Instant(java.time.Instant) VertxExtension(io.vertx.junit5.VertxExtension) Collectors(java.util.stream.Collectors) Future(io.vertx.core.Future) TopicPartition(io.vertx.kafka.client.common.TopicPartition) Test(org.junit.jupiter.api.Test) List(java.util.List) Stream(java.util.stream.Stream) Buffer(io.vertx.core.buffer.Buffer) KafkaConsumerRecord(io.vertx.kafka.client.consumer.KafkaConsumerRecord) Optional(java.util.Optional) KafkaConsumer(io.vertx.kafka.client.consumer.KafkaConsumer) Pattern(java.util.regex.Pattern) IntStream(java.util.stream.IntStream) VertxTestContext(io.vertx.junit5.VertxTestContext) HonoKafkaConsumer(org.eclipse.hono.client.kafka.consumer.HonoKafkaConsumer) RecordMetadata(io.vertx.kafka.client.producer.RecordMetadata) AtomicReference(java.util.concurrent.atomic.AtomicReference) ArrayList(java.util.ArrayList) CompositeFuture(io.vertx.core.CompositeFuture) ProducerConfig(org.apache.kafka.clients.producer.ProducerConfig) NewTopic(io.vertx.kafka.admin.NewTopic) Logger(org.slf4j.Logger) Promise(io.vertx.core.Promise) Vertx(io.vertx.core.Vertx) Truth.assertThat(com.google.common.truth.Truth.assertThat) AssumeMessagingSystem(org.eclipse.hono.tests.AssumeMessagingSystem) TimeUnit(java.util.concurrent.TimeUnit) AfterEach(org.junit.jupiter.api.AfterEach) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) CooperativeStickyAssignor(org.apache.kafka.clients.consumer.CooperativeStickyAssignor) Handler(io.vertx.core.Handler) KafkaProducerRecord(io.vertx.kafka.client.producer.KafkaProducerRecord) KafkaAdminClient(io.vertx.kafka.admin.KafkaAdminClient) Pattern(java.util.regex.Pattern) ArrayList(java.util.ArrayList) KafkaConsumerRecord(io.vertx.kafka.client.consumer.KafkaConsumerRecord) HonoKafkaConsumer(org.eclipse.hono.client.kafka.consumer.HonoKafkaConsumer) Timeout(io.vertx.junit5.Timeout) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) MethodSource(org.junit.jupiter.params.provider.MethodSource)
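
The interesting part here is the topic pattern subscription: the Hono-specific ensureTopicIsAmongSubscribedTopicPatternTopics() call makes sure a newly created topic is actually covered by the subscription before a record is published to it. For comparison, plain pattern subscription with the Apache Kafka consumer API looks roughly like the sketch below (broker address, group id and topic prefix are assumptions); there, topics created after the subscription are only picked up on the next metadata refresh, which is the gap the helper above is meant to close:

final Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ConsumerConfig.GROUP_ID_CONFIG, "pattern-demo");
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
props.put(ConsumerConfig.METADATA_MAX_AGE_CONFIG, "5000"); // refresh metadata more often so new topics are found sooner
try (org.apache.kafka.clients.consumer.KafkaConsumer<String, String> consumer =
        new org.apache.kafka.clients.consumer.KafkaConsumer<>(props)) {
    // subscribe to every topic whose name starts with the hypothetical prefix "test_demo_"
    consumer.subscribe(Pattern.compile(Pattern.quote("test_demo_") + ".*"));
    consumer.poll(Duration.ofSeconds(1))
            .forEach(record -> System.out.println(record.topic() + ": " + record.value()));
}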

Example 9 with TIMEOUT

use of io.vertx.junit5.Timeout in project hono by eclipse.

From the class HonoKafkaConsumerIT, the method testConsumerReadsAllRecordsAfterStart:

/**
 * Verifies that a HonoKafkaConsumer configured with "earliest" as offset reset strategy receives all
 * current records after the consumer <em>start()</em> method has completed.
 *
 * @param ctx The vert.x test context.
 * @throws InterruptedException if test execution gets interrupted.
 */
@Test
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsAllRecordsAfterStart(final VertxTestContext ctx) throws InterruptedException {
    final int numTopics = 2;
    final int numPartitions = 5;
    final int numTestRecordsPerTopic = 20;
    final Set<String> topics = IntStream.range(0, numTopics).mapToObj(i -> "test_" + i + "_" + UUID.randomUUID()).collect(Collectors.toSet());
    final VertxTestContext setup = new VertxTestContext();
    createTopics(topics, numPartitions).compose(v -> publishRecords(numTestRecordsPerTopic, "key_", topics)).onComplete(setup.succeedingThenComplete());
    assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }
    LOG.debug("topics created and test records published");
    // prepare consumer
    final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    final Promise<Void> allRecordsReceivedPromise = Promise.promise();
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final int totalExpectedMessages = numTopics * numTestRecordsPerTopic;
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        receivedRecords.add(record);
        if (receivedRecords.size() == totalExpectedMessages) {
            allRecordsReceivedPromise.complete();
        }
    };
    kafkaConsumer = new HonoKafkaConsumer(vertx, topics, recordHandler, consumerConfig);
    // start consumer
    kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
        ctx.verify(() -> {
            assertThat(receivedRecords.size()).isEqualTo(0);
        });
        allRecordsReceivedPromise.future().onComplete(ar -> {
            ctx.verify(() -> {
                assertThat(receivedRecords.size()).isEqualTo(totalExpectedMessages);
            });
            ctx.completeNow();
        });
    }));
}
Also used : LoggerFactory(org.slf4j.LoggerFactory) KafkaProducer(io.vertx.kafka.client.producer.KafkaProducer) Timeout(io.vertx.junit5.Timeout) AfterAll(org.junit.jupiter.api.AfterAll) MessagingType(org.eclipse.hono.util.MessagingType) IntegrationTestSupport(org.eclipse.hono.tests.IntegrationTestSupport) ExtendWith(org.junit.jupiter.api.extension.ExtendWith) BeforeAll(org.junit.jupiter.api.BeforeAll) Duration(java.time.Duration) Map(java.util.Map) TopicConfig(org.apache.kafka.common.config.TopicConfig) MethodSource(org.junit.jupiter.params.provider.MethodSource) Collection(java.util.Collection) Set(java.util.Set) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) UUID(java.util.UUID) Instant(java.time.Instant) VertxExtension(io.vertx.junit5.VertxExtension) Collectors(java.util.stream.Collectors) Future(io.vertx.core.Future) TopicPartition(io.vertx.kafka.client.common.TopicPartition) Test(org.junit.jupiter.api.Test) List(java.util.List) Stream(java.util.stream.Stream) Buffer(io.vertx.core.buffer.Buffer) KafkaConsumerRecord(io.vertx.kafka.client.consumer.KafkaConsumerRecord) Optional(java.util.Optional) KafkaConsumer(io.vertx.kafka.client.consumer.KafkaConsumer) Pattern(java.util.regex.Pattern) IntStream(java.util.stream.IntStream) VertxTestContext(io.vertx.junit5.VertxTestContext) HonoKafkaConsumer(org.eclipse.hono.client.kafka.consumer.HonoKafkaConsumer) RecordMetadata(io.vertx.kafka.client.producer.RecordMetadata) AtomicReference(java.util.concurrent.atomic.AtomicReference) ArrayList(java.util.ArrayList) CompositeFuture(io.vertx.core.CompositeFuture) ProducerConfig(org.apache.kafka.clients.producer.ProducerConfig) NewTopic(io.vertx.kafka.admin.NewTopic) Logger(org.slf4j.Logger) Promise(io.vertx.core.Promise) Vertx(io.vertx.core.Vertx) Truth.assertThat(com.google.common.truth.Truth.assertThat) AssumeMessagingSystem(org.eclipse.hono.tests.AssumeMessagingSystem) TimeUnit(java.util.concurrent.TimeUnit) AfterEach(org.junit.jupiter.api.AfterEach) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) CooperativeStickyAssignor(org.apache.kafka.clients.consumer.CooperativeStickyAssignor) Handler(io.vertx.core.Handler) KafkaProducerRecord(io.vertx.kafka.client.producer.KafkaProducerRecord) KafkaAdminClient(io.vertx.kafka.admin.KafkaAdminClient) VertxTestContext(io.vertx.junit5.VertxTestContext) ArrayList(java.util.ArrayList) KafkaConsumerRecord(io.vertx.kafka.client.consumer.KafkaConsumerRecord) HonoKafkaConsumer(org.eclipse.hono.client.kafka.consumer.HonoKafkaConsumer) Test(org.junit.jupiter.api.Test) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Timeout(io.vertx.junit5.Timeout)
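
The createTopics(...) and publishRecords(...) helpers used in the setup phase are not part of this excerpt. As an illustration only (not the actual helper from HonoKafkaConsumerIT), a createTopics variant based on the vert.x KafkaAdminClient could look like this, with adminConfig assumed to be a Map containing at least the bootstrap.servers entry:

Future<Void> createTopics(final Collection<String> topicNames, final int numPartitions) {
    final KafkaAdminClient adminClient = KafkaAdminClient.create(vertx, adminConfig);
    final List<NewTopic> newTopics = topicNames.stream()
            // one topic per name, with the requested partition count and a replication factor of 1
            .map(name -> new NewTopic(name, numPartitions, (short) 1))
            .collect(Collectors.toList());
    return adminClient.createTopics(newTopics)
            .onComplete(ar -> adminClient.close());
}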

Example 10 with TIMEOUT

use of io.vertx.junit5.Timeout in project hono by eclipse.

From the class KafkaBasedEventSenderIT, the method testProducerTopicMetricsGetRemovedOnTenantDeletion:

/**
 * Verifies that the event sender causes topic-specific metrics in its underlying Kafka producer to be removed
 * when a tenant-deletion notification is sent via the vert.x event bus.
 *
 * @param ctx The vert.x test context.
 * @throws InterruptedException if test execution gets interrupted.
 */
@Test
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testProducerTopicMetricsGetRemovedOnTenantDeletion(final VertxTestContext ctx) throws InterruptedException {
    final String tenantId = "MetricsRemovalTestTenant";
    final String tenantTopicName = new HonoTopic(HonoTopic.Type.EVENT, tenantId).toString();
    final VertxTestContext setup = new VertxTestContext();
    createTopic(tenantTopicName).compose(v -> KafkaBasedEventSender.start()).compose(v -> sendEvent(tenantId, "myDeviceId", "test")).onComplete(setup.succeedingThenComplete());
    assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }
    // GIVEN a started event sender that has already sent an event message
    // and the underlying Kafka producer having filled corresponding topic-specific metrics
    final var producerOptional = producerFactory.getProducer(EventConstants.EVENT_ENDPOINT);
    ctx.verify(() -> {
        assertThat(producerOptional.isPresent()).isTrue();
        assertThat(getTopicRelatedMetrics(producerOptional.get(), tenantTopicName)).isNotEmpty();
    });
    // WHEN sending a tenant-deleted notification for that tenant
    NotificationEventBusSupport.sendNotification(vertx, new TenantChangeNotification(LifecycleChange.DELETE, tenantId, Instant.now(), false));
    vertx.runOnContext(v -> {
        // THEN the metrics of the underlying producer don't contain any metrics regarding that topic
        ctx.verify(() -> assertThat(getTopicRelatedMetrics(producerOptional.get(), tenantTopicName)).isEmpty());
        ctx.completeNow();
    });
}
Also used : VertxTestContext(io.vertx.junit5.VertxTestContext) BeforeEach(org.junit.jupiter.api.BeforeEach) LifecycleChange(org.eclipse.hono.notification.deviceregistry.LifecycleChange) KafkaBasedEventSender(org.eclipse.hono.client.telemetry.kafka.KafkaBasedEventSender) LoggerFactory(org.slf4j.LoggerFactory) KafkaProducer(io.vertx.kafka.client.producer.KafkaProducer) Timeout(io.vertx.junit5.Timeout) ArrayList(java.util.ArrayList) AfterAll(org.junit.jupiter.api.AfterAll) MessagingType(org.eclipse.hono.util.MessagingType) IntegrationTestSupport(org.eclipse.hono.tests.IntegrationTestSupport) ExtendWith(org.junit.jupiter.api.extension.ExtendWith) BeforeAll(org.junit.jupiter.api.BeforeAll) Map(java.util.Map) MetricName(org.apache.kafka.common.MetricName) TenantChangeNotification(org.eclipse.hono.notification.deviceregistry.TenantChangeNotification) NewTopic(io.vertx.kafka.admin.NewTopic) Logger(org.slf4j.Logger) Collection(java.util.Collection) NoopTracerFactory(io.opentracing.noop.NoopTracerFactory) Promise(io.vertx.core.Promise) CachingKafkaProducerFactory(org.eclipse.hono.client.kafka.producer.CachingKafkaProducerFactory) Vertx(io.vertx.core.Vertx) RegistrationAssertion(org.eclipse.hono.util.RegistrationAssertion) Truth.assertThat(com.google.common.truth.Truth.assertThat) Instant(java.time.Instant) VertxExtension(io.vertx.junit5.VertxExtension) Collectors(java.util.stream.Collectors) AssumeMessagingSystem(org.eclipse.hono.tests.AssumeMessagingSystem) EventConstants(org.eclipse.hono.util.EventConstants) Future(io.vertx.core.Future) TenantObject(org.eclipse.hono.util.TenantObject) TimeUnit(java.util.concurrent.TimeUnit) Test(org.junit.jupiter.api.Test) HonoTopic(org.eclipse.hono.client.kafka.HonoTopic) List(java.util.List) AfterEach(org.junit.jupiter.api.AfterEach) Buffer(io.vertx.core.buffer.Buffer) NotificationEventBusSupport(org.eclipse.hono.notification.NotificationEventBusSupport) KafkaAdminClient(io.vertx.kafka.admin.KafkaAdminClient) VertxTestContext(io.vertx.junit5.VertxTestContext) HonoTopic(org.eclipse.hono.client.kafka.HonoTopic) TenantChangeNotification(org.eclipse.hono.notification.deviceregistry.TenantChangeNotification) Test(org.junit.jupiter.api.Test) Timeout(io.vertx.junit5.Timeout)
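
The getTopicRelatedMetrics(...) helper is not shown in this excerpt. As a sketch of the underlying idea (an assumption, not the actual helper from KafkaBasedEventSenderIT): topic-specific producer metrics carry a "topic" tag, so the metrics belonging to one topic can be selected by filtering on that tag, here against a plain Apache Kafka producer:

static Set<MetricName> getTopicRelatedMetrics(
        final org.apache.kafka.clients.producer.Producer<?, ?> producer,
        final String topicName) {
    // producer.metrics() returns all metrics of the producer; keep those whose "topic" tag matches
    return producer.metrics().keySet().stream()
            .filter(metricName -> topicName.equals(metricName.tags().get("topic")))
            .collect(Collectors.toSet());
}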

Aggregations

Timeout (io.vertx.junit5.Timeout): 75
VertxTestContext (io.vertx.junit5.VertxTestContext): 70
Test (org.junit.jupiter.api.Test): 69
TimeUnit (java.util.concurrent.TimeUnit): 64
Truth.assertThat (com.google.common.truth.Truth.assertThat): 62
Future (io.vertx.core.Future): 53
HttpURLConnection (java.net.HttpURLConnection): 50
Handler (io.vertx.core.Handler): 47
IntegrationTestSupport (org.eclipse.hono.tests.IntegrationTestSupport): 47
BeforeEach (org.junit.jupiter.api.BeforeEach): 45
Buffer (io.vertx.core.buffer.Buffer): 44
Promise (io.vertx.core.Promise): 43
Vertx (io.vertx.core.Vertx): 38
JsonObject (io.vertx.core.json.JsonObject): 37
VertxExtension (io.vertx.junit5.VertxExtension): 37
ExtendWith (org.junit.jupiter.api.extension.ExtendWith): 37
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 36
MethodSource (org.junit.jupiter.params.provider.MethodSource): 34
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 33
Tenant (org.eclipse.hono.service.management.tenant.Tenant): 33
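
All of the examples above bound the runtime of asynchronous tests with the io.vertx.junit5.Timeout annotation. A minimal standalone usage sketch with vertx-junit5 (class name, method name and timer values are made up for illustration):

@ExtendWith(VertxExtension.class)
class TimeoutUsageExampleTest {

    @Test
    @Timeout(value = 5, timeUnit = TimeUnit.SECONDS) // fail the test if the context is not completed within 5 seconds
    void testAsyncOperationCompletesInTime(final Vertx vertx, final VertxTestContext ctx) {
        // simulate an asynchronous operation that eventually completes the test context
        vertx.setTimer(100, timerId -> ctx.completeNow());
    }
}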