Use of the @Timeout annotation in project hono by eclipse.
Class CommandAndControlAmqpIT, method testSendCommandFailsWhenNoCredit.
/**
* Verifies that the adapter immediately forwards the <em>released</em> disposition
* if there is no credit left for sending the command to the device.
* <p>
* If Kafka is used, this means a corresponding error command response is published.
*
* @param endpointConfig The endpoints to use for sending/receiving commands.
* @param ctx The vert.x test context.
* @throws InterruptedException if not all commands and responses are exchanged in time.
*/
@ParameterizedTest(name = IntegrationTestSupport.PARAMETERIZED_TEST_NAME_PATTERN)
@MethodSource("allCombinations")
@Timeout(timeUnit = TimeUnit.SECONDS, value = 10)
public void testSendCommandFailsWhenNoCredit(
        final AmqpCommandEndpointConfiguration endpointConfig,
        final VertxTestContext ctx) throws InterruptedException {

    final String commandTargetDeviceId = endpointConfig.isSubscribeAsGateway()
            ? helper.setupGatewayDeviceBlocking(tenantId, deviceId, 5)
            : deviceId;
    final String firstCommandSubject = "firstCommandSubject";
    final Promise<Void> firstCommandReceived = Promise.promise();

    final VertxTestContext setup = new VertxTestContext();
    final Checkpoint setupDone = setup.checkpoint();
    final Checkpoint preconditions = setup.checkpoint(1);

    connectToAdapter(tenantId, deviceId, password, () -> createEventConsumer(tenantId, msg -> {
        // expect empty notification with TTD -1
        setup.verify(() -> assertThat(msg.getContentType())
                .isEqualTo(EventConstants.CONTENT_TYPE_EMPTY_NOTIFICATION));
        final TimeUntilDisconnectNotification notification =
                msg.getTimeUntilDisconnectNotification().orElse(null);
        log.info("received notification [{}]", notification);
        setup.verify(() -> assertThat(notification).isNotNull());
        if (notification.getTtd() == -1) {
            preconditions.flag();
        }
    }))
    .compose(con -> subscribeToCommands(endpointConfig, tenantId, commandTargetDeviceId))
    .onSuccess(recv -> {
        recv.handler((delivery, msg) -> {
            log.info("received command [name: {}, reply-to: {}, correlation-id: {}]",
                    msg.getSubject(), msg.getReplyTo(), msg.getCorrelationId());
            ctx.verify(() -> {
                assertThat(msg.getSubject()).isEqualTo(firstCommandSubject);
            });
            firstCommandReceived.complete();
            ProtonHelper.accepted(delivery, true);
            // don't send credits
        });
        // just give 1 initial credit
        recv.flow(1);
    })
    .onComplete(setup.succeeding(v -> setupDone.flag()));

    assertWithMessage("setup of adapter finished within %s seconds",
            IntegrationTestSupport.getTestSetupTimeout())
            .that(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS))
            .isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }

    // send first command
    helper.sendOneWayCommand(tenantId, commandTargetDeviceId, firstCommandSubject,
            "text/plain", Buffer.buffer("cmd"), helper.getSendCommandTimeout(true))
        .onFailure(ctx::failNow)
        .compose(ok -> {
            log.info("sent first command [subject: {}]", firstCommandSubject);
            return firstCommandReceived.future();
        })
        .compose(ok -> helper.sendCommand(tenantId, commandTargetDeviceId, "secondCommandSubject",
                "text/plain", Buffer.buffer("cmd"), helper.getSendCommandTimeout(false)))
        .onComplete(ctx.failing(t -> {
            ctx.verify(() -> {
                assertThat(t).isInstanceOf(ServerErrorException.class);
                assertThat(((ServerErrorException) t).getErrorCode())
                        .isEqualTo(HttpURLConnection.HTTP_UNAVAILABLE);
                // with no explicit credit check, the AMQP adapter would just run into the
                // "waiting for delivery update" timeout (after 1s) and the error here would be
                // caused by a request timeout in the sendOneWayCommand() method above
                assertThat(t).isNotInstanceOf(SendMessageTimeoutException.class);
                assertThat(t.getMessage()).doesNotContain("timed out");
            });
            ctx.completeNow();
        }));
}
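The decisive detail in this test is manual AMQP link-credit management: recv.flow(1) grants a single credit and the message handler deliberately never replenishes it, so the adapter runs out of credit for the second command and fails fast with a 503 instead of waiting for a delivery-update timeout. The following standalone sketch shows the same manual-credit pattern with vertx-proton; the host, port, and receiver address are illustrative assumptions, not values from the test above.

import io.vertx.core.Vertx;
import io.vertx.proton.ProtonClient;
import io.vertx.proton.ProtonHelper;
import io.vertx.proton.ProtonReceiver;

public class ManualCreditReceiver {

    public static void main(final String[] args) {
        final Vertx vertx = Vertx.vertx();
        final ProtonClient client = ProtonClient.create(vertx);
        // host/port are placeholders for an AMQP 1.0 endpoint
        client.connect("localhost", 5672, res -> {
            if (res.failed()) {
                res.cause().printStackTrace();
                return;
            }
            final var con = res.result().open();
            // the address is a made-up example, not Hono's actual command address
            final ProtonReceiver receiver = con.createReceiver("command/DEFAULT_TENANT");
            // disable automatic credit replenishment so credit must be granted explicitly
            receiver.setPrefetch(0);
            receiver.handler((delivery, msg) -> {
                System.out.println("got command: " + msg.getSubject());
                ProtonHelper.accepted(delivery, true);
                // deliberately not calling receiver.flow(1) again:
                // the sender is now out of credit, just like in the test above
            });
            receiver.openHandler(r -> receiver.flow(1)); // grant exactly one credit
            receiver.open();
        });
    }
}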
Use of the @Timeout annotation in project hono by eclipse.
Class HonoKafkaConsumerIT, method testConsumerAutoCreatesTopicAndReadsLatestRecordsPublishedAfterStart.
/**
 * Verifies that a HonoKafkaConsumer that subscribes to a topic which does not exist yet, and that is
 * configured with "latest" as its offset reset strategy, only receives those records on the auto-created
 * topic that are published after the consumer's <em>start()</em> method has completed.
*
* @param partitionAssignmentStrategy The partition assignment strategy to use for the consumer.
* @param ctx The vert.x test context.
*/
@ParameterizedTest
@MethodSource("partitionAssignmentStrategies")
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerAutoCreatesTopicAndReadsLatestRecordsPublishedAfterStart(
        final String partitionAssignmentStrategy, final VertxTestContext ctx) {

    // prepare consumer
    final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig()
            .getConsumerConfig("test");
    applyPartitionAssignmentStrategy(consumerConfig, partitionAssignmentStrategy);
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");

    final AtomicReference<Promise<Void>> nextRecordReceivedPromiseRef = new AtomicReference<>();
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        receivedRecords.add(record);
        Optional.ofNullable(nextRecordReceivedPromiseRef.get()).ifPresent(Promise::complete);
    };
    final String topic = "test_" + UUID.randomUUID();
    topicsToDeleteAfterTests.add(topic);
    kafkaConsumer = new HonoKafkaConsumer(vertx, Set.of(topic), recordHandler, consumerConfig);

    // start consumer
    kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
        ctx.verify(() -> {
            assertThat(receivedRecords.size()).isEqualTo(0);
        });
        final Promise<Void> nextRecordReceivedPromise = Promise.promise();
        nextRecordReceivedPromiseRef.set(nextRecordReceivedPromise);

        LOG.debug("consumer started, publish record to be received by the consumer");
        final String recordKey = "addedAfterStartKey";
        publish(topic, recordKey, Buffer.buffer("testPayload"));

        nextRecordReceivedPromise.future().onComplete(ar -> {
            ctx.verify(() -> {
                assertThat(receivedRecords.size()).isEqualTo(1);
                assertThat(receivedRecords.get(0).key()).isEqualTo(recordKey);
            });
            ctx.completeNow();
        });
    }));
}
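The test hinges on the semantics of auto.offset.reset=latest: a consumer group without committed offsets is positioned at the current end of the log, so only records published after the subscription is established are delivered. A minimal sketch of the same behavior with the plain Apache Kafka client follows; the broker address, group id, and topic name are placeholders.

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class LatestOffsetExample {

    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // a fresh group id guarantees there are no committed offsets yet
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "fresh-group-" + System.nanoTime());
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // with no committed offsets, "latest" positions the consumer at the log end:
        // records published before the position is established are not delivered
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("some-topic"));
            final ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(5));
            System.out.println("records received: " + records.count());
        }
    }
}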
Use of the @Timeout annotation in project hono by eclipse.
Class HonoKafkaConsumerIT, method testConsumerReadsAllRecordsForDynamicallyCreatedTopics.
/**
 * Verifies that a HonoKafkaConsumer configured with "latest" as its offset reset strategy and a topic
 * pattern subscription receives records published after multiple
 * <em>ensureTopicIsAmongSubscribedTopicPatternTopics()</em> invocations have completed.
*
* @param partitionAssignmentStrategy The partition assignment strategy to use for the consumer.
* @param ctx The vert.x test context.
* @throws InterruptedException if test execution gets interrupted.
*/
@ParameterizedTest
@MethodSource("partitionAssignmentStrategies")
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsAllRecordsForDynamicallyCreatedTopics(
        final String partitionAssignmentStrategy, final VertxTestContext ctx) throws InterruptedException {

    final String patternPrefix = "test_" + UUID.randomUUID() + "_";
    final int numTopicsAndRecords = 3;
    final Pattern topicPattern = Pattern.compile(Pattern.quote(patternPrefix) + ".*");

    // prepare consumer
    final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig()
            .getConsumerConfig("test");
    applyPartitionAssignmentStrategy(consumerConfig, partitionAssignmentStrategy);
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");

    final Promise<Void> allRecordsReceivedPromise = Promise.promise();
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        receivedRecords.add(record);
        if (receivedRecords.size() == numTopicsAndRecords) {
            allRecordsReceivedPromise.complete();
        }
    };
    kafkaConsumer = new HonoKafkaConsumer(vertx, topicPattern, recordHandler, consumerConfig);

    // start consumer
    kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
        ctx.verify(() -> {
            assertThat(receivedRecords.size()).isEqualTo(0);
        });
        LOG.debug("consumer started, create new topics implicitly by invoking ensureTopicIsAmongSubscribedTopicPatternTopics()");
        final String recordKey = "addedAfterStartKey";
        for (int i = 0; i < numTopicsAndRecords; i++) {
            final String topic = patternPrefix + i;
            kafkaConsumer.ensureTopicIsAmongSubscribedTopicPatternTopics(topic)
                .onComplete(ctx.succeeding(v2 -> {
                    LOG.debug("publish record to be received by the consumer");
                    publish(topic, recordKey, Buffer.buffer("testPayload"));
                }));
        }
        allRecordsReceivedPromise.future().onComplete(ar -> {
            ctx.verify(() -> {
                assertThat(receivedRecords.size()).isEqualTo(numTopicsAndRecords);
                receivedRecords.forEach(record -> assertThat(record.key()).isEqualTo(recordKey));
            });
            ctx.completeNow();
        });
    }));

    if (!ctx.awaitCompletion(9, TimeUnit.SECONDS)) {
        ctx.failNow(new IllegalStateException(String.format(
                "timeout waiting for expected number of records (%d) to be received; received records: %d",
                numTopicsAndRecords, receivedRecords.size())));
    }
}
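A topic-pattern subscription is resolved against the broker metadata the consumer currently knows about, so a matching topic created after the subscription is normally only picked up on a later metadata refresh; ensureTopicIsAmongSubscribedTopicPatternTopics() lets the test wait until the new topic is actually part of the subscription before publishing to it. As a rough illustration with the plain Apache Kafka client, a pattern subscription plus a shortened metadata refresh interval achieves a similar effect; the broker address, topic prefix, and refresh value below are assumptions.

import java.time.Duration;
import java.util.Properties;
import java.util.regex.Pattern;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class PatternSubscriptionExample {

    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "pattern-group");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // shorten the metadata refresh interval so newly created matching topics
        // are picked up sooner (the default is 5 minutes)
        props.put(ConsumerConfig.METADATA_MAX_AGE_CONFIG, "5000");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // subscribe to every topic whose name starts with the given prefix
            consumer.subscribe(Pattern.compile(Pattern.quote("test_mytenant_") + ".*"));
            for (int i = 0; i < 30; i++) {
                consumer.poll(Duration.ofSeconds(1))
                        .forEach(r -> System.out.println(r.topic() + ": " + r.key()));
            }
        }
    }
}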
Use of the @Timeout annotation in project hono by eclipse.
Class HonoKafkaConsumerIT, method testConsumerReadsAllRecordsAfterStart.
/**
 * Verifies that a HonoKafkaConsumer configured with "earliest" as its offset reset strategy receives all
 * records already published to its topics once the consumer <em>start()</em> method has completed.
*
* @param ctx The vert.x test context.
* @throws InterruptedException if test execution gets interrupted.
*/
@Test
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsAllRecordsAfterStart(final VertxTestContext ctx) throws InterruptedException {

    final int numTopics = 2;
    final int numPartitions = 5;
    final int numTestRecordsPerTopic = 20;

    final Set<String> topics = IntStream.range(0, numTopics)
            .mapToObj(i -> "test_" + i + "_" + UUID.randomUUID())
            .collect(Collectors.toSet());

    final VertxTestContext setup = new VertxTestContext();
    createTopics(topics, numPartitions)
            .compose(v -> publishRecords(numTestRecordsPerTopic, "key_", topics))
            .onComplete(setup.succeedingThenComplete());

    assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }
    LOG.debug("topics created and test records published");

    // prepare consumer
    final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig()
            .getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    final Promise<Void> allRecordsReceivedPromise = Promise.promise();
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final int totalExpectedMessages = numTopics * numTestRecordsPerTopic;
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        receivedRecords.add(record);
        if (receivedRecords.size() == totalExpectedMessages) {
            allRecordsReceivedPromise.complete();
        }
    };
    kafkaConsumer = new HonoKafkaConsumer(vertx, topics, recordHandler, consumerConfig);

    // start consumer
    kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
        ctx.verify(() -> {
            assertThat(receivedRecords.size()).isEqualTo(0);
        });
        allRecordsReceivedPromise.future().onComplete(ar -> {
            ctx.verify(() -> {
                assertThat(receivedRecords.size()).isEqualTo(totalExpectedMessages);
            });
            ctx.completeNow();
        });
    }));
}
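The createTopics(topics, numPartitions) helper used in the setup phase is not shown on this page. A hypothetical equivalent using the Kafka Admin API could look as follows; the broker address and replication factor are assumptions.

import java.util.Properties;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

public class TopicCreationExample {

    public static void main(final String[] args) throws Exception {
        final Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        // mirror the test's parameters: a set of topics, each with 5 partitions
        final Set<String> topics = Set.of("test_0_example", "test_1_example");
        final int numPartitions = 5;
        final short replicationFactor = 1; // single-node dev broker assumed

        try (Admin admin = Admin.create(props)) {
            admin.createTopics(topics.stream()
                    .map(name -> new NewTopic(name, numPartitions, replicationFactor))
                    .collect(Collectors.toList()))
                .all()
                .get(); // block until the broker reports all topics as created
        }
    }
}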
Use of the @Timeout annotation in project hono by eclipse.
Class KafkaBasedEventSenderIT, method testProducerTopicMetricsGetRemovedOnTenantDeletion.
/**
* Verifies that the event sender causes topic-specific metrics in its underlying Kafka producer to be removed
* when a tenant-deletion notification is sent via the vert.x event bus.
*
 * @param ctx The vert.x test context.
* @throws InterruptedException if test execution gets interrupted.
*/
@Test
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testProducerTopicMetricsGetRemovedOnTenantDeletion(final VertxTestContext ctx) throws InterruptedException {

    final String tenantId = "MetricsRemovalTestTenant";
    final String tenantTopicName = new HonoTopic(HonoTopic.Type.EVENT, tenantId).toString();

    final VertxTestContext setup = new VertxTestContext();
    createTopic(tenantTopicName)
            .compose(v -> kafkaBasedEventSender.start())
            .compose(v -> sendEvent(tenantId, "myDeviceId", "test"))
            .onComplete(setup.succeedingThenComplete());

    assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }

    // GIVEN a started event sender that has already sent an event message
    // and the underlying Kafka producer having filled corresponding topic-specific metrics
    final var producerOptional = producerFactory.getProducer(EventConstants.EVENT_ENDPOINT);
    ctx.verify(() -> {
        assertThat(producerOptional.isPresent()).isTrue();
        assertThat(getTopicRelatedMetrics(producerOptional.get(), tenantTopicName)).isNotEmpty();
    });

    // WHEN sending a tenant-deleted notification for that tenant
    NotificationEventBusSupport.sendNotification(vertx,
            new TenantChangeNotification(LifecycleChange.DELETE, tenantId, Instant.now(), false));

    vertx.runOnContext(v -> {
        // THEN the metrics of the underlying producer don't contain any metrics regarding that topic
        ctx.verify(() -> assertThat(getTopicRelatedMetrics(producerOptional.get(), tenantTopicName)).isEmpty());
        ctx.completeNow();
    });
}
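The getTopicRelatedMetrics() helper is likewise not shown on this page. Kafka producers expose topic-level metrics (e.g. record-send-rate) tagged with the topic name, so a plausible implementation filters the producer's metrics() map by that tag. The following sketch is an assumption about the helper's shape, written against the plain Apache Kafka Producer interface rather than the vert.x wrapper used in the test.

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;

final class ProducerMetricsSupport {

    // hypothetical helper: collect the names of all metrics that carry
    // a "topic" tag matching the given topic
    static List<MetricName> getTopicRelatedMetrics(final Producer<?, ?> producer, final String topicName) {
        final Map<MetricName, ? extends Metric> metrics = producer.metrics();
        return metrics.keySet().stream()
                .filter(metricName -> topicName.equals(metricName.tags().get("topic")))
                .collect(Collectors.toList());
    }
}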