Use of org.eclipse.hono.client.kafka.HonoTopic in project hono by eclipse.
The class KafkaBasedEventSenderIT, method testProducerTopicMetricsGetRemovedOnTenantDeletion.
/**
 * Verifies that the event sender causes topic-specific metrics in its underlying Kafka producer to be removed
 * when a tenant-deletion notification is sent via the vert.x event bus.
 *
 * @param ctx The vert.x test context.
 * @throws InterruptedException if test execution gets interrupted.
 */
@Test
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testProducerTopicMetricsGetRemovedOnTenantDeletion(final VertxTestContext ctx) throws InterruptedException {
    final String tenantId = "MetricsRemovalTestTenant";
    final String tenantTopicName = new HonoTopic(HonoTopic.Type.EVENT, tenantId).toString();
    final VertxTestContext setup = new VertxTestContext();

    createTopic(tenantTopicName)
            .compose(v -> kafkaBasedEventSender.start())
            .compose(v -> sendEvent(tenantId, "myDeviceId", "test"))
            .onComplete(setup.succeedingThenComplete());

    assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }

    // GIVEN a started event sender that has already sent an event message
    // and an underlying Kafka producer that has filled the corresponding topic-specific metrics
    final var producerOptional = producerFactory.getProducer(EventConstants.EVENT_ENDPOINT);
    ctx.verify(() -> {
        assertThat(producerOptional.isPresent()).isTrue();
        assertThat(getTopicRelatedMetrics(producerOptional.get(), tenantTopicName)).isNotEmpty();
    });

    // WHEN sending a tenant-deleted notification for that tenant
    NotificationEventBusSupport.sendNotification(vertx,
            new TenantChangeNotification(LifecycleChange.DELETE, tenantId, Instant.now(), false));

    vertx.runOnContext(v -> {
        // THEN the metrics of the underlying producer don't contain any metrics regarding that topic
        ctx.verify(() -> assertThat(getTopicRelatedMetrics(producerOptional.get(), tenantTopicName)).isEmpty());
        ctx.completeNow();
    });
}
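For context, a HonoTopic's string form follows Hono's topic naming scheme: the "hono" prefix, the topic type, and the tenant identifier. A minimal sketch, with an illustrative tenant identifier:

// The tenant-specific event topic asserted above is just the HonoTopic's string form;
// "my-tenant" is an illustrative value, not taken from the test.
final String eventTopic = new HonoTopic(HonoTopic.Type.EVENT, "my-tenant").toString();
// expected to yield "hono.event.my-tenant"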
Use of org.eclipse.hono.client.kafka.HonoTopic in project hono by eclipse.
The class IntegrationTestSupport, method deleteTenantKafkaTopics.
private Future<Void> deleteTenantKafkaTopics(final List<String> tenantsToDelete) {
    if (!isUsingKafkaMessaging()) {
        return Future.succeededFuture();
    }
    // Topics for the given tenants are not deleted right away: the offset-commit interval of the
    // Command Router's command consumer (5s) may not have elapsed yet, and the topics must not be
    // deleted before the consumer has committed the corresponding offsets (otherwise the consumer
    // will retry the commit for some time and be blocked during that time).
    final Promise<Void> tenantTopicsDeletionDonePromise = Promise.promise();
    tenantsToDeleteTopicsForAfterDelay.add(Pair.of(tenantsToDelete, Instant.now()));
    final List<String> tenantsToDeleteTopicsForNow = new LinkedList<>();
    final Instant nowMinusCommitInterval = Instant.now().minus(
            // commit interval with added buffer
            AsyncHandlingAutoCommitKafkaConsumer.DEFAULT_COMMIT_INTERVAL.plusSeconds(1));
    final Iterator<Pair<List<String>, Instant>> iterator = tenantsToDeleteTopicsForAfterDelay.iterator();
    while (iterator.hasNext()) {
        final Pair<List<String>, Instant> tenantsToDeleteAndInstantPair = iterator.next();
        if (tenantsToDeleteAndInstantPair.two().isBefore(nowMinusCommitInterval)) {
            tenantsToDeleteTopicsForNow.addAll(tenantsToDeleteAndInstantPair.one());
            iterator.remove();
        }
    }
    if (!tenantsToDeleteTopicsForNow.isEmpty()) {
        final KafkaAdminClient adminClient = KafkaAdminClient.create(vertx,
                getKafkaAdminClientConfig().getAdminClientConfig("test"));
        final Promise<Void> adminClientClosedPromise = Promise.promise();
        LOGGER.debug("deleting topics for temporary tenants {}", tenantsToDeleteTopicsForNow);
        final List<String> topicNames = tenantsToDeleteTopicsForNow.stream()
                .flatMap(tenant -> HonoTopic.Type.MESSAGING_API_TYPES.stream()
                        .map(type -> new HonoTopic(type, tenant).toString()))
                .collect(Collectors.toList());
        adminClient.deleteTopics(topicNames, ar -> {
            // note that the result may well have failed with an UnknownTopicOrPartitionException here,
            // since not all tenant topics may have been created before
            LOGGER.debug("done triggering deletion of topics for tenants {}", tenantsToDeleteTopicsForNow);
            adminClient.close(adminClientClosedPromise);
        });
        adminClientClosedPromise.future()
                .recover(thr -> {
                    LOGGER.warn("error closing Kafka admin client", thr);
                    return Future.succeededFuture();
                })
                .onComplete(tenantTopicsDeletionDonePromise);
    } else {
        tenantTopicsDeletionDonePromise.complete();
    }
    return tenantTopicsDeletionDonePromise.future();
}
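The deferral above is a small timestamped delay queue: each deletion request is recorded with its creation instant and only acted upon once it is older than the consumer's commit interval plus a buffer. A hedged, self-contained sketch of that pattern, using only java.time and java.util types; the method name and the hard-coded 5-second interval (standing in for AsyncHandlingAutoCommitKafkaConsumer.DEFAULT_COMMIT_INTERVAL) are illustrative:

// Sketch: collect the tenants whose deletion request is older than the commit interval.
static List<String> dueForDeletion(final Deque<Map.Entry<List<String>, Instant>> pending) {
    // commit interval plus a one-second buffer; the 5s value is an assumption for this sketch
    final Instant cutoff = Instant.now().minus(Duration.ofSeconds(5).plusSeconds(1));
    final List<String> dueNow = new ArrayList<>();
    pending.removeIf(entry -> {
        if (entry.getValue().isBefore(cutoff)) {
            dueNow.addAll(entry.getKey()); // old enough: topics can safely be deleted now
            return true;
        }
        return false;
    });
    return dueNow;
}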
Use of org.eclipse.hono.client.kafka.HonoTopic in project hono by eclipse.
The class KafkaBasedCommandSender, method subscribeForCommandResponse.
private Future<Void> subscribeForCommandResponse(final String tenantId, final Span span) {
    if (commandResponseConsumers.get(tenantId) != null) {
        LOGGER.debug("command response consumer already exists for tenant [{}]", tenantId);
        span.log("command response consumer already exists");
        return Future.succeededFuture();
    }
    final Map<String, String> consumerConfig = this.consumerConfig
            .getConsumerConfig(HonoTopic.Type.COMMAND_RESPONSE.toString());
    final String autoOffsetResetConfigValue = consumerConfig.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG);
    // Ensure that 'auto.offset.reset' is always set to 'latest'.
    if (autoOffsetResetConfigValue != null && !autoOffsetResetConfigValue.equals("latest")) {
        LOGGER.warn("[auto.offset.reset] is set to a value other than [latest]; it will be ignored and internally set to [latest]");
    }
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    // Use a unique group-id so that all command responses for this tenant are received by this consumer.
    // That way the responses can be correlated with the command that has been sent.
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, tenantId + "-" + UUID.randomUUID());
    final String topic = new HonoTopic(HonoTopic.Type.COMMAND_RESPONSE, tenantId).toString();
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        getCommandResponseHandler(tenantId).handle(new KafkaDownstreamMessage(record));
    };
    final HonoKafkaConsumer consumer = new HonoKafkaConsumer(vertx, Set.of(topic), recordHandler, consumerConfig);
    consumer.setPollTimeout(Duration.ofMillis(this.consumerConfig.getPollTimeout()));
    Optional.ofNullable(kafkaConsumerSupplier).ifPresent(consumer::setKafkaConsumerSupplier);
    return consumer.start()
            .recover(error -> {
                LOGGER.debug("error creating command response consumer for tenant [{}]", tenantId, error);
                TracingHelper.logError(span, "error creating command response consumer", error);
                return Future.failedFuture(error);
            })
            .onSuccess(v -> {
                LOGGER.debug("created command response consumer for tenant [{}]", tenantId);
                span.log("created command response consumer");
                commandResponseConsumers.put(tenantId, consumer);
            });
}
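Two settings matter here: auto.offset.reset is forced to "latest" so the consumer only sees responses to commands sent after it subscribed, and a random group.id puts the consumer alone in its consumer group, so it is assigned all partitions of the tenant's response topic and misses no response. A hedged sketch of just those two entries (the config keys are the standard Kafka ones; the tenant id is illustrative):

// Sketch of the consumer settings enforced above; "my-tenant" is an illustrative value.
final Map<String, String> config = new HashMap<>();
// read from the end of the topic: older responses cannot belong to commands sent from here on
config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
// a unique group id means a single-member group that gets all partitions, hence all responses
config.put(ConsumerConfig.GROUP_ID_CONFIG, "my-tenant-" + UUID.randomUUID());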
Use of org.eclipse.hono.client.kafka.HonoTopic in project hono by eclipse.
The class KafkaBasedCommandSenderTest, method commandResponseRecord.
private ConsumerRecord<String, Buffer> commandResponseRecord(final String tenantId, final String deviceId,
        final String correlationId, final Integer status, final Buffer payload) {
    final List<Header> headers = new ArrayList<>();
    headers.add(new RecordHeader(MessageHelper.APP_PROPERTY_TENANT_ID, tenantId.getBytes()));
    headers.add(new RecordHeader(MessageHelper.APP_PROPERTY_DEVICE_ID, deviceId.getBytes()));
    headers.add(new RecordHeader(MessageHelper.SYS_PROPERTY_CORRELATION_ID, correlationId.getBytes()));
    if (status != null) {
        headers.add(new RecordHeader(MessageHelper.APP_PROPERTY_STATUS, String.valueOf(status).getBytes()));
    }
    return new ConsumerRecord<>(new HonoTopic(HonoTopic.Type.COMMAND_RESPONSE, tenantId).toString(), 0, 0, -1L,
            TimestampType.NO_TIMESTAMP_TYPE, -1L, -1, -1, deviceId, payload,
            new RecordHeaders(headers.toArray(Header[]::new)));
}
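In isolation, the helper would be used roughly like this (a hedged sketch with made-up argument values; the actual call appears in sendCommandAndReceiveResponse below):

// Illustrative call: fabricate a successful (200) response for a known correlation id.
final ConsumerRecord<String, Buffer> response =
        commandResponseRecord("my-tenant", "my-device", "my-correlation-id", 200, Buffer.buffer("ok"));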
Use of org.eclipse.hono.client.kafka.HonoTopic in project hono by eclipse.
The class KafkaBasedCommandSenderTest, method sendCommandAndReceiveResponse.
private void sendCommandAndReceiveResponse(final VertxTestContext ctx, final String correlationId,
        final Integer responseStatus, final String responsePayload, final boolean expectSuccess,
        final int expectedStatusCode) {
    final Context context = vertx.getOrCreateContext();
    final Promise<Void> onProducerRecordSentPromise = Promise.promise();
    mockProducer = new MockProducer<>(true, new StringSerializer(), new BufferSerializer()) {
        @Override
        public synchronized java.util.concurrent.Future<RecordMetadata> send(
                final ProducerRecord<String, Buffer> record, final Callback callback) {
            return super.send(record, (metadata, exception) -> {
                callback.onCompletion(metadata, exception);
                context.runOnContext(v -> {
                    // decouple from the current execution in order to run after the "send" result handler
                    onProducerRecordSentPromise.complete();
                });
            });
        }
    };
    final var producerFactory = CachingKafkaProducerFactory.testFactory(vertx,
            (n, c) -> KafkaClientUnitTestHelper.newKafkaProducer(mockProducer));
    commandSender = new KafkaBasedCommandSender(vertx, consumerConfig, producerFactory, producerConfig,
            NoopTracerFactory.create());
    final Map<String, Object> headerProperties = new HashMap<>();
    headerProperties.put("appKey", "appValue");
    final String command = "setVolume";
    final ConsumerRecord<String, Buffer> commandResponseRecord = commandResponseRecord(tenantId, deviceId,
            correlationId, responseStatus, Buffer.buffer(responsePayload));
    final String responseTopic = new HonoTopic(HonoTopic.Type.COMMAND_RESPONSE, tenantId).toString();
    final TopicPartition responseTopicPartition = new TopicPartition(responseTopic, 0);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(responseTopicPartition));
    mockConsumer.updatePartitions(responseTopicPartition, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.updateBeginningOffsets(Map.of(responseTopicPartition, 0L));
    mockConsumer.updateEndOffsets(Map.of(responseTopicPartition, 0L));
    onProducerRecordSentPromise.future().onComplete(ar -> {
        LOG.debug("producer record sent, add command response record to mockConsumer");
        // Send a command response with the same correlation id as that of the command
        mockConsumer.addRecord(commandResponseRecord);
    });
    // This correlation id is used for both the command and its response.
    commandSender.setCorrelationIdSupplier(() -> correlationId);
    commandSender.setKafkaConsumerSupplier(() -> mockConsumer);
    context.runOnContext(v -> {
        // Send a command to the device
        commandSender.sendCommand(tenantId, deviceId, command, "text/plain", Buffer.buffer("test"), headerProperties)
                .onComplete(ar -> {
                    ctx.verify(() -> {
                        if (expectSuccess) {
                            // assert that the send operation succeeded
                            assertThat(ar.succeeded()).isTrue();
                            // verify the command response that has been received
                            final DownstreamMessage<KafkaMessageContext> response = ar.result();
                            assertThat(response.getDeviceId()).isEqualTo(deviceId);
                            assertThat(response.getStatus()).isEqualTo(responseStatus);
                            assertThat(response.getPayload().toString()).isEqualTo(responsePayload);
                        } else {
                            // assert that the send operation failed
                            assertThat(ar.succeeded()).isFalse();
                            assertThat(ar.cause()).isInstanceOf(ServiceInvocationException.class);
                            assertThat(((ServiceInvocationException) ar.cause()).getErrorCode())
                                    .isEqualTo(expectedStatusCode);
                            assertThat(ar.cause().getMessage()).isEqualTo(responsePayload);
                        }
                    });
                    ctx.completeNow();
                    mockConsumer.close();
                    commandSender.stop();
                });
    });
}
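A test method driving this helper might look as follows; this is a hedged sketch, and the method name and argument values are hypothetical rather than taken from the class:

// Hypothetical test: a command whose response reports status 200 should succeed.
@Test
void testSendCommandSucceedsForOkResponse(final VertxTestContext ctx) {
    sendCommandAndReceiveResponse(ctx, "my-correlation-id", 200, "ok", true, 200);
}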