Use of org.eclipse.hono.commandrouter.impl.kafka.KafkaBasedCommandConsumerFactoryImpl in project hono by Eclipse.
The class KafkaBasedCommandConsumerFactoryImplIT, method getKafkaBasedCommandConsumerFactory.
private KafkaBasedCommandConsumerFactoryImpl getKafkaBasedCommandConsumerFactory(
        final Supplier<Future<Void>> targetAdapterInstanceGetterCompletionFutureSupplier,
        final String tenantToHandleCommandsFor) {

    final KafkaProducerFactory<String, Buffer> producerFactory = CachingKafkaProducerFactory.sharedFactory(vertx);
    final TenantClient tenantClient = getTenantClient();
    final CommandTargetMapper commandTargetMapper = new CommandTargetMapper() {
        @Override
        public Future<JsonObject> getTargetGatewayAndAdapterInstance(
                final String tenantId,
                final String deviceId,
                final SpanContext context) {

            final JsonObject jsonObject = new JsonObject();
            jsonObject.put(DeviceConnectionConstants.FIELD_ADAPTER_INSTANCE_ID, adapterInstanceId);
            jsonObject.put(DeviceConnectionConstants.FIELD_PAYLOAD_DEVICE_ID, deviceId);
            if (!tenantId.equals(tenantToHandleCommandsFor)) {
                return Future.failedFuture("ignoring command for other tenant " + tenantId);
            }
            if (targetAdapterInstanceGetterCompletionFutureSupplier == null) {
                return Future.succeededFuture(jsonObject);
            }
            return targetAdapterInstanceGetterCompletionFutureSupplier.get().map(jsonObject);
        }
    };
    final Span span = TracingMockSupport.mockSpan();
    final Tracer tracer = TracingMockSupport.mockTracer(span);

    final MessagingKafkaConsumerConfigProperties kafkaConsumerConfig = new MessagingKafkaConsumerConfigProperties();
    kafkaConsumerConfig.setConsumerConfig(Map.of(
            ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, IntegrationTestSupport.DOWNSTREAM_BOOTSTRAP_SERVERS));

    final CommandRouterMetrics metrics = mock(CommandRouterMetrics.class);
    when(metrics.startTimer()).thenReturn(Timer.start());

    final var kafkaBasedCommandConsumerFactoryImpl = new KafkaBasedCommandConsumerFactoryImpl(
            vertx,
            tenantClient,
            commandTargetMapper,
            producerFactory,
            IntegrationTestSupport.getKafkaProducerConfig(),
            IntegrationTestSupport.getKafkaProducerConfig(),
            kafkaConsumerConfig,
            metrics,
            NoopKafkaClientMetricsSupport.INSTANCE,
            tracer,
            null);
    kafkaBasedCommandConsumerFactoryImpl.setGroupId(commandRouterGroupId);
    componentsToStopAfterTest.add(kafkaBasedCommandConsumerFactoryImpl);
    return kafkaBasedCommandConsumerFactoryImpl;
}
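Both integration tests below call a createCommandConsumer(tenantId, consumerFactory) helper that is not part of these excerpts. A minimal sketch of what such a helper could look like, assuming it simply delegates to the factory's createCommandConsumer(String, SpanContext) method; the helper itself and the null span context are assumptions for illustration, not code from the project:

private Future<Void> createCommandConsumer(final String tenantId,
        final KafkaBasedCommandConsumerFactoryImpl consumerFactory) {
    // hypothetical helper: registers a command consumer for the given tenant so that the
    // factory starts consuming from the tenant-specific command topic; no tracing context is passed
    return consumerFactory.createCommandConsumer(tenantId, null);
}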
Use of org.eclipse.hono.commandrouter.impl.kafka.KafkaBasedCommandConsumerFactoryImpl in project hono by Eclipse.
The class KafkaBasedCommandConsumerFactoryImplIT, method testCommandsGetForwardedInIncomingOrder.
/**
 * Verifies that records, published on the tenant-specific Kafka command topic, get received by
 * the consumer created by the factory and get forwarded on the internal command topic in the
 * same order they were published.
 *
 * @param ctx The vert.x test context.
 * @throws InterruptedException if test execution gets interrupted.
 */
@Test
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testCommandsGetForwardedInIncomingOrder(final VertxTestContext ctx) throws InterruptedException {
    final String tenantId = "tenant_" + UUID.randomUUID();
    final VertxTestContext setup = new VertxTestContext();
    final int numTestCommands = 10;
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final Promise<Void> allRecordsReceivedPromise = Promise.promise();
    final List<String> receivedCommandSubjects = new ArrayList<>();
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        receivedRecords.add(record);
        LOG.trace("received {}", record);
        receivedCommandSubjects.add(KafkaRecordHelper.getSubject(record.headers()).orElse(""));
        if (receivedRecords.size() == numTestCommands) {
            allRecordsReceivedPromise.tryComplete();
        }
    };
    final Deque<Promise<Void>> completionPromisesQueue = new LinkedList<>();
    // don't let getting the target adapter instance finish immediately
    // - let the futures complete in the reverse order
    final Supplier<Future<Void>> targetAdapterInstanceGetterCompletionFutureSupplier = () -> {
        final Promise<Void> resultPromise = Promise.promise();
        completionPromisesQueue.addFirst(resultPromise);
        // complete all promises in reverse order when processing the last command
        if (completionPromisesQueue.size() == numTestCommands) {
            completionPromisesQueue.forEach(Promise::complete);
        }
        return resultPromise.future();
    };

    final Context vertxContext = vertx.getOrCreateContext();
    vertxContext.runOnContext(v0 -> {
        final HonoKafkaConsumer internalConsumer = getInternalCommandConsumer(recordHandler);
        final KafkaBasedCommandConsumerFactoryImpl consumerFactory = getKafkaBasedCommandConsumerFactory(
                targetAdapterInstanceGetterCompletionFutureSupplier, tenantId);
        CompositeFuture.join(internalConsumer.start(), consumerFactory.start())
                .compose(f -> createCommandConsumer(tenantId, consumerFactory))
                .onComplete(setup.succeedingThenComplete());
    });

    assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }
    LOG.debug("command consumer started");

    final List<String> sentCommandSubjects = new ArrayList<>();
    IntStream.range(0, numTestCommands).forEach(i -> {
        final String subject = "cmd_" + i;
        sentCommandSubjects.add(subject);
        sendOneWayCommand(tenantId, "myDeviceId", subject);
    });

    final long timerId = vertx.setTimer(8000, tid -> {
        LOG.info("received records:{}{}", System.lineSeparator(),
                receivedRecords.stream().map(Object::toString).collect(Collectors.joining("," + System.lineSeparator())));
        allRecordsReceivedPromise.tryFail(String.format(
                "only received %d out of %d expected messages after 8s", receivedRecords.size(), numTestCommands));
    });
    allRecordsReceivedPromise.future().onComplete(ctx.succeeding(v -> {
        vertx.cancelTimer(timerId);
        ctx.verify(() -> {
            assertThat(receivedCommandSubjects).isEqualTo(sentCommandSubjects);
        });
        ctx.completeNow();
    }));
}
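The sendOneWayCommand(tenantId, deviceId, subject) helper used above is likewise not shown. A rough sketch under the assumption that a vert.x KafkaProducer<String, Buffer> test field (here called kafkaProducer) is available and that the tenant-scoped command topic and the device_id/subject header names follow Hono's Kafka command conventions; all of these names are assumptions for illustration:

private void sendOneWayCommand(final String tenantId, final String deviceId, final String subject) {
    // publish a record on the tenant-specific command topic; using the device id as key keeps
    // commands for the same device in one partition, preserving their order
    final String topic = new HonoTopic(HonoTopic.Type.COMMAND, tenantId).toString();
    final KafkaProducerRecord<String, Buffer> record = KafkaProducerRecord.create(topic, deviceId, Buffer.buffer());
    record.addHeader("device_id", deviceId); // assumed header name
    record.addHeader("subject", subject);    // assumed header name
    kafkaProducer.send(record); // kafkaProducer: assumed KafkaProducer<String, Buffer> test field
}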
Use of org.eclipse.hono.commandrouter.impl.kafka.KafkaBasedCommandConsumerFactoryImpl in project hono by Eclipse.
The class KafkaBasedCommandConsumerFactoryImplIT, method testCommandsGetForwardedIfOneConsumerInstanceGetsClosed.
/**
 * Verifies that records, published on the tenant-specific Kafka command topic, get received
 * and forwarded by consumers created by factory instances even if one factory and its contained
 * consumer gets closed in the middle of processing some of the commands.
 *
 * @param ctx The vert.x test context.
 * @throws InterruptedException if test execution gets interrupted.
 */
@Test
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testCommandsGetForwardedIfOneConsumerInstanceGetsClosed(final VertxTestContext ctx) throws InterruptedException {
    final String tenantId = "tenant_" + UUID.randomUUID();
    final VertxTestContext setup = new VertxTestContext();
    // Scenario to test:
    // - first command gets sent, forwarded and received without any imposed delay
    // - second command gets sent, received by the factory consumer instance; processing gets blocked
    //   while trying to get the target adapter instance
    // - for the rest of the commands, retrieval of the target adapter instance is successful, but they won't
    //   get forwarded until processing of the second command is finished
    // - now the factory consumer gets closed and a new factory/consumer gets started; at that point
    //   also the processing of the second command gets finished
    //
    // Expected outcome:
    // - processing of the second command and all following commands by the first consumer gets aborted, so that
    //   these commands don't get forwarded on the internal command topic
    // - instead, the second consumer takes over at the offset of the first command (position must have been committed
    //   when closing the first consumer) and processes and forwards all commands starting with the second command
    final int numTestCommands = 10;
    final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
    final Promise<Void> firstRecordReceivedPromise = Promise.promise();
    final Promise<Void> allRecordsReceivedPromise = Promise.promise();
    final List<String> receivedCommandSubjects = new ArrayList<>();
    final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
        receivedRecords.add(record);
        LOG.trace("received {}", record);
        receivedCommandSubjects.add(KafkaRecordHelper.getSubject(record.headers()).orElse(""));
        if (receivedRecords.size() == 1) {
            firstRecordReceivedPromise.complete();
        }
        if (receivedRecords.size() == numTestCommands) {
            allRecordsReceivedPromise.tryComplete();
        }
    };
    final Promise<Void> firstConsumerAllGetAdapterInstanceInvocationsDone = Promise.promise();
    final LinkedList<Promise<Void>> firstConsumerGetAdapterInstancePromisesQueue = new LinkedList<>();
    // don't let getting the target adapter instance finish immediately
    final Supplier<Future<Void>> firstConsumerGetAdapterInstanceSupplier = () -> {
        final Promise<Void> resultPromise = Promise.promise();
        firstConsumerGetAdapterInstancePromisesQueue.addFirst(resultPromise);
        // don't complete the future for the second command here yet
        if (firstConsumerGetAdapterInstancePromisesQueue.size() != 2) {
            resultPromise.complete();
        }
        if (firstConsumerGetAdapterInstancePromisesQueue.size() == numTestCommands) {
            firstConsumerAllGetAdapterInstanceInvocationsDone.complete();
        }
        return resultPromise.future();
    };

    final AtomicReference<KafkaBasedCommandConsumerFactoryImpl> consumerFactory1Ref = new AtomicReference<>();
    final Context vertxContext = vertx.getOrCreateContext();
    vertxContext.runOnContext(v0 -> {
        final HonoKafkaConsumer internalConsumer = getInternalCommandConsumer(recordHandler);
        final KafkaBasedCommandConsumerFactoryImpl consumerFactory1 = getKafkaBasedCommandConsumerFactory(
                firstConsumerGetAdapterInstanceSupplier, tenantId);
        consumerFactory1Ref.set(consumerFactory1);
        CompositeFuture.join(internalConsumer.start(), consumerFactory1.start())
                .compose(f -> createCommandConsumer(tenantId, consumerFactory1))
                .onComplete(setup.succeedingThenComplete());
    });

    assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }
    LOG.debug("command consumer started");

    final List<String> sentCommandSubjects = new ArrayList<>();
    IntStream.range(0, numTestCommands).forEach(i -> {
        final String subject = "cmd_" + i;
        sentCommandSubjects.add(subject);
        sendOneWayCommand(tenantId, "myDeviceId", subject);
    });

    final AtomicInteger secondConsumerGetAdapterInstanceInvocations = new AtomicInteger();
    // wait for first record on internal topic to have been received ...
    CompositeFuture.join(firstConsumerAllGetAdapterInstanceInvocationsDone.future(), firstRecordReceivedPromise.future())
            .compose(v -> {
                // ... and wait some more, making sure that the offset of the first record has been committed
                final Promise<Void> delayPromise = Promise.promise();
                vertx.setTimer(500, tid -> delayPromise.complete());
                return delayPromise.future();
            }).onComplete(v -> {
                LOG.info("stopping first consumer factory");
                consumerFactory1Ref.get().stop().onComplete(ctx.succeeding(ar -> {
                    LOG.info("factory stopped");
                    // no delay on getting the target adapter instance added here
                    final KafkaBasedCommandConsumerFactoryImpl consumerFactory2 = getKafkaBasedCommandConsumerFactory(() -> {
                        secondConsumerGetAdapterInstanceInvocations.incrementAndGet();
                        return Future.succeededFuture();
                    }, tenantId);
                    consumerFactory2.start().onComplete(ctx.succeeding(ar2 -> {
                        LOG.info("creating command consumer in new consumer factory");
                        createCommandConsumer(tenantId, consumerFactory2).onComplete(ctx.succeeding(ar3 -> {
                            LOG.debug("consumer created");
                            firstConsumerGetAdapterInstancePromisesQueue.forEach(Promise::tryComplete);
                        }));
                    }));
                }));
            });

    final long timerId = vertx.setTimer(8000, tid -> {
        LOG.info("received records:\n{}",
                receivedRecords.stream().map(Object::toString).collect(Collectors.joining(",\n")));
        allRecordsReceivedPromise.tryFail(String.format(
                "only received %d out of %d expected messages after 8s", receivedRecords.size(), numTestCommands));
    });
    allRecordsReceivedPromise.future().onComplete(ctx.succeeding(v -> {
        vertx.cancelTimer(timerId);
        ctx.verify(() -> {
            assertThat(receivedCommandSubjects).isEqualTo(sentCommandSubjects);
            // all but the first command should have been processed by the second consumer
            assertThat(secondConsumerGetAdapterInstanceInvocations.get()).isEqualTo(numTestCommands - 1);
        });
        ctx.completeNow();
    }));
}
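Both tests also depend on a getInternalCommandConsumer(recordHandler) helper that subscribes to the adapter instance's internal command topic, i.e. the topic the factory's consumers forward mapped commands to. A minimal sketch, assuming a HonoKafkaConsumer constructor that takes a topic set, record handler and plain consumer config map; the topic type, group id and constructor shape are assumptions for illustration:

private HonoKafkaConsumer getInternalCommandConsumer(final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler) {
    // consume from the internal command topic of the (test) adapter instance
    final String topic = new HonoTopic(HonoTopic.Type.COMMAND_INTERNAL, adapterInstanceId).toString();
    final Map<String, String> consumerConfig = Map.of(
            ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, IntegrationTestSupport.DOWNSTREAM_BOOTSTRAP_SERVERS,
            ConsumerConfig.GROUP_ID_CONFIG, "internal-cmd-test-" + UUID.randomUUID(),
            ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    final HonoKafkaConsumer consumer = new HonoKafkaConsumer(vertx, Set.of(topic), recordHandler, consumerConfig);
    componentsToStopAfterTest.add(consumer);
    return consumer; // the tests start it themselves via internalConsumer.start()
}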
Use of org.eclipse.hono.commandrouter.impl.kafka.KafkaBasedCommandConsumerFactoryImpl in project hono by Eclipse.
The class Application, method commandConsumerFactoryProvider.
private MessagingClientProvider<CommandConsumerFactory> commandConsumerFactoryProvider(
        final TenantClient tenantClient,
        final CommandTargetMapper commandTargetMapper) {

    final MessagingClientProvider<CommandConsumerFactory> commandConsumerFactoryProvider = new MessagingClientProvider<>();
    if (kafkaConsumerConfig.isConfigured()
            && commandResponseKafkaProducerConfig.isConfigured()
            && commandInternalKafkaProducerConfig.isConfigured()) {
        final KafkaProducerFactory<String, Buffer> kafkaProducerFactory = CachingKafkaProducerFactory.sharedFactory(vertx);
        kafkaProducerFactory.setMetricsSupport(kafkaClientMetricsSupport);
        if (internalKafkaTopicCleanupService == null
                && commandInternalKafkaProducerConfig.isConfigured()
                && kafkaConsumerConfig.isConfigured()
                && kafkaAdminClientConfig.isConfigured()
                && !(adapterInstanceStatusService instanceof AdapterInstanceStatusService.UnknownStatusProvidingService)) {
            internalKafkaTopicCleanupService = new InternalKafkaTopicCleanupService(
                    vertx, adapterInstanceStatusService, kafkaAdminClientConfig);
        }
        commandConsumerFactoryProvider.setClient(new KafkaBasedCommandConsumerFactoryImpl(
                vertx,
                tenantClient,
                commandTargetMapper,
                kafkaProducerFactory,
                commandInternalKafkaProducerConfig,
                commandResponseKafkaProducerConfig,
                kafkaConsumerConfig,
                metrics,
                kafkaClientMetricsSupport,
                tracer,
                internalKafkaTopicCleanupService));
    }
    if (commandConsumerConnectionConfig.isHostConfigured()) {
        commandConsumerFactoryProvider.setClient(new ProtonBasedCommandConsumerFactoryImpl(
                HonoConnection.newConnection(vertx, commandConsumerConnectionConfig, tracer),
                tenantClient,
                commandTargetMapper,
                metrics,
                SendMessageSampler.Factory.noop()));
    }
    return commandConsumerFactoryProvider;
}
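The Kafka-based factory is only registered when the consumer config and both command producer configs are present, while the AMQP-based ProtonBasedCommandConsumerFactoryImpl is registered when a command consumer host is configured; if both conditions hold, the provider ends up holding a client per messaging type. As an illustrative sketch, reusing the API already shown in the test helper above, a consumer config counts as configured once its client properties are set (the broker address below is a placeholder, and the isConfigured() behaviour is an assumption):

// illustrative only: once a bootstrap.servers entry is set, isConfigured() is expected to
// return true, so commandConsumerFactoryProvider() would register the Kafka-based client
final MessagingKafkaConsumerConfigProperties kafkaConsumerConfig = new MessagingKafkaConsumerConfigProperties();
kafkaConsumerConfig.setConsumerConfig(Map.of(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka-broker:9092"));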