Use of io.vertx.core.Promise in project hono by eclipse.
The class AsyncHandlingAutoCommitKafkaConsumerTest, method testConsumerCommitsOffsetsOnRebalance.
/**
* Verifies that the consumer commits the last fully handled records on rebalance.
*
* @param ctx The vert.x test context.
* @throws InterruptedException if the test execution gets interrupted.
*/
@Test
public void testConsumerCommitsOffsetsOnRebalance(final VertxTestContext ctx) throws InterruptedException {
    final int numTestRecords = 5;
    final VertxTestContext receivedRecordsCtx = new VertxTestContext();
    final Checkpoint receivedRecordsCheckpoint = receivedRecordsCtx.checkpoint(numTestRecords);
    final Map<Long, Promise<Void>> recordsHandlingPromiseMap = new HashMap<>();
    final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> handler = record -> {
        final Promise<Void> promise = Promise.promise();
        if (recordsHandlingPromiseMap.put(record.offset(), promise) != null) {
            receivedRecordsCtx.failNow(new IllegalStateException("received record with duplicate offset"));
        }
        receivedRecordsCheckpoint.flag();
        return promise.future();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    // periodic commit shall not play a role here
    consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "300000");
    consumerConfig.put(AsyncHandlingAutoCommitKafkaConsumer.CONFIG_HONO_OFFSETS_COMMIT_RECORD_COMPLETION_TIMEOUT_MILLIS, "0");
    consumer = new AsyncHandlingAutoCommitKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig);
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updatePartitions(TOPIC_PARTITION, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION));
    consumer.start().onComplete(ctx.succeeding(v2 -> {
        mockConsumer.schedulePollTask(() -> {
            IntStream.range(0, numTestRecords).forEach(offset -> {
                mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer()));
            });
        });
    }));
    assertWithMessage("records received in 5s")
            .that(receivedRecordsCtx.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (receivedRecordsCtx.failed()) {
        ctx.failNow(receivedRecordsCtx.causeOfFailure());
        return;
    }
    // records received, complete the handling of some of them
    recordsHandlingPromiseMap.get(0L).complete();
    recordsHandlingPromiseMap.get(1L).complete();
    // offset 2 not completed yet, hence offset 1 is the latest in the row of fully handled records
    final AtomicInteger latestFullyHandledOffset = new AtomicInteger(1);
    recordsHandlingPromiseMap.get(4L).complete();
    // define VertxTestContexts for 3 checks (3x rebalance/commit)
    final AtomicInteger checkIndex = new AtomicInteger(0);
    final List<VertxTestContext> commitCheckContexts = IntStream.range(0, 3)
            .mapToObj(i -> new VertxTestContext()).collect(Collectors.toList());
    final List<Checkpoint> commitCheckpoints = commitCheckContexts.stream()
            .map(c -> c.checkpoint(1)).collect(Collectors.toList());
    final InterruptableSupplier<Boolean> waitForCurrentCommitCheckResult = () -> {
        assertWithMessage("partition assigned in 5s for checking of commits")
                .that(commitCheckContexts.get(checkIndex.get()).awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
        if (commitCheckContexts.get(checkIndex.get()).failed()) {
            ctx.failNow(commitCheckContexts.get(checkIndex.get()).causeOfFailure());
            return false;
        }
        return true;
    };
    consumer.setOnPartitionsAssignedHandler(partitions -> {
        final Map<TopicPartition, OffsetAndMetadata> committed = mockConsumer.committed(Set.of(TOPIC_PARTITION));
        ctx.verify(() -> {
            final OffsetAndMetadata offsetAndMetadata = committed.get(TOPIC_PARTITION);
            assertThat(offsetAndMetadata).isNotNull();
            assertThat(offsetAndMetadata.offset()).isEqualTo(latestFullyHandledOffset.get() + 1L);
        });
        commitCheckpoints.get(checkIndex.get()).flag();
    });
    // now force a rebalance which should trigger the above onPartitionsAssignedHandler
    mockConsumer.rebalance(List.of(TOPIC_PARTITION));
    if (!waitForCurrentCommitCheckResult.get()) {
        return;
    }
    checkIndex.incrementAndGet();
    // now another rebalance (i.e. commit trigger) - no change in offsets
    mockConsumer.rebalance(List.of(TOPIC_PARTITION));
    if (!waitForCurrentCommitCheckResult.get()) {
        return;
    }
    checkIndex.incrementAndGet();
    // now complete some more promises
    recordsHandlingPromiseMap.get(2L).complete();
    recordsHandlingPromiseMap.get(3L).complete();
    // offset 4 already complete
    latestFullyHandledOffset.set(4);
    // again rebalance/commit
    mockConsumer.rebalance(List.of(TOPIC_PARTITION));
    if (waitForCurrentCommitCheckResult.get()) {
        ctx.completeNow();
    }
}
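The assertion that offset 2 gets committed (latestFullyHandledOffset + 1) follows the Kafka convention that a committed offset names the next record to be consumed. The following is a minimal, self-contained sketch of the "highest contiguous completed offset" bookkeeping this test exercises; the class and method names are illustrative, not Hono's actual implementation.

import java.util.OptionalLong;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;

final class CommittableOffsetSketch {

    static OptionalLong offsetToCommit(final Set<Long> completedOffsets, final long firstOffset) {
        final SortedSet<Long> sorted = new TreeSet<>(completedOffsets);
        long expected = firstOffset;
        for (final long offset : sorted) {
            if (offset != expected) {
                break; // gap found: earlier records are not all handled yet
            }
            expected++;
        }
        // nothing contiguous handled yet -> nothing to commit
        return expected == firstOffset ? OptionalLong.empty() : OptionalLong.of(expected);
    }

    public static void main(final String[] args) {
        // completed: 0, 1 and 4; offset 2 is still outstanding, so offset 2 is committed
        System.out.println(offsetToCommit(Set.of(0L, 1L, 4L), 0L)); // OptionalLong[2]
    }
}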
Use of io.vertx.core.Promise in project hono by eclipse.
The class AsyncHandlingAutoCommitKafkaConsumerTest, method testConsumerCommitsOffsetsOnRebalanceAfterWaitingForRecordCompletion.
/**
* Verifies that the consumer commits record offsets on rebalance, having waited some time for record
* handling to be completed.
*
* @param ctx The vert.x test context.
* @throws InterruptedException if the test execution gets interrupted.
*/
@Test
public void testConsumerCommitsOffsetsOnRebalanceAfterWaitingForRecordCompletion(final VertxTestContext ctx) throws InterruptedException {
    final int numTestRecords = 5;
    final VertxTestContext receivedRecordsCtx = new VertxTestContext();
    final Checkpoint receivedRecordsCheckpoint = receivedRecordsCtx.checkpoint(numTestRecords);
    final Map<Long, Promise<Void>> recordsHandlingPromiseMap = new HashMap<>();
    final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> handler = record -> {
        final Promise<Void> promise = Promise.promise();
        if (recordsHandlingPromiseMap.put(record.offset(), promise) != null) {
            receivedRecordsCtx.failNow(new IllegalStateException("received record with duplicate offset"));
        }
        receivedRecordsCheckpoint.flag();
        return promise.future();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    // periodic commit shall not play a role here
    consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "300000");
    consumerConfig.put(AsyncHandlingAutoCommitKafkaConsumer.CONFIG_HONO_OFFSETS_COMMIT_RECORD_COMPLETION_TIMEOUT_MILLIS, "21000");
    final AtomicReference<Handler<Void>> onNextPartitionsRevokedBlockingHandlerRef = new AtomicReference<>();
    consumer = new AsyncHandlingAutoCommitKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig) {
        @Override
        protected void onPartitionsRevokedBlocking(final Set<io.vertx.kafka.client.common.TopicPartition> partitionsSet) {
            Optional.ofNullable(onNextPartitionsRevokedBlockingHandlerRef.get())
                    .ifPresent(handler -> handler.handle(null));
            onNextPartitionsRevokedBlockingHandlerRef.set(null);
            super.onPartitionsRevokedBlocking(partitionsSet);
        }
    };
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updatePartitions(TOPIC_PARTITION, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION));
    final Context consumerVertxContext = vertx.getOrCreateContext();
    consumerVertxContext.runOnContext(v -> {
        consumer.start().onComplete(ctx.succeeding(v2 -> {
            mockConsumer.schedulePollTask(() -> {
                IntStream.range(0, numTestRecords).forEach(offset -> {
                    mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer()));
                });
            });
        }));
    });
    assertWithMessage("records received in 5s")
            .that(receivedRecordsCtx.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (receivedRecordsCtx.failed()) {
        ctx.failNow(receivedRecordsCtx.causeOfFailure());
        return;
    }
    // records received, complete the handling of all except the first 2 records
    LongStream.range(2, numTestRecords).forEach(offset -> recordsHandlingPromiseMap.get(offset).complete());
    ctx.verify(() -> assertThat(recordsHandlingPromiseMap.get(1L).future().isComplete()).isFalse());
    // partitions revoked handler shall get called after the blocking partitions-revoked handling
    // has waited for the records to be marked as completed
    consumer.setOnPartitionsRevokedHandler(s -> {
        ctx.verify(() -> assertThat(recordsHandlingPromiseMap.get(1L).future().isComplete()).isTrue());
    });
    final Checkpoint commitCheckDone = ctx.checkpoint(1);
    consumer.setOnPartitionsAssignedHandler(partitions -> {
        final Map<TopicPartition, OffsetAndMetadata> committed = mockConsumer.committed(Set.of(TOPIC_PARTITION));
        ctx.verify(() -> {
            final OffsetAndMetadata offsetAndMetadata = committed.get(TOPIC_PARTITION);
            assertThat(offsetAndMetadata).isNotNull();
            assertThat(offsetAndMetadata.offset()).isEqualTo(numTestRecords);
        });
        commitCheckDone.flag();
    });
    // trigger a rebalance where the currently assigned partition is revoked
    // (and then assigned again - otherwise its offset wouldn't be returned by mockConsumer.committed())
    // the remaining 2 records are to be marked as completed with some delay
    onNextPartitionsRevokedBlockingHandlerRef.set(v -> {
        consumerVertxContext.runOnContext(v2 -> {
            recordsHandlingPromiseMap.get(0L).complete();
            recordsHandlingPromiseMap.get(1L).complete();
        });
    });
    mockConsumer.setRevokeAllOnRebalance(true);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC2_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC2_PARTITION, 0L));
    mockConsumer.setNextPollRebalancePartitionAssignment(List.of(TOPIC_PARTITION, TOPIC2_PARTITION));
}
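With CONFIG_HONO_OFFSETS_COMMIT_RECORD_COMPLETION_TIMEOUT_MILLIS set to 21000, the blocking partitions-revoked handling waits for the outstanding record futures before committing, which is why all five offsets end up committed here. Below is a minimal sketch of such a bounded wait using plain Vert.x timers; it is not the class under test, and the names (CommitWaitSketch, waitForPendingRecords) are illustrative.

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import io.vertx.core.Future;
import io.vertx.core.Promise;
import io.vertx.core.Vertx;

final class CommitWaitSketch {

    // resolves once all pending record futures have completed, or after timeoutMillis,
    // whichever comes first; timeoutMillis must be positive (Vert.x requires a delay >= 1)
    static Future<Void> waitForPendingRecords(final Vertx vertx,
            final List<Future<Void>> pending, final long timeoutMillis) {
        final Promise<Void> done = Promise.promise();
        final long timerId = vertx.setTimer(timeoutMillis, id -> done.tryComplete());
        final AtomicInteger remaining = new AtomicInteger(pending.size());
        if (pending.isEmpty()) {
            vertx.cancelTimer(timerId);
            done.tryComplete();
            return done.future();
        }
        pending.forEach(future -> future.onComplete(ar -> {
            if (remaining.decrementAndGet() == 0) {
                vertx.cancelTimer(timerId);
                done.tryComplete(); // every record was handled before the timeout
            }
        }));
        return done.future();
    }
}

On timeout the returned future still succeeds: the commit then simply covers whatever prefix of records completed in time, mirroring the "last fully handled records" semantics of the consumer.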
Use of io.vertx.core.Promise in project hono by eclipse.
The class AsyncHandlingAutoCommitKafkaConsumerTest, method testConsumerCommitsInitialOffset.
/**
* Verifies that the consumer commits the initial partition offset on the first offset commit after
* the partition got assigned to the consumer.
*
* @param ctx The vert.x test context.
* @throws InterruptedException if the test execution gets interrupted.
*/
@Test
public void testConsumerCommitsInitialOffset(final VertxTestContext ctx) throws InterruptedException {
    final Promise<Void> testRecordsReceived = Promise.promise();
    final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> handler = record -> {
        testRecordsReceived.complete();
        return Future.succeededFuture();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    // 1000ms commit interval - keep the value not too low,
    // otherwise the frequent commit task on the event loop thread will prevent the test main thread from getting things done
    consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
    consumer = new AsyncHandlingAutoCommitKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig);
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updatePartitions(TOPIC_PARTITION, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION));
    final VertxTestContext consumerStartedCtx = new VertxTestContext();
    final Checkpoint consumerStartedCheckpoint = consumerStartedCtx.checkpoint(2);
    consumer.setOnRebalanceDoneHandler(s -> consumerStartedCheckpoint.flag());
    vertx.getOrCreateContext().runOnContext(v -> {
        consumer.start().onSuccess(v2 -> consumerStartedCheckpoint.flag());
    });
    assertWithMessage("consumer started in 5s")
            .that(consumerStartedCtx.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (consumerStartedCtx.failed()) {
        ctx.failNow(consumerStartedCtx.causeOfFailure());
        return;
    }
    final List<Map<TopicPartition, OffsetAndMetadata>> reportedCommits = new ArrayList<>();
    mockConsumer.addCommitListener(reportedCommits::add);
    final CountDownLatch rebalance1Done = new CountDownLatch(1);
    consumer.setOnPartitionsAssignedHandler(partitions -> {
        final Map<TopicPartition, OffsetAndMetadata> committed = mockConsumer.committed(Set.of(TOPIC_PARTITION, TOPIC2_PARTITION));
        ctx.verify(() -> {
            // the rebalance where topicPartition got revoked should have triggered a commit of offset 0 for topicPartition
            assertThat(reportedCommits.size()).isEqualTo(1);
            final OffsetAndMetadata offsetAndMetadata = committed.get(TOPIC_PARTITION);
            assertThat(offsetAndMetadata).isNotNull();
            assertThat(offsetAndMetadata.offset()).isEqualTo(0);
        });
    });
    consumer.setOnRebalanceDoneHandler(s -> rebalance1Done.countDown());
    // now force a rebalance which should trigger the above onPartitionsAssignedHandler
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC2_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC2_PARTITION, 0L));
    mockConsumer.rebalance(List.of(TOPIC2_PARTITION));
    if (!rebalance1Done.await(5, TimeUnit.SECONDS)) {
        ctx.failNow(new IllegalStateException("partitionsAssigned handler not invoked"));
    }
    final CountDownLatch rebalance2Done = new CountDownLatch(1);
    consumer.setOnPartitionsAssignedHandler(partitions -> {
        final Map<TopicPartition, OffsetAndMetadata> committed = mockConsumer.committed(Set.of(TOPIC_PARTITION, TOPIC2_PARTITION));
        ctx.verify(() -> {
            // the 2nd rebalance where topic2Partition got revoked and topicPartition got assigned
            // should have triggered a commit of offset 0 for topic2Partition
            assertThat(reportedCommits.size()).isEqualTo(2);
            final OffsetAndMetadata offsetAndMetadata = committed.get(TOPIC2_PARTITION);
            assertThat(offsetAndMetadata).isNotNull();
            assertThat(offsetAndMetadata.offset()).isEqualTo(0);
        });
    });
    consumer.setOnRebalanceDoneHandler(s -> rebalance2Done.countDown());
    // now again force a rebalance which should trigger the above onPartitionsAssignedHandler
    // - this time again with the first partition
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.rebalance(List.of(TOPIC_PARTITION));
    if (!rebalance2Done.await(5, TimeUnit.SECONDS)) {
        ctx.failNow(new IllegalStateException("partitionsAssigned handler not invoked"));
    }
    consumer.setOnPartitionsAssignedHandler(partitions -> {
        ctx.verify(() -> {
            // the 3rd rebalance where all partitions got revoked should have triggered no new commits
            assertThat(reportedCommits.size()).isEqualTo(2);
        });
        ctx.completeNow();
    });
    // now force a 3rd rebalance, assigning no partition
    mockConsumer.rebalance(List.of());
}
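Note that setRebalancePartitionAssignmentAfterSubscribe, addCommitListener and setRevokeAllOnRebalance belong to Hono's KafkaMockConsumer test helper, not to the stock Kafka client. The committed-offset bookkeeping itself can be reproduced with kafka-clients' plain MockConsumer, as in this sketch of the "initial offset" commit; the topic name and class name are made up.

import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

final class InitialOffsetCommitSketch {

    public static void main(final String[] args) {
        final TopicPartition tp = new TopicPartition("test.topic", 0);
        final MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        consumer.assign(List.of(tp));
        consumer.updateBeginningOffsets(Map.of(tp, 0L));

        // no record handled yet: committing the current position pins the initial offset (0),
        // so the consumer group's starting point is recorded even before any record is processed
        final long position = consumer.position(tp);
        consumer.commitSync(Map.of(tp, new OffsetAndMetadata(position)));

        System.out.println(consumer.committed(Set.of(tp)).get(tp).offset()); // 0
    }
}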
Use of io.vertx.core.Promise in project hono by eclipse.
The class AsyncHandlingAutoCommitKafkaConsumerTest, method testConsumerCommitsOffsetsOnStop.
/**
* Verifies that the consumer commits the last fully handled records when it is stopped.
*
* @param ctx The vert.x test context.
* @throws InterruptedException if the test execution gets interrupted.
*/
@Test
public void testConsumerCommitsOffsetsOnStop(final VertxTestContext ctx) throws InterruptedException {
    final int numTestRecords = 5;
    final VertxTestContext receivedRecordsCtx = new VertxTestContext();
    final Checkpoint receivedRecordsCheckpoint = receivedRecordsCtx.checkpoint(numTestRecords);
    final Map<Long, Promise<Void>> recordsHandlingPromiseMap = new HashMap<>();
    final Function<KafkaConsumerRecord<String, Buffer>, Future<Void>> handler = record -> {
        final Promise<Void> promise = Promise.promise();
        if (recordsHandlingPromiseMap.put(record.offset(), promise) != null) {
            receivedRecordsCtx.failNow(new IllegalStateException("received record with duplicate offset"));
        }
        receivedRecordsCheckpoint.flag();
        return promise.future();
    };
    final Map<String, String> consumerConfig = consumerConfigProperties.getConsumerConfig("test");
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    // periodic commit shall not play a role here
    consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "300000");
    consumerConfig.put(AsyncHandlingAutoCommitKafkaConsumer.CONFIG_HONO_OFFSETS_COMMIT_RECORD_COMPLETION_TIMEOUT_MILLIS, "0");
    consumer = new AsyncHandlingAutoCommitKafkaConsumer(vertx, Set.of(TOPIC), handler, consumerConfig);
    consumer.setKafkaConsumerSupplier(() -> mockConsumer);
    mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L));
    mockConsumer.updatePartitions(TOPIC_PARTITION, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION));
    consumer.start().onComplete(ctx.succeeding(v2 -> {
        mockConsumer.schedulePollTask(() -> {
            IntStream.range(0, numTestRecords).forEach(offset -> {
                mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer()));
            });
        });
    }));
    assertWithMessage("records received in 5s")
            .that(receivedRecordsCtx.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (receivedRecordsCtx.failed()) {
        ctx.failNow(receivedRecordsCtx.causeOfFailure());
        return;
    }
    // records received, complete the handling of some of them
    recordsHandlingPromiseMap.get(0L).complete();
    recordsHandlingPromiseMap.get(1L).complete();
    // offset 2 not completed yet, hence offset 1 is the latest in the row of fully handled records
    final int latestFullyHandledOffset = 1;
    recordsHandlingPromiseMap.get(4L).complete();
    // skip setting the closed flag - otherwise mockConsumer.committed() couldn't be called afterwards
    mockConsumer.setSkipSettingClosedFlagOnNextClose();
    // now close the consumer
    consumer.stop().onComplete(v -> {
        final Map<TopicPartition, OffsetAndMetadata> committed = mockConsumer.committed(Set.of(TOPIC_PARTITION));
        ctx.verify(() -> {
            final OffsetAndMetadata offsetAndMetadata = committed.get(TOPIC_PARTITION);
            assertThat(offsetAndMetadata).isNotNull();
            assertThat(offsetAndMetadata.offset()).isEqualTo(latestFullyHandledOffset + 1L);
        });
        ctx.completeNow();
    });
}
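The expected committed offset is again latestFullyHandledOffset + 1, i.e. 2, because a Kafka commit carries the position of the next record to read; the test only skips the mock's closed flag so that committed() may still be queried after stop(). A hedged sketch of the same convention against a stock MockConsumer (names and topic are made up):

import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

final class CommitOnStopSketch {

    public static void main(final String[] args) {
        final TopicPartition tp = new TopicPartition("test.topic", 0);
        final MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        consumer.assign(List.of(tp));
        consumer.updateBeginningOffsets(Map.of(tp, 0L));

        final long latestFullyHandledOffset = 1L;
        // a commit names the next offset to consume, hence the "+ 1" the test asserts
        consumer.commitSync(Map.of(tp, new OffsetAndMetadata(latestFullyHandledOffset + 1)));
        System.out.println(consumer.committed(Set.of(tp)).get(tp).offset()); // 2
        consumer.close(); // after close(), committed() would throw, which is what the test works around
    }
}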
Use of io.vertx.core.Promise in project hono by eclipse.
The class KafkaBasedMappingAndDelegatingCommandHandlerTest, method testCommandDelegationOrderWithMappingFailedForFirstEntry.
/**
 * Verifies the behaviour of the
 * {@link KafkaBasedMappingAndDelegatingCommandHandler#mapAndDelegateIncomingCommandMessage(KafkaConsumerRecord)}
 * method in a scenario where the rather long-running processing of a command delays subsequent, already mapped
 * commands from getting delegated to the target adapter instance. Once the processing of the first command has
 * failed, the subsequent commands shall get delegated in the correct order.
 *
 * @param ctx The vert.x test context.
 */
@Test
public void testCommandDelegationOrderWithMappingFailedForFirstEntry(final VertxTestContext ctx) {
    final String deviceId1 = "device1";
    final String deviceId2 = "device2";
    final String deviceId3 = "device3";
    final String deviceId4 = "device4";
    // GIVEN valid command records
    final KafkaConsumerRecord<String, Buffer> commandRecord1 = getCommandRecord(tenantId, deviceId1, "subject1", 0, 1);
    final KafkaConsumerRecord<String, Buffer> commandRecord2 = getCommandRecord(tenantId, deviceId2, "subject2", 0, 2);
    final KafkaConsumerRecord<String, Buffer> commandRecord3 = getCommandRecord(tenantId, deviceId3, "subject3", 0, 3);
    final KafkaConsumerRecord<String, Buffer> commandRecord4 = getCommandRecord(tenantId, deviceId4, "subject4", 0, 4);
    // WHEN getting the target adapter instances for the commands results in a different delay for each command,
    // so that the invocations complete in the order: commandRecord3, commandRecord2, commandRecord1 (failed), commandRecord4
    final Promise<JsonObject> resultForCommand1 = Promise.promise();
    when(commandTargetMapper.getTargetGatewayAndAdapterInstance(eq(tenantId), eq(deviceId1), any()))
            .thenReturn(resultForCommand1.future());
    final Promise<JsonObject> resultForCommand2 = Promise.promise();
    when(commandTargetMapper.getTargetGatewayAndAdapterInstance(eq(tenantId), eq(deviceId2), any()))
            .thenReturn(resultForCommand2.future());
    final Promise<JsonObject> resultForCommand3 = Promise.promise();
    when(commandTargetMapper.getTargetGatewayAndAdapterInstance(eq(tenantId), eq(deviceId3), any()))
            .thenReturn(resultForCommand3.future());
    doAnswer(invocation -> {
        resultForCommand3.complete(createTargetAdapterInstanceJson(deviceId3, adapterInstanceId));
        resultForCommand2.complete(createTargetAdapterInstanceJson(deviceId2, adapterInstanceId));
        resultForCommand1.fail("mapping of command 1 failed for some reason");
        return Future.succeededFuture(createTargetAdapterInstanceJson(deviceId4, adapterInstanceId));
    }).when(commandTargetMapper).getTargetGatewayAndAdapterInstance(eq(tenantId), eq(deviceId4), any());
    // WHEN mapping and delegating the commands
    final Future<Void> cmd1Future = cmdHandler.mapAndDelegateIncomingCommandMessage(commandRecord1);
    final Future<Void> cmd2Future = cmdHandler.mapAndDelegateIncomingCommandMessage(commandRecord2);
    final Future<Void> cmd3Future = cmdHandler.mapAndDelegateIncomingCommandMessage(commandRecord3);
    final Future<Void> cmd4Future = cmdHandler.mapAndDelegateIncomingCommandMessage(commandRecord4);
    // THEN the messages are delegated in the original order, with command 1 left out because its mapping failed
    CompositeFuture.all(cmd2Future, cmd3Future, cmd4Future).onComplete(ctx.succeeding(r -> {
        ctx.verify(() -> {
            assertThat(cmd1Future.failed()).isTrue();
            final ArgumentCaptor<CommandContext> commandContextCaptor = ArgumentCaptor.forClass(CommandContext.class);
            verify(internalCommandSender, times(3)).sendCommand(commandContextCaptor.capture(), anyString());
            final List<CommandContext> capturedCommandContexts = commandContextCaptor.getAllValues();
            assertThat(capturedCommandContexts.get(0).getCommand().getDeviceId()).isEqualTo(deviceId2);
            assertThat(capturedCommandContexts.get(1).getCommand().getDeviceId()).isEqualTo(deviceId3);
            assertThat(capturedCommandContexts.get(2).getCommand().getDeviceId()).isEqualTo(deviceId4);
        });
        ctx.completeNow();
    }));
}
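The ordering guarantee asserted here, namely that sends happen in record order even though the mapping futures complete out of order, can be obtained by chaining each command's send onto the previous command's outcome. The following is a minimal sketch of that idea only; it is not Hono's actual queueing implementation, and all names (OrderedDelegationSketch, mapAndDelegate, send) are illustrative. It assumes all calls happen on a single Vert.x event-loop context, so the mutable chain field needs no synchronization.

import io.vertx.core.Future;

final class OrderedDelegationSketch {

    // tail of the delegation chain; each new command appends itself here
    private Future<Void> delegationChain = Future.succeededFuture();

    Future<Void> mapAndDelegate(final Future<Void> mappingResult) {
        final Future<Void> previous = delegationChain;
        // wait for the earlier commands to settle (ignoring their failures),
        // then for this command's own mapping, then perform the send
        final Future<Void> sendFuture = previous.otherwiseEmpty()
                .compose(v -> mappingResult)
                .compose(v -> send());
        delegationChain = sendFuture;
        return sendFuture;
    }

    private Future<Void> send() {
        return Future.succeededFuture(); // stand-in for delegating to the adapter instance
    }
}

Because a failed command only fails its own returned future while the chain continues via otherwiseEmpty(), command 1's mapping failure does not block commands 2 to 4, which is exactly the behaviour the test asserts.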