Use of io.vertx.kafka.admin.KafkaAdminClient in project hono by eclipse.
Class IntegrationTestSupport, method deleteTenantKafkaTopics.
private Future<Void> deleteTenantKafkaTopics(final List<String> tenantsToDelete) {
    if (!isUsingKafkaMessaging()) {
        return Future.succeededFuture();
    }
    // topics for the given tenants are not deleted right away: the offset-commit interval of the
    // Command Router's command consumer (5s) might not have elapsed yet, and the topics must not be
    // deleted before the consumer has committed the corresponding offsets (otherwise the consumer
    // would retry the commit for some time and be blocked during that time)
    final Promise<Void> tenantTopicsDeletionDonePromise = Promise.promise();
    tenantsToDeleteTopicsForAfterDelay.add(Pair.of(tenantsToDelete, Instant.now()));
    final List<String> tenantsToDeleteTopicsForNow = new LinkedList<>();
    final Instant nowMinusCommitInterval = Instant.now().minus(
            // commit interval with added buffer
            AsyncHandlingAutoCommitKafkaConsumer.DEFAULT_COMMIT_INTERVAL.plusSeconds(1));
    final Iterator<Pair<List<String>, Instant>> iterator = tenantsToDeleteTopicsForAfterDelay.iterator();
    while (iterator.hasNext()) {
        final Pair<List<String>, Instant> tenantsToDeleteAndInstantPair = iterator.next();
        if (tenantsToDeleteAndInstantPair.two().isBefore(nowMinusCommitInterval)) {
            tenantsToDeleteTopicsForNow.addAll(tenantsToDeleteAndInstantPair.one());
            iterator.remove();
        }
    }
    if (!tenantsToDeleteTopicsForNow.isEmpty()) {
        final KafkaAdminClient adminClient = KafkaAdminClient.create(vertx,
                getKafkaAdminClientConfig().getAdminClientConfig("test"));
        final Promise<Void> adminClientClosedPromise = Promise.promise();
        LOGGER.debug("deleting topics for temporary tenants {}", tenantsToDeleteTopicsForNow);
        final List<String> topicNames = tenantsToDeleteTopicsForNow.stream()
                .flatMap(tenant -> HonoTopic.Type.MESSAGING_API_TYPES.stream()
                        .map(type -> new HonoTopic(type, tenant).toString()))
                .collect(Collectors.toList());
        adminClient.deleteTopics(topicNames, ar -> {
            // note that the result may have failed with an UnknownTopicOrPartitionException here,
            // since not all tenant topics have necessarily been created before
            LOGGER.debug("done triggering deletion of topics for tenants {}", tenantsToDeleteTopicsForNow);
            adminClient.close(adminClientClosedPromise);
        });
        adminClientClosedPromise.future().recover(thr -> {
            LOGGER.warn("error closing Kafka admin client", thr);
            return Future.succeededFuture();
        }).onComplete(tenantTopicsDeletionDonePromise);
    } else {
        tenantTopicsDeletionDonePromise.complete();
    }
    return tenantTopicsDeletionDonePromise.future();
}
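For context, a minimal sketch of a call site (hypothetical, not taken from the Hono sources): deletion is only triggered for tenants whose commit-interval delay has already elapsed, so completion of the returned future does not mean that all tenant topics are gone yet.

// hypothetical call site: trigger deletion of the Kafka topics belonging to tenants
// created during a test run ("temp-tenant-1"/"temp-tenant-2" are made-up identifiers)
deleteTenantKafkaTopics(List.of("temp-tenant-1", "temp-tenant-2"))
        .onComplete(ar -> LOGGER.debug("triggered deletion of tenant topics"));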
Use of io.vertx.kafka.admin.KafkaAdminClient in project hono by eclipse.
Class InternalKafkaTopicCleanupServiceTest, method setUp.
/**
 * Sets up the fixture.
 */
@BeforeEach
public void setUp() {
    final Vertx vertx = mock(Vertx.class);
    adapterInstanceStatusService = mock(AdapterInstanceStatusService.class);
    kafkaAdminClient = mock(KafkaAdminClient.class);
    internalKafkaTopicCleanupService = new InternalKafkaTopicCleanupService(vertx, adapterInstanceStatusService, kafkaAdminClient);
    internalKafkaTopicCleanupService.start();
}
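Because Vertx is a Mockito mock here, any periodic timer the service might register when started is effectively a no-op, which lets the test below drive cleanup runs explicitly through performCleanup(). A rough sketch of the kind of scheduling that is suppressed (hypothetical code, not taken from the Hono sources):

// hypothetical: a periodic timer inside the service driving the cleanup; with a mocked
// Vertx this registration does nothing, so tests invoke performCleanup() directly
vertx.setPeriodic(60_000L, timerId -> performCleanup()); // interval chosen for illustration only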
Use of io.vertx.kafka.admin.KafkaAdminClient in project hono by eclipse.
Class InternalKafkaTopicCleanupServiceTest, method testPerformCleanup.
/**
 * Verifies that the service deletes topics identified as obsolete.
 */
@Test
@SuppressWarnings("unchecked")
void testPerformCleanup() {
    final AtomicInteger counter = new AtomicInteger();
    final String podName = "myAdapter";
    final String aliveContainerId = "0ad9864b08bf";
    final String deadContainerId = "000000000000";
    final Set<String> toBeDeletedCmdInternalTopics = new HashSet<>();
    toBeDeletedCmdInternalTopics.add(getCmdInternalTopic(podName, deadContainerId, counter.getAndIncrement()));
    toBeDeletedCmdInternalTopics.add(getCmdInternalTopic(podName, deadContainerId, counter.getAndIncrement()));
    toBeDeletedCmdInternalTopics.add(getCmdInternalTopic(podName, deadContainerId, counter.getAndIncrement()));
    // GIVEN a number of topics
    final Set<String> allTopics = new HashSet<>(toBeDeletedCmdInternalTopics);
    allTopics.add("other");
    allTopics.add(getCmdInternalTopic(podName, aliveContainerId, counter.getAndIncrement()));
    allTopics.add(getCmdInternalTopic(podName, aliveContainerId, counter.getAndIncrement()));
    allTopics.add(getCmdInternalTopic(podName, aliveContainerId, counter.getAndIncrement()));
    // all adapter instances whose identifier contains the "deadContainerId" shall be identified as dead
    when(adapterInstanceStatusService.getDeadAdapterInstances(any())).thenAnswer(invocation -> {
        final Collection<String> adapterInstanceIdsParam = invocation.getArgument(0);
        final Set<String> deadIds = adapterInstanceIdsParam.stream()
                .filter(id -> id.contains(deadContainerId))
                .collect(Collectors.toSet());
        return Future.succeededFuture(deadIds);
    });
    when(kafkaAdminClient.deleteTopics(any())).thenAnswer(invocation -> {
        // remove deleted from allTopics
        final List<String> topicsToDeleteParam = invocation.getArgument(0);
        topicsToDeleteParam.forEach(allTopics::remove);
        return Future.succeededFuture();
    });
    when(kafkaAdminClient.listTopics()).thenReturn(Future.succeededFuture(allTopics));
    // WHEN the cleanup gets performed
    internalKafkaTopicCleanupService.performCleanup();
    verify(kafkaAdminClient, never()).deleteTopics(any());
    // THEN the next invocation ...
    internalKafkaTopicCleanupService.performCleanup();
    // ... will cause the matching topics to be deleted
    final var deletedTopicsCaptor = ArgumentCaptor.forClass(List.class);
    verify(kafkaAdminClient).deleteTopics(deletedTopicsCaptor.capture());
    assertThat(deletedTopicsCaptor.getValue()).isEqualTo(new ArrayList<>(toBeDeletedCmdInternalTopics));
}
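The getCmdInternalTopic helper is not part of this snippet. A plausible sketch, assuming adapter instance identifiers of the form "<podName>_<containerId>_<counter>" and topic naming via HonoTopic.Type.COMMAND_INTERNAL (both assumptions made for illustration):

// hypothetical helper: builds a command-internal topic name for an adapter instance
// identifier of the assumed form "<podName>_<containerId>_<counter>"
private static String getCmdInternalTopic(final String podName, final String containerId, final int counter) {
    final String adapterInstanceId = String.format("%s_%s_%d", podName, containerId, counter);
    return new HonoTopic(HonoTopic.Type.COMMAND_INTERNAL, adapterInstanceId).toString();
}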