Use of org.eclipse.hono.util.Pair in the Eclipse Hono project.
Example from the class IntegrationTestSupport, method deleteTenantKafkaTopics:
/**
 * Triggers deletion of the Kafka topics belonging to the given (temporary) tenants,
 * deferring the actual deletion until a safety delay has elapsed.
 *
 * @param tenantsToDelete The identifiers of the tenants whose topics shall be deleted.
 * @return A future indicating that deletion of all due topics has been triggered and the
 *         admin client has been closed again (succeeded future if Kafka isn't used).
 */
private Future<Void> deleteTenantKafkaTopics(final List<String> tenantsToDelete) {
    if (!isUsingKafkaMessaging()) {
        return Future.succeededFuture();
    }
    // Topics for the given tenants are not deleted right away: the offset-commit interval of
    // the CommandRouter command consumer (5s) may not have elapsed yet, and deleting the topics
    // before the consumer has committed the corresponding offsets has to be avoided (otherwise
    // the consumer will retry the commit for some time and be blocked during that time).
    final Promise<Void> deletionDonePromise = Promise.promise();
    tenantsToDeleteTopicsForAfterDelay.add(Pair.of(tenantsToDelete, Instant.now()));
    // collect the entries whose delay (commit interval plus a one-second buffer) has elapsed
    final Instant deletionThreshold = Instant.now()
            .minus(AsyncHandlingAutoCommitKafkaConsumer.DEFAULT_COMMIT_INTERVAL.plusSeconds(1));
    final List<String> dueTenants = new LinkedList<>();
    tenantsToDeleteTopicsForAfterDelay.removeIf(entry -> {
        if (entry.two().isBefore(deletionThreshold)) {
            dueTenants.addAll(entry.one());
            return true;
        }
        return false;
    });
    if (dueTenants.isEmpty()) {
        deletionDonePromise.complete();
        return deletionDonePromise.future();
    }
    final KafkaAdminClient adminClient = KafkaAdminClient
            .create(vertx, getKafkaAdminClientConfig().getAdminClientConfig("test"));
    final Promise<Void> adminClientClosedPromise = Promise.promise();
    LOGGER.debug("deleting topics for temporary tenants {}", dueTenants);
    // one topic per (tenant, messaging API topic type) combination
    final List<String> topicNames = dueTenants.stream()
            .flatMap(tenant -> HonoTopic.Type.MESSAGING_API_TYPES.stream()
                    .map(type -> new HonoTopic(type, tenant).toString()))
            .collect(Collectors.toList());
    adminClient.deleteTopics(topicNames, ar -> {
        // note that the result will probably have failed with an UnknownTopicOrPartitionException here;
        // not necessarily all tenant topics may have been created before
        LOGGER.debug("done triggering deletion of topics for tenants {}", dueTenants);
        adminClient.close(adminClientClosedPromise);
    });
    adminClientClosedPromise.future()
            .recover(thr -> {
                LOGGER.warn("error closing Kafka admin client", thr);
                return Future.succeededFuture();
            })
            .onComplete(deletionDonePromise);
    return deletionDonePromise.future();
}
Use of org.eclipse.hono.util.Pair in the Eclipse Hono project.
Example from the class LoraProtocolAdapter, method handleCommand:
/**
 * Handles a command received for a LoRa device by forwarding it to the gateway
 * via which the device's LoRa provider has subscribed for commands.
 *
 * @param commandContext The context of the command to forward.
 */
private void handleCommand(final CommandContext commandContext) {
    Tags.COMPONENT.set(commandContext.getTracingSpan(), getTypeName());
    final Sample commandTimer = metrics.startTimer();
    final Command command = commandContext.getCommand();
    if (command.getGatewayId() == null) {
        // a LoRa command can only be forwarded via a gateway
        final String errorMsg = "no gateway defined for command";
        LOG.debug("{} [{}]", errorMsg, command);
        commandContext.release(new ServerErrorException(HttpURLConnection.HTTP_UNAVAILABLE, errorMsg));
        return;
    }
    final String tenant = command.getTenant();
    final String gatewayId = command.getGatewayId();
    // look up the provider via which the gateway has subscribed for commands
    final LoraProvider loraProvider = Optional
            .ofNullable(commandSubscriptions.get(new SubscriptionKey(tenant, gatewayId)))
            .map(Pair::two)
            .orElse(null);
    if (loraProvider == null) {
        LOG.debug("received command for unknown gateway [{}] for tenant [{}]", gatewayId, tenant);
        TracingHelper.logError(commandContext.getTracingSpan(),
                String.format("received command for unknown gateway [%s]", gatewayId));
        commandContext.release(
                new ServerErrorException(HttpURLConnection.HTTP_UNAVAILABLE, "received command for unknown gateway"));
        return;
    }
    final Direction commandDirection = command.isOneWay() ? Direction.ONE_WAY : Direction.REQUEST;
    final Future<TenantObject> tenantTracker = getTenantConfiguration(tenant, commandContext.getTracingContext());
    tenantTracker
            .compose(tenantObject -> {
                if (!command.isValid()) {
                    return Future.failedFuture(
                            new ClientErrorException(HttpURLConnection.HTTP_BAD_REQUEST, "malformed command message"));
                }
                return checkMessageLimit(tenantObject, command.getPayloadSize(), commandContext.getTracingContext());
            })
            .compose(success -> getRegistrationClient()
                    .assertRegistration(tenant, gatewayId, null, commandContext.getTracingContext()))
            .compose(registrationAssertion -> sendCommandToGateway(
                    commandContext, loraProvider, registrationAssertion.getCommandEndpoint()))
            .onSuccess(ok -> {
                addMicrometerSample(commandContext, commandTimer);
                commandContext.accept();
                metrics.reportCommand(commandDirection, tenant, tenantTracker.result(),
                        MetricsTags.ProcessingOutcome.FORWARDED, command.getPayloadSize(), commandTimer);
            })
            .onFailure(t -> {
                LOG.debug("error sending command", t);
                commandContext.release(t);
                metrics.reportCommand(commandDirection, tenant, tenantTracker.result(),
                        MetricsTags.ProcessingOutcome.from(t), command.getPayloadSize(), commandTimer);
            });
}
Use of org.eclipse.hono.util.Pair in the Eclipse Hono project.
Example from the class CommandRouterServiceImpl, method activateCommandRouting:
/**
 * (Re-)creates the command consumer for a tenant in order to re-enable command routing.
 * <p>
 * On a server error the tenant is appended to the queue again (with an incremented attempt
 * counter) so that consumer creation is retried at a later point in time.
 *
 * @param attempt A pair of the tenant identifier and the number of the current attempt.
 * @param tracingContext The context to use for creating follow-up spans.
 */
private void activateCommandRouting(final Pair<String, Integer> attempt, final SpanContext tracingContext) {
    final String tenantId = attempt.one();
    final Integer attemptNo = attempt.two();
    if (!running.get()) {
        // component has been stopped, no need to create command consumer in this case
        tenantsInProcess.remove(tenantId);
        return;
    }
    final Span span = tracer.buildSpan("re-enable command routing for tenant")
            .addReference(References.FOLLOWS_FROM, tracingContext)
            .withTag(TracingHelper.TAG_TENANT_ID, tenantId)
            .start();
    final var logEntries = new HashMap<String, Object>(2);
    logEntries.put("attempt#", attemptNo);
    tenantClient.get(tenantId, span.context())
            .map(commandConsumerFactoryProvider::getClient)
            .map(factory -> factory.createCommandConsumer(tenantId, span.context()))
            .onSuccess(ok -> {
                logEntries.put(Fields.MESSAGE, "successfully created command consumer");
                span.log(logEntries);
                reenabledTenants.add(tenantId);
            })
            .onFailure(t -> {
                logEntries.put(Fields.MESSAGE, "failed to create command consumer");
                logEntries.put(Fields.ERROR_OBJECT, t);
                TracingHelper.logError(span, logEntries);
                if (t instanceof ServerErrorException) {
                    // add to end of queue in order to retry at a later time
                    LOG.info("failed to create command consumer [attempt#: {}]", attemptNo, t);
                    span.log("marking tenant for later re-try to create command consumer");
                    tenantsToEnable.addLast(Pair.of(tenantId, attemptNo + 1));
                }
            })
            .onComplete(r -> {
                span.finish();
                tenantsInProcess.remove(tenantId);
                processTenantQueue(tracingContext);
            });
}
Use of org.eclipse.hono.util.Pair in the Eclipse Hono project.
Example from the class HonoKafkaConsumer, method subscribeAndWaitForRebalance:
/**
 * Subscribes the Kafka consumer to the configured topic pattern or topic list and waits for
 * both the subscription update and the subsequent partition assignment (rebalance) to finish.
 * <p>
 * Concurrent invocations are coalesced: only the first caller actually triggers the
 * subscription; later callers just wait on the same pair of promises. Fails after
 * {@code WAIT_FOR_REBALANCE_TIMEOUT_MILLIS} if no partition assignment happened by then.
 *
 * @return A future completed once subscription and partition assignment are done, or failed
 *         if the consumer was already stopped or the rebalance timed out.
 */
private Future<Void> subscribeAndWaitForRebalance() {
if (stopCalled.get()) {
return Future.failedFuture(new ServerErrorException(HttpURLConnection.HTTP_UNAVAILABLE, "already stopped"));
}
final Promise<Void> partitionAssignmentDone = Promise.promise();
final Promise<Void> subscriptionUpdated = Promise.promise();
final Pair<Promise<Void>, Promise<Void>> newPromisePair = Pair.of(subscriptionUpdated, partitionAssignmentDone);
// atomically either install our promise pair or pick up the one of an ongoing invocation
final var promisePair = subscriptionUpdatedAndPartitionsAssignedPromiseRef.updateAndGet(promise -> promise == null ? newPromisePair : promise);
if (!promisePair.equals(newPromisePair)) {
// another invocation is already in progress - just wait for its promises to complete
log.debug("subscribeAndWaitForRebalance: will wait for ongoing invocation to complete");
return CompositeFuture.all(promisePair.one().future(), promisePair.two().future()).mapEmpty();
}
if (topicPattern != null) {
kafkaConsumer.subscribe(topicPattern, subscriptionUpdated);
} else {
// Trigger retrieval of metadata for each of the subscription topics if not already available locally;
// this will also trigger topic auto-creation if the topic doesn't exist yet.
// Doing so before the "subscribe" invocation shall ensure that these partitions are considered for
// partition assignment.
topics.forEach(topic -> HonoKafkaConsumerHelper.partitionsFor(kafkaConsumer, topic).onSuccess(partitions -> {
if (partitions.isEmpty()) {
log.info("subscription topic doesn't exist and didn't get auto-created: {} [client-id: {}]", topic, getClientId());
}
}));
kafkaConsumer.subscribe(topics, subscriptionUpdated);
}
// init kafkaConsumerWorker if needed; it has to be retrieved after the first "subscribe" invocation
if (kafkaConsumerWorker == null) {
kafkaConsumerWorker = getKafkaConsumerWorker(kafkaConsumer);
}
// guard against a rebalance never happening: fail the assignment promise after the timeout
vertx.setTimer(WAIT_FOR_REBALANCE_TIMEOUT_MILLIS, ar -> {
if (!partitionAssignmentDone.future().isComplete()) {
// clear the shared promise-pair reference so that a later invocation can start fresh
subscriptionUpdatedAndPartitionsAssignedPromiseRef.compareAndSet(promisePair, null);
final String errorMsg = "timed out waiting for rebalance and update of subscribed topics";
log.warn(errorMsg);
partitionAssignmentDone.tryFail(new ServerErrorException(HttpURLConnection.HTTP_UNAVAILABLE, errorMsg));
}
});
// a failed subscription update also clears the shared reference (only if it is still ours)
subscriptionUpdated.future().onFailure(thr -> {
subscriptionUpdatedAndPartitionsAssignedPromiseRef.compareAndSet(promisePair, null);
});
return CompositeFuture.all(subscriptionUpdated.future(), partitionAssignmentDone.future()).mapEmpty();
}
Aggregations