Usage of io.confluent.ksql.services.ServiceContext in the ksql project (confluentinc): class PushRouting, method connectToHosts.
/**
 * Connects to all of the hosts provided, registering a routing result for each one on the
 * given handle.
 *
 * @param serviceContext the service context used to issue requests
 * @param pushPhysicalPlanManager manages the physical plan and query id for this push query
 * @param statement the configured statement, used for routing and error messages
 * @param hosts the hosts to connect to
 * @param outputSchema the schema of the rows produced by the query
 * @param transientQueryQueue the queue that result rows are written to
 * @param pushConnectionsHandle the handle tracking per-host routing results
 * @param dynamicallyAddedNode whether these hosts were discovered after the original request;
 *        failures on such hosts do not fail the original request (they are retried by the
 *        periodic host check — see checkForNewHosts)
 * @param scalablePushQueryMetrics metrics recorder, if metrics are enabled
 * @param catchupHosts hosts that should connect in catchup mode (previously hit an offset gap)
 * @param pushRoutingOptions routing options for the request
 * @param thisHostName the name of this host, used to distinguish local vs remote execution
 * @return A future for a PushConnectionsHandle, which can be used to terminate connections.
 */
@SuppressWarnings("checkstyle:ParameterNumber")
private CompletableFuture<PushConnectionsHandle> connectToHosts(
    final ServiceContext serviceContext,
    final PushPhysicalPlanManager pushPhysicalPlanManager,
    final ConfiguredStatement<Query> statement,
    final Collection<KsqlNode> hosts,
    final LogicalSchema outputSchema,
    final TransientQueryQueue transientQueryQueue,
    final PushConnectionsHandle pushConnectionsHandle,
    final boolean dynamicallyAddedNode,
    final Optional<ScalablePushQueryMetrics> scalablePushQueryMetrics,
    final Set<KsqlNode> catchupHosts,
    final PushRoutingOptions pushRoutingOptions,
    final String thisHostName) {
  final Map<KsqlNode, CompletableFuture<RoutingResult>> futureMap = new LinkedHashMap<>();
  for (final KsqlNode node : hosts) {
    // Register a placeholder immediately so the handle reflects the in-flight connection.
    pushConnectionsHandle.add(
        node, new RoutingResult(RoutingResultStatus.IN_PROGRESS, () -> { }));
    // Completed by the per-host request when it finishes normally, hits an offset gap,
    // or fails with any other error.
    final CompletableFuture<Void> callback = new CompletableFuture<>();
    callback.handle((v, t) -> {
      if (t == null) {
        pushConnectionsHandle.get(node).ifPresent(result -> {
          result.close();
          result.updateStatus(RoutingResultStatus.COMPLETE);
        });
        LOG.info("Host {} completed request {}.", node, pushPhysicalPlanManager.getQueryId());
      } else if (t instanceof GapFoundException) {
        // An offset gap is recoverable: mark it so the host can reconnect in catchup mode
        // rather than failing the whole query.
        pushConnectionsHandle.get(node).ifPresent(result -> {
          result.close();
          result.updateStatus(RoutingResultStatus.OFFSET_GAP_FOUND);
        });
      } else {
        pushConnectionsHandle.completeExceptionally(t);
      }
      return null;
    });
    futureMap.put(node, executeOrRouteQuery(
        node, statement, serviceContext, pushPhysicalPlanManager, outputSchema,
        transientQueryQueue, callback, scalablePushQueryMetrics,
        pushConnectionsHandle.getOffsetsTracker(), catchupHosts.contains(node),
        pushRoutingOptions, thisHostName));
  }
  return CompletableFuture.allOf(futureMap.values().toArray(new CompletableFuture[0]))
      .thenApply(v -> {
        // All connections succeeded: replace the placeholders with the real results.
        for (final KsqlNode node : hosts) {
          final CompletableFuture<RoutingResult> future = futureMap.get(node);
          final RoutingResult routingResult = future.join();
          pushConnectionsHandle.add(node, routingResult);
        }
        return pushConnectionsHandle;
      })
      .exceptionally(t -> {
        // Find the first host whose connection failed, for logging/error reporting.
        final KsqlNode node = futureMap.entrySet().stream()
            .filter(e -> e.getValue().isCompletedExceptionally())
            .map(Entry::getKey)
            .findFirst()
            .orElse(null);
        for (final KsqlNode n : hosts) {
          final CompletableFuture<RoutingResult> future = futureMap.get(n);
          // Take whatever completed exceptionally and mark it as failed
          if (future.isCompletedExceptionally()) {
            pushConnectionsHandle.get(n)
                .ifPresent(result -> result.updateStatus(RoutingResultStatus.FAILED));
          } else {
            // Bug fix: store the successful result under its own node "n"; the original
            // stored it under "node" (the failed host), misfiling the result and
            // potentially clobbering the failed host's FAILED status.
            final RoutingResult routingResult = future.join();
            pushConnectionsHandle.add(n, routingResult);
          }
        }
        LOG.warn("Error routing query {} id {} to host {} at timestamp {} with exception {}",
            statement.getStatementText(), pushPhysicalPlanManager.getQueryId(), node,
            System.currentTimeMillis(), t.getCause());
        // Dynamically added nodes are handled by the periodic host check, which
        // retries in that case and don't fail the original request.
        if (!dynamicallyAddedNode) {
          pushConnectionsHandle.completeExceptionally(
              new KsqlException(String.format(
                  "Unable to execute push query \"%s\". %s",
                  statement.getStatementText(), t.getCause().getMessage())));
        }
        return pushConnectionsHandle;
      })
      .exceptionally(t -> {
        // Last-resort guard so a bug in the handler above cannot propagate out of the chain.
        LOG.error("Unexpected error handing exception", t);
        return pushConnectionsHandle;
      });
}
Usage of io.confluent.ksql.services.ServiceContext in the ksql project (confluentinc): class PushRouting, method checkForNewHosts.
/**
 * Periodically reconciles the set of hosts we are connected to against the hosts currently
 * known to the scalable push registry: connects to newly appeared hosts, tears down
 * connections to hosts that have gone away, and re-arms itself on a timer.
 */
private void checkForNewHosts(
    final ServiceContext serviceContext,
    final PushPhysicalPlanManager pushPhysicalPlanManager,
    final ConfiguredStatement<Query> statement,
    final LogicalSchema outputSchema,
    final TransientQueryQueue transientQueryQueue,
    final PushConnectionsHandle pushConnectionsHandle,
    final Optional<ScalablePushQueryMetrics> scalablePushQueryMetrics,
    final PushRoutingOptions pushRoutingOptions,
    final String thisHostName) {
  // This must always run on the query's Vert.x context.
  VertxUtils.checkContext(pushPhysicalPlanManager.getContext());
  if (pushConnectionsHandle.isClosed()) {
    // Query has been terminated; stop rescheduling.
    return;
  }
  final Set<KsqlNode> latestHosts =
      registryToNodes.apply(pushPhysicalPlanManager.getScalablePushRegistry());
  final Set<KsqlNode> activeHosts = pushConnectionsHandle.getActiveHosts();
  // A host only counts as "new" if no connection attempt to it is still in progress.
  final Set<KsqlNode> addedHosts = Sets.difference(latestHosts, activeHosts).stream()
      .filter(host -> pushConnectionsHandle.get(host)
          .map(existing -> existing.getStatus() != RoutingResultStatus.IN_PROGRESS)
          .orElse(true))
      .collect(Collectors.toSet());
  final Set<KsqlNode> departedHosts = Sets.difference(activeHosts, latestHosts);
  if (addedHosts.size() > 0) {
    LOG.info("Dynamically adding new hosts {} for {}",
        addedHosts, pushPhysicalPlanManager.getQueryId());
    // Hosts that previously hit an offset gap reconnect in catchup mode.
    final Set<KsqlNode> catchupHosts = addedHosts.stream()
        .filter(host -> pushConnectionsHandle.get(host)
            .map(existing -> existing.getStatus() == RoutingResultStatus.OFFSET_GAP_FOUND)
            .orElse(false))
        .collect(Collectors.toSet());
    connectToHosts(serviceContext, pushPhysicalPlanManager, statement, addedHosts,
        outputSchema, transientQueryQueue, pushConnectionsHandle, true,
        scalablePushQueryMetrics, catchupHosts, pushRoutingOptions, thisHostName);
  }
  if (departedHosts.size() > 0) {
    LOG.info("Dynamically removing hosts {} for {}",
        departedHosts, pushPhysicalPlanManager.getQueryId());
    for (final KsqlNode departed : departedHosts) {
      final RoutingResult removed = pushConnectionsHandle.remove(departed);
      removed.close();
      removed.updateStatus(RoutingResultStatus.REMOVED);
    }
  }
  // Re-arm: check again after the configured interval.
  pushPhysicalPlanManager.getContext().owner().setTimer(clusterCheckInterval,
      timerId -> checkForNewHosts(serviceContext, pushPhysicalPlanManager, statement,
          outputSchema, transientQueryQueue, pushConnectionsHandle, scalablePushQueryMetrics,
          pushRoutingOptions, thisHostName));
}
Usage of io.confluent.ksql.services.ServiceContext in the ksql project (confluentinc): class JoinNodeTest, method setUp.
@Before
public void setUp() {
  // Fresh StreamsBuilder per test so stubbed execute contexts build into a clean topology.
  builder = new StreamsBuilder();
  // Stub the service context so the plan-build context resolves to the mock topic client.
  final ServiceContext serviceContext = mock(ServiceContext.class);
  when(serviceContext.getTopicClient()).thenReturn(mockKafkaTopicClient);
  // Plan-build context stubs: config, service context, function registry, and a stacker
  // that derives a QueryContext from the pushed node id.
  when(planBuildContext.getKsqlConfig()).thenReturn(ksqlConfig);
  when(planBuildContext.getServiceContext()).thenReturn(serviceContext);
  // withKsqlConfig returns the same mock so chained calls keep operating on it.
  when(planBuildContext.withKsqlConfig(any())).thenReturn(planBuildContext);
  when(planBuildContext.getFunctionRegistry()).thenReturn(functionRegistry);
  when(planBuildContext.buildNodeContext(any())).thenAnswer(inv -> new QueryContext.Stacker().push(inv.getArgument(0).toString()));
  // Execute context stubs used when the join is actually built into the topology.
  when(executeContext.getKsqlConfig()).thenReturn(ksqlConfig);
  when(executeContext.getStreamsBuilder()).thenReturn(builder);
  when(executeContext.getFunctionRegistry()).thenReturn(functionRegistry);
  when(executeContext.getProcessingLogger(any())).thenReturn(processLogger);
  // Schemas, partition counts, and source names for the left/right join sides.
  when(left.getSchema()).thenReturn(LEFT_NODE_SCHEMA);
  when(right.getSchema()).thenReturn(RIGHT_NODE_SCHEMA);
  when(right2.getSchema()).thenReturn(RIGHT2_NODE_SCHEMA);
  // Both sides report 2 partitions, so co-partitioning checks pass by default.
  when(left.getPartitions(mockKafkaTopicClient)).thenReturn(2);
  when(right.getPartitions(mockKafkaTopicClient)).thenReturn(2);
  when(left.getSourceName()).thenReturn(Optional.of(LEFT_ALIAS));
  when(right.getSourceName()).thenReturn(Optional.of(RIGHT_ALIAS));
  // Synthetic key name used when the join key is not a plain column reference.
  when(joinKey.resolveKeyName(any(), any())).thenReturn(SYNTH_KEY);
  // Wire each side's data source mock with its value format and source node.
  // NOTE(review): right2 shares rightSource — presumably intentional for multi-join tests;
  // confirm against the tests that use right2.
  setUpSource(left, VALUE_FORMAT, leftSourceNode, leftSource);
  setUpSource(right, OTHER_FORMAT, rightSourceNode, rightSource);
  setUpSource(right2, OTHER_FORMAT, right2SourceNode, rightSource);
}
Usage of io.confluent.ksql.services.ServiceContext in the ksql project (confluentinc): class ListConnectorsExecutor, method execute.
@SuppressWarnings("OptionalGetWithoutIsPresent")
/**
 * Executes a LIST CONNECTORS statement by listing connector names from the Connect cluster
 * and describing each one that falls within the statement's scope.
 *
 * @param configuredStatement the LIST CONNECTORS statement being executed
 * @param sessionProperties the session properties (unused here)
 * @param ksqlExecutionContext the execution context (unused here)
 * @param serviceContext provides the Connect client used for all requests
 * @return a handled response containing the connector list, plus warnings for any
 *         connectors that could not be described
 */
public StatementExecutorResponse execute(
    final ConfiguredStatement<ListConnectors> configuredStatement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext ksqlExecutionContext,
    final ServiceContext serviceContext) {
  final ConnectClient connectClient = serviceContext.getConnectClient();
  // Consistency fix: reuse the client fetched above instead of calling
  // serviceContext.getConnectClient() a second time.
  final ConnectResponse<List<String>> connectors = connectClient.connectors();
  if (connectors.error().isPresent()) {
    // The listing itself failed; delegate to the configured error handler.
    return StatementExecutorResponse.handled(
        connectErrorHandler.handle(configuredStatement, connectors));
  }
  final List<SimpleConnectorInfo> infos = new ArrayList<>();
  final List<KsqlWarning> warnings = new ArrayList<>();
  final Scope scope = configuredStatement.getStatement().getScope();
  for (final String name : connectors.datum().get()) {
    final ConnectResponse<ConnectorInfo> response = connectClient.describe(name);
    if (response.datum().filter(i -> inScope(i.type(), scope)).isPresent()) {
      final ConnectResponse<ConnectorStateInfo> status = connectClient.status(name);
      infos.add(fromConnectorInfoResponse(name, response, status));
    } else if (response.error().isPresent()) {
      // Describe failed: for Scope.ALL still list the connector with UNKNOWN type,
      // and surface the failure as a warning either way.
      if (scope == Scope.ALL) {
        infos.add(new SimpleConnectorInfo(name, ConnectorType.UNKNOWN, null, null));
      }
      warnings.add(new KsqlWarning(String.format(
          "Could not describe connector %s: %s", name, response.error().get())));
    }
  }
  return StatementExecutorResponse.handled(Optional.of(
      new ConnectorList(configuredStatement.getStatementText(), warnings, infos)));
}
Usage of io.confluent.ksql.services.ServiceContext in the ksql project (confluentinc): class ListTopicsExecutor, method execute.
/**
 * Executes a LIST TOPICS / SHOW TOPICS statement, optionally including extended
 * per-topic consumer and consumer-group counts when EXTENDED is requested.
 *
 * @param statement the LIST TOPICS statement being executed
 * @param sessionProperties the session properties (unused here)
 * @param executionContext the execution context (unused here)
 * @param serviceContext provides the Kafka topic and admin clients
 * @return a handled response containing either a plain or an extended topic list
 */
public static StatementExecutorResponse execute(
    final ConfiguredStatement<ListTopics> statement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext executionContext,
    final ServiceContext serviceContext) {
  final KafkaTopicClient topicClient = serviceContext.getTopicClient();
  final Map<String, TopicDescription> descriptions = listTopics(topicClient, statement);

  if (!statement.getStatement().getShowExtended()) {
    // Plain listing: just the topic descriptions, no consumer information.
    final List<KafkaTopicInfo> topicInfos = descriptions.values().stream()
        .map(ListTopicsExecutor::topicDescriptionToTopicInfo)
        .collect(Collectors.toList());
    return StatementExecutorResponse.handled(Optional.of(
        new KafkaTopicsList(statement.getStatementText(), topicInfos)));
  }

  // Extended listing: augment each topic with consumer and consumer-group counts.
  final KafkaConsumerGroupClient groupClient =
      new KafkaConsumerGroupClientImpl(serviceContext::getAdminClient);
  final Map<String, List<Integer>> consumerCounts =
      getTopicConsumerAndGroupCounts(groupClient);
  final List<KafkaTopicInfoExtended> extendedInfos = descriptions.values().stream()
      .map(description -> topicDescriptionToTopicInfoExtended(description, consumerCounts))
      .collect(Collectors.toList());
  return StatementExecutorResponse.handled(Optional.of(
      new KafkaTopicsListExtended(statement.getStatementText(), extendedInfos)));
}
Aggregations