use of io.confluent.ksql.services.ServiceContext in project ksql by confluentinc.
the class DescribeConnectorExecutor method execute.
@SuppressWarnings("OptionalGetWithoutIsPresent")
public StatementExecutorResponse execute(
    final ConfiguredStatement<DescribeConnector> configuredStatement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext ksqlExecutionContext,
    final ServiceContext serviceContext
) {
  final String connectorName = configuredStatement.getStatement().getConnectorName();

  final ConnectResponse<ConnectorStateInfo> statusResponse =
      serviceContext.getConnectClient().status(connectorName);
  if (statusResponse.error().isPresent()) {
    return StatementExecutorResponse.handled(
        connectErrorHandler.handle(configuredStatement, statusResponse));
  }

  final ConnectResponse<ConnectorInfo> infoResponse =
      serviceContext.getConnectClient().describe(connectorName);
  if (infoResponse.error().isPresent()) {
    return StatementExecutorResponse.handled(
        connectErrorHandler.handle(configuredStatement, infoResponse));
  }

  final ConnectorStateInfo status = statusResponse.datum().get();
  final ConnectorInfo info = infoResponse.datum().get();
  final Optional<Connector> connector = connectorFactory.apply(info);

  final List<KsqlWarning> warnings;
  final List<String> topics;
  if (connector.isPresent()) {
    // Small optimization: if a connector's info is not found in the response, don't query for
    // active topics with the given connectorName.
    final ConnectResponse<Map<String, Map<String, List<String>>>> topicsResponse =
        serviceContext.getConnectClient().topics(connectorName);

    // A 404 from the topics endpoint is expected from older Connect workers that don't support
    // it, so don't surface a warning to the user; just note the error in the server logs.
    if (topicsResponse.error().isPresent()
        && topicsResponse.httpCode() == HttpStatus.SC_NOT_FOUND) {
      topics = ImmutableList.of();
      warnings = ImmutableList.of();
      LOG.warn("Could not list related topics due to error: " + topicsResponse.error().get());
    } else if (topicsResponse.error().isPresent()) {
      topics = ImmutableList.of();
      warnings = ImmutableList.of(new KsqlWarning(
          "Could not list related topics due to error: " + topicsResponse.error().get()));
    } else {
      topics = topicsResponse.datum().get()
          .get(connectorName)
          .getOrDefault(TOPICS_KEY, ImmutableList.of());
      warnings = ImmutableList.of();
    }
  } else {
    topics = ImmutableList.of();
    warnings = ImmutableList.of();
  }

  // Describe every known data source backed by one of the connector's active topics.
  final List<SourceDescription> sources;
  if (connector.isPresent()) {
    sources = ksqlExecutionContext.getMetaStore().getAllDataSources().values().stream()
        .filter(source -> topics.contains(source.getKafkaTopicName()))
        .map(source -> SourceDescriptionFactory.create(
            source,
            false,
            ImmutableList.of(),
            ImmutableList.of(),
            Optional.empty(),
            ImmutableList.of(),
            ImmutableList.of(),
            ksqlExecutionContext.metricCollectors()))
        .collect(Collectors.toList());
  } else {
    sources = ImmutableList.of();
  }

  final ConnectorDescription description = new ConnectorDescription(
      configuredStatement.getStatementText(),
      info.config().get(ConnectorConfig.CONNECTOR_CLASS_CONFIG),
      status,
      sources,
      topics,
      warnings);
  return StatementExecutorResponse.handled(Optional.of(description));
}
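For context, here is a minimal, self-contained sketch of the same status → describe → topics flow issued directly against the Kafka Connect REST API using the JDK's HttpClient. This is not ksqlDB's ConnectClient; the worker URL and connector name are assumptions, and the 404 handling mirrors the executor's tolerance for older Connect workers that lack the /topics endpoint.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public final class DescribeConnectorSketch {

  private static final HttpClient CLIENT = HttpClient.newHttpClient();
  private static final String CONNECT_URL = "http://localhost:8083"; // assumed worker address

  public static void main(String[] args) throws Exception {
    final String connectorName = "my-connector"; // hypothetical connector

    // Equivalent of connectClient.status(...) and .describe(...): fail fast on any error.
    final String status = getOrThrow("/connectors/" + connectorName + "/status");
    final String info = getOrThrow("/connectors/" + connectorName);

    // Equivalent of connectClient.topics(...): a 404 means an older Connect worker without
    // the /topics endpoint, so treat it as "no topics" rather than as an error.
    final HttpResponse<String> topics = get("/connectors/" + connectorName + "/topics");
    final String activeTopics = topics.statusCode() == 404 ? "[]" : topics.body();

    System.out.println("status: " + status);
    System.out.println("info:   " + info);
    System.out.println("topics: " + activeTopics);
  }

  private static HttpResponse<String> get(final String path) throws Exception {
    final HttpRequest request =
        HttpRequest.newBuilder(URI.create(CONNECT_URL + path)).GET().build();
    return CLIENT.send(request, HttpResponse.BodyHandlers.ofString());
  }

  private static String getOrThrow(final String path) throws Exception {
    final HttpResponse<String> response = get(path);
    if (response.statusCode() != 200) {
      throw new IllegalStateException(path + " failed with HTTP " + response.statusCode());
    }
    return response.body();
  }
}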
use of io.confluent.ksql.services.ServiceContext in project ksql by confluentinc.
the class ListFunctionsExecutor method execute.
public static StatementExecutorResponse execute(
    final ConfiguredStatement<ListFunctions> statement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext executionContext,
    final ServiceContext serviceContext
) {
  final FunctionRegistry functionRegistry = executionContext.getMetaStore();

  // Start with the scalar UDFs, then append table and aggregate functions to the same list.
  final List<SimpleFunctionInfo> all = functionRegistry.listFunctions().stream()
      .map(factory -> new SimpleFunctionInfo(
          factory.getName().toUpperCase(),
          FunctionType.SCALAR,
          factory.getMetadata().getCategory()))
      .collect(Collectors.toList());

  functionRegistry.listTableFunctions().stream()
      .map(factory -> new SimpleFunctionInfo(
          factory.getName().toUpperCase(),
          FunctionType.TABLE,
          factory.getMetadata().getCategory()))
      .forEach(all::add);

  functionRegistry.listAggregateFunctions().stream()
      .map(factory -> new SimpleFunctionInfo(
          factory.getName().toUpperCase(),
          FunctionType.AGGREGATE,
          factory.getMetadata().getCategory()))
      .forEach(all::add);

  return StatementExecutorResponse.handled(
      Optional.of(new FunctionNameList(statement.getStatementText(), all)));
}
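The three pipelines differ only in the registry listing and the FunctionType tag. The standalone sketch below reproduces the pattern with plain JDK types (all names here are illustrative, not ksqlDB API); it also shows why the executor can append to the first list at all: Collectors.toList() yields a mutable ArrayList in practice, where Stream.toList() or List.of() would not.

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public final class FunctionListingSketch {

  enum FunctionType { SCALAR, TABLE, AGGREGATE }

  record SimpleFunctionInfo(String name, FunctionType type) {}

  public static void main(String[] args) {
    // Collectors.toList() returns a mutable list in practice, which is what makes the
    // later forEach(all::add) calls legal; an immutable collector would throw here.
    final List<SimpleFunctionInfo> all = Stream.of("abs", "ucase")
        .map(n -> new SimpleFunctionInfo(n.toUpperCase(), FunctionType.SCALAR))
        .collect(Collectors.toList());

    Stream.of("explode")
        .map(n -> new SimpleFunctionInfo(n.toUpperCase(), FunctionType.TABLE))
        .forEach(all::add);

    Stream.of("sum", "count")
        .map(n -> new SimpleFunctionInfo(n.toUpperCase(), FunctionType.AGGREGATE))
        .forEach(all::add);

    all.forEach(f -> System.out.println(f.name() + " " + f.type()));
  }
}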
use of io.confluent.ksql.services.ServiceContext in project ksql by confluentinc.
the class LocalCommands method processLocalCommandFiles.
public void processLocalCommandFiles(final ServiceContext serviceContext) {
  final FilenameFilter filter = (dir, fileName) -> fileName.endsWith(LOCAL_COMMANDS_FILE_SUFFIX);
  final File[] files = directory.listFiles(filter);
  if (files == null) {
    throw new KsqlServerException("Bad local commands directory " + directory.getAbsolutePath()
        + ". Please check your configuration for "
        + KsqlRestConfig.KSQL_LOCAL_COMMANDS_LOCATION_CONFIG);
  }
  for (final File file : files) {
    // Skip the file this server is currently writing to.
    if (file.equals(currentLocalCommands.getFile())) {
      continue;
    }
    try (LocalCommandsFile localCommandsFile = LocalCommandsFile.createReadonly(file)) {
      final List<LocalCommand> localCommands = localCommandsFile.readRecords();
      cleanUpTransientQueryState(localCommands, serviceContext);
      markFileAsProcessed(file);
    } catch (Exception e) {
      LOG.error("Error processing local commands " + file.getAbsolutePath()
          + ". There may be orphaned transient topics or abandoned state stores.", e);
    }
  }
}
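A minimal standalone sketch of the same sweep-then-mark loop follows. It assumes processed files are marked by renaming them with a ".processed" suffix; the real markFileAsProcessed may work differently, and the directory, suffix, and file names are all illustrative.

import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public final class LocalCommandsSweepSketch {

  public static void main(String[] args) {
    final File directory = new File("/tmp/ksql-local-commands"); // assumed location
    final File current = new File(directory, "current.cmds");    // file still in use

    final FilenameFilter filter = (dir, name) -> name.endsWith(".cmds");
    final File[] files = directory.listFiles(filter);
    if (files == null) {
      // listFiles returns null when the path is not a readable directory.
      throw new IllegalStateException("Bad local commands directory " + directory);
    }

    for (final File file : files) {
      if (file.equals(current)) {
        continue; // never process the file currently being written
      }
      try {
        System.out.println("processing " + Files.readString(file.toPath()));
        // Mark as processed by renaming, so a restart won't re-process the file.
        final Path processed = file.toPath().resolveSibling(file.getName() + ".processed");
        Files.move(file.toPath(), processed);
      } catch (IOException e) {
        // Keep sweeping the remaining files; this one will be retried on the next pass.
        System.err.println("Error processing " + file.getAbsolutePath() + ": " + e);
      }
    }
  }
}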
use of io.confluent.ksql.services.ServiceContext in project ksql by confluentinc.
the class EngineExecutor method executeScalablePushQuery.
ScalablePushQueryMetadata executeScalablePushQuery(
    final ImmutableAnalysis analysis,
    final ConfiguredStatement<Query> statement,
    final PushRouting pushRouting,
    final PushRoutingOptions pushRoutingOptions,
    final QueryPlannerOptions queryPlannerOptions,
    final Context context,
    final Optional<ScalablePushQueryMetrics> scalablePushQueryMetrics
) {
  final SessionConfig sessionConfig = statement.getSessionConfig();

  // If we ever change how many hops a request can do, we'll need to update this for correct
  // metrics.
  final RoutingNodeType routingNodeType = pushRoutingOptions.getHasBeenForwarded()
      ? RoutingNodeType.REMOTE_NODE
      : RoutingNodeType.SOURCE_NODE;

  PushPhysicalPlan plan = null;
  try {
    final KsqlConfig ksqlConfig = sessionConfig.getConfig(false);
    final LogicalPlanNode logicalPlan =
        buildAndValidateLogicalPlan(statement, analysis, ksqlConfig, queryPlannerOptions, true);
    final PushPhysicalPlanCreator pushPhysicalPlanCreator = (offsetRange, catchupConsumerGroup) ->
        buildScalablePushPhysicalPlan(
            logicalPlan, analysis, context, offsetRange, catchupConsumerGroup);
    final Optional<PushOffsetRange> offsetRange =
        pushRoutingOptions.getContinuationToken().map(PushOffsetRange::deserialize);
    final Optional<String> catchupConsumerGroup = pushRoutingOptions.getCatchupConsumerGroup();
    final PushPhysicalPlanManager physicalPlanManager =
        new PushPhysicalPlanManager(pushPhysicalPlanCreator, catchupConsumerGroup, offsetRange);
    final PushPhysicalPlan physicalPlan = physicalPlanManager.getPhysicalPlan();
    plan = physicalPlan;

    final TransientQueryQueue transientQueryQueue =
        new TransientQueryQueue(analysis.getLimitClause());
    final PushQueryMetadata.ResultType resultType =
        physicalPlan.getScalablePushRegistry().isTable()
            ? physicalPlan.getScalablePushRegistry().isWindowed()
                ? ResultType.WINDOWED_TABLE
                : ResultType.TABLE
            : ResultType.STREAM;

    final PushQueryQueuePopulator populator = () -> pushRouting.handlePushQuery(
        serviceContext,
        physicalPlanManager,
        statement,
        pushRoutingOptions,
        physicalPlan.getOutputSchema(),
        transientQueryQueue,
        scalablePushQueryMetrics,
        offsetRange);
    final PushQueryPreparer preparer = () ->
        pushRouting.preparePushQuery(physicalPlanManager, statement, pushRoutingOptions);

    final ScalablePushQueryMetadata metadata = new ScalablePushQueryMetadata(
        physicalPlan.getOutputSchema(),
        physicalPlan.getQueryId(),
        transientQueryQueue,
        scalablePushQueryMetrics,
        resultType,
        populator,
        preparer,
        physicalPlan.getSourceType(),
        routingNodeType,
        physicalPlan::getRowsReadFromDataSource);
    return metadata;
  } catch (final Exception e) {
    if (plan == null) {
      scalablePushQueryMetrics.ifPresent(m -> m.recordErrorRateForNoResult(1));
    } else {
      final PushPhysicalPlan pushPhysicalPlan = plan;
      scalablePushQueryMetrics.ifPresent(metrics ->
          metrics.recordErrorRate(1, pushPhysicalPlan.getSourceType(), routingNodeType));
    }

    final String stmtLower = statement.getStatementText().toLowerCase(Locale.ROOT);
    // Guard against a null message; the KsqlStatementException below already handles that case.
    final String messageLower = e.getMessage() == null
        ? ""
        : e.getMessage().toLowerCase(Locale.ROOT);
    final String stackLower = Throwables.getStackTraceAsString(e).toLowerCase(Locale.ROOT);

    // Don't log the error message or stack trace if either contains
    // the contents of the query, since the statement text may include sensitive information.
    if (messageLower.contains(stmtLower) || stackLower.contains(stmtLower)) {
      final StackTraceElement loc =
          Iterables.getLast(Throwables.getCausalChain(e)).getStackTrace()[0];
      LOG.error("Failure to execute push query V2 {} {}, not logging the error message since it "
              + "contains the query string, which may contain sensitive information."
              + " If you see this LOG message, please submit a GitHub ticket and"
              + " we will scrub the statement text from the error at {}",
          pushRoutingOptions.debugString(), queryPlannerOptions.debugString(), loc);
    } else {
      LOG.error("Failure to execute push query V2. {} {}",
          pushRoutingOptions.debugString(), queryPlannerOptions.debugString(), e);
    }
    LOG.debug("Failed push query V2 text {}, {}", statement.getStatementText(), e);

    throw new KsqlStatementException(
        e.getMessage() == null
            ? "Server Error" + Arrays.toString(e.getStackTrace())
            : e.getMessage(),
        statement.getStatementText(),
        e);
  }
}
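The sensitive-logging guard in the catch block is reusable on its own. Here is a minimal standalone sketch of the same check; the class, method, and message strings are illustrative, not ksqlDB API.

import java.util.Locale;

public final class SafeErrorLogSketch {

  // Suppress an exception's message when it echoes the (potentially sensitive) statement text.
  static void logFailure(final String statementText, final Exception e) {
    final String stmtLower = statementText.toLowerCase(Locale.ROOT);
    final String messageLower = e.getMessage() == null
        ? ""
        : e.getMessage().toLowerCase(Locale.ROOT);

    if (messageLower.contains(stmtLower)) {
      // The message echoes the query, so log only a generic marker.
      System.err.println("Query failed; message withheld because it contains the statement text.");
    } else {
      System.err.println("Query failed: " + e.getMessage());
    }
  }

  public static void main(String[] args) {
    logFailure("SELECT * FROM orders EMIT CHANGES;",
        new RuntimeException("Error in query: SELECT * FROM orders EMIT CHANGES;"));
    logFailure("SELECT * FROM orders EMIT CHANGES;",
        new RuntimeException("Connection refused"));
  }
}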
use of io.confluent.ksql.services.ServiceContext in project ksql by confluentinc.
the class OrphanedTransientQueryCleaner method cleanupOrphanedInternalTopics.
/**
 * Cleans up any internal topics that may exist for the given set of query application
 * ids, since the corresponding queries are assumed to be completed.
 *
 * @param serviceContext The service context
 * @param queryApplicationIds The set of completed query application ids
 */
public void cleanupOrphanedInternalTopics(
    final ServiceContext serviceContext,
    final Set<String> queryApplicationIds
) {
  final KafkaTopicClient topicClient = serviceContext.getTopicClient();
  final Set<String> topicNames;
  try {
    topicNames = topicClient.listTopicNames();
  } catch (KafkaResponseGetFailedException e) {
    LOG.error("Couldn't fetch topic names", e);
    return;
  }

  // Find the application ids whose transient query topics still exist on the broker.
  final Set<String> orphanedQueryApplicationIds = topicNames.stream()
      .map(topicName -> queryApplicationIds.stream()
          .filter(topicName::startsWith)
          .findFirst())
      .filter(Optional::isPresent)
      .map(Optional::get)
      .collect(Collectors.toSet());

  for (final String queryApplicationId : orphanedQueryApplicationIds) {
    cleanupService.addCleanupTask(new QueryCleanupService.QueryCleanupTask(
        serviceContext,
        queryApplicationId,
        Optional.empty(),
        true,
        ksqlConfig.getKsqlStreamConfigProps()
            .getOrDefault(
                StreamsConfig.STATE_DIR_CONFIG,
                StreamsConfig.configDef().defaultValues().get(StreamsConfig.STATE_DIR_CONFIG))
            .toString(),
        ksqlConfig.getString(KsqlConfig.KSQL_SERVICE_ID_CONFIG),
        ksqlConfig.getString(KsqlConfig.KSQL_PERSISTENT_QUERY_NAME_PREFIX_CONFIG)));
  }
}
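A self-contained sketch of the orphan-detection step follows, assuming (as the code above does) that a transient query's internal topics are named by prefixing the query's application id. The cleanup machinery is omitted, and the example ids and topic names are illustrative.

import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;

public final class OrphanDetectionSketch {

  public static void main(String[] args) {
    final Set<String> completedAppIds = Set.of(
        "_confluent-ksql-default_transient_123",
        "_confluent-ksql-default_transient_456");

    final List<String> brokerTopics = List.of(
        "_confluent-ksql-default_transient_123-KafkaTopic_Right-Reduce-changelog",
        "orders",
        "payments");

    // An application id is "orphaned" if any broker topic still starts with it.
    final Set<String> orphaned = brokerTopics.stream()
        .map(topic -> completedAppIds.stream().filter(topic::startsWith).findFirst())
        .filter(Optional::isPresent)
        .map(Optional::get)
        .collect(Collectors.toSet());

    // Only ..._123 still owns topics, so only it needs a cleanup task.
    orphaned.forEach(System.out::println);
  }
}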