
Example 1 with ImmutableAnalysis

Use of io.confluent.ksql.analyzer.ImmutableAnalysis in project ksql by confluentinc.

From the class EngineExecutor, method executeScalablePushQuery.

ScalablePushQueryMetadata executeScalablePushQuery(
        final ImmutableAnalysis analysis,
        final ConfiguredStatement<Query> statement,
        final PushRouting pushRouting,
        final PushRoutingOptions pushRoutingOptions,
        final QueryPlannerOptions queryPlannerOptions,
        final Context context,
        final Optional<ScalablePushQueryMetrics> scalablePushQueryMetrics) {
    final SessionConfig sessionConfig = statement.getSessionConfig();
    // If we ever change how many hops a request can do, we'll need to update this for correct
    // metrics.
    final RoutingNodeType routingNodeType = pushRoutingOptions.getHasBeenForwarded()
            ? RoutingNodeType.REMOTE_NODE
            : RoutingNodeType.SOURCE_NODE;
    PushPhysicalPlan plan = null;
    try {
        final KsqlConfig ksqlConfig = sessionConfig.getConfig(false);
        final LogicalPlanNode logicalPlan = buildAndValidateLogicalPlan(
                statement, analysis, ksqlConfig, queryPlannerOptions, true);
        final PushPhysicalPlanCreator pushPhysicalPlanCreator = (offsetRange, catchupConsumerGroup) ->
                buildScalablePushPhysicalPlan(logicalPlan, analysis, context, offsetRange, catchupConsumerGroup);
        final Optional<PushOffsetRange> offsetRange =
                pushRoutingOptions.getContinuationToken().map(PushOffsetRange::deserialize);
        final Optional<String> catchupConsumerGroup = pushRoutingOptions.getCatchupConsumerGroup();
        final PushPhysicalPlanManager physicalPlanManager =
                new PushPhysicalPlanManager(pushPhysicalPlanCreator, catchupConsumerGroup, offsetRange);
        final PushPhysicalPlan physicalPlan = physicalPlanManager.getPhysicalPlan();
        plan = physicalPlan;
        final TransientQueryQueue transientQueryQueue = new TransientQueryQueue(analysis.getLimitClause());
        final PushQueryMetadata.ResultType resultType = physicalPlan.getScalablePushRegistry().isTable()
                ? physicalPlan.getScalablePushRegistry().isWindowed() ? ResultType.WINDOWED_TABLE : ResultType.TABLE
                : ResultType.STREAM;
        final PushQueryQueuePopulator populator = () -> pushRouting.handlePushQuery(
                serviceContext, physicalPlanManager, statement, pushRoutingOptions,
                physicalPlan.getOutputSchema(), transientQueryQueue, scalablePushQueryMetrics, offsetRange);
        final PushQueryPreparer preparer = () ->
                pushRouting.preparePushQuery(physicalPlanManager, statement, pushRoutingOptions);
        final ScalablePushQueryMetadata metadata = new ScalablePushQueryMetadata(
                physicalPlan.getOutputSchema(), physicalPlan.getQueryId(), transientQueryQueue,
                scalablePushQueryMetrics, resultType, populator, preparer,
                physicalPlan.getSourceType(), routingNodeType, physicalPlan::getRowsReadFromDataSource);
        return metadata;
    } catch (final Exception e) {
        if (plan == null) {
            scalablePushQueryMetrics.ifPresent(m -> m.recordErrorRateForNoResult(1));
        } else {
            final PushPhysicalPlan pushPhysicalPlan = plan;
            scalablePushQueryMetrics.ifPresent(metrics -> metrics.recordErrorRate(1, pushPhysicalPlan.getSourceType(), routingNodeType));
        }
        final String stmtLower = statement.getStatementText().toLowerCase(Locale.ROOT);
        final String messageLower = e.getMessage().toLowerCase(Locale.ROOT);
        final String stackLower = Throwables.getStackTraceAsString(e).toLowerCase(Locale.ROOT);
        // Do not include the statement text in the default logs, as it may contain sensitive
        // information; the exception which is returned to the user below will contain
        // the contents of the query.
        if (messageLower.contains(stmtLower) || stackLower.contains(stmtLower)) {
            final StackTraceElement loc = Iterables.getLast(Throwables.getCausalChain(e)).getStackTrace()[0];
            LOG.error("Failure to execute push query V2 {} {}, not logging the error message since it " + "contains the query string, which may contain sensitive information." + " If you see this LOG message, please submit a GitHub ticket and" + " we will scrub the statement text from the error at {}", pushRoutingOptions.debugString(), queryPlannerOptions.debugString(), loc);
        } else {
            LOG.error("Failure to execute push query V2. {} {}", pushRoutingOptions.debugString(), queryPlannerOptions.debugString(), e);
        }
        LOG.debug("Failed push query V2 text {}, {}", statement.getStatementText(), e);
        throw new KsqlStatementException(
                e.getMessage() == null ? "Server Error" + Arrays.toString(e.getStackTrace()) : e.getMessage(),
                statement.getStatementText(), e);
    }
}
Also used : DataSource(io.confluent.ksql.metastore.model.DataSource) PushPhysicalPlanCreator(io.confluent.ksql.physical.scalablepush.PushPhysicalPlanCreator) CreateTableAsSelect(io.confluent.ksql.parser.tree.CreateTableAsSelect) Arrays(java.util.Arrays) InternalFunctionRegistry(io.confluent.ksql.function.InternalFunctionRegistry) SourceName(io.confluent.ksql.name.SourceName) RoutingOptions(io.confluent.ksql.execution.streams.RoutingOptions) PushPhysicalPlanManager(io.confluent.ksql.physical.scalablepush.PushPhysicalPlanManager) PushPhysicalPlanBuilder(io.confluent.ksql.physical.scalablepush.PushPhysicalPlanBuilder) RoutingNodeType(io.confluent.ksql.util.KsqlConstants.RoutingNodeType) TransientQueryMetadata(io.confluent.ksql.util.TransientQueryMetadata) ExecuteResult(io.confluent.ksql.KsqlExecutionContext.ExecuteResult) Map(java.util.Map) KsqlBareOutputNode(io.confluent.ksql.planner.plan.KsqlBareOutputNode) QueryId(io.confluent.ksql.query.QueryId) ExecutionStep(io.confluent.ksql.execution.plan.ExecutionStep) RefinementInfo(io.confluent.ksql.serde.RefinementInfo) ImmutableAnalysis(io.confluent.ksql.analyzer.ImmutableAnalysis) Sink(io.confluent.ksql.parser.tree.Sink) Set(java.util.Set) Relation(io.confluent.ksql.parser.tree.Relation) ConfiguredStatement(io.confluent.ksql.statement.ConfiguredStatement) MetaStoreImpl(io.confluent.ksql.metastore.MetaStoreImpl) KsqlException(io.confluent.ksql.util.KsqlException) TransientQueryQueue(io.confluent.ksql.query.TransientQueryQueue) PullQueryResult(io.confluent.ksql.physical.pull.PullQueryResult) Iterables(com.google.common.collect.Iterables) FormatOptions(io.confluent.ksql.schema.utils.FormatOptions) PushRouting(io.confluent.ksql.physical.scalablepush.PushRouting) UnqualifiedColumnReferenceExp(io.confluent.ksql.execution.expression.tree.UnqualifiedColumnReferenceExp) CreateStreamAsSelect(io.confluent.ksql.parser.tree.CreateStreamAsSelect) SessionConfig(io.confluent.ksql.config.SessionConfig) CreateStream(io.confluent.ksql.parser.tree.CreateStream) SingleColumn(io.confluent.ksql.parser.tree.SingleColumn) MetaStore(io.confluent.ksql.metastore.MetaStore) KsqlStructuredDataOutputNode(io.confluent.ksql.planner.plan.KsqlStructuredDataOutputNode) PushRoutingOptions(io.confluent.ksql.physical.scalablepush.PushRoutingOptions) PlanInfoExtractor(io.confluent.ksql.execution.plan.PlanInfoExtractor) DataSourceNode(io.confluent.ksql.planner.plan.DataSourceNode) QueryContainer(io.confluent.ksql.parser.tree.QueryContainer) OutputNode(io.confluent.ksql.planner.plan.OutputNode) Throwables(com.google.common.base.Throwables) PushQueryMetadata(io.confluent.ksql.util.PushQueryMetadata) PushQueryQueuePopulator(io.confluent.ksql.physical.scalablepush.PushQueryQueuePopulator) ValueFormat(io.confluent.ksql.serde.ValueFormat) Table(io.confluent.ksql.parser.tree.Table) KsqlStatementException(io.confluent.ksql.util.KsqlStatementException) CreateAsSelect(io.confluent.ksql.parser.tree.CreateAsSelect) KsqlTopic(io.confluent.ksql.execution.ddl.commands.KsqlTopic) OutputRefinement(io.confluent.ksql.parser.OutputRefinement) LogicalPlanNode(io.confluent.ksql.planner.LogicalPlanNode) Query(io.confluent.ksql.parser.tree.Query) ServiceContext(io.confluent.ksql.services.ServiceContext) LoggerFactory(org.slf4j.LoggerFactory) AliasedRelation(io.confluent.ksql.parser.tree.AliasedRelation) Formats(io.confluent.ksql.execution.plan.Formats) MutableMetaStore(io.confluent.ksql.metastore.MutableMetaStore) Context(io.vertx.core.Context) CreateTable(io.confluent.ksql.parser.tree.CreateTable) 
Locale(java.util.Locale) PersistentQueryMetadata(io.confluent.ksql.util.PersistentQueryMetadata) KsqlTable(io.confluent.ksql.metastore.model.KsqlTable) TopicPartition(org.apache.kafka.common.TopicPartition) ImmutableSet(com.google.common.collect.ImmutableSet) ImmutableMap(com.google.common.collect.ImmutableMap) Collection(java.util.Collection) ScalablePushQueryMetadata(io.confluent.ksql.util.ScalablePushQueryMetadata) ScalablePushQueryMetrics(io.confluent.ksql.internal.ScalablePushQueryMetrics) KsqlConfig(io.confluent.ksql.util.KsqlConfig) ExecutableDdlStatement(io.confluent.ksql.parser.tree.ExecutableDdlStatement) LogicalSchema(io.confluent.ksql.schema.ksql.LogicalSchema) Collectors(java.util.stream.Collectors) DdlCommand(io.confluent.ksql.execution.ddl.commands.DdlCommand) Objects(java.util.Objects) PullQueryExecutorMetrics(io.confluent.ksql.internal.PullQueryExecutorMetrics) QueryPlannerOptions(io.confluent.ksql.planner.QueryPlannerOptions) ConsistencyOffsetVector(io.confluent.ksql.util.ConsistencyOffsetVector) Optional(java.util.Optional) Statement(io.confluent.ksql.parser.tree.Statement) KsqlConstants(io.confluent.ksql.util.KsqlConstants) SuppressFBWarnings(edu.umd.cs.findbugs.annotations.SuppressFBWarnings) PullQueryQueuePopulator(io.confluent.ksql.physical.pull.PullQueryQueuePopulator) PullQueryQueue(io.confluent.ksql.query.PullQueryQueue) PushPhysicalPlan(io.confluent.ksql.physical.scalablepush.PushPhysicalPlan) HARouting(io.confluent.ksql.physical.pull.HARouting) PlanInfo(io.confluent.ksql.execution.plan.PlanInfo) PullPhysicalPlanBuilder(io.confluent.ksql.physical.pull.PullPhysicalPlanBuilder) KeyFormat(io.confluent.ksql.serde.KeyFormat) ResultType(io.confluent.ksql.util.PushQueryMetadata.ResultType) CompletableFuture(java.util.concurrent.CompletableFuture) DataSourceType(io.confluent.ksql.metastore.model.DataSource.DataSourceType) OptionalInt(java.util.OptionalInt) PushOffsetRange(io.confluent.ksql.util.PushOffsetRange) LogicalPlanner(io.confluent.ksql.planner.LogicalPlanner) Logger(org.slf4j.Logger) PhysicalPlan(io.confluent.ksql.physical.PhysicalPlan) PlanSummary(io.confluent.ksql.util.PlanSummary) PullPhysicalPlan(io.confluent.ksql.physical.pull.PullPhysicalPlan) PlanNode(io.confluent.ksql.planner.plan.PlanNode) QueryRegistry(io.confluent.ksql.query.QueryRegistry) Collections(java.util.Collections) CreateTableCommand(io.confluent.ksql.execution.ddl.commands.CreateTableCommand) Select(io.confluent.ksql.parser.tree.Select) PushQueryPreparer(io.confluent.ksql.physical.scalablepush.PushQueryPreparer) RoutingNodeType(io.confluent.ksql.util.KsqlConstants.RoutingNodeType) PushQueryMetadata(io.confluent.ksql.util.PushQueryMetadata) ScalablePushQueryMetadata(io.confluent.ksql.util.ScalablePushQueryMetadata) PushPhysicalPlan(io.confluent.ksql.physical.scalablepush.PushPhysicalPlan) SessionConfig(io.confluent.ksql.config.SessionConfig) KsqlConfig(io.confluent.ksql.util.KsqlConfig) LogicalPlanNode(io.confluent.ksql.planner.LogicalPlanNode) KsqlException(io.confluent.ksql.util.KsqlException) KsqlStatementException(io.confluent.ksql.util.KsqlStatementException) TransientQueryQueue(io.confluent.ksql.query.TransientQueryQueue) PushQueryPreparer(io.confluent.ksql.physical.scalablepush.PushQueryPreparer) PushPhysicalPlanManager(io.confluent.ksql.physical.scalablepush.PushPhysicalPlanManager) ScalablePushQueryMetadata(io.confluent.ksql.util.ScalablePushQueryMetadata) PushPhysicalPlanCreator(io.confluent.ksql.physical.scalablepush.PushPhysicalPlanCreator) 
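
The catch block above deliberately avoids logging the statement text when the exception message happens to echo it back. Below is a minimal sketch of that guard in isolation, using only SLF4J and Guava; the SensitiveLogGuard class and logQueryFailure method are hypothetical names, not ksql API.

import com.google.common.base.Throwables;
import com.google.common.collect.Iterables;
import java.util.Locale;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical helper, not part of ksql: mirrors the guard used above so that a
// statement which leaks into an exception message is never written to the log.
public final class SensitiveLogGuard {

  private static final Logger LOG = LoggerFactory.getLogger(SensitiveLogGuard.class);

  private SensitiveLogGuard() {
  }

  public static void logQueryFailure(final String statementText, final Exception e) {
    final String stmtLower = statementText.toLowerCase(Locale.ROOT);
    final String messageLower =
        e.getMessage() == null ? "" : e.getMessage().toLowerCase(Locale.ROOT);
    final String stackLower = Throwables.getStackTraceAsString(e).toLowerCase(Locale.ROOT);

    if (messageLower.contains(stmtLower) || stackLower.contains(stmtLower)) {
      // The exception echoes the query text, so log only where it was thrown from.
      final StackTraceElement loc =
          Iterables.getLast(Throwables.getCausalChain(e)).getStackTrace()[0];
      LOG.error("Query failed; not logging the error since it contains the statement text. Thrown at {}", loc);
    } else {
      // Safe to log the full exception, since it does not contain the statement text.
      LOG.error("Query failed.", e);
    }
  }
}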

Example 2 with ImmutableAnalysis

Use of io.confluent.ksql.analyzer.ImmutableAnalysis in project ksql by confluentinc.

From the class QueryMetricsUtil, method initializePullStreamMetricsCallback.

public static MetricsCallback initializePullStreamMetricsCallback(
        final Optional<PullQueryExecutorMetrics> pullQueryMetrics,
        final SlidingWindowRateLimiter pullBandRateLimiter,
        final ImmutableAnalysis analysis,
        final AtomicReference<StreamPullQueryMetadata> resultForMetrics,
        final AtomicReference<Decrementer> refDecrementer) {
    final MetricsCallback metricsCallback = (statusCode, requestBytes, responseBytes, startTimeNanos) -> pullQueryMetrics.ifPresent(metrics -> {
        metrics.recordStatusCode(statusCode);
        metrics.recordRequestSize(requestBytes);
        final StreamPullQueryMetadata m = resultForMetrics.get();
        final KafkaStreams.State state = m == null ? null : m.getTransientQueryMetadata().getKafkaStreams().state();
        if (m == null || state == null || state.equals(State.ERROR) || state.equals(State.PENDING_ERROR)) {
            recordErrorMetrics(pullQueryMetrics, responseBytes, startTimeNanos);
        } else {
            final boolean isWindowed = analysis.getFrom().getDataSource().getKsqlTopic().getKeyFormat().isWindowed();
            final QuerySourceType sourceType = isWindowed ? QuerySourceType.WINDOWED_STREAM : QuerySourceType.NON_WINDOWED_STREAM;
            // There is no WHERE clause constraint information in the persistent logical plan
            final PullPhysicalPlanType planType = PullPhysicalPlanType.UNKNOWN;
            final RoutingNodeType routingNodeType = RoutingNodeType.SOURCE_NODE;
            metrics.recordResponseSize(responseBytes, sourceType, planType, routingNodeType);
            metrics.recordLatency(startTimeNanos, sourceType, planType, routingNodeType);
            final TransientQueryQueue rowQueue = (TransientQueryQueue) m.getTransientQueryMetadata().getRowQueue();
            // The rows read from the underlying data source equal the rows read by the user
            // since the WHERE condition is pushed to the data source
            metrics.recordRowsReturned(rowQueue.getTotalRowsQueued(), sourceType, planType, routingNodeType);
            metrics.recordRowsProcessed(rowQueue.getTotalRowsQueued(), sourceType, planType, routingNodeType);
        }
        pullBandRateLimiter.add(responseBytes);
        // Decrement on happy or exception path
        final Decrementer decrementer = refDecrementer.get();
        if (decrementer != null) {
            decrementer.decrementAtMostOnce();
        }
    });
    return metricsCallback;
}
Also used : ImmutableAnalysis(io.confluent.ksql.analyzer.ImmutableAnalysis) Decrementer(io.confluent.ksql.rest.util.ConcurrencyLimiter.Decrementer) ScalablePushQueryMetadata(io.confluent.ksql.util.ScalablePushQueryMetadata) QuerySourceType(io.confluent.ksql.util.KsqlConstants.QuerySourceType) ScalablePushQueryMetrics(io.confluent.ksql.internal.ScalablePushQueryMetrics) RoutingNodeType(io.confluent.ksql.util.KsqlConstants.RoutingNodeType) MetricsCallback(io.confluent.ksql.api.server.MetricsCallback) State(org.apache.kafka.streams.KafkaStreams.State) AtomicReference(java.util.concurrent.atomic.AtomicReference) PullQueryExecutorMetrics(io.confluent.ksql.internal.PullQueryExecutorMetrics) StreamPullQueryMetadata(io.confluent.ksql.util.StreamPullQueryMetadata) Optional(java.util.Optional) TransientQueryQueue(io.confluent.ksql.query.TransientQueryQueue) KafkaStreams(org.apache.kafka.streams.KafkaStreams) PullPhysicalPlanType(io.confluent.ksql.physical.pull.PullPhysicalPlan.PullPhysicalPlanType) PullQueryResult(io.confluent.ksql.physical.pull.PullQueryResult) SlidingWindowRateLimiter(io.confluent.ksql.api.server.SlidingWindowRateLimiter) RoutingNodeType(io.confluent.ksql.util.KsqlConstants.RoutingNodeType) KafkaStreams(org.apache.kafka.streams.KafkaStreams) TransientQueryQueue(io.confluent.ksql.query.TransientQueryQueue) State(org.apache.kafka.streams.KafkaStreams.State) PullPhysicalPlanType(io.confluent.ksql.physical.pull.PullPhysicalPlan.PullPhysicalPlanType) StreamPullQueryMetadata(io.confluent.ksql.util.StreamPullQueryMetadata) MetricsCallback(io.confluent.ksql.api.server.MetricsCallback) Decrementer(io.confluent.ksql.rest.util.ConcurrencyLimiter.Decrementer) QuerySourceType(io.confluent.ksql.util.KsqlConstants.QuerySourceType)
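
The end of the callback releases a concurrency slot on both the happy and the exception path, at most once. Here is a minimal, framework-free sketch of that idea; SlotDecrementer and its release Runnable are hypothetical stand-ins for ksql's ConcurrencyLimiter.Decrementer.

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

// Hypothetical stand-in for the Decrementer used above: the metrics callback must
// release a concurrency slot exactly once, whether the query succeeds or fails.
final class SlotDecrementer {

  private final AtomicBoolean decremented = new AtomicBoolean(false);
  private final Runnable release;

  SlotDecrementer(final Runnable release) {
    this.release = release;
  }

  void decrementAtMostOnce() {
    // compareAndSet guarantees the release action runs only on the first call.
    if (decremented.compareAndSet(false, true)) {
      release.run();
    }
  }
}

final class DecrementerDemo {
  public static void main(final String[] args) {
    final AtomicReference<SlotDecrementer> refDecrementer = new AtomicReference<>(null);
    refDecrementer.set(new SlotDecrementer(() -> System.out.println("slot released")));

    // Both the success path and the error path call the same guard, as in the
    // callback above, so the slot is released exactly once.
    final SlotDecrementer decrementer = refDecrementer.get();
    if (decrementer != null) {
      decrementer.decrementAtMostOnce();
      decrementer.decrementAtMostOnce(); // second call is a no-op
    }
  }
}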

Example 3 with ImmutableAnalysis

Use of io.confluent.ksql.analyzer.ImmutableAnalysis in project ksql by confluentinc.

From the class EngineExecutor, method executeTablePullQuery.

/**
 * Evaluates a pull query by first analyzing it, then building the logical plan and finally
 * the physical plan. The execution is then done using the physical plan in a pipelined manner.
 * @param statement The pull query
 * @param routingOptions Configuration parameters used for HA routing
 * @param pullQueryMetrics JMX metrics
 * @return the rows that are the result of evaluating the pull query
 */
PullQueryResult executeTablePullQuery(
        final ImmutableAnalysis analysis,
        final ConfiguredStatement<Query> statement,
        final HARouting routing,
        final RoutingOptions routingOptions,
        final QueryPlannerOptions queryPlannerOptions,
        final Optional<PullQueryExecutorMetrics> pullQueryMetrics,
        final boolean startImmediately,
        final Optional<ConsistencyOffsetVector> consistencyOffsetVector) {
    if (!statement.getStatement().isPullQuery()) {
        throw new IllegalArgumentException("Executor can only handle pull queries");
    }
    final SessionConfig sessionConfig = statement.getSessionConfig();
    // If we ever change how many hops a request can do, we'll need to update this for correct
    // metrics.
    final RoutingNodeType routingNodeType = routingOptions.getIsSkipForwardRequest()
            ? RoutingNodeType.REMOTE_NODE
            : RoutingNodeType.SOURCE_NODE;
    PullPhysicalPlan plan = null;
    try {
        // Do not set sessionConfig.getConfig to true! The copying is inefficient and slows down pull
        // query performance significantly.  Instead use QueryPlannerOptions which check overrides
        // deliberately.
        final KsqlConfig ksqlConfig = sessionConfig.getConfig(false);
        final LogicalPlanNode logicalPlan = buildAndValidateLogicalPlan(statement, analysis, ksqlConfig, queryPlannerOptions, false);
        // This is a cancel signal that is used to stop both local operations and requests
        final CompletableFuture<Void> shouldCancelRequests = new CompletableFuture<>();
        plan = buildPullPhysicalPlan(
                logicalPlan, analysis, queryPlannerOptions, shouldCancelRequests, consistencyOffsetVector);
        final PullPhysicalPlan physicalPlan = plan;
        final PullQueryQueue pullQueryQueue = new PullQueryQueue(analysis.getLimitClause());
        final PullQueryQueuePopulator populator = () -> routing.handlePullQuery(
                serviceContext, physicalPlan, statement, routingOptions, physicalPlan.getOutputSchema(),
                physicalPlan.getQueryId(), pullQueryQueue, shouldCancelRequests, consistencyOffsetVector);
        final PullQueryResult result = new PullQueryResult(
                physicalPlan.getOutputSchema(), populator, physicalPlan.getQueryId(), pullQueryQueue,
                pullQueryMetrics, physicalPlan.getSourceType(), physicalPlan.getPlanType(), routingNodeType,
                physicalPlan::getRowsReadFromDataSource, shouldCancelRequests, consistencyOffsetVector);
        if (startImmediately) {
            result.start();
        }
        return result;
    } catch (final Exception e) {
        if (plan == null) {
            pullQueryMetrics.ifPresent(m -> m.recordErrorRateForNoResult(1));
        } else {
            final PullPhysicalPlan physicalPlan = plan;
            pullQueryMetrics.ifPresent(metrics -> metrics.recordErrorRate(1, physicalPlan.getSourceType(), physicalPlan.getPlanType(), routingNodeType));
        }
        final String stmtLower = statement.getStatementText().toLowerCase(Locale.ROOT);
        final String messageLower = e.getMessage().toLowerCase(Locale.ROOT);
        final String stackLower = Throwables.getStackTraceAsString(e).toLowerCase(Locale.ROOT);
        // Do not include the statement text in the default logs, as it may contain sensitive
        // information; the exception which is returned to the user below will contain
        // the contents of the query.
        if (messageLower.contains(stmtLower) || stackLower.contains(stmtLower)) {
            final StackTraceElement loc = Iterables.getLast(Throwables.getCausalChain(e)).getStackTrace()[0];
            LOG.error("Failure to execute pull query {} {}, not logging the error message since it " + "contains the query string, which may contain sensitive information. If you " + "see this LOG message, please submit a GitHub ticket and we will scrub " + "the statement text from the error at {}", routingOptions.debugString(), queryPlannerOptions.debugString(), loc);
        } else {
            LOG.error("Failure to execute pull query. {} {}", routingOptions.debugString(), queryPlannerOptions.debugString(), e);
        }
        LOG.debug("Failed pull query text {}, {}", statement.getStatementText(), e);
        throw new KsqlStatementException(
                e.getMessage() == null ? "Server Error" + Arrays.toString(e.getStackTrace()) : e.getMessage(),
                statement.getStatementText(), e);
    }
}
Also used : DataSource(io.confluent.ksql.metastore.model.DataSource) PushPhysicalPlanCreator(io.confluent.ksql.physical.scalablepush.PushPhysicalPlanCreator) CreateTableAsSelect(io.confluent.ksql.parser.tree.CreateTableAsSelect) Arrays(java.util.Arrays) InternalFunctionRegistry(io.confluent.ksql.function.InternalFunctionRegistry) SourceName(io.confluent.ksql.name.SourceName) RoutingOptions(io.confluent.ksql.execution.streams.RoutingOptions) PushPhysicalPlanManager(io.confluent.ksql.physical.scalablepush.PushPhysicalPlanManager) PushPhysicalPlanBuilder(io.confluent.ksql.physical.scalablepush.PushPhysicalPlanBuilder) RoutingNodeType(io.confluent.ksql.util.KsqlConstants.RoutingNodeType) TransientQueryMetadata(io.confluent.ksql.util.TransientQueryMetadata) ExecuteResult(io.confluent.ksql.KsqlExecutionContext.ExecuteResult) Map(java.util.Map) KsqlBareOutputNode(io.confluent.ksql.planner.plan.KsqlBareOutputNode) QueryId(io.confluent.ksql.query.QueryId) ExecutionStep(io.confluent.ksql.execution.plan.ExecutionStep) RefinementInfo(io.confluent.ksql.serde.RefinementInfo) ImmutableAnalysis(io.confluent.ksql.analyzer.ImmutableAnalysis) Sink(io.confluent.ksql.parser.tree.Sink) Set(java.util.Set) Relation(io.confluent.ksql.parser.tree.Relation) ConfiguredStatement(io.confluent.ksql.statement.ConfiguredStatement) MetaStoreImpl(io.confluent.ksql.metastore.MetaStoreImpl) KsqlException(io.confluent.ksql.util.KsqlException) TransientQueryQueue(io.confluent.ksql.query.TransientQueryQueue) PullQueryResult(io.confluent.ksql.physical.pull.PullQueryResult) Iterables(com.google.common.collect.Iterables) FormatOptions(io.confluent.ksql.schema.utils.FormatOptions) PushRouting(io.confluent.ksql.physical.scalablepush.PushRouting) UnqualifiedColumnReferenceExp(io.confluent.ksql.execution.expression.tree.UnqualifiedColumnReferenceExp) CreateStreamAsSelect(io.confluent.ksql.parser.tree.CreateStreamAsSelect) SessionConfig(io.confluent.ksql.config.SessionConfig) CreateStream(io.confluent.ksql.parser.tree.CreateStream) SingleColumn(io.confluent.ksql.parser.tree.SingleColumn) MetaStore(io.confluent.ksql.metastore.MetaStore) KsqlStructuredDataOutputNode(io.confluent.ksql.planner.plan.KsqlStructuredDataOutputNode) PushRoutingOptions(io.confluent.ksql.physical.scalablepush.PushRoutingOptions) PlanInfoExtractor(io.confluent.ksql.execution.plan.PlanInfoExtractor) DataSourceNode(io.confluent.ksql.planner.plan.DataSourceNode) QueryContainer(io.confluent.ksql.parser.tree.QueryContainer) OutputNode(io.confluent.ksql.planner.plan.OutputNode) Throwables(com.google.common.base.Throwables) PushQueryMetadata(io.confluent.ksql.util.PushQueryMetadata) PushQueryQueuePopulator(io.confluent.ksql.physical.scalablepush.PushQueryQueuePopulator) ValueFormat(io.confluent.ksql.serde.ValueFormat) Table(io.confluent.ksql.parser.tree.Table) KsqlStatementException(io.confluent.ksql.util.KsqlStatementException) CreateAsSelect(io.confluent.ksql.parser.tree.CreateAsSelect) KsqlTopic(io.confluent.ksql.execution.ddl.commands.KsqlTopic) OutputRefinement(io.confluent.ksql.parser.OutputRefinement) LogicalPlanNode(io.confluent.ksql.planner.LogicalPlanNode) Query(io.confluent.ksql.parser.tree.Query) ServiceContext(io.confluent.ksql.services.ServiceContext) LoggerFactory(org.slf4j.LoggerFactory) AliasedRelation(io.confluent.ksql.parser.tree.AliasedRelation) Formats(io.confluent.ksql.execution.plan.Formats) MutableMetaStore(io.confluent.ksql.metastore.MutableMetaStore) Context(io.vertx.core.Context) CreateTable(io.confluent.ksql.parser.tree.CreateTable) 
Locale(java.util.Locale) PersistentQueryMetadata(io.confluent.ksql.util.PersistentQueryMetadata) KsqlTable(io.confluent.ksql.metastore.model.KsqlTable) TopicPartition(org.apache.kafka.common.TopicPartition) ImmutableSet(com.google.common.collect.ImmutableSet) ImmutableMap(com.google.common.collect.ImmutableMap) Collection(java.util.Collection) ScalablePushQueryMetadata(io.confluent.ksql.util.ScalablePushQueryMetadata) ScalablePushQueryMetrics(io.confluent.ksql.internal.ScalablePushQueryMetrics) KsqlConfig(io.confluent.ksql.util.KsqlConfig) ExecutableDdlStatement(io.confluent.ksql.parser.tree.ExecutableDdlStatement) LogicalSchema(io.confluent.ksql.schema.ksql.LogicalSchema) Collectors(java.util.stream.Collectors) DdlCommand(io.confluent.ksql.execution.ddl.commands.DdlCommand) Objects(java.util.Objects) PullQueryExecutorMetrics(io.confluent.ksql.internal.PullQueryExecutorMetrics) QueryPlannerOptions(io.confluent.ksql.planner.QueryPlannerOptions) ConsistencyOffsetVector(io.confluent.ksql.util.ConsistencyOffsetVector) Optional(java.util.Optional) Statement(io.confluent.ksql.parser.tree.Statement) KsqlConstants(io.confluent.ksql.util.KsqlConstants) SuppressFBWarnings(edu.umd.cs.findbugs.annotations.SuppressFBWarnings) PullQueryQueuePopulator(io.confluent.ksql.physical.pull.PullQueryQueuePopulator) PullQueryQueue(io.confluent.ksql.query.PullQueryQueue) PushPhysicalPlan(io.confluent.ksql.physical.scalablepush.PushPhysicalPlan) HARouting(io.confluent.ksql.physical.pull.HARouting) PlanInfo(io.confluent.ksql.execution.plan.PlanInfo) PullPhysicalPlanBuilder(io.confluent.ksql.physical.pull.PullPhysicalPlanBuilder) KeyFormat(io.confluent.ksql.serde.KeyFormat) ResultType(io.confluent.ksql.util.PushQueryMetadata.ResultType) CompletableFuture(java.util.concurrent.CompletableFuture) DataSourceType(io.confluent.ksql.metastore.model.DataSource.DataSourceType) OptionalInt(java.util.OptionalInt) PushOffsetRange(io.confluent.ksql.util.PushOffsetRange) LogicalPlanner(io.confluent.ksql.planner.LogicalPlanner) Logger(org.slf4j.Logger) PhysicalPlan(io.confluent.ksql.physical.PhysicalPlan) PlanSummary(io.confluent.ksql.util.PlanSummary) PullPhysicalPlan(io.confluent.ksql.physical.pull.PullPhysicalPlan) PlanNode(io.confluent.ksql.planner.plan.PlanNode) QueryRegistry(io.confluent.ksql.query.QueryRegistry) Collections(java.util.Collections) CreateTableCommand(io.confluent.ksql.execution.ddl.commands.CreateTableCommand) Select(io.confluent.ksql.parser.tree.Select) PushQueryPreparer(io.confluent.ksql.physical.scalablepush.PushQueryPreparer) RoutingNodeType(io.confluent.ksql.util.KsqlConstants.RoutingNodeType) SessionConfig(io.confluent.ksql.config.SessionConfig) KsqlConfig(io.confluent.ksql.util.KsqlConfig) LogicalPlanNode(io.confluent.ksql.planner.LogicalPlanNode) PullQueryQueuePopulator(io.confluent.ksql.physical.pull.PullQueryQueuePopulator) PullQueryQueue(io.confluent.ksql.query.PullQueryQueue) KsqlException(io.confluent.ksql.util.KsqlException) KsqlStatementException(io.confluent.ksql.util.KsqlStatementException) PullPhysicalPlan(io.confluent.ksql.physical.pull.PullPhysicalPlan) CompletableFuture(java.util.concurrent.CompletableFuture) KsqlStatementException(io.confluent.ksql.util.KsqlStatementException) PullQueryResult(io.confluent.ksql.physical.pull.PullQueryResult)
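
The shouldCancelRequests future above acts as a single cancel signal shared by local operators and forwarded requests. Below is a rough sketch of that pattern using only plain java.util.concurrent types; all names here are illustrative, not ksql API.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

// Hypothetical sketch of a shared cancel signal: one CompletableFuture<Void> is handed
// to every unit of work, and completing it tells all of them to stop.
public final class CancelSignalDemo {

  public static void main(final String[] args) throws InterruptedException {
    final CompletableFuture<Void> shouldCancelRequests = new CompletableFuture<>();
    final ExecutorService executor = Executors.newFixedThreadPool(2);

    for (int i = 0; i < 2; i++) {
      final int worker = i;
      executor.submit(() -> {
        // Each worker polls the shared signal instead of keeping its own flag.
        while (!shouldCancelRequests.isDone()) {
          Thread.onSpinWait(); // ... fetch and enqueue rows here ...
        }
        System.out.println("worker " + worker + " stopped");
      });
    }

    Thread.sleep(100);
    // Completing the future is the cancel broadcast; completing it exceptionally
    // could additionally carry the reason for the cancellation.
    shouldCancelRequests.complete(null);

    executor.shutdown();
    executor.awaitTermination(1, TimeUnit.SECONDS);
  }
}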

Example 4 with ImmutableAnalysis

Use of io.confluent.ksql.analyzer.ImmutableAnalysis in project ksql by confluentinc.

From the class KsqlEngine, method analyzeQueryWithNoOutputTopic.

/**
 * For analyzing queries that you know won't have an output topic, such as pull queries.
 */
public ImmutableAnalysis analyzeQueryWithNoOutputTopic(
        final Query query,
        final String queryText,
        final Map<String, Object> configOverrides) {
    final KsqlConfig ksqlConfig = this.primaryContext.getKsqlConfig();
    final QueryAnalyzer queryAnalyzer = new QueryAnalyzer(
            getMetaStore(),
            "",
            getRowpartitionRowoffsetEnabled(ksqlConfig, configOverrides),
            ksqlConfig.getBoolean(KsqlConfig.KSQL_QUERY_PULL_LIMIT_CLAUSE_ENABLED));
    final Analysis analysis;
    try {
        analysis = queryAnalyzer.analyze(query, Optional.empty());
    } catch (final KsqlException e) {
        throw new KsqlStatementException(e.getMessage(), queryText, e);
    }
    return new RewrittenAnalysis(analysis, new QueryExecutionUtil.ColumnReferenceRewriter()::process);
}
Also used : RewrittenAnalysis(io.confluent.ksql.analyzer.RewrittenAnalysis) ImmutableAnalysis(io.confluent.ksql.analyzer.ImmutableAnalysis) Analysis(io.confluent.ksql.analyzer.Analysis) KsqlConfig(io.confluent.ksql.util.KsqlConfig) QueryAnalyzer(io.confluent.ksql.analyzer.QueryAnalyzer) KsqlStatementException(io.confluent.ksql.util.KsqlStatementException) KsqlException(io.confluent.ksql.util.KsqlException)
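
The returned RewrittenAnalysis is a decorator: it exposes the same read-only analysis while passing expressions through a column-reference rewriter. The following is a simplified, hypothetical sketch of that decorator shape; Expr, AnalysisView and RewrittenView are made-up names, not ksql types.

import java.util.List;
import java.util.function.Function;
import java.util.stream.Collectors;

// Hypothetical illustration of the wrapper idea above: the decorating view delegates to
// the underlying analysis but rewrites expressions on the way out.
interface Expr {
  String text();
}

interface AnalysisView {
  List<Expr> selectExpressions();
}

final class RewrittenView implements AnalysisView {

  private final AnalysisView delegate;
  private final Function<Expr, Expr> rewriter;

  RewrittenView(final AnalysisView delegate, final Function<Expr, Expr> rewriter) {
    this.delegate = delegate;
    this.rewriter = rewriter;
  }

  @Override
  public List<Expr> selectExpressions() {
    // Every expression handed out by the view is rewritten first, so callers
    // never see the un-rewritten column references.
    return delegate.selectExpressions().stream()
        .map(rewriter)
        .collect(Collectors.toList());
  }
}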

Example 5 with ImmutableAnalysis

Use of io.confluent.ksql.analyzer.ImmutableAnalysis in project ksql by confluentinc.

From the class QueryExecutor, method handleQuery.

private QueryMetadataHolder handleQuery(
        final ServiceContext serviceContext,
        final PreparedStatement<Query> statement,
        final Optional<Boolean> isInternalRequest,
        final MetricsCallbackHolder metricsCallbackHolder,
        final Map<String, Object> configOverrides,
        final Map<String, Object> requestProperties,
        final Context context,
        final boolean excludeTombstones) {
    if (statement.getStatement().isPullQuery()) {
        final ImmutableAnalysis analysis = ksqlEngine.analyzeQueryWithNoOutputTopic(
                statement.getStatement(), statement.getStatementText(), configOverrides);
        final DataSource dataSource = analysis.getFrom().getDataSource();
        final DataSource.DataSourceType dataSourceType = dataSource.getDataSourceType();
        if (!ksqlConfig.getBoolean(KsqlConfig.KSQL_PULL_QUERIES_ENABLE_CONFIG)) {
            throw new KsqlStatementException("Pull queries are disabled." + PullQueryValidator.PULL_QUERY_SYNTAX_HELP + System.lineSeparator() + "Please set " + KsqlConfig.KSQL_PULL_QUERIES_ENABLE_CONFIG + "=true to enable " + "this feature." + System.lineSeparator(), statement.getStatementText());
        }
        Optional<ConsistencyOffsetVector> consistencyOffsetVector = Optional.empty();
        if (ksqlConfig.getBoolean(KsqlConfig.KSQL_QUERY_PULL_CONSISTENCY_OFFSET_VECTOR_ENABLED)
                && requestProperties.containsKey(
                        KsqlRequestConfig.KSQL_REQUEST_QUERY_PULL_CONSISTENCY_OFFSET_VECTOR)) {
            final String serializedCV = (String) requestProperties.get(
                    KsqlRequestConfig.KSQL_REQUEST_QUERY_PULL_CONSISTENCY_OFFSET_VECTOR);
            // serializedCV will be empty on the first request as the consistency vector is initialized
            // at the server
            consistencyOffsetVector = serializedCV != null && !serializedCV.equals("")
                    ? Optional.of(ConsistencyOffsetVector.deserialize(serializedCV))
                    : Optional.of(ConsistencyOffsetVector.emptyVector());
        }
        switch(dataSourceType) {
            case KTABLE:
                {
                    // First thing, set the metrics callback so that it gets called, even if we hit an error
                    final AtomicReference<PullQueryResult> resultForMetrics = new AtomicReference<>(null);
                    metricsCallbackHolder.setCallback(QueryMetricsUtil.initializePullTableMetricsCallback(
                            pullQueryMetrics, pullBandRateLimiter, resultForMetrics));
                    final SessionConfig sessionConfig = SessionConfig.of(ksqlConfig, configOverrides);
                    final ConfiguredStatement<Query> configured = ConfiguredStatement.of(statement, sessionConfig);
                    return handleTablePullQuery(
                            analysis, serviceContext, configured, requestProperties, isInternalRequest,
                            pullBandRateLimiter, resultForMetrics, consistencyOffsetVector);
                }
            case KSTREAM:
                {
                    // First thing, set the metrics callback so that it gets called, even if we hit an error
                    final AtomicReference<StreamPullQueryMetadata> resultForMetrics = new AtomicReference<>(null);
                    final AtomicReference<Decrementer> refDecrementer = new AtomicReference<>(null);
                    metricsCallbackHolder.setCallback(QueryMetricsUtil.initializePullStreamMetricsCallback(
                            pullQueryMetrics, pullBandRateLimiter, analysis, resultForMetrics, refDecrementer));
                    final SessionConfig sessionConfig = SessionConfig.of(ksqlConfig, configOverrides);
                    final ConfiguredStatement<Query> configured = ConfiguredStatement.of(statement, sessionConfig);
                    return handleStreamPullQuery(
                            analysis, serviceContext, configured, resultForMetrics, refDecrementer);
                }
            default:
                throw new KsqlStatementException("Unexpected data source type for pull query: " + dataSourceType, statement.getStatementText());
        }
    } else if (ScalablePushUtil.isScalablePushQuery(statement.getStatement(), ksqlEngine, ksqlConfig, configOverrides)) {
        // First thing, set the metrics callback so that it gets called, even if we hit an error
        final AtomicReference<ScalablePushQueryMetadata> resultForMetrics = new AtomicReference<>(null);
        metricsCallbackHolder.setCallback(QueryMetricsUtil.initializeScalablePushMetricsCallback(
                scalablePushQueryMetrics, scalablePushBandRateLimiter, resultForMetrics));
        final ImmutableAnalysis analysis = ksqlEngine.analyzeQueryWithNoOutputTopic(
                statement.getStatement(), statement.getStatementText(), configOverrides);
        QueryLogger.info("Scalable push query created", statement.getStatementText());
        return handleScalablePushQuery(
                analysis, serviceContext, statement, configOverrides, requestProperties, context,
                scalablePushBandRateLimiter, resultForMetrics);
    } else {
        // log validated statements for query anonymization
        QueryLogger.info("Transient query created", statement.getStatementText());
        return handlePushQuery(serviceContext, statement, configOverrides, excludeTombstones);
    }
}
Also used : ConfiguredStatement(io.confluent.ksql.statement.ConfiguredStatement) ConsistencyOffsetVector(io.confluent.ksql.util.ConsistencyOffsetVector) ImmutableAnalysis(io.confluent.ksql.analyzer.ImmutableAnalysis) SessionConfig(io.confluent.ksql.config.SessionConfig) KsqlStatementException(io.confluent.ksql.util.KsqlStatementException) AtomicReference(java.util.concurrent.atomic.AtomicReference) DataSource(io.confluent.ksql.metastore.model.DataSource)
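
The consistency-offset-vector handling above treats a missing request property as "consistency not requested" and an empty string as "first request, start from an empty vector". Below is a small, self-contained sketch of that decision; OffsetVector and readToken are hypothetical names, not ksql API.

import java.util.Map;
import java.util.Optional;

// Hypothetical sketch of the consistency-token handling shown above.
public final class ConsistencyTokenDemo {

  static final class OffsetVector {
    static OffsetVector empty() {
      return new OffsetVector();
    }

    static OffsetVector deserialize(final String token) {
      // Real deserialization would decode the token; this stub just stands in for it.
      return new OffsetVector();
    }
  }

  static Optional<OffsetVector> readToken(
      final Map<String, Object> requestProperties,
      final boolean featureEnabled,
      final String propertyName) {
    if (!featureEnabled || !requestProperties.containsKey(propertyName)) {
      // Feature off, or the client did not ask for consistency guarantees.
      return Optional.empty();
    }
    final String serialized = (String) requestProperties.get(propertyName);
    // Empty string on the first request: the vector is initialized at the server.
    return serialized != null && !serialized.isEmpty()
        ? Optional.of(OffsetVector.deserialize(serialized))
        : Optional.of(OffsetVector.empty());
  }

  public static void main(final String[] args) {
    System.out.println(readToken(Map.of(), true, "consistency.token"));                        // empty Optional
    System.out.println(readToken(Map.of("consistency.token", ""), true, "consistency.token")); // empty vector
  }
}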

Aggregations

ImmutableAnalysis (io.confluent.ksql.analyzer.ImmutableAnalysis) 5
SessionConfig (io.confluent.ksql.config.SessionConfig) 3
PullQueryExecutorMetrics (io.confluent.ksql.internal.PullQueryExecutorMetrics) 3
ScalablePushQueryMetrics (io.confluent.ksql.internal.ScalablePushQueryMetrics) 3
DataSource (io.confluent.ksql.metastore.model.DataSource) 3
KsqlStatementException (io.confluent.ksql.util.KsqlStatementException) 3
Throwables (com.google.common.base.Throwables) 2
ImmutableMap (com.google.common.collect.ImmutableMap) 2
ImmutableSet (com.google.common.collect.ImmutableSet) 2
Iterables (com.google.common.collect.Iterables) 2
SuppressFBWarnings (edu.umd.cs.findbugs.annotations.SuppressFBWarnings) 2
ExecuteResult (io.confluent.ksql.KsqlExecutionContext.ExecuteResult) 2
CreateTableCommand (io.confluent.ksql.execution.ddl.commands.CreateTableCommand) 2
DdlCommand (io.confluent.ksql.execution.ddl.commands.DdlCommand) 2
KsqlTopic (io.confluent.ksql.execution.ddl.commands.KsqlTopic) 2
UnqualifiedColumnReferenceExp (io.confluent.ksql.execution.expression.tree.UnqualifiedColumnReferenceExp) 2
ExecutionStep (io.confluent.ksql.execution.plan.ExecutionStep) 2
Formats (io.confluent.ksql.execution.plan.Formats) 2
PlanInfo (io.confluent.ksql.execution.plan.PlanInfo) 2
PlanInfoExtractor (io.confluent.ksql.execution.plan.PlanInfoExtractor) 2