
Example 1 with PullQueryExecutorMetrics

Use of io.confluent.ksql.internal.PullQueryExecutorMetrics in project ksql by confluentinc.

From the class HARouting, method executeOrRouteQuery.

@SuppressWarnings("ParameterNumber")
@VisibleForTesting
static PartitionFetchResult executeOrRouteQuery(
        final KsqlNode node,
        final KsqlPartitionLocation location,
        final ConfiguredStatement<Query> statement,
        final ServiceContext serviceContext,
        final RoutingOptions routingOptions,
        final Optional<PullQueryExecutorMetrics> pullQueryMetrics,
        final PullPhysicalPlan pullPhysicalPlan,
        final LogicalSchema outputSchema,
        final QueryId queryId,
        final PullQueryQueue pullQueryQueue,
        final CompletableFuture<Void> shouldCancelRequests,
        final Optional<ConsistencyOffsetVector> consistencyOffsetVector) {
    final BiFunction<List<?>, LogicalSchema, PullQueryRow> rowFactory = (rawRow, schema) -> new PullQueryRow(rawRow, schema, Optional.ofNullable(routingOptions.getIsDebugRequest() ? node : null), Optional.empty());
    if (node.isLocal()) {
        try {
            LOG.debug("Query {} executed locally at host {} at timestamp {}.", statement.getStatementText(), node.location(), System.currentTimeMillis());
            pullQueryMetrics.ifPresent(queryExecutorMetrics -> queryExecutorMetrics.recordLocalRequests(1));
            synchronized (pullPhysicalPlan) {
                pullPhysicalPlan.execute(ImmutableList.of(location), pullQueryQueue, rowFactory);
                return new PartitionFetchResult(RoutingResult.SUCCESS, location, Optional.empty());
            }
        } catch (StandbyFallbackException | NotUpToBoundException e) {
            LOG.warn("Error executing query locally at node {}. Falling back to standby state which " + "may return stale results. Cause {}", node, e.getMessage());
            return new PartitionFetchResult(RoutingResult.STANDBY_FALLBACK, location, Optional.of(e));
        } catch (Exception e) {
            throw new KsqlException(String.format("Error executing query locally at node %s: %s", node.location(), e.getMessage()), e);
        }
    } else {
        try {
            LOG.debug("Query {} routed to host {} at timestamp {}.", statement.getStatementText(), node.location(), System.currentTimeMillis());
            pullQueryMetrics.ifPresent(queryExecutorMetrics -> queryExecutorMetrics.recordRemoteRequests(1));
            forwardTo(node, ImmutableList.of(location), statement, serviceContext, pullQueryQueue, rowFactory, outputSchema, shouldCancelRequests, consistencyOffsetVector);
            return new PartitionFetchResult(RoutingResult.SUCCESS, location, Optional.empty());
        } catch (StandbyFallbackException e) {
            LOG.warn("Error forwarding query to node {}. Falling back to standby state which may " + "return stale results", node.location(), e.getCause());
            return new PartitionFetchResult(RoutingResult.STANDBY_FALLBACK, location, Optional.of(e));
        } catch (Exception e) {
            throw new KsqlException(String.format("Error forwarding query to node %s: %s", node.location(), e.getMessage()), e);
        }
    }
}
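For orientation, here is a hedged caller-side sketch of how this method is meant to be driven: each host of a partition is tried in priority order, and a STANDBY_FALLBACK result moves on to the next (standby) host instead of failing the query. This is not the actual HARouting loop; KsqlPartitionLocation#getNodes() and PartitionFetchResult#getResult() are assumed accessors, and the variables mirror the parameters above.

// Illustrative only: fallback loop over the hosts of one partition, assuming getNodes()
// returns hosts in priority order and PartitionFetchResult exposes getResult() (assumed name).
PartitionFetchResult fetchResult = null;
for (final KsqlNode candidate : location.getNodes()) {
    fetchResult = executeOrRouteQuery(
        candidate, location, statement, serviceContext, routingOptions, pullQueryMetrics,
        pullPhysicalPlan, outputSchema, queryId, pullQueryQueue, shouldCancelRequests,
        consistencyOffsetVector);
    if (fetchResult.getResult() == RoutingResult.SUCCESS) {
        break; // rows for this partition have been enqueued on pullQueryQueue
    }
    // STANDBY_FALLBACK: try the next (standby) host; any other failure already threw above
}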

Example 2 with PullQueryExecutorMetrics

Use of io.confluent.ksql.internal.PullQueryExecutorMetrics in project ksql by confluentinc.

From the class QueryMetricsUtil, method initializePullTableMetricsCallback.

public static MetricsCallback initializePullTableMetricsCallback(
        final Optional<PullQueryExecutorMetrics> pullQueryMetrics,
        final SlidingWindowRateLimiter pullBandRateLimiter,
        final AtomicReference<PullQueryResult> resultForMetrics) {
    final MetricsCallback metricsCallback = (statusCode, requestBytes, responseBytes, startTimeNanos) -> pullQueryMetrics.ifPresent(metrics -> {
        metrics.recordStatusCode(statusCode);
        metrics.recordRequestSize(requestBytes);
        final PullQueryResult r = resultForMetrics.get();
        if (r == null) {
            recordErrorMetrics(pullQueryMetrics, responseBytes, startTimeNanos);
        } else {
            final QuerySourceType sourceType = r.getSourceType();
            final PullPhysicalPlanType planType = r.getPlanType();
            final RoutingNodeType routingNodeType = RoutingNodeType.SOURCE_NODE;
            metrics.recordResponseSize(responseBytes, sourceType, planType, routingNodeType);
            metrics.recordLatency(startTimeNanos, sourceType, planType, routingNodeType);
            metrics.recordRowsReturned(r.getTotalRowsReturned(), sourceType, planType, routingNodeType);
            metrics.recordRowsProcessed(r.getTotalRowsProcessed(), sourceType, planType, routingNodeType);
        }
        pullBandRateLimiter.add(responseBytes);
    });
    return metricsCallback;
}
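A hedged wiring sketch for this callback (pullQueryMetrics, pullBandRateLimiter and pullQueryResult are placeholders assumed to exist in the caller; the callback invocation itself is performed by the HTTP layer and is not shown). The important detail is that resultForMetrics must be populated before the response completes, otherwise the callback records the error-path metrics.

final AtomicReference<PullQueryResult> resultForMetrics = new AtomicReference<>(null);
final MetricsCallback callback = QueryMetricsUtil.initializePullTableMetricsCallback(
    pullQueryMetrics, pullBandRateLimiter, resultForMetrics);

// ... execute the pull query (see executeTablePullQuery in Example 4) ...
resultForMetrics.set(pullQueryResult);

// The HTTP layer is expected to invoke `callback` with
// (statusCode, requestBytes, responseBytes, startTimeNanos) once the response completes,
// which records the status code, request/response sizes, latency and row counts.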

Example 3 with PullQueryExecutorMetrics

Use of io.confluent.ksql.internal.PullQueryExecutorMetrics in project ksql by confluentinc.

From the class QueryMetricsUtil, method initializePullStreamMetricsCallback.

public static MetricsCallback initializePullStreamMetricsCallback(
        final Optional<PullQueryExecutorMetrics> pullQueryMetrics,
        final SlidingWindowRateLimiter pullBandRateLimiter,
        final ImmutableAnalysis analysis,
        final AtomicReference<StreamPullQueryMetadata> resultForMetrics,
        final AtomicReference<Decrementer> refDecrementer) {
    final MetricsCallback metricsCallback = (statusCode, requestBytes, responseBytes, startTimeNanos) -> pullQueryMetrics.ifPresent(metrics -> {
        metrics.recordStatusCode(statusCode);
        metrics.recordRequestSize(requestBytes);
        final StreamPullQueryMetadata m = resultForMetrics.get();
        final KafkaStreams.State state = m == null ? null : m.getTransientQueryMetadata().getKafkaStreams().state();
        if (m == null || state == null || state.equals(State.ERROR) || state.equals(State.PENDING_ERROR)) {
            recordErrorMetrics(pullQueryMetrics, responseBytes, startTimeNanos);
        } else {
            final boolean isWindowed = analysis.getFrom().getDataSource().getKsqlTopic().getKeyFormat().isWindowed();
            final QuerySourceType sourceType = isWindowed ? QuerySourceType.WINDOWED_STREAM : QuerySourceType.NON_WINDOWED_STREAM;
            // There is no WHERE clause constraint information in the persistent logical plan
            final PullPhysicalPlanType planType = PullPhysicalPlanType.UNKNOWN;
            final RoutingNodeType routingNodeType = RoutingNodeType.SOURCE_NODE;
            metrics.recordResponseSize(responseBytes, sourceType, planType, routingNodeType);
            metrics.recordLatency(startTimeNanos, sourceType, planType, routingNodeType);
            final TransientQueryQueue rowQueue = (TransientQueryQueue) m.getTransientQueryMetadata().getRowQueue();
            // The rows read from the underlying data source equal the rows read by the user
            // since the WHERE condition is pushed to the data source
            metrics.recordRowsReturned(rowQueue.getTotalRowsQueued(), sourceType, planType, routingNodeType);
            metrics.recordRowsProcessed(rowQueue.getTotalRowsQueued(), sourceType, planType, routingNodeType);
        }
        pullBandRateLimiter.add(responseBytes);
        // Decrement on happy or exception path
        final Decrementer decrementer = refDecrementer.get();
        if (decrementer != null) {
            decrementer.decrementAtMostOnce();
        }
    });
    return metricsCallback;
}
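The stream variant adds a concurrency-limiter hand-off. A sketch under the same assumptions as above (pullQueryMetrics, pullBandRateLimiter, analysis, decrementer and streamPullQueryMetadata are caller-supplied placeholders): the Decrementer acquired when the query was admitted is published through refDecrementer so the callback can release the slot exactly once, on both the success and the error path.

final AtomicReference<StreamPullQueryMetadata> resultForMetrics = new AtomicReference<>(null);
final AtomicReference<Decrementer> refDecrementer = new AtomicReference<>(null);
final MetricsCallback callback = QueryMetricsUtil.initializePullStreamMetricsCallback(
    pullQueryMetrics, pullBandRateLimiter, analysis, resultForMetrics, refDecrementer);

// ... admit the query against the concurrency limiter and start the stream pull query ...
refDecrementer.set(decrementer);
resultForMetrics.set(streamPullQueryMetadata);
// When the response completes, the callback records the metrics and calls
// decrementAtMostOnce() on the published Decrementer.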

Example 4 with PullQueryExecutorMetrics

Use of io.confluent.ksql.internal.PullQueryExecutorMetrics in project ksql by confluentinc.

From the class EngineExecutor, method executeTablePullQuery.

/**
 * Evaluates a pull query by first analyzing it, then building the logical plan and finally
 * the physical plan. The execution is then done using the physical plan in a pipelined manner.
 * @param analysis The analysis of the pull query statement
 * @param statement The pull query
 * @param routing The HA routing used to fan the query out to the hosts that own the state
 * @param routingOptions Configuration parameters used for HA routing
 * @param queryPlannerOptions Options checked while building the logical and physical plans
 * @param pullQueryMetrics JMX metrics
 * @param startImmediately Whether to start the query before returning the result
 * @param consistencyOffsetVector The consistency offset vector, when consistency tokens are in use
 * @return the rows that are the result of evaluating the pull query
 */
PullQueryResult executeTablePullQuery(
        final ImmutableAnalysis analysis,
        final ConfiguredStatement<Query> statement,
        final HARouting routing,
        final RoutingOptions routingOptions,
        final QueryPlannerOptions queryPlannerOptions,
        final Optional<PullQueryExecutorMetrics> pullQueryMetrics,
        final boolean startImmediately,
        final Optional<ConsistencyOffsetVector> consistencyOffsetVector) {
    if (!statement.getStatement().isPullQuery()) {
        throw new IllegalArgumentException("Executor can only handle pull queries");
    }
    final SessionConfig sessionConfig = statement.getSessionConfig();
    // If we ever change how many hops a request can do, we'll need to update this for correct
    // metrics.
    final RoutingNodeType routingNodeType = routingOptions.getIsSkipForwardRequest() ? RoutingNodeType.REMOTE_NODE : RoutingNodeType.SOURCE_NODE;
    PullPhysicalPlan plan = null;
    try {
        // Do not set sessionConfig.getConfig to true! The copying is inefficient and slows down pull
        // query performance significantly.  Instead use QueryPlannerOptions which check overrides
        // deliberately.
        final KsqlConfig ksqlConfig = sessionConfig.getConfig(false);
        final LogicalPlanNode logicalPlan = buildAndValidateLogicalPlan(statement, analysis, ksqlConfig, queryPlannerOptions, false);
        // This is a cancel signal that is used to stop both local operations and requests
        final CompletableFuture<Void> shouldCancelRequests = new CompletableFuture<>();
        plan = buildPullPhysicalPlan(logicalPlan, analysis, queryPlannerOptions, shouldCancelRequests, consistencyOffsetVector);
        final PullPhysicalPlan physicalPlan = plan;
        final PullQueryQueue pullQueryQueue = new PullQueryQueue(analysis.getLimitClause());
        final PullQueryQueuePopulator populator = () -> routing.handlePullQuery(serviceContext, physicalPlan, statement, routingOptions, physicalPlan.getOutputSchema(), physicalPlan.getQueryId(), pullQueryQueue, shouldCancelRequests, consistencyOffsetVector);
        final PullQueryResult result = new PullQueryResult(physicalPlan.getOutputSchema(), populator, physicalPlan.getQueryId(), pullQueryQueue, pullQueryMetrics, physicalPlan.getSourceType(), physicalPlan.getPlanType(), routingNodeType, physicalPlan::getRowsReadFromDataSource, shouldCancelRequests, consistencyOffsetVector);
        if (startImmediately) {
            result.start();
        }
        return result;
    } catch (final Exception e) {
        if (plan == null) {
            pullQueryMetrics.ifPresent(m -> m.recordErrorRateForNoResult(1));
        } else {
            final PullPhysicalPlan physicalPlan = plan;
            pullQueryMetrics.ifPresent(metrics -> metrics.recordErrorRate(1, physicalPlan.getSourceType(), physicalPlan.getPlanType(), routingNodeType));
        }
        final String stmtLower = statement.getStatementText().toLowerCase(Locale.ROOT);
        final String messageLower = e.getMessage().toLowerCase(Locale.ROOT);
        final String stackLower = Throwables.getStackTraceAsString(e).toLowerCase(Locale.ROOT);
        // Do not log the exception message if it might contain the contents of the query,
        // which may include sensitive information.
        if (messageLower.contains(stmtLower) || stackLower.contains(stmtLower)) {
            final StackTraceElement loc = Iterables.getLast(Throwables.getCausalChain(e)).getStackTrace()[0];
            LOG.error("Failure to execute pull query {} {}, not logging the error message since it " + "contains the query string, which may contain sensitive information. If you " + "see this LOG message, please submit a GitHub ticket and we will scrub " + "the statement text from the error at {}", routingOptions.debugString(), queryPlannerOptions.debugString(), loc);
        } else {
            LOG.error("Failure to execute pull query. {} {}", routingOptions.debugString(), queryPlannerOptions.debugString(), e);
        }
        LOG.debug("Failed pull query text {}, {}", statement.getStatementText(), e);
        throw new KsqlStatementException(e.getMessage() == null ? "Server Error" + Arrays.toString(e.getStackTrace()) : e.getMessage(), statement.getStatementText(), e);
    }
}
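The catch block above deliberately avoids logging the raw error when it may echo the statement text. Below is a small standalone helper distilled from that check purely for illustration; it is not part of ksql, the class name is hypothetical, and the null-safe String.valueOf is an addition.

import com.google.common.base.Throwables;
import java.util.Locale;

final class SensitiveErrorCheck {
    // Returns true when the exception may echo the statement text, in which case the caller
    // should log a generic error rather than the exception message.
    static boolean mayContainStatementText(final Exception e, final String statementText) {
        final String stmtLower = statementText.toLowerCase(Locale.ROOT);
        final String messageLower = String.valueOf(e.getMessage()).toLowerCase(Locale.ROOT);
        final String stackLower = Throwables.getStackTraceAsString(e).toLowerCase(Locale.ROOT);
        return messageLower.contains(stmtLower) || stackLower.contains(stmtLower);
    }
}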

Example 5 with PullQueryExecutorMetrics

Use of io.confluent.ksql.internal.PullQueryExecutorMetrics in project ksql by confluentinc.

From the class HARoutingTest, method setUp.

@Before
public void setUp() {
    when(pullPhysicalPlan.getMaterialization()).thenReturn(materialization);
    when(pullPhysicalPlan.getMaterialization().locator()).thenReturn(locator);
    when(statement.getStatementText()).thenReturn("foo");
    when(statement.getSessionConfig()).thenReturn(SessionConfig.of(ksqlConfig, ImmutableMap.of()));
    when(node1.isLocal()).thenReturn(true);
    when(node2.isLocal()).thenReturn(false);
    when(node1.location()).thenReturn(URI.create("http://node1:8088"));
    when(node2.location()).thenReturn(URI.create("http://node2:8089"));
    when(badNode.location()).thenReturn(URI.create("http://badnode:8090"));
    when(node1.getHost()).thenReturn(Host.include(new KsqlHostInfo("node1", 8088)));
    when(node2.getHost()).thenReturn(Host.include(new KsqlHostInfo("node2", 8089)));
    when(badNode.getHost()).thenReturn(Host.exclude(new KsqlHostInfo("badnode", 8090), "BAD"));
    location1 = new PartitionLocation(Optional.empty(), 1, ImmutableList.of(node1, node2));
    location2 = new PartitionLocation(Optional.empty(), 2, ImmutableList.of(node2, node1));
    location3 = new PartitionLocation(Optional.empty(), 3, ImmutableList.of(node1, node2));
    location4 = new PartitionLocation(Optional.empty(), 4, ImmutableList.of(node2, node1));
    location5 = new PartitionLocation(Optional.empty(), 4, ImmutableList.of(node2));
    // We require at least two threads, one for the orchestrator, and the other for the partitions.
    when(ksqlConfig.getInt(KsqlConfig.KSQL_QUERY_PULL_THREAD_POOL_SIZE_CONFIG)).thenReturn(1);
    when(ksqlConfig.getInt(KsqlConfig.KSQL_QUERY_PULL_ROUTER_THREAD_POOL_SIZE_CONFIG)).thenReturn(1);
    when(serviceContext.getKsqlClient()).thenReturn(ksqlClient);
    pullMetrics = new PullQueryExecutorMetrics(KSQL_SERVICE_ID, Collections.emptyMap(), time, new Metrics());
    haRouting = new HARouting(routingFilterFactory, Optional.of(pullMetrics), ksqlConfig);
}
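Outside of tests the metrics object is built the same way. A minimal construction sketch based on the call above; the service id string and the use of Time.SYSTEM are assumptions, and the constructor arguments mirror the test (service id, custom metric tags, time source, Kafka Metrics registry).

final Metrics metricRegistry = new Metrics();               // org.apache.kafka.common.metrics.Metrics
final PullQueryExecutorMetrics pullMetrics = new PullQueryExecutorMetrics(
    "default_",                                             // ksql service id (assumed value)
    Collections.emptyMap(),                                 // custom metric tags
    Time.SYSTEM,                                            // org.apache.kafka.common.utils.Time
    metricRegistry);

pullMetrics.recordLocalRequests(1);                         // as recorded in Example 1
pullMetrics.recordStatusCode(200);                          // as recorded in Examples 2 and 3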

Aggregations

PullQueryExecutorMetrics (io.confluent.ksql.internal.PullQueryExecutorMetrics) 8
ScalablePushQueryMetrics (io.confluent.ksql.internal.ScalablePushQueryMetrics) 5
SlidingWindowRateLimiter (io.confluent.ksql.api.server.SlidingWindowRateLimiter) 4
ImmutableAnalysis (io.confluent.ksql.analyzer.ImmutableAnalysis) 3
Query (io.confluent.ksql.parser.tree.Query) 3
PullPhysicalPlanType (io.confluent.ksql.physical.pull.PullPhysicalPlan.PullPhysicalPlanType) 3
QueryId (io.confluent.ksql.query.QueryId) 3
KsqlConfig (io.confluent.ksql.util.KsqlConfig) 3
Optional (java.util.Optional) 3
AtomicReference (java.util.concurrent.atomic.AtomicReference) 3
Metrics (org.apache.kafka.common.metrics.Metrics) 3
Before (org.junit.Before) 3
ImmutableMap (com.google.common.collect.ImmutableMap) 2
ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder) 2
MetricsCallback (io.confluent.ksql.api.server.MetricsCallback) 2
RoutingFilterFactory (io.confluent.ksql.execution.streams.RoutingFilter.RoutingFilterFactory) 2
RoutingOptions (io.confluent.ksql.execution.streams.RoutingOptions) 2
KsqlPartitionLocation (io.confluent.ksql.execution.streams.materialization.Locator.KsqlPartitionLocation) 2
InternalFunctionRegistry (io.confluent.ksql.function.InternalFunctionRegistry) 2
AliasedRelation (io.confluent.ksql.parser.tree.AliasedRelation) 2