Search in sources:

Example 16 with ConfiguredStatement

use of io.confluent.ksql.statement.ConfiguredStatement in project ksql by confluentinc.

the class KsqlEngineTestUtil method execute.

@SuppressWarnings({ "rawtypes", "unchecked" })
private static ExecuteResult execute(
    final ServiceContext serviceContext,
    final KsqlExecutionContext executionContext,
    final ParsedStatement stmt,
    final KsqlConfig ksqlConfig,
    final Map<String, Object> overriddenProperties,
    final Optional<DefaultSchemaInjector> schemaInjector
) {
    final PreparedStatement<?> prepared = executionContext.prepare(stmt);
    final ConfiguredStatement<?> configured = ConfiguredStatement.of(prepared, SessionConfig.of(ksqlConfig, overriddenProperties));
    final ConfiguredStatement<?> withFormats = new DefaultFormatInjector().inject(configured);
    final ConfiguredStatement<?> withSchema = schemaInjector.map(injector -> injector.inject(withFormats)).orElse((ConfiguredStatement) withFormats);
    final ConfiguredStatement<?> reformatted = new SqlFormatInjector(executionContext).inject(withSchema);
    try {
        return executionContext.execute(serviceContext, reformatted);
    } catch (final KsqlStatementException e) {
        // use the original statement text so callers can easily check that the failed statement is the input statement
        throw new KsqlStatementException(e.getRawMessage(), stmt.getStatementText(), e.getCause());
    }
}
Also used : Query(io.confluent.ksql.parser.tree.Query) SequentialQueryIdGenerator(io.confluent.ksql.query.id.SequentialQueryIdGenerator) SchemaRegistryClient(io.confluent.kafka.schemaregistry.client.SchemaRegistryClient) ServiceContext(io.confluent.ksql.services.ServiceContext) SchemaRegistryTopicSchemaSupplier(io.confluent.ksql.schema.ksql.inference.SchemaRegistryTopicSchemaSupplier) ProcessingLogContext(io.confluent.ksql.logging.processing.ProcessingLogContext) MutableMetaStore(io.confluent.ksql.metastore.MutableMetaStore) MetricCollectors(io.confluent.ksql.metrics.MetricCollectors) Function(java.util.function.Function) SessionConfig(io.confluent.ksql.config.SessionConfig) TransientQueryMetadata(io.confluent.ksql.util.TransientQueryMetadata) ExecuteResult(io.confluent.ksql.KsqlExecutionContext.ExecuteResult) Map(java.util.Map) ParsedStatement(io.confluent.ksql.parser.KsqlParser.ParsedStatement) DefaultFormatInjector(io.confluent.ksql.format.DefaultFormatInjector) QueryIdGenerator(io.confluent.ksql.query.id.QueryIdGenerator) QueryMetadata(io.confluent.ksql.util.QueryMetadata) ConfiguredStatement(io.confluent.ksql.statement.ConfiguredStatement) KsqlConfig(io.confluent.ksql.util.KsqlConfig) DefaultSchemaInjector(io.confluent.ksql.schema.ksql.inference.DefaultSchemaInjector) Collectors(java.util.stream.Collectors) KsqlStatementException(io.confluent.ksql.util.KsqlStatementException) List(java.util.List) KsqlExecutionContext(io.confluent.ksql.KsqlExecutionContext) KsqlEngineMetrics(io.confluent.ksql.internal.KsqlEngineMetrics) Optional(java.util.Optional) Collections(java.util.Collections) PreparedStatement(io.confluent.ksql.parser.KsqlParser.PreparedStatement) DefaultFormatInjector(io.confluent.ksql.format.DefaultFormatInjector) KsqlStatementException(io.confluent.ksql.util.KsqlStatementException)
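
The chain above — prepare the parsed statement, wrap it in a ConfiguredStatement together with a SessionConfig, run it through the format/schema/SQL-format injectors, then execute — is the same sequence any caller follows, not just this test utility. A minimal sketch of that flow under stated assumptions: engine is an existing KsqlEngine (which implements KsqlExecutionContext), serviceContext and ksqlConfig are already constructed, and runSingleStatement is an illustrative helper name, not part of ksql.

// Illustrative helper: parse -> prepare -> configure -> inject formats -> execute.
private static ExecuteResult runSingleStatement(
    final KsqlEngine engine,
    final ServiceContext serviceContext,
    final KsqlConfig ksqlConfig,
    final String sql
) {
    // Assumes the string contains exactly one statement.
    final ParsedStatement parsed = engine.parse(sql).get(0);
    final PreparedStatement<?> prepared = engine.prepare(parsed);
    final ConfiguredStatement<?> configured = ConfiguredStatement.of(
        prepared,
        SessionConfig.of(ksqlConfig, Collections.emptyMap()));   // no per-request property overrides
    // Fill in any missing key/value formats before handing the statement to the engine.
    final ConfiguredStatement<?> withFormats = new DefaultFormatInjector().inject(configured);
    return engine.execute(serviceContext, withFormats);
}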

Example 17 with ConfiguredStatement

use of io.confluent.ksql.statement.ConfiguredStatement in project ksql by confluentinc.

the class QueryEndpoint method createStatement.

private ConfiguredStatement<Query> createStatement(
    final String queryString,
    final Map<String, Object> properties,
    final Map<String, Object> sessionVariables
) {
    final List<ParsedStatement> statements = ksqlEngine.parse(queryString);
    if (statements.size() != 1) {
        throw new KsqlStatementException(String.format("Expected exactly one KSQL statement; found %d instead", statements.size()), queryString);
    }
    final PreparedStatement<?> ps = ksqlEngine.prepare(
        statements.get(0),
        sessionVariables.entrySet().stream()
            .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue().toString())));
    final Statement statement = ps.getStatement();
    if (!(statement instanceof Query)) {
        throw new KsqlStatementException("Not a query", queryString);
    }
    @SuppressWarnings("unchecked") final PreparedStatement<Query> psq = (PreparedStatement<Query>) ps;
    return ConfiguredStatement.of(psq, SessionConfig.of(ksqlConfig, properties));
}
Also used : Query(io.confluent.ksql.parser.tree.Query) ParsedStatement(io.confluent.ksql.parser.KsqlParser.ParsedStatement) ConfiguredStatement(io.confluent.ksql.statement.ConfiguredStatement) Statement(io.confluent.ksql.parser.tree.Statement) PreparedStatement(io.confluent.ksql.parser.KsqlParser.PreparedStatement) ParsedStatement(io.confluent.ksql.parser.KsqlParser.ParsedStatement) KsqlStatementException(io.confluent.ksql.util.KsqlStatementException) PreparedStatement(io.confluent.ksql.parser.KsqlParser.PreparedStatement)
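
The only subtle step here is the session-variable handling: the variables arrive as Map<String, Object>, but the two-argument prepare() expects string values, so each value is converted with toString() before preparation. A compact restatement of that conversion with illustrative values (ImmutableMap and the local variable names are assumptions, not taken from this class):

// Illustrative only: session variables must be string-valued when handed to prepare().
final Map<String, Object> sessionVariables = ImmutableMap.of("env", "prod", "replicas", 3);
final Map<String, String> asStrings = sessionVariables.entrySet().stream()
    .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue().toString()));
final PreparedStatement<?> prepared = ksqlEngine.prepare(statements.get(0), asStrings);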

Example 18 with ConfiguredStatement

use of io.confluent.ksql.statement.ConfiguredStatement in project ksql by confluentinc.

the class HARouting method executeRounds.

private void executeRounds(
    final ServiceContext serviceContext,
    final PullPhysicalPlan pullPhysicalPlan,
    final ConfiguredStatement<Query> statement,
    final RoutingOptions routingOptions,
    final LogicalSchema outputSchema,
    final QueryId queryId,
    final List<KsqlPartitionLocation> locations,
    final PullQueryQueue pullQueryQueue,
    final CompletableFuture<Void> shouldCancelRequests,
    final Optional<ConsistencyOffsetVector> consistencyOffsetVector
) throws InterruptedException {
    final ExecutorCompletionService<PartitionFetchResult> completionService = new ExecutorCompletionService<>(routerExecutorService);
    final int totalPartitions = locations.size();
    int processedPartitions = 0;
    final Map<Integer, List<Exception>> exceptionsPerPartition = new HashMap<>();
    for (final KsqlPartitionLocation partition : locations) {
        final KsqlNode node = getNodeForRound(partition, routingOptions);
        pullQueryMetrics.ifPresent(queryExecutorMetrics -> queryExecutorMetrics.recordPartitionFetchRequest(1));
        completionService.submit(() -> routeQuery.routeQuery(node, partition, statement, serviceContext, routingOptions, pullQueryMetrics, pullPhysicalPlan, outputSchema, queryId, pullQueryQueue, shouldCancelRequests, consistencyOffsetVector));
    }
    while (processedPartitions < totalPartitions) {
        final Future<PartitionFetchResult> future = completionService.take();
        try {
            final PartitionFetchResult fetchResult = future.get();
            if (fetchResult.isError()) {
                exceptionsPerPartition.computeIfAbsent(fetchResult.location.getPartition(), v -> new ArrayList<>()).add(fetchResult.exception.get());
                final KsqlPartitionLocation nextRoundPartition = nextNode(fetchResult.getLocation());
                final KsqlNode node = getNodeForRound(nextRoundPartition, routingOptions);
                pullQueryMetrics.ifPresent(queryExecutorMetrics -> queryExecutorMetrics.recordResubmissionRequest(1));
                completionService.submit(() -> routeQuery.routeQuery(node, nextRoundPartition, statement, serviceContext, routingOptions, pullQueryMetrics, pullPhysicalPlan, outputSchema, queryId, pullQueryQueue, shouldCancelRequests, consistencyOffsetVector));
            } else {
                Preconditions.checkState(fetchResult.getResult() == RoutingResult.SUCCESS);
                processedPartitions++;
            }
        } catch (final Exception e) {
            final MaterializationException exception = new MaterializationException("Unable to execute pull query: " + e.getMessage());
            for (Entry<Integer, List<Exception>> entry : exceptionsPerPartition.entrySet()) {
                for (Exception excp : entry.getValue()) {
                    exception.addSuppressed(excp);
                }
            }
            throw exception;
        }
    }
    pullQueryQueue.close();
}
Also used : ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) Query(io.confluent.ksql.parser.tree.Query) StreamedRow(io.confluent.ksql.rest.entity.StreamedRow) MaterializationException(io.confluent.ksql.execution.streams.materialization.MaterializationException) RoutingFilterFactory(io.confluent.ksql.execution.streams.RoutingFilter.RoutingFilterFactory) BiFunction(java.util.function.BiFunction) ServiceContext(io.confluent.ksql.services.ServiceContext) LoggerFactory(org.slf4j.LoggerFactory) RoutingOptions(io.confluent.ksql.execution.streams.RoutingOptions) KsqlNode(io.confluent.ksql.execution.streams.materialization.Locator.KsqlNode) Header(io.confluent.ksql.rest.entity.StreamedRow.Header) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) RestResponse(io.confluent.ksql.rest.client.RestResponse) AtomicReference(java.util.concurrent.atomic.AtomicReference) ArrayList(java.util.ArrayList) Future(java.util.concurrent.Future) ImmutableList(com.google.common.collect.ImmutableList) NotUpToBoundException(io.confluent.ksql.execution.streams.materialization.ks.NotUpToBoundException) Host(io.confluent.ksql.execution.streams.RoutingFilter.Host) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Map(java.util.Map) QueryId(io.confluent.ksql.query.QueryId) ExecutorService(java.util.concurrent.ExecutorService) KsqlRequestConfig(io.confluent.ksql.util.KsqlRequestConfig) Logger(org.slf4j.Logger) ImmutableMap(com.google.common.collect.ImmutableMap) ConfiguredStatement(io.confluent.ksql.statement.ConfiguredStatement) KsqlConfig(io.confluent.ksql.util.KsqlConfig) LogicalSchema(io.confluent.ksql.schema.ksql.LogicalSchema) Collectors(java.util.stream.Collectors) Executors(java.util.concurrent.Executors) Objects(java.util.Objects) Consumer(java.util.function.Consumer) List(java.util.List) PullQueryExecutorMetrics(io.confluent.ksql.internal.PullQueryExecutorMetrics) ConsistencyOffsetVector(io.confluent.ksql.util.ConsistencyOffsetVector) Entry(java.util.Map.Entry) KsqlException(io.confluent.ksql.util.KsqlException) Optional(java.util.Optional) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting) KsqlPartitionLocation(io.confluent.ksql.execution.streams.materialization.Locator.KsqlPartitionLocation) PullPhysicalPlanType(io.confluent.ksql.physical.pull.PullPhysicalPlan.PullPhysicalPlanType) PullQueryQueue(io.confluent.ksql.query.PullQueryQueue) ExecutorCompletionService(java.util.concurrent.ExecutorCompletionService) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ExecutorCompletionService(java.util.concurrent.ExecutorCompletionService) KsqlNode(io.confluent.ksql.execution.streams.materialization.Locator.KsqlNode) MaterializationException(io.confluent.ksql.execution.streams.materialization.MaterializationException) NotUpToBoundException(io.confluent.ksql.execution.streams.materialization.ks.NotUpToBoundException) KsqlException(io.confluent.ksql.util.KsqlException) MaterializationException(io.confluent.ksql.execution.streams.materialization.MaterializationException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Entry(java.util.Map.Entry) ArrayList(java.util.ArrayList) ImmutableList(com.google.common.collect.ImmutableList) List(java.util.List) KsqlPartitionLocation(io.confluent.ksql.execution.streams.materialization.Locator.KsqlPartitionLocation)
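
The round structure in executeRounds — submit one task per partition, take results as they complete, and resubmit a failed partition against the next candidate host — is plain ExecutorCompletionService usage. A self-contained, JDK-only sketch of the same pattern, with the per-partition work simulated (nothing below is ksql code; attempt() merely stands in for routeQuery.routeQuery):

import java.util.List;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public final class RetryRoundsSketch {
    public static void main(final String[] args) throws Exception {
        final List<Integer> partitions = List.of(0, 1, 2, 3);
        final ExecutorService pool = Executors.newFixedThreadPool(4);
        final ExecutorCompletionService<int[]> completion = new ExecutorCompletionService<>(pool);
        // First round: one submission per partition, attempt 0.
        for (final int partition : partitions) {
            completion.submit(() -> attempt(partition, 0));
        }
        int remaining = partitions.size();
        while (remaining > 0) {
            // Results arrive in completion order, not submission order.
            final int[] result = completion.take().get();   // {partition, attempt, success flag}
            if (result[2] == 0) {
                // Failed: retry the same partition on the "next host" (attempt + 1).
                completion.submit(() -> attempt(result[0], result[1] + 1));
            } else {
                remaining--;
            }
        }
        pool.shutdown();
    }

    // Stand-in for the per-partition fetch: the first attempt on odd partitions fails.
    private static int[] attempt(final int partition, final int attempt) {
        final boolean success = attempt > 0 || partition % 2 == 0;
        return new int[] { partition, attempt, success ? 1 : 0 };
    }
}

In HARouting the retries are bounded by the number of candidate hosts per partition; once nextNode() runs out of hosts, the per-partition exceptions collected in exceptionsPerPartition are attached as suppressed exceptions to the MaterializationException thrown above.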

Example 19 with ConfiguredStatement

use of io.confluent.ksql.statement.ConfiguredStatement in project ksql by confluentinc.

the class HARouting method executeOrRouteQuery.

@SuppressWarnings("ParameterNumber")
@VisibleForTesting
static PartitionFetchResult executeOrRouteQuery(
    final KsqlNode node,
    final KsqlPartitionLocation location,
    final ConfiguredStatement<Query> statement,
    final ServiceContext serviceContext,
    final RoutingOptions routingOptions,
    final Optional<PullQueryExecutorMetrics> pullQueryMetrics,
    final PullPhysicalPlan pullPhysicalPlan,
    final LogicalSchema outputSchema,
    final QueryId queryId,
    final PullQueryQueue pullQueryQueue,
    final CompletableFuture<Void> shouldCancelRequests,
    final Optional<ConsistencyOffsetVector> consistencyOffsetVector
) {
    final BiFunction<List<?>, LogicalSchema, PullQueryRow> rowFactory = (rawRow, schema) ->
        new PullQueryRow(rawRow, schema,
            Optional.ofNullable(routingOptions.getIsDebugRequest() ? node : null), Optional.empty());
    if (node.isLocal()) {
        try {
            LOG.debug("Query {} executed locally at host {} at timestamp {}.", statement.getStatementText(), node.location(), System.currentTimeMillis());
            pullQueryMetrics.ifPresent(queryExecutorMetrics -> queryExecutorMetrics.recordLocalRequests(1));
            synchronized (pullPhysicalPlan) {
                pullPhysicalPlan.execute(ImmutableList.of(location), pullQueryQueue, rowFactory);
                return new PartitionFetchResult(RoutingResult.SUCCESS, location, Optional.empty());
            }
        } catch (StandbyFallbackException | NotUpToBoundException e) {
            LOG.warn("Error executing query locally at node {}. Falling back to standby state which " + "may return stale results. Cause {}", node, e.getMessage());
            return new PartitionFetchResult(RoutingResult.STANDBY_FALLBACK, location, Optional.of(e));
        } catch (Exception e) {
            throw new KsqlException(String.format("Error executing query locally at node %s: %s", node.location(), e.getMessage()), e);
        }
    } else {
        try {
            LOG.debug("Query {} routed to host {} at timestamp {}.", statement.getStatementText(), node.location(), System.currentTimeMillis());
            pullQueryMetrics.ifPresent(queryExecutorMetrics -> queryExecutorMetrics.recordRemoteRequests(1));
            forwardTo(node, ImmutableList.of(location), statement, serviceContext, pullQueryQueue, rowFactory, outputSchema, shouldCancelRequests, consistencyOffsetVector);
            return new PartitionFetchResult(RoutingResult.SUCCESS, location, Optional.empty());
        } catch (StandbyFallbackException e) {
            LOG.warn("Error forwarding query to node {}. Falling back to standby state which may " + "return stale results", node.location(), e.getCause());
            return new PartitionFetchResult(RoutingResult.STANDBY_FALLBACK, location, Optional.of(e));
        } catch (Exception e) {
            throw new KsqlException(String.format("Error forwarding query to node %s: %s", node.location(), e.getMessage()), e);
        }
    }
}
Also used : ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) Query(io.confluent.ksql.parser.tree.Query) StreamedRow(io.confluent.ksql.rest.entity.StreamedRow) MaterializationException(io.confluent.ksql.execution.streams.materialization.MaterializationException) RoutingFilterFactory(io.confluent.ksql.execution.streams.RoutingFilter.RoutingFilterFactory) BiFunction(java.util.function.BiFunction) ServiceContext(io.confluent.ksql.services.ServiceContext) LoggerFactory(org.slf4j.LoggerFactory) RoutingOptions(io.confluent.ksql.execution.streams.RoutingOptions) KsqlNode(io.confluent.ksql.execution.streams.materialization.Locator.KsqlNode) Header(io.confluent.ksql.rest.entity.StreamedRow.Header) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) RestResponse(io.confluent.ksql.rest.client.RestResponse) AtomicReference(java.util.concurrent.atomic.AtomicReference) ArrayList(java.util.ArrayList) Future(java.util.concurrent.Future) ImmutableList(com.google.common.collect.ImmutableList) NotUpToBoundException(io.confluent.ksql.execution.streams.materialization.ks.NotUpToBoundException) Host(io.confluent.ksql.execution.streams.RoutingFilter.Host) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Map(java.util.Map) QueryId(io.confluent.ksql.query.QueryId) ExecutorService(java.util.concurrent.ExecutorService) KsqlRequestConfig(io.confluent.ksql.util.KsqlRequestConfig) Logger(org.slf4j.Logger) ImmutableMap(com.google.common.collect.ImmutableMap) ConfiguredStatement(io.confluent.ksql.statement.ConfiguredStatement) KsqlConfig(io.confluent.ksql.util.KsqlConfig) LogicalSchema(io.confluent.ksql.schema.ksql.LogicalSchema) Collectors(java.util.stream.Collectors) Executors(java.util.concurrent.Executors) Objects(java.util.Objects) Consumer(java.util.function.Consumer) List(java.util.List) PullQueryExecutorMetrics(io.confluent.ksql.internal.PullQueryExecutorMetrics) ConsistencyOffsetVector(io.confluent.ksql.util.ConsistencyOffsetVector) Entry(java.util.Map.Entry) KsqlException(io.confluent.ksql.util.KsqlException) Optional(java.util.Optional) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting) KsqlPartitionLocation(io.confluent.ksql.execution.streams.materialization.Locator.KsqlPartitionLocation) PullPhysicalPlanType(io.confluent.ksql.physical.pull.PullPhysicalPlan.PullPhysicalPlanType) PullQueryQueue(io.confluent.ksql.query.PullQueryQueue) ExecutorCompletionService(java.util.concurrent.ExecutorCompletionService) ArrayList(java.util.ArrayList) ImmutableList(com.google.common.collect.ImmutableList) List(java.util.List) LogicalSchema(io.confluent.ksql.schema.ksql.LogicalSchema) NotUpToBoundException(io.confluent.ksql.execution.streams.materialization.ks.NotUpToBoundException) KsqlException(io.confluent.ksql.util.KsqlException) MaterializationException(io.confluent.ksql.execution.streams.materialization.MaterializationException) NotUpToBoundException(io.confluent.ksql.execution.streams.materialization.ks.NotUpToBoundException) KsqlException(io.confluent.ksql.util.KsqlException) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
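
The branching in executeOrRouteQuery boils down to classifying each per-partition outcome: success, a recoverable failure (StandbyFallbackException, or locally also NotUpToBoundException) that becomes a STANDBY_FALLBACK result, or a fatal error rethrown as KsqlException. A condensed restatement of that classification as a hypothetical helper (classify is not a ksql method; it simply mirrors the catch blocks above):

// Hypothetical helper mirroring the catch blocks above; not part of HARouting.
private static PartitionFetchResult classify(final KsqlPartitionLocation location, final Exception error) {
    if (error == null) {
        return new PartitionFetchResult(RoutingResult.SUCCESS, location, Optional.empty());
    }
    if (error instanceof StandbyFallbackException || error instanceof NotUpToBoundException) {
        // Recoverable: another host (for example a standby) may still serve this partition.
        return new PartitionFetchResult(RoutingResult.STANDBY_FALLBACK, location, Optional.of(error));
    }
    // Anything else is fatal for the whole pull query.
    throw new KsqlException("Error executing pull query: " + error.getMessage(), error);
}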

Example 20 with ConfiguredStatement

use of io.confluent.ksql.statement.ConfiguredStatement in project ksql by confluentinc.

the class HARouting method handlePullQuery.

public CompletableFuture<Void> handlePullQuery(
    final ServiceContext serviceContext,
    final PullPhysicalPlan pullPhysicalPlan,
    final ConfiguredStatement<Query> statement,
    final RoutingOptions routingOptions,
    final LogicalSchema outputSchema,
    final QueryId queryId,
    final PullQueryQueue pullQueryQueue,
    final CompletableFuture<Void> shouldCancelRequests,
    final Optional<ConsistencyOffsetVector> consistencyOffsetVector
) {
    final List<KsqlPartitionLocation> allLocations = pullPhysicalPlan.getMaterialization().locator().locate(
        pullPhysicalPlan.getKeys(), routingOptions, routingFilterFactory,
        pullPhysicalPlan.getPlanType() == PullPhysicalPlanType.RANGE_SCAN);
    final Map<Integer, List<Host>> emptyPartitions = allLocations.stream()
        .filter(loc -> loc.getNodes().stream().noneMatch(node -> node.getHost().isSelected()))
        .collect(Collectors.toMap(
            KsqlPartitionLocation::getPartition,
            loc -> loc.getNodes().stream().map(KsqlNode::getHost).collect(Collectors.toList())));
    if (!emptyPartitions.isEmpty()) {
        final MaterializationException materializationException = new MaterializationException(
            "Unable to execute pull query. " + emptyPartitions.entrySet().stream()
                .map(kv -> String.format(
                    "Partition %s failed to find valid host. Hosts scanned: %s", kv.getKey(), kv.getValue()))
                .collect(Collectors.joining(", ", "[", "]")));
        LOG.debug(materializationException.getMessage());
        throw materializationException;
    }
    // at this point we should filter out the hosts that we should not route to
    final List<KsqlPartitionLocation> locations = allLocations.stream().map(KsqlPartitionLocation::removeFilteredHosts).collect(Collectors.toList());
    final CompletableFuture<Void> completableFuture = new CompletableFuture<>();
    coordinatorExecutorService.submit(() -> {
        try {
            executeRounds(serviceContext, pullPhysicalPlan, statement, routingOptions, outputSchema, queryId, locations, pullQueryQueue, shouldCancelRequests, consistencyOffsetVector);
            completableFuture.complete(null);
        } catch (Throwable t) {
            completableFuture.completeExceptionally(t);
        }
    });
    return completableFuture;
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) Query(io.confluent.ksql.parser.tree.Query) StreamedRow(io.confluent.ksql.rest.entity.StreamedRow) MaterializationException(io.confluent.ksql.execution.streams.materialization.MaterializationException) RoutingFilterFactory(io.confluent.ksql.execution.streams.RoutingFilter.RoutingFilterFactory) BiFunction(java.util.function.BiFunction) ServiceContext(io.confluent.ksql.services.ServiceContext) LoggerFactory(org.slf4j.LoggerFactory) RoutingOptions(io.confluent.ksql.execution.streams.RoutingOptions) KsqlNode(io.confluent.ksql.execution.streams.materialization.Locator.KsqlNode) Header(io.confluent.ksql.rest.entity.StreamedRow.Header) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) RestResponse(io.confluent.ksql.rest.client.RestResponse) AtomicReference(java.util.concurrent.atomic.AtomicReference) ArrayList(java.util.ArrayList) Future(java.util.concurrent.Future) ImmutableList(com.google.common.collect.ImmutableList) NotUpToBoundException(io.confluent.ksql.execution.streams.materialization.ks.NotUpToBoundException) Host(io.confluent.ksql.execution.streams.RoutingFilter.Host) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Map(java.util.Map) QueryId(io.confluent.ksql.query.QueryId) ExecutorService(java.util.concurrent.ExecutorService) KsqlRequestConfig(io.confluent.ksql.util.KsqlRequestConfig) Logger(org.slf4j.Logger) ImmutableMap(com.google.common.collect.ImmutableMap) ConfiguredStatement(io.confluent.ksql.statement.ConfiguredStatement) KsqlConfig(io.confluent.ksql.util.KsqlConfig) LogicalSchema(io.confluent.ksql.schema.ksql.LogicalSchema) Collectors(java.util.stream.Collectors) Executors(java.util.concurrent.Executors) Objects(java.util.Objects) Consumer(java.util.function.Consumer) List(java.util.List) PullQueryExecutorMetrics(io.confluent.ksql.internal.PullQueryExecutorMetrics) ConsistencyOffsetVector(io.confluent.ksql.util.ConsistencyOffsetVector) Entry(java.util.Map.Entry) KsqlException(io.confluent.ksql.util.KsqlException) Optional(java.util.Optional) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting) KsqlPartitionLocation(io.confluent.ksql.execution.streams.materialization.Locator.KsqlPartitionLocation) PullPhysicalPlanType(io.confluent.ksql.physical.pull.PullPhysicalPlan.PullPhysicalPlanType) PullQueryQueue(io.confluent.ksql.query.PullQueryQueue) ExecutorCompletionService(java.util.concurrent.ExecutorCompletionService) CompletableFuture(java.util.concurrent.CompletableFuture) KsqlPartitionLocation(io.confluent.ksql.execution.streams.materialization.Locator.KsqlPartitionLocation) ArrayList(java.util.ArrayList) ImmutableList(com.google.common.collect.ImmutableList) List(java.util.List) MaterializationException(io.confluent.ksql.execution.streams.materialization.MaterializationException)
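
handlePullQuery returns immediately; the rounds run on coordinatorExecutorService and the returned future completes (or completes exceptionally) when they finish, with executeRounds closing the pull query queue once every partition has been served. A caller-side sketch, assuming an HARouting instance named haRouting and the same arguments as above (the logging is illustrative):

// Illustrative caller: react to completion of the asynchronous routing rounds.
final CompletableFuture<Void> done = haRouting.handlePullQuery(
    serviceContext, pullPhysicalPlan, statement, routingOptions,
    outputSchema, queryId, pullQueryQueue, shouldCancelRequests, consistencyOffsetVector);
done.whenComplete((ignored, error) -> {
    if (error != null) {
        // Rows already placed on pullQueryQueue may have been delivered before the failure.
        LOG.error("Pull query {} failed", queryId, error);
    }
});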

Aggregations

ConfiguredStatement (io.confluent.ksql.statement.ConfiguredStatement) 84
Test (org.junit.Test) 57
KsqlException (io.confluent.ksql.util.KsqlException) 23
Statement (io.confluent.ksql.parser.tree.Statement) 22
PreparedStatement (io.confluent.ksql.parser.KsqlParser.PreparedStatement) 19
Optional (java.util.Optional) 19
ServiceContext (io.confluent.ksql.services.ServiceContext) 18
PersistentQueryMetadata (io.confluent.ksql.util.PersistentQueryMetadata) 18
KsqlEngine (io.confluent.ksql.engine.KsqlEngine) 17
KsqlConfig (io.confluent.ksql.util.KsqlConfig) 16
Map (java.util.Map) 14
DataSource (io.confluent.ksql.metastore.model.DataSource) 12
QueryId (io.confluent.ksql.query.QueryId) 12
LogicalSchema (io.confluent.ksql.schema.ksql.LogicalSchema) 12
Collectors (java.util.stream.Collectors) 12
ImmutableMap (com.google.common.collect.ImmutableMap) 11
ListQueries (io.confluent.ksql.parser.tree.ListQueries) 11
KsqlStatementException (io.confluent.ksql.util.KsqlStatementException) 11
List (java.util.List) 11
Query (io.confluent.ksql.parser.tree.Query) 10