
Example 1 with ConfiguredStatement

Use of io.confluent.ksql.statement.ConfiguredStatement in project ksql by confluentinc.

From the class PushRouting, method connectToHosts:

/**
 * Connects to all of the hosts provided.
 * @return A future for a PushConnectionsHandle, which can be used to terminate connections.
 */
@SuppressWarnings("checkstyle:ParameterNumber")
private CompletableFuture<PushConnectionsHandle> connectToHosts(
        final ServiceContext serviceContext,
        final PushPhysicalPlanManager pushPhysicalPlanManager,
        final ConfiguredStatement<Query> statement,
        final Collection<KsqlNode> hosts,
        final LogicalSchema outputSchema,
        final TransientQueryQueue transientQueryQueue,
        final PushConnectionsHandle pushConnectionsHandle,
        final boolean dynamicallyAddedNode,
        final Optional<ScalablePushQueryMetrics> scalablePushQueryMetrics,
        final Set<KsqlNode> catchupHosts,
        final PushRoutingOptions pushRoutingOptions,
        final String thisHostName) {
    final Map<KsqlNode, CompletableFuture<RoutingResult>> futureMap = new LinkedHashMap<>();
    for (final KsqlNode node : hosts) {
        pushConnectionsHandle.add(node, new RoutingResult(RoutingResultStatus.IN_PROGRESS, () -> {
        }));
        final CompletableFuture<Void> callback = new CompletableFuture<>();
        callback.handle((v, t) -> {
            if (t == null) {
                pushConnectionsHandle.get(node).ifPresent(result -> {
                    result.close();
                    result.updateStatus(RoutingResultStatus.COMPLETE);
                });
                LOG.info("Host {} completed request {}.", node, pushPhysicalPlanManager.getQueryId());
            } else if (t instanceof GapFoundException) {
                pushConnectionsHandle.get(node).ifPresent(result -> {
                    result.close();
                    result.updateStatus(RoutingResultStatus.OFFSET_GAP_FOUND);
                });
            } else {
                pushConnectionsHandle.completeExceptionally(t);
            }
            return null;
        });
        futureMap.put(node, executeOrRouteQuery(
            node, statement, serviceContext, pushPhysicalPlanManager, outputSchema,
            transientQueryQueue, callback, scalablePushQueryMetrics,
            pushConnectionsHandle.getOffsetsTracker(), catchupHosts.contains(node),
            pushRoutingOptions, thisHostName));
    }
    return CompletableFuture.allOf(futureMap.values().toArray(new CompletableFuture[0])).thenApply(v -> {
        for (final KsqlNode node : hosts) {
            final CompletableFuture<RoutingResult> future = futureMap.get(node);
            final RoutingResult routingResult = future.join();
            pushConnectionsHandle.add(node, routingResult);
        }
        return pushConnectionsHandle;
    }).exceptionally(t -> {
        final KsqlNode node = futureMap.entrySet().stream()
            .filter(e -> e.getValue().isCompletedExceptionally())
            .map(Entry::getKey)
            .findFirst()
            .orElse(null);
        for (KsqlNode n : hosts) {
            final CompletableFuture<RoutingResult> future = futureMap.get(n);
            // Take whatever completed exceptionally and mark it as failed
            if (future.isCompletedExceptionally()) {
                pushConnectionsHandle.get(n).ifPresent(result -> result.updateStatus(RoutingResultStatus.FAILED));
            } else {
                final RoutingResult routingResult = future.join();
                pushConnectionsHandle.add(node, routingResult);
            }
        }
        LOG.warn("Error routing query {} id {} to host {} at timestamp {} with exception {}", statement.getStatementText(), pushPhysicalPlanManager.getQueryId(), node, System.currentTimeMillis(), t.getCause());
        // retries in that case and don't fail the original request.
        if (!dynamicallyAddedNode) {
            pushConnectionsHandle.completeExceptionally(new KsqlException(String.format("Unable to execute push query \"%s\". %s", statement.getStatementText(), t.getCause().getMessage())));
        }
        return pushConnectionsHandle;
    }).exceptionally(t -> {
        LOG.error("Unexpected error handing exception", t);
        return pushConnectionsHandle;
    });
}
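
To make the shape of this fan-out easier to see in isolation, here is a minimal, self-contained sketch of the same CompletableFuture.allOf(...).thenApply(...).exceptionally(...) pattern, stripped of the ksql types. FanOutSketch and connectOne are hypothetical stand-ins (connectOne plays the role of executeOrRouteQuery); only standard java.util.concurrent APIs are used.

// Minimal sketch, not the ksql implementation: one async attempt per host, then a
// combined future that reports per-host success, degrading rather than failing when
// some attempts complete exceptionally.
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;

final class FanOutSketch {

    static CompletableFuture<Map<String, Boolean>> connectAll(final List<String> hosts) {
        final Map<String, CompletableFuture<Boolean>> futures = new LinkedHashMap<>();
        for (final String host : hosts) {
            futures.put(host, connectOne(host));
        }
        return CompletableFuture
            .allOf(futures.values().toArray(new CompletableFuture[0]))
            .thenApply(v -> {
                // Every future completed normally, so join() does not block here.
                final Map<String, Boolean> results = new LinkedHashMap<>();
                futures.forEach((host, f) -> results.put(host, f.join()));
                return results;
            })
            .exceptionally(t -> {
                // At least one attempt failed: record those hosts as failed instead of
                // propagating the exception to the caller.
                final Map<String, Boolean> results = new LinkedHashMap<>();
                futures.forEach((host, f) ->
                    results.put(host, !f.isCompletedExceptionally() && f.join()));
                return results;
            });
    }

    private static CompletableFuture<Boolean> connectOne(final String host) {
        // Stand-in for executeOrRouteQuery; a real implementation completes asynchronously.
        return CompletableFuture.completedFuture(true);
    }
}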

Example 2 with ConfiguredStatement

Use of io.confluent.ksql.statement.ConfiguredStatement in project ksql by confluentinc.

From the class PushRouting, method checkForNewHosts:

private void checkForNewHosts(
        final ServiceContext serviceContext,
        final PushPhysicalPlanManager pushPhysicalPlanManager,
        final ConfiguredStatement<Query> statement,
        final LogicalSchema outputSchema,
        final TransientQueryQueue transientQueryQueue,
        final PushConnectionsHandle pushConnectionsHandle,
        final Optional<ScalablePushQueryMetrics> scalablePushQueryMetrics,
        final PushRoutingOptions pushRoutingOptions,
        final String thisHostName) {
    VertxUtils.checkContext(pushPhysicalPlanManager.getContext());
    if (pushConnectionsHandle.isClosed()) {
        return;
    }
    final Set<KsqlNode> updatedHosts = registryToNodes.apply(pushPhysicalPlanManager.getScalablePushRegistry());
    final Set<KsqlNode> hosts = pushConnectionsHandle.getActiveHosts();
    final Set<KsqlNode> newHosts = Sets.difference(updatedHosts, hosts).stream()
        .filter(node -> pushConnectionsHandle.get(node)
            .map(routingResult -> routingResult.getStatus() != RoutingResultStatus.IN_PROGRESS)
            .orElse(true))
        .collect(Collectors.toSet());
    final Set<KsqlNode> removedHosts = Sets.difference(hosts, updatedHosts);
    if (newHosts.size() > 0) {
        LOG.info("Dynamically adding new hosts {} for {}", newHosts, pushPhysicalPlanManager.getQueryId());
        final Set<KsqlNode> catchupHosts = newHosts.stream()
            .filter(node -> pushConnectionsHandle.get(node)
                .map(routingResult -> routingResult.getStatus() == RoutingResultStatus.OFFSET_GAP_FOUND)
                .orElse(false))
            .collect(Collectors.toSet());
        connectToHosts(serviceContext, pushPhysicalPlanManager, statement, newHosts, outputSchema,
            transientQueryQueue, pushConnectionsHandle, true, scalablePushQueryMetrics,
            catchupHosts, pushRoutingOptions, thisHostName);
    }
    if (removedHosts.size() > 0) {
        LOG.info("Dynamically removing hosts {} for {}", removedHosts, pushPhysicalPlanManager.getQueryId());
        for (final KsqlNode node : removedHosts) {
            final RoutingResult result = pushConnectionsHandle.remove(node);
            result.close();
            result.updateStatus(RoutingResultStatus.REMOVED);
        }
    }
    pushPhysicalPlanManager.getContext().owner().setTimer(clusterCheckInterval, timerId ->
        checkForNewHosts(serviceContext, pushPhysicalPlanManager, statement, outputSchema,
            transientQueryQueue, pushConnectionsHandle, scalablePushQueryMetrics,
            pushRoutingOptions, thisHostName));
}
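
The essence of the method above is a periodic diff between the host set reported by the registry and the set of currently active connections, with a one-shot Vert.x timer re-armed after every pass. Below is a rough, hedged sketch of that pattern using plain Vert.x and Guava APIs; ClusterWatchSketch, currentHosts, and activeHosts are illustrative names, not ksql classes.

// Sketch only: diff the latest membership against the active set, then re-arm the timer,
// mirroring the shape of checkForNewHosts (Sets.difference plus vertx.setTimer).
import com.google.common.collect.Sets;
import io.vertx.core.Vertx;
import java.util.HashSet;
import java.util.Set;
import java.util.function.Supplier;

final class ClusterWatchSketch {

    private final Vertx vertx;
    private final Supplier<Set<String>> currentHosts; // stand-in for registryToNodes.apply(...)
    private final Set<String> activeHosts;            // stand-in for the connections handle
    private final long checkIntervalMs;

    ClusterWatchSketch(final Vertx vertx, final Supplier<Set<String>> currentHosts,
        final Set<String> activeHosts, final long checkIntervalMs) {
        this.vertx = vertx;
        this.currentHosts = currentHosts;
        this.activeHosts = activeHosts;
        this.checkIntervalMs = checkIntervalMs;
    }

    void check() {
        final Set<String> updated = currentHosts.get();
        // Copy the difference views so the backing sets can be mutated while handling them.
        final Set<String> added = new HashSet<>(Sets.difference(updated, activeHosts));
        final Set<String> removed = new HashSet<>(Sets.difference(activeHosts, updated));
        for (final String host : added) {
            activeHosts.add(host);      // the real code connects first (connectToHosts)
        }
        for (final String host : removed) {
            activeHosts.remove(host);   // the real code closes the routing result first
        }
        // One-shot timer re-armed on every pass, like setTimer(clusterCheckInterval, ...).
        vertx.setTimer(checkIntervalMs, timerId -> check());
    }
}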

Example 3 with ConfiguredStatement

Use of io.confluent.ksql.statement.ConfiguredStatement in project ksql by confluentinc.

From the class ListConnectorsExecutor, method execute:

@SuppressWarnings("OptionalGetWithoutIsPresent")
public StatementExecutorResponse execute(
        final ConfiguredStatement<ListConnectors> configuredStatement,
        final SessionProperties sessionProperties,
        final KsqlExecutionContext ksqlExecutionContext,
        final ServiceContext serviceContext) {
    final ConnectClient connectClient = serviceContext.getConnectClient();
    final ConnectResponse<List<String>> connectors = serviceContext.getConnectClient().connectors();
    if (connectors.error().isPresent()) {
        return StatementExecutorResponse.handled(connectErrorHandler.handle(configuredStatement, connectors));
    }
    final List<SimpleConnectorInfo> infos = new ArrayList<>();
    final List<KsqlWarning> warnings = new ArrayList<>();
    final Scope scope = configuredStatement.getStatement().getScope();
    for (final String name : connectors.datum().get()) {
        final ConnectResponse<ConnectorInfo> response = connectClient.describe(name);
        if (response.datum().filter(i -> inScope(i.type(), scope)).isPresent()) {
            final ConnectResponse<ConnectorStateInfo> status = connectClient.status(name);
            infos.add(fromConnectorInfoResponse(name, response, status));
        } else if (response.error().isPresent()) {
            if (scope == Scope.ALL) {
                infos.add(new SimpleConnectorInfo(name, ConnectorType.UNKNOWN, null, null));
            }
            warnings.add(new KsqlWarning(String.format("Could not describe connector %s: %s", name, response.error().get())));
        }
    }
    return StatementExecutorResponse.handled(Optional.of(new ConnectorList(configuredStatement.getStatementText(), warnings, infos)));
}
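
The loop above turns a per-connector describe failure into a warning instead of failing the whole LIST CONNECTORS request. Here is a small, hedged sketch of that accumulate-results-and-warnings pattern; describeConnector and the String result type are hypothetical stand-ins for connectClient.describe(name) and SimpleConnectorInfo.

// Sketch of the "describe each item, downgrade individual failures to warnings" loop.
import java.util.List;
import java.util.Optional;

final class ListWithWarningsSketch {

    static void collect(final List<String> names, final List<String> infos,
        final List<String> warnings) {
        for (final String name : names) {
            final Optional<String> described = describeConnector(name);
            if (described.isPresent()) {
                infos.add(described.get());
            } else {
                // One bad connector does not fail the request; it becomes a warning.
                warnings.add(String.format("Could not describe connector %s", name));
            }
        }
    }

    private static Optional<String> describeConnector(final String name) {
        // Stand-in for connectClient.describe(name); empty means the describe call failed.
        return Optional.of(name);
    }
}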

Example 4 with ConfiguredStatement

Use of io.confluent.ksql.statement.ConfiguredStatement in project ksql by confluentinc.

From the class ListTopicsExecutor, method execute:

public static StatementExecutorResponse execute(
        final ConfiguredStatement<ListTopics> statement,
        final SessionProperties sessionProperties,
        final KsqlExecutionContext executionContext,
        final ServiceContext serviceContext) {
    final KafkaTopicClient client = serviceContext.getTopicClient();
    final Map<String, TopicDescription> topicDescriptions = listTopics(client, statement);
    if (statement.getStatement().getShowExtended()) {
        final KafkaConsumerGroupClient consumerGroupClient = new KafkaConsumerGroupClientImpl(serviceContext::getAdminClient);
        final Map<String, List<Integer>> topicConsumersAndGroupCount = getTopicConsumerAndGroupCounts(consumerGroupClient);
        final List<KafkaTopicInfoExtended> topicInfoExtendedList = topicDescriptions.values().stream()
            .map(desc -> topicDescriptionToTopicInfoExtended(desc, topicConsumersAndGroupCount))
            .collect(Collectors.toList());
        return StatementExecutorResponse.handled(Optional.of(
            new KafkaTopicsListExtended(statement.getStatementText(), topicInfoExtendedList)));
    } else {
        final List<KafkaTopicInfo> topicInfoList = topicDescriptions.values().stream()
            .map(ListTopicsExecutor::topicDescriptionToTopicInfo)
            .collect(Collectors.toList());
        return StatementExecutorResponse.handled(Optional.of(
            new KafkaTopicsList(statement.getStatementText(), topicInfoList)));
    }
}
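
For readers who want to see roughly what listing and describing topics involves underneath ksql's KafkaTopicClient wrapper, here is a hedged sketch using the stock Kafka Admin client. The wrapper's exact behavior (for example, filtering of reserved internal topics) is not reproduced; this only derives names, partition counts, and replica counts from TopicDescription, similar to the extended listing above.

// Sketch with the plain Kafka Admin client, not ksql's KafkaTopicClient.
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.TopicDescription;

final class ListTopicsSketch {

    static void printTopics(final String bootstrapServers)
        throws ExecutionException, InterruptedException {
        final Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        try (Admin admin = Admin.create(props)) {
            // List all topic names, then describe them in one round trip.
            final Set<String> names = admin.listTopics().names().get();
            final Map<String, TopicDescription> descriptions = admin.describeTopics(names).all().get();
            descriptions.forEach((name, desc) -> System.out.printf(
                "%s partitions=%d replicas=%d%n",
                name,
                desc.partitions().size(),
                desc.partitions().get(0).replicas().size()));
        }
    }
}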

Example 5 with ConfiguredStatement

Use of io.confluent.ksql.statement.ConfiguredStatement in project ksql by confluentinc.

From the class DescribeConnectorExecutor, method execute:

@SuppressWarnings("OptionalGetWithoutIsPresent")
public StatementExecutorResponse execute(
        final ConfiguredStatement<DescribeConnector> configuredStatement,
        final SessionProperties sessionProperties,
        final KsqlExecutionContext ksqlExecutionContext,
        final ServiceContext serviceContext) {
    final String connectorName = configuredStatement.getStatement().getConnectorName();
    final ConnectResponse<ConnectorStateInfo> statusResponse = serviceContext.getConnectClient().status(connectorName);
    if (statusResponse.error().isPresent()) {
        return StatementExecutorResponse.handled(connectErrorHandler.handle(configuredStatement, statusResponse));
    }
    final ConnectResponse<ConnectorInfo> infoResponse = serviceContext.getConnectClient().describe(connectorName);
    if (infoResponse.error().isPresent()) {
        return StatementExecutorResponse.handled(connectErrorHandler.handle(configuredStatement, infoResponse));
    }
    final ConnectorStateInfo status = statusResponse.datum().get();
    final ConnectorInfo info = infoResponse.datum().get();
    final Optional<Connector> connector = connectorFactory.apply(info);
    final List<KsqlWarning> warnings;
    final List<String> topics;
    if (connector.isPresent()) {
        // Small optimization. If a connector's info is not found in the response, don't query for
        // active topics with the given connectorName
        final ConnectResponse<Map<String, Map<String, List<String>>>> topicsResponse =
            serviceContext.getConnectClient().topics(connectorName);
        // If the topics endpoint is unavailable (older Connect versions return 404), don't surface
        // an error to the user; the failure only shows up in the server logs.
        if (topicsResponse.error().isPresent() && topicsResponse.httpCode() == HttpStatus.SC_NOT_FOUND) {
            topics = ImmutableList.of();
            warnings = ImmutableList.of();
            LOG.warn("Could not list related topics due to error: " + topicsResponse.error().get());
        } else if (topicsResponse.error().isPresent()) {
            topics = ImmutableList.of();
            warnings = ImmutableList.of(new KsqlWarning("Could not list related topics due to error: " + topicsResponse.error().get()));
        } else {
            topics = topicsResponse.datum().get().get(connectorName).getOrDefault(TOPICS_KEY, ImmutableList.of());
            warnings = ImmutableList.of();
        }
    } else {
        topics = ImmutableList.of();
        warnings = ImmutableList.of();
    }
    final List<SourceDescription> sources;
    if (connector.isPresent()) {
        sources = ksqlExecutionContext.getMetaStore().getAllDataSources().values().stream()
            .filter(source -> topics.contains(source.getKafkaTopicName()))
            .map(source -> SourceDescriptionFactory.create(source, false, ImmutableList.of(),
                ImmutableList.of(), Optional.empty(), ImmutableList.of(), ImmutableList.of(),
                ksqlExecutionContext.metricCollectors()))
            .collect(Collectors.toList());
    } else {
        sources = ImmutableList.of();
    }
    final ConnectorDescription description = new ConnectorDescription(
        configuredStatement.getStatementText(),
        info.config().get(ConnectorConfig.CONNECTOR_CLASS_CONFIG),
        status,
        sources,
        topics,
        warnings);
    return StatementExecutorResponse.handled(Optional.of(description));
}
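
The branch handling topicsResponse above distinguishes a missing endpoint (HTTP 404, logged only) from other errors (surfaced as a user-visible warning). As a minimal sketch of that three-way handling under those assumptions, with fetchTopics and the Response holder as hypothetical stand-ins for ConnectClient.topics(...) and ConnectResponse:

// Sketch: 404 is silent (server log only), other errors become a warning, success yields topics.
import com.google.common.collect.ImmutableList;
import java.util.List;
import java.util.Optional;

final class TopicsLookupSketch {

    private static final int HTTP_NOT_FOUND = 404;

    static List<String> topicsOrWarn(final String connectorName, final List<String> warnings) {
        final Response response = fetchTopics(connectorName);
        if (response.error.isPresent() && response.httpCode == HTTP_NOT_FOUND) {
            // Older Connect versions lack the endpoint: degrade silently.
            return ImmutableList.of();
        } else if (response.error.isPresent()) {
            warnings.add("Could not list related topics due to error: " + response.error.get());
            return ImmutableList.of();
        }
        return response.topics;
    }

    static final class Response {
        final Optional<String> error;
        final int httpCode;
        final List<String> topics;

        Response(final Optional<String> error, final int httpCode, final List<String> topics) {
            this.error = error;
            this.httpCode = httpCode;
            this.topics = topics;
        }
    }

    private static Response fetchTopics(final String connectorName) {
        // Stand-in for serviceContext.getConnectClient().topics(connectorName).
        return new Response(Optional.empty(), 200, ImmutableList.of());
    }
}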

Aggregations

ConfiguredStatement (io.confluent.ksql.statement.ConfiguredStatement): 84 usages
Test (org.junit.Test): 57 usages
KsqlException (io.confluent.ksql.util.KsqlException): 23 usages
Statement (io.confluent.ksql.parser.tree.Statement): 22 usages
PreparedStatement (io.confluent.ksql.parser.KsqlParser.PreparedStatement): 19 usages
Optional (java.util.Optional): 19 usages
ServiceContext (io.confluent.ksql.services.ServiceContext): 18 usages
PersistentQueryMetadata (io.confluent.ksql.util.PersistentQueryMetadata): 18 usages
KsqlEngine (io.confluent.ksql.engine.KsqlEngine): 17 usages
KsqlConfig (io.confluent.ksql.util.KsqlConfig): 16 usages
Map (java.util.Map): 14 usages
DataSource (io.confluent.ksql.metastore.model.DataSource): 12 usages
QueryId (io.confluent.ksql.query.QueryId): 12 usages
LogicalSchema (io.confluent.ksql.schema.ksql.LogicalSchema): 12 usages
Collectors (java.util.stream.Collectors): 12 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 11 usages
ListQueries (io.confluent.ksql.parser.tree.ListQueries): 11 usages
KsqlStatementException (io.confluent.ksql.util.KsqlStatementException): 11 usages
List (java.util.List): 11 usages
Query (io.confluent.ksql.parser.tree.Query): 10 usages