
Example 11 with SessionConfig

Use of io.confluent.ksql.config.SessionConfig in project ksql by confluentinc.

From the class KafkaStreamsQueryValidatorTest, method shouldIgnoreGlobalLimitSetInOverrides:

@Test
public void shouldIgnoreGlobalLimitSetInOverrides() {
    // Given:
    final SessionConfig config = SessionConfig.of(
        new KsqlConfig(ImmutableMap.of(KsqlConfig.KSQL_TOTAL_CACHE_MAX_BYTES_BUFFERING, 30)),
        ImmutableMap.of(
            StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 50,
            KsqlConfig.KSQL_TOTAL_CACHE_MAX_BYTES_BUFFERING, 500
        )
    );
    // When/Then:
    assertThrows(
        KsqlException.class,
        () -> queryValidator.validateQuery(config, plan, queries)
    );
}
Also used : SessionConfig(io.confluent.ksql.config.SessionConfig) KsqlConfig(io.confluent.ksql.util.KsqlConfig) Test(org.junit.Test)
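
For context, a minimal standalone sketch of what the test above exercises: SessionConfig.of keeps the server-level KsqlConfig and the per-request overrides side by side, and the caller decides whether the overrides are applied. The accessor names (getOverrides, getConfig(boolean)) reflect my reading of the ksql codebase; treat this as an illustrative sketch, not authoritative documentation.

import com.google.common.collect.ImmutableMap;
import io.confluent.ksql.config.SessionConfig;
import io.confluent.ksql.util.KsqlConfig;
import org.apache.kafka.streams.StreamsConfig;

public final class SessionConfigSketch {

    public static void main(final String[] args) {
        // Server-level config: total cache limit of 30 bytes across all queries.
        final KsqlConfig serverConfig = new KsqlConfig(
            ImmutableMap.of(KsqlConfig.KSQL_TOTAL_CACHE_MAX_BYTES_BUFFERING, 30));

        // Per-request overrides: a per-query cache of 50 bytes plus an attempt to raise
        // the global limit to 500, which the validator in the test above ignores.
        final SessionConfig config = SessionConfig.of(
            serverConfig,
            ImmutableMap.of(
                StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 50,
                KsqlConfig.KSQL_TOTAL_CACHE_MAX_BYTES_BUFFERING, 500));

        // The overrides stay separate from the system config until explicitly applied.
        System.out.println(config.getOverrides());
        // getConfig(false) is assumed to return the untouched server config,
        // getConfig(true) the config with the overrides applied on top.
        System.out.println(config.getConfig(false)
            .getLong(KsqlConfig.KSQL_TOTAL_CACHE_MAX_BYTES_BUFFERING));
    }
}

Because the validator reads the global limit from the server side of the SessionConfig, the override of 500 has no effect and the per-query cache of 50 exceeds the server limit of 30, hence the expected KsqlException.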

Example 12 with SessionConfig

Use of io.confluent.ksql.config.SessionConfig in project ksql by confluentinc.

From the class QueryExecutor, method handleQuery:

private QueryMetadataHolder handleQuery(
    final ServiceContext serviceContext,
    final PreparedStatement<Query> statement,
    final Optional<Boolean> isInternalRequest,
    final MetricsCallbackHolder metricsCallbackHolder,
    final Map<String, Object> configOverrides,
    final Map<String, Object> requestProperties,
    final Context context,
    final boolean excludeTombstones
) {
    if (statement.getStatement().isPullQuery()) {
        final ImmutableAnalysis analysis = ksqlEngine.analyzeQueryWithNoOutputTopic(
            statement.getStatement(), statement.getStatementText(), configOverrides);
        final DataSource dataSource = analysis.getFrom().getDataSource();
        final DataSource.DataSourceType dataSourceType = dataSource.getDataSourceType();
        if (!ksqlConfig.getBoolean(KsqlConfig.KSQL_PULL_QUERIES_ENABLE_CONFIG)) {
            throw new KsqlStatementException(
                "Pull queries are disabled."
                    + PullQueryValidator.PULL_QUERY_SYNTAX_HELP
                    + System.lineSeparator()
                    + "Please set " + KsqlConfig.KSQL_PULL_QUERIES_ENABLE_CONFIG + "=true to enable "
                    + "this feature." + System.lineSeparator(),
                statement.getStatementText());
        }
        Optional<ConsistencyOffsetVector> consistencyOffsetVector = Optional.empty();
        if (ksqlConfig.getBoolean(KsqlConfig.KSQL_QUERY_PULL_CONSISTENCY_OFFSET_VECTOR_ENABLED)
            && requestProperties.containsKey(
                KsqlRequestConfig.KSQL_REQUEST_QUERY_PULL_CONSISTENCY_OFFSET_VECTOR)) {
            final String serializedCV = (String) requestProperties.get(
                KsqlRequestConfig.KSQL_REQUEST_QUERY_PULL_CONSISTENCY_OFFSET_VECTOR);
            // serializedCV will be empty on the first request as the consistency vector is
            // initialized at the server
            consistencyOffsetVector = serializedCV != null && !serializedCV.equals("")
                ? Optional.of(ConsistencyOffsetVector.deserialize(serializedCV))
                : Optional.of(ConsistencyOffsetVector.emptyVector());
        }
        switch (dataSourceType) {
            case KTABLE: {
                // First thing, set the metrics callback so that it gets called, even if we hit an error
                final AtomicReference<PullQueryResult> resultForMetrics = new AtomicReference<>(null);
                metricsCallbackHolder.setCallback(QueryMetricsUtil.initializePullTableMetricsCallback(
                    pullQueryMetrics, pullBandRateLimiter, resultForMetrics));
                final SessionConfig sessionConfig = SessionConfig.of(ksqlConfig, configOverrides);
                final ConfiguredStatement<Query> configured = ConfiguredStatement.of(statement, sessionConfig);
                return handleTablePullQuery(
                    analysis, serviceContext, configured, requestProperties, isInternalRequest,
                    pullBandRateLimiter, resultForMetrics, consistencyOffsetVector);
            }
            case KSTREAM: {
                // First thing, set the metrics callback so that it gets called, even if we hit an error
                final AtomicReference<StreamPullQueryMetadata> resultForMetrics = new AtomicReference<>(null);
                final AtomicReference<Decrementer> refDecrementer = new AtomicReference<>(null);
                metricsCallbackHolder.setCallback(QueryMetricsUtil.initializePullStreamMetricsCallback(
                    pullQueryMetrics, pullBandRateLimiter, analysis, resultForMetrics, refDecrementer));
                final SessionConfig sessionConfig = SessionConfig.of(ksqlConfig, configOverrides);
                final ConfiguredStatement<Query> configured = ConfiguredStatement.of(statement, sessionConfig);
                return handleStreamPullQuery(
                    analysis, serviceContext, configured, resultForMetrics, refDecrementer);
            }
            default:
                throw new KsqlStatementException(
                    "Unexpected data source type for pull query: " + dataSourceType,
                    statement.getStatementText());
        }
    } else if (ScalablePushUtil.isScalablePushQuery(
        statement.getStatement(), ksqlEngine, ksqlConfig, configOverrides)) {
        // First thing, set the metrics callback so that it gets called, even if we hit an error
        final AtomicReference<ScalablePushQueryMetadata> resultForMetrics = new AtomicReference<>(null);
        metricsCallbackHolder.setCallback(QueryMetricsUtil.initializeScalablePushMetricsCallback(
            scalablePushQueryMetrics, scalablePushBandRateLimiter, resultForMetrics));
        final ImmutableAnalysis analysis = ksqlEngine.analyzeQueryWithNoOutputTopic(
            statement.getStatement(), statement.getStatementText(), configOverrides);
        QueryLogger.info("Scalable push query created", statement.getStatementText());
        return handleScalablePushQuery(
            analysis, serviceContext, statement, configOverrides, requestProperties, context,
            scalablePushBandRateLimiter, resultForMetrics);
    } else {
        // log validated statements for query anonymization
        QueryLogger.info("Transient query created", statement.getStatementText());
        return handlePushQuery(serviceContext, statement, configOverrides, excludeTombstones);
    }
}
Also used : ConfiguredStatement(io.confluent.ksql.statement.ConfiguredStatement) ConsistencyOffsetVector(io.confluent.ksql.util.ConsistencyOffsetVector) ImmutableAnalysis(io.confluent.ksql.analyzer.ImmutableAnalysis) SessionConfig(io.confluent.ksql.config.SessionConfig) KsqlStatementException(io.confluent.ksql.util.KsqlStatementException) AtomicReference(java.util.concurrent.atomic.AtomicReference) DataSource(io.confluent.ksql.metastore.model.DataSource)
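
Both pull-query branches repeat the same two-line pattern: wrap the server KsqlConfig and the request's configOverrides in a SessionConfig, then bind that config to the prepared statement with ConfiguredStatement.of. Below is a minimal sketch of that pairing pulled out into a hypothetical helper; the helper name and the import paths are my assumptions and are not part of QueryExecutor.

import io.confluent.ksql.config.SessionConfig;
import io.confluent.ksql.parser.KsqlParser.PreparedStatement;
import io.confluent.ksql.parser.tree.Query;
import io.confluent.ksql.statement.ConfiguredStatement;
import io.confluent.ksql.util.KsqlConfig;
import java.util.Map;

final class ConfiguredStatementSketch {

    private ConfiguredStatementSketch() {
    }

    // Hypothetical helper mirroring the KTABLE and KSTREAM cases above: the session
    // config carries both the server config and the per-request overrides, so the
    // downstream handlers receive one object instead of two separate maps.
    static ConfiguredStatement<Query> configure(
        final PreparedStatement<Query> statement,
        final KsqlConfig ksqlConfig,
        final Map<String, Object> configOverrides
    ) {
        final SessionConfig sessionConfig = SessionConfig.of(ksqlConfig, configOverrides);
        return ConfiguredStatement.of(statement, sessionConfig);
    }
}

Keeping the overrides separate from the server config, rather than merging them up front, is presumably what lets downstream checks such as the validator in Example 11 decide which overrides to honor.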

Aggregations

SessionConfig (io.confluent.ksql.config.SessionConfig): 12
Test (org.junit.Test): 6
KsqlConfig (io.confluent.ksql.util.KsqlConfig): 4
ImmutableAnalysis (io.confluent.ksql.analyzer.ImmutableAnalysis): 3
DataSource (io.confluent.ksql.metastore.model.DataSource): 3
Query (io.confluent.ksql.parser.tree.Query): 3
KsqlStatementException (io.confluent.ksql.util.KsqlStatementException): 3
Throwables (com.google.common.base.Throwables): 2
ImmutableMap (com.google.common.collect.ImmutableMap): 2
ImmutableSet (com.google.common.collect.ImmutableSet): 2
Iterables (com.google.common.collect.Iterables): 2
SuppressFBWarnings (edu.umd.cs.findbugs.annotations.SuppressFBWarnings): 2
ExecuteResult (io.confluent.ksql.KsqlExecutionContext.ExecuteResult): 2
CreateTableCommand (io.confluent.ksql.execution.ddl.commands.CreateTableCommand): 2
DdlCommand (io.confluent.ksql.execution.ddl.commands.DdlCommand): 2
KsqlTopic (io.confluent.ksql.execution.ddl.commands.KsqlTopic): 2
UnqualifiedColumnReferenceExp (io.confluent.ksql.execution.expression.tree.UnqualifiedColumnReferenceExp): 2
ExecutionStep (io.confluent.ksql.execution.plan.ExecutionStep): 2
Formats (io.confluent.ksql.execution.plan.Formats): 2
PlanInfo (io.confluent.ksql.execution.plan.PlanInfo): 2