use of io.confluent.ksql.config.SessionConfig in project ksql by confluentinc.
the class EngineExecutor method executeScalablePushQuery.
ScalablePushQueryMetadata executeScalablePushQuery(
    final ImmutableAnalysis analysis,
    final ConfiguredStatement<Query> statement,
    final PushRouting pushRouting,
    final PushRoutingOptions pushRoutingOptions,
    final QueryPlannerOptions queryPlannerOptions,
    final Context context,
    final Optional<ScalablePushQueryMetrics> scalablePushQueryMetrics) {
  final SessionConfig sessionConfig = statement.getSessionConfig();
  // If we ever change how many hops a request can do, we'll need to update this for correct
  // metrics.
  final RoutingNodeType routingNodeType = pushRoutingOptions.getHasBeenForwarded()
      ? RoutingNodeType.REMOTE_NODE
      : RoutingNodeType.SOURCE_NODE;
  PushPhysicalPlan plan = null;
  try {
    final KsqlConfig ksqlConfig = sessionConfig.getConfig(false);
    final LogicalPlanNode logicalPlan =
        buildAndValidateLogicalPlan(statement, analysis, ksqlConfig, queryPlannerOptions, true);
    final PushPhysicalPlanCreator pushPhysicalPlanCreator = (offsetRange, catchupConsumerGroup) ->
        buildScalablePushPhysicalPlan(logicalPlan, analysis, context, offsetRange, catchupConsumerGroup);
    final Optional<PushOffsetRange> offsetRange =
        pushRoutingOptions.getContinuationToken().map(PushOffsetRange::deserialize);
    final Optional<String> catchupConsumerGroup = pushRoutingOptions.getCatchupConsumerGroup();
    final PushPhysicalPlanManager physicalPlanManager =
        new PushPhysicalPlanManager(pushPhysicalPlanCreator, catchupConsumerGroup, offsetRange);
    final PushPhysicalPlan physicalPlan = physicalPlanManager.getPhysicalPlan();
    plan = physicalPlan;
    final TransientQueryQueue transientQueryQueue = new TransientQueryQueue(analysis.getLimitClause());
    final PushQueryMetadata.ResultType resultType = physicalPlan.getScalablePushRegistry().isTable()
        ? physicalPlan.getScalablePushRegistry().isWindowed() ? ResultType.WINDOWED_TABLE : ResultType.TABLE
        : ResultType.STREAM;
    final PushQueryQueuePopulator populator = () -> pushRouting.handlePushQuery(
        serviceContext, physicalPlanManager, statement, pushRoutingOptions,
        physicalPlan.getOutputSchema(), transientQueryQueue, scalablePushQueryMetrics, offsetRange);
    final PushQueryPreparer preparer = () ->
        pushRouting.preparePushQuery(physicalPlanManager, statement, pushRoutingOptions);
    final ScalablePushQueryMetadata metadata = new ScalablePushQueryMetadata(
        physicalPlan.getOutputSchema(), physicalPlan.getQueryId(), transientQueryQueue,
        scalablePushQueryMetrics, resultType, populator, preparer,
        physicalPlan.getSourceType(), routingNodeType, physicalPlan::getRowsReadFromDataSource);
    return metadata;
  } catch (final Exception e) {
    if (plan == null) {
      scalablePushQueryMetrics.ifPresent(m -> m.recordErrorRateForNoResult(1));
    } else {
      final PushPhysicalPlan pushPhysicalPlan = plan;
      scalablePushQueryMetrics.ifPresent(metrics ->
          metrics.recordErrorRate(1, pushPhysicalPlan.getSourceType(), routingNodeType));
    }
    final String stmtLower = statement.getStatementText().toLowerCase(Locale.ROOT);
    final String messageLower = e.getMessage().toLowerCase(Locale.ROOT);
    final String stackLower = Throwables.getStackTraceAsString(e).toLowerCase(Locale.ROOT);
    // Avoid logging the error message when it echoes the statement text, since the contents
    // of the query may be sensitive.
    if (messageLower.contains(stmtLower) || stackLower.contains(stmtLower)) {
      final StackTraceElement loc = Iterables.getLast(Throwables.getCausalChain(e)).getStackTrace()[0];
      LOG.error("Failure to execute push query V2 {} {}, not logging the error message since it "
          + "contains the query string, which may contain sensitive information."
          + " If you see this LOG message, please submit a GitHub ticket and"
          + " we will scrub the statement text from the error at {}",
          pushRoutingOptions.debugString(), queryPlannerOptions.debugString(), loc);
    } else {
      LOG.error("Failure to execute push query V2. {} {}",
          pushRoutingOptions.debugString(), queryPlannerOptions.debugString(), e);
    }
    LOG.debug("Failed push query V2 text {}, {}", statement.getStatementText(), e);
    throw new KsqlStatementException(
        e.getMessage() == null ? "Server Error" + Arrays.toString(e.getStackTrace()) : e.getMessage(),
        statement.getStatementText(), e);
  }
}
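To make the SessionConfig calls above easier to follow, here is a minimal standalone sketch (the helper class is hypothetical, not part of ksql): SessionConfig.of pairs the engine-wide KsqlConfig with the per-request property overrides, and getConfig(boolean) decides whether those overrides are applied.

import io.confluent.ksql.config.SessionConfig;
import io.confluent.ksql.util.KsqlConfig;
import java.util.Collections;
import java.util.Map;

// Hypothetical helper illustrating the calls used by executeScalablePushQuery.
final class SessionConfigSketch {
  static KsqlConfig resolve(final Map<String, Object> requestOverrides) {
    // Pair the engine-wide config with the overrides sent on this request.
    final SessionConfig sessionConfig =
        SessionConfig.of(new KsqlConfig(Collections.emptyMap()), requestOverrides);
    // getConfig(false) returns the base config without folding in the overrides (cheap);
    // getConfig(true) copies the config with the overrides applied, which the pull-query
    // path below deliberately avoids for performance.
    return sessionConfig.getConfig(false);
  }
}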
use of io.confluent.ksql.config.SessionConfig in project ksql by confluentinc.
the class KafkaStreamsQueryValidatorTest method shouldLimitBufferCacheUsage.
@Test
public void shouldLimitBufferCacheUsage() {
  // Given:
  final SessionConfig config = configWithLimits(5, OptionalLong.of(30));

  // When/Then:
  assertThrows(KsqlException.class, () -> queryValidator.validateQuery(config, plan, queries));
}
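configWithLimits is a test helper that is not reproduced on this page. A rough reconstruction under stated assumptions: the per-query buffer cache presumably comes in through the session overrides and the global cap through the base KsqlConfig; the property names and the helper's exact shape below are guesses, not the test's actual code.

import io.confluent.ksql.config.SessionConfig;
import io.confluent.ksql.util.KsqlConfig;
import java.util.HashMap;
import java.util.Map;
import java.util.OptionalLong;
import org.apache.kafka.streams.StreamsConfig;

// Hypothetical reconstruction of configWithLimits; the total-cap property name is an assumption.
private SessionConfig configWithLimits(final long perQueryCacheBytes, final OptionalLong totalCap) {
  final Map<String, Object> overrides = new HashMap<>();
  overrides.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, perQueryCacheBytes);

  final Map<String, Object> systemProps = new HashMap<>();
  totalCap.ifPresent(cap ->
      systemProps.put("ksql.query.persistent.cache.max.bytes.buffering.total", cap));

  return SessionConfig.of(new KsqlConfig(systemProps), overrides);
}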
use of io.confluent.ksql.config.SessionConfig in project ksql by confluentinc.
the class KsqlEngineTest method shouldCheckStreamPullQueryEnabledFlag.
@Test
public void shouldCheckStreamPullQueryEnabledFlag() {
  @SuppressWarnings("unchecked")
  final ConfiguredStatement<Query> statementOrig = mock(ConfiguredStatement.class);
  when(statementOrig.getStatementText()).thenReturn("TEXT");

  final SessionConfig mockSessionConfig = mock(SessionConfig.class);
  final KsqlConfig mockConfig = mock(KsqlConfig.class);
  when(statementOrig.getSessionConfig()).thenReturn(mockSessionConfig);
  when(mockSessionConfig.getConfig(eq(true))).thenReturn(mockConfig);
  when(mockConfig.getBoolean(eq(KsqlConfig.KSQL_QUERY_STREAM_PULL_QUERY_ENABLED)))
      .thenReturn(false);

  final KsqlStatementException ksqlStatementException = assertThrows(
      KsqlStatementException.class,
      () -> ksqlEngine.createStreamPullQuery(null, null, statementOrig, false));

  assertThat(ksqlStatementException.getSqlStatement(), is("TEXT"));
  assertThat(ksqlStatementException.getRawMessage(), is(
      "Pull queries on streams are disabled."
          + " To create a push query on the stream, add EMIT CHANGES to the end."
          + " To enable pull queries on streams,"
          + " set the ksql.query.pull.stream.enabled config to 'true'."));
}
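The engine-side check this test drives is not shown on this page; the following is a hedged sketch of what createStreamPullQuery presumably does with the SessionConfig (the surrounding method context and the two-argument KsqlStatementException constructor are assumptions).

// Assumed shape of the flag check inside KsqlEngine.createStreamPullQuery.
final KsqlConfig config = statementOrig.getSessionConfig().getConfig(true);
if (!config.getBoolean(KsqlConfig.KSQL_QUERY_STREAM_PULL_QUERY_ENABLED)) {
  throw new KsqlStatementException(
      "Pull queries on streams are disabled."
          + " To create a push query on the stream, add EMIT CHANGES to the end."
          + " To enable pull queries on streams,"
          + " set the ksql.query.pull.stream.enabled config to 'true'.",
      statementOrig.getStatementText());
}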
use of io.confluent.ksql.config.SessionConfig in project ksql by confluentinc.
the class EngineExecutor method executeTablePullQuery.
/**
* Evaluates a pull query by first analyzing it, then building the logical plan and finally
* the physical plan. The execution is then done using the physical plan in a pipelined manner.
* @param statement The pull query
* @param routingOptions Configuration parameters used for HA routing
* @param pullQueryMetrics JMX metrics
* @return the rows that are the result of evaluating the pull query
*/
PullQueryResult executeTablePullQuery(
    final ImmutableAnalysis analysis,
    final ConfiguredStatement<Query> statement,
    final HARouting routing,
    final RoutingOptions routingOptions,
    final QueryPlannerOptions queryPlannerOptions,
    final Optional<PullQueryExecutorMetrics> pullQueryMetrics,
    final boolean startImmediately,
    final Optional<ConsistencyOffsetVector> consistencyOffsetVector) {
  if (!statement.getStatement().isPullQuery()) {
    throw new IllegalArgumentException("Executor can only handle pull queries");
  }
  final SessionConfig sessionConfig = statement.getSessionConfig();
  // If we ever change how many hops a request can do, we'll need to update this for correct
  // metrics.
  final RoutingNodeType routingNodeType = routingOptions.getIsSkipForwardRequest()
      ? RoutingNodeType.REMOTE_NODE
      : RoutingNodeType.SOURCE_NODE;
  PullPhysicalPlan plan = null;
  try {
    // Do not set sessionConfig.getConfig to true! The copying is inefficient and slows down pull
    // query performance significantly. Instead use QueryPlannerOptions which check overrides
    // deliberately.
    final KsqlConfig ksqlConfig = sessionConfig.getConfig(false);
    final LogicalPlanNode logicalPlan =
        buildAndValidateLogicalPlan(statement, analysis, ksqlConfig, queryPlannerOptions, false);
    // This is a cancel signal that is used to stop both local operations and requests
    final CompletableFuture<Void> shouldCancelRequests = new CompletableFuture<>();
    plan = buildPullPhysicalPlan(
        logicalPlan, analysis, queryPlannerOptions, shouldCancelRequests, consistencyOffsetVector);
    final PullPhysicalPlan physicalPlan = plan;
    final PullQueryQueue pullQueryQueue = new PullQueryQueue(analysis.getLimitClause());
    final PullQueryQueuePopulator populator = () -> routing.handlePullQuery(
        serviceContext, physicalPlan, statement, routingOptions, physicalPlan.getOutputSchema(),
        physicalPlan.getQueryId(), pullQueryQueue, shouldCancelRequests, consistencyOffsetVector);
    final PullQueryResult result = new PullQueryResult(
        physicalPlan.getOutputSchema(), populator, physicalPlan.getQueryId(), pullQueryQueue,
        pullQueryMetrics, physicalPlan.getSourceType(), physicalPlan.getPlanType(), routingNodeType,
        physicalPlan::getRowsReadFromDataSource, shouldCancelRequests, consistencyOffsetVector);
    if (startImmediately) {
      result.start();
    }
    return result;
  } catch (final Exception e) {
    if (plan == null) {
      pullQueryMetrics.ifPresent(m -> m.recordErrorRateForNoResult(1));
    } else {
      final PullPhysicalPlan physicalPlan = plan;
      pullQueryMetrics.ifPresent(metrics -> metrics.recordErrorRate(
          1, physicalPlan.getSourceType(), physicalPlan.getPlanType(), routingNodeType));
    }
    final String stmtLower = statement.getStatementText().toLowerCase(Locale.ROOT);
    final String messageLower = e.getMessage().toLowerCase(Locale.ROOT);
    final String stackLower = Throwables.getStackTraceAsString(e).toLowerCase(Locale.ROOT);
    // Avoid logging the error message when it echoes the statement text, since the contents
    // of the query may be sensitive.
    if (messageLower.contains(stmtLower) || stackLower.contains(stmtLower)) {
      final StackTraceElement loc = Iterables.getLast(Throwables.getCausalChain(e)).getStackTrace()[0];
      LOG.error("Failure to execute pull query {} {}, not logging the error message since it "
          + "contains the query string, which may contain sensitive information. If you "
          + "see this LOG message, please submit a GitHub ticket and we will scrub "
          + "the statement text from the error at {}",
          routingOptions.debugString(), queryPlannerOptions.debugString(), loc);
    } else {
      LOG.error("Failure to execute pull query. {} {}",
          routingOptions.debugString(), queryPlannerOptions.debugString(), e);
    }
    LOG.debug("Failed pull query text {}, {}", statement.getStatementText(), e);
    throw new KsqlStatementException(
        e.getMessage() == null ? "Server Error" + Arrays.toString(e.getStackTrace()) : e.getMessage(),
        statement.getStatementText(), e);
  }
}
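Both executors apply the same redaction test before logging. Extracted as a standalone illustrative helper (the helper name is made up; the real code inlines the check as shown above):

import com.google.common.base.Throwables;
import java.util.Locale;

// Illustrative helper: true when the error message or stack trace echoes the statement text,
// in which case the executors log a generic message instead of the raw exception.
static boolean errorEchoesStatement(final Exception e, final String statementText) {
  final String stmtLower = statementText.toLowerCase(Locale.ROOT);
  final String messageLower =
      e.getMessage() == null ? "" : e.getMessage().toLowerCase(Locale.ROOT);
  final String stackLower = Throwables.getStackTraceAsString(e).toLowerCase(Locale.ROOT);
  return messageLower.contains(stmtLower) || stackLower.contains(stmtLower);
}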
use of io.confluent.ksql.config.SessionConfig in project ksql by confluentinc.
the class InteractiveStatementExecutorTest method commandWithPlan.
private Command commandWithPlan(final String sql, final Map<String, String> originalProperties) {
  final PreparedStatement<?> prepared = statementParser.parseSingleStatement(sql);
  final SessionConfig sessionConfig = SessionConfig.of(ksqlConfig, emptyMap());
  final ConfiguredStatement<?> configured = ConfiguredStatement.of(prepared, sessionConfig);
  final KsqlPlan plan = ksqlEngine.plan(serviceContext, configured);
  return new Command(sql, Collections.emptyMap(), originalProperties, Optional.of(plan));
}
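A hedged usage example for the helper above; the SQL text and the empty property map are placeholders, not values taken from the test.

// Hypothetical call: build a Command carrying the plan for a DDL statement.
final Command command = commandWithPlan(
    "CREATE STREAM FOO (ID INT KEY, VAL STRING) WITH (KAFKA_TOPIC='foo', VALUE_FORMAT='JSON');",
    Collections.emptyMap());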