use of io.confluent.ksql.util.KsqlStatementException in project ksql by confluentinc.
the class RequestValidator method validate.
/**
* @return the number of persistent queries that were validated
*
* @throws KsqlStatementException if the statement cannot be validated
*/
@SuppressWarnings("unchecked")
private <T extends Statement> int validate(
    final ServiceContext serviceContext,
    final ConfiguredStatement<T> configured,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext executionContext,
    final Injector injector
) throws KsqlStatementException {
  final Statement statement = configured.getStatement();
  final Class<? extends Statement> statementClass = statement.getClass();
  final StatementValidator<T> customValidator =
      (StatementValidator<T>) customValidators.get(statementClass);

  if (customValidator != null) {
    // Statement types with bespoke validation logic are dispatched directly.
    customValidator.validate(configured, sessionProperties, executionContext, serviceContext);
  } else if (KsqlEngine.isExecutableStatement(statement) || statement instanceof TerminateQuery) {
    // Everything else that is executable is validated via the distributed
    // statement validator, after running the statement through the injector.
    final ConfiguredStatement<?> statementInjected = injector.inject(configured);
    distributedStatementValidator.create(statementInjected, serviceContext, executionContext);
  } else {
    throw new KsqlStatementException(
        "Do not know how to validate statement of type: " + statementClass
            + " Known types: " + customValidators.keySet(),
        configured.getStatementText());
  }

  return (statement instanceof CreateAsSelect || statement instanceof InsertInto) ? 1 : 0;
}
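The method declares KsqlStatementException so that callers can surface both the error and the offending SQL. A minimal, hedged sketch of catching it at a call site follows; the try/catch, the LOG name, and the use of getSqlStatement() to read back the SQL passed as the second constructor argument are illustrative assumptions, not taken from the snippet above.

  // Hedged sketch: handling the exception thrown by validate() above.
  try {
    validate(serviceContext, configured, sessionProperties, executionContext, injector);
  } catch (final KsqlStatementException e) {
    // getSqlStatement() is assumed to return the SQL text the exception was built with.
    LOG.error("Validation failed for statement: {}", e.getSqlStatement(), e);
    throw e;
  }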
use of io.confluent.ksql.util.KsqlStatementException in project ksql by confluentinc.
the class QueryExecutor method handleQuery.
private QueryMetadataHolder handleQuery(
    final ServiceContext serviceContext,
    final PreparedStatement<Query> statement,
    final Optional<Boolean> isInternalRequest,
    final MetricsCallbackHolder metricsCallbackHolder,
    final Map<String, Object> configOverrides,
    final Map<String, Object> requestProperties,
    final Context context,
    final boolean excludeTombstones
) {
  if (statement.getStatement().isPullQuery()) {
    final ImmutableAnalysis analysis = ksqlEngine.analyzeQueryWithNoOutputTopic(
        statement.getStatement(), statement.getStatementText(), configOverrides);
    final DataSource dataSource = analysis.getFrom().getDataSource();
    final DataSource.DataSourceType dataSourceType = dataSource.getDataSourceType();

    if (!ksqlConfig.getBoolean(KsqlConfig.KSQL_PULL_QUERIES_ENABLE_CONFIG)) {
      throw new KsqlStatementException(
          "Pull queries are disabled."
              + PullQueryValidator.PULL_QUERY_SYNTAX_HELP
              + System.lineSeparator()
              + "Please set " + KsqlConfig.KSQL_PULL_QUERIES_ENABLE_CONFIG
              + "=true to enable this feature." + System.lineSeparator(),
          statement.getStatementText());
    }

    Optional<ConsistencyOffsetVector> consistencyOffsetVector = Optional.empty();
    if (ksqlConfig.getBoolean(KsqlConfig.KSQL_QUERY_PULL_CONSISTENCY_OFFSET_VECTOR_ENABLED)
        && requestProperties.containsKey(
            KsqlRequestConfig.KSQL_REQUEST_QUERY_PULL_CONSISTENCY_OFFSET_VECTOR)) {
      final String serializedCV = (String) requestProperties.get(
          KsqlRequestConfig.KSQL_REQUEST_QUERY_PULL_CONSISTENCY_OFFSET_VECTOR);
      // serializedCV will be empty on the first request, as the consistency vector
      // is initialized at the server.
      consistencyOffsetVector = serializedCV != null && !serializedCV.isEmpty()
          ? Optional.of(ConsistencyOffsetVector.deserialize(serializedCV))
          : Optional.of(ConsistencyOffsetVector.emptyVector());
    }

    switch (dataSourceType) {
      case KTABLE: {
        // Set the metrics callback first, so that it gets called even if we hit an error.
        final AtomicReference<PullQueryResult> resultForMetrics = new AtomicReference<>(null);
        metricsCallbackHolder.setCallback(QueryMetricsUtil.initializePullTableMetricsCallback(
            pullQueryMetrics, pullBandRateLimiter, resultForMetrics));

        final SessionConfig sessionConfig = SessionConfig.of(ksqlConfig, configOverrides);
        final ConfiguredStatement<Query> configured = ConfiguredStatement.of(statement, sessionConfig);
        return handleTablePullQuery(analysis, serviceContext, configured, requestProperties,
            isInternalRequest, pullBandRateLimiter, resultForMetrics, consistencyOffsetVector);
      }
      case KSTREAM: {
        // Set the metrics callback first, so that it gets called even if we hit an error.
        final AtomicReference<StreamPullQueryMetadata> resultForMetrics = new AtomicReference<>(null);
        final AtomicReference<Decrementer> refDecrementer = new AtomicReference<>(null);
        metricsCallbackHolder.setCallback(QueryMetricsUtil.initializePullStreamMetricsCallback(
            pullQueryMetrics, pullBandRateLimiter, analysis, resultForMetrics, refDecrementer));

        final SessionConfig sessionConfig = SessionConfig.of(ksqlConfig, configOverrides);
        final ConfiguredStatement<Query> configured = ConfiguredStatement.of(statement, sessionConfig);
        return handleStreamPullQuery(analysis, serviceContext, configured, resultForMetrics,
            refDecrementer);
      }
      default:
        throw new KsqlStatementException(
            "Unexpected data source type for pull query: " + dataSourceType,
            statement.getStatementText());
    }
  } else if (ScalablePushUtil.isScalablePushQuery(
      statement.getStatement(), ksqlEngine, ksqlConfig, configOverrides)) {
    // Set the metrics callback first, so that it gets called even if we hit an error.
    final AtomicReference<ScalablePushQueryMetadata> resultForMetrics = new AtomicReference<>(null);
    metricsCallbackHolder.setCallback(QueryMetricsUtil.initializeScalablePushMetricsCallback(
        scalablePushQueryMetrics, scalablePushBandRateLimiter, resultForMetrics));

    final ImmutableAnalysis analysis = ksqlEngine.analyzeQueryWithNoOutputTopic(
        statement.getStatement(), statement.getStatementText(), configOverrides);
    QueryLogger.info("Scalable push query created", statement.getStatementText());
    return handleScalablePushQuery(analysis, serviceContext, statement, configOverrides,
        requestProperties, context, scalablePushBandRateLimiter, resultForMetrics);
  } else {
    // Log validated statements for query anonymization.
    QueryLogger.info("Transient query created", statement.getStatementText());
    return handlePushQuery(serviceContext, statement, configOverrides, excludeTombstones);
  }
}
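The consistency-vector branch above reduces to a small decision: an empty or null serialized vector means this is the first request, so the server starts from an empty vector; anything else is deserialized. As a sketch, assuming only the ConsistencyOffsetVector.deserialize and emptyVector factories used in the snippet, the same logic as a standalone helper:

  import java.util.Optional;

  // Hedged sketch of the consistency-vector handling in handleQuery above.
  static Optional<ConsistencyOffsetVector> parseConsistencyVector(final String serializedCV) {
    if (serializedCV == null || serializedCV.isEmpty()) {
      // First request: the server initializes the vector.
      return Optional.of(ConsistencyOffsetVector.emptyVector());
    }
    return Optional.of(ConsistencyOffsetVector.deserialize(serializedCV));
  }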
use of io.confluent.ksql.util.KsqlStatementException in project ksql by confluentinc.
the class QueryEndpoint method createQueryPublisher.
public QueryPublisher createQueryPublisher(
    final String sql,
    final Map<String, Object> properties,
    final Map<String, Object> sessionVariables,
    final Map<String, Object> requestProperties,
    final Context context,
    final WorkerExecutor workerExecutor,
    final ServiceContext serviceContext,
    final MetricsCallbackHolder metricsCallbackHolder,
    final Optional<Boolean> isInternalRequest
) {
  // Must be run on a worker thread, as all of this is slow.
  VertxUtils.checkIsWorker();

  final ConfiguredStatement<Query> statement = createStatement(sql, properties, sessionVariables);
  final QueryMetadataHolder queryMetadataHolder = queryExecutor.handleStatement(
      serviceContext, properties, requestProperties, statement.getPreparedStatement(),
      isInternalRequest, metricsCallbackHolder, context, false);

  if (queryMetadataHolder.getPullQueryResult().isPresent()) {
    final PullQueryResult result = queryMetadataHolder.getPullQueryResult().get();
    final BlockingQueryPublisher publisher = new BlockingQueryPublisher(context, workerExecutor);
    publisher.setQueryHandle(
        new KsqlPullQueryHandle(result, pullQueryMetrics,
            statement.getPreparedStatement().getStatementText()),
        true, false);
    // Start from the worker thread so that errors can bubble up and we can return a proper
    // response code, rather than waiting until after the header has been written, when all
    // we can do is write an error message.
    publisher.startFromWorkerThread();
    return publisher;
  } else if (queryMetadataHolder.getPushQueryMetadata().isPresent()) {
    final PushQueryMetadata metadata = queryMetadataHolder.getPushQueryMetadata().get();
    final BlockingQueryPublisher publisher = new BlockingQueryPublisher(context, workerExecutor);
    publisher.setQueryHandle(
        new KsqlQueryHandle(metadata), false,
        queryMetadataHolder.getScalablePushQueryMetadata().isPresent());
    return publisher;
  } else {
    throw new KsqlStatementException("Unexpected metadata for query", statement.getStatementText());
  }
}
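Because of the VertxUtils.checkIsWorker() guard, this method can only be called from a Vert.x worker thread. A hedged sketch of invoking it via executeBlocking follows; the vertx and endpoint references and the surrounding wiring are placeholders, only the createQueryPublisher signature comes from the snippet above.

  // Hedged sketch: executeBlocking runs the body on a worker-pool thread,
  // satisfying the checkIsWorker() precondition above.
  vertx.executeBlocking(
      promise -> promise.complete(
          endpoint.createQueryPublisher(
              sql, properties, sessionVariables, requestProperties, context,
              workerExecutor, serviceContext, metricsCallbackHolder, Optional.empty())),
      result -> { /* hand the QueryPublisher to the transport layer */ });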
use of io.confluent.ksql.util.KsqlStatementException in project ksql by confluentinc.
the class KsqlResourceTest method shouldFailWhenTopicInferenceFailsDuringExecute.
@Test
public void shouldFailWhenTopicInferenceFailsDuringExecute() {
  // Given:
  givenSource(DataSourceType.KSTREAM, "ORDERS1", "ORDERS1", SOME_SCHEMA);
  when(topicInjector.inject(any()))
      .thenThrow(new KsqlStatementException("boom", "some-sql"));

  // When:
  final KsqlRestException e = assertThrows(
      KsqlRestException.class,
      () -> makeRequest("CREATE STREAM orders2 AS SELECT * FROM orders1;"));

  // Then:
  assertThat(e, exceptionStatusCode(is(BAD_REQUEST.code())));
  assertThat(e, exceptionErrorMessage(errorCode(is(Errors.ERROR_CODE_BAD_STATEMENT))));
  assertThat(e, exceptionStatementErrorMessage(errorMessage(is("boom"))));
}
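The test relies on KsqlStatementException carrying the message and the offending SQL as separate fields: "boom" surfaces as the statement error message, while "some-sql" travels alongside it. A small sketch of that pairing; the getSqlStatement() accessor name is an assumption about the exception's API, not something shown in the test.

  // Hedged sketch, not part of the test above: the two-argument constructor
  // pairs a message with the offending SQL, which the REST layer surfaces as
  // an ERROR_CODE_BAD_STATEMENT response.
  final KsqlStatementException e = new KsqlStatementException("boom", "some-sql");
  assertThat(e.getSqlStatement(), is("some-sql")); // assumed accessor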
use of io.confluent.ksql.util.KsqlStatementException in project ksql by confluentinc.
the class DefaultSchemaInjector method forCreateAsStatement.
private Optional<ConfiguredStatement<CreateAsSelect>> forCreateAsStatement(
    final ConfiguredStatement<CreateAsSelect> statement) {
  final CreateAsSelect csStmt = statement.getStatement();
  final CreateSourceAsProperties properties = csStmt.getProperties();

  // No schema injection is needed if neither a key schema id nor a value schema id is set.
  if (!properties.getKeySchemaId().isPresent() && !properties.getValueSchemaId().isPresent()) {
    return Optional.empty();
  }

  final CreateSourceCommand createSourceCommand;
  try {
    final ServiceContext sandboxServiceContext = SandboxedServiceContext.create(serviceContext);
    createSourceCommand = (CreateSourceCommand) executionContext
        .createSandbox(sandboxServiceContext)
        .plan(sandboxServiceContext, statement)
        .getDdlCommand()
        .get();
  } catch (final Exception e) {
    throw new KsqlStatementException(
        "Could not determine output schema for query due to error: " + e.getMessage(),
        statement.getStatementText(), e);
  }

  final Optional<SchemaAndId> keySchema = getCreateAsKeySchema(statement, createSourceCommand);
  final Optional<SchemaAndId> valueSchema = getCreateAsValueSchema(statement, createSourceCommand);
  final CreateAsSelect withSchema = addSchemaFieldsCas(statement, keySchema, valueSchema);
  final PreparedStatement<CreateAsSelect> prepared = buildPreparedStatement(withSchema);

  final ImmutableMap.Builder<String, Object> overrideBuilder = ImmutableMap.builder();
  // Only store the raw schema if a schema id was provided by the user.
  if (properties.getKeySchemaId().isPresent()) {
    keySchema.map(schemaAndId ->
        overrideBuilder.put(CommonCreateConfigs.KEY_SCHEMA_ID, schemaAndId));
  }
  if (properties.getValueSchemaId().isPresent()) {
    valueSchema.map(schemaAndId ->
        overrideBuilder.put(CommonCreateConfigs.VALUE_SCHEMA_ID, schemaAndId));
  }

  final ConfiguredStatement<CreateAsSelect> configured = ConfiguredStatement.of(
      prepared, statement.getSessionConfig().copyWith(overrideBuilder.build()));
  return Optional.of(configured);
}
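This path only fires when the user supplies KEY_SCHEMA_ID or VALUE_SCHEMA_ID in the WITH clause; otherwise the method returns Optional.empty() and no schema is injected. A hedged sketch of a statement that would take the injection path; the stream names and the schema ids 1 and 2 are invented for illustration.

  // Hedged sketch: a CREATE ... AS SELECT that triggers the schema-id
  // injection path above. Names and ids are placeholders.
  final String sql =
      "CREATE STREAM ENRICHED_ORDERS"
          + " WITH (KEY_SCHEMA_ID=1, VALUE_SCHEMA_ID=2)"
          + " AS SELECT * FROM ORDERS;";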