Use of io.confluent.ksql.util.KsqlStatementException in project ksql by confluentinc.
The class SchemaRegisterInjector, method sanityCheck.
/**
 * Validates that a user-supplied schema id is usable with the statement's declared format:
 * the format must support Schema Registry registration, and the schema fetched for the id
 * must have the same schema type as the format's translator.
 *
 * @param schemaAndId the schema fetched from Schema Registry together with its id
 * @param formatInfo the declared key/value format of the statement
 * @param topic the topic the statement targets
 * @param config the current KSQL config
 * @param statementText original statement text, attached to any thrown exception
 * @param isKey whether this checks the key schema (selects the property name used in errors)
 * @throws KsqlStatementException if the format cannot register schemas, or the fetched
 *         schema's type does not match the format
 */
private static void sanityCheck(final SchemaAndId schemaAndId, final FormatInfo formatInfo, final String topic, final KsqlConfig config, final String statementText, final boolean isKey) {
  final String schemaIdConfigName = isKey
      ? CommonCreateConfigs.KEY_SCHEMA_ID
      : CommonCreateConfigs.VALUE_SCHEMA_ID;
  final Format declaredFormat = FormatFactory.of(formatInfo);
  if (!canRegister(declaredFormat, config, topic)) {
    throw new KsqlStatementException(
        schemaIdConfigName + " is provided but format " + declaredFormat.name()
            + " doesn't support registering in Schema Registry",
        statementText);
  }
  final SchemaTranslator schemaTranslator =
      declaredFormat.getSchemaTranslator(formatInfo.getProperties());
  // The fetched schema's type must match the translator for the declared format.
  final String fetchedSchemaType = schemaAndId.rawSchema.schemaType();
  if (!schemaTranslator.name().equals(fetchedSchemaType)) {
    throw new KsqlStatementException(
        String.format(
            "Format and fetched schema type using %s %d are different. Format: [%s], "
                + "Fetched schema type: [%s].",
            schemaIdConfigName, schemaAndId.id, declaredFormat.name(), fetchedSchemaType),
        statementText);
  }
}
Use of io.confluent.ksql.util.KsqlStatementException in project ksql by confluentinc.
The class SchemaRegisterInjector, method registerSchema.
/**
 * Derives a schema from the statement's columns and registers it under the given subject.
 * Does nothing when the format has no Schema Registry support, or when the subject already
 * exists and re-registration was not requested.
 *
 * @param schema the logical columns to translate into a physical schema
 * @param topic the topic the schema belongs to
 * @param formatInfo the declared key/value format
 * @param serdeFeatures serde features applied when building the persistence schema
 * @param config the current KSQL config
 * @param statementText original statement text, attached to any thrown exception
 * @param registerIfSchemaExists if true, register even when the subject already exists
 * @param subject the Schema Registry subject to register under
 * @param isKey whether this is the key schema
 * @throws KsqlStatementException if translation or registration fails
 */
private void registerSchema(final List<? extends SimpleColumn> schema, final String topic, final FormatInfo formatInfo, final SerdeFeatures serdeFeatures, final KsqlConfig config, final String statementText, final boolean registerIfSchemaExists, final String subject, final boolean isKey) {
  final Format format = FormatFactory.of(formatInfo);
  if (!canRegister(format, config, topic)) {
    // Format has no Schema Registry integration; nothing to register.
    return;
  }
  final SchemaRegistryClient client = serviceContext.getSchemaRegistryClient();
  // Only probe for the subject when we would skip registration on a hit, so the
  // existence check is not performed needlessly when re-registration is forced.
  if (!registerIfSchemaExists && SchemaRegistryUtil.subjectExists(client, subject)) {
    return;
  }
  try {
    final ParsedSchema schemaToRegister = format
        .getSchemaTranslator(formatInfo.getProperties())
        .toParsedSchema(PersistenceSchema.from(schema, serdeFeatures));
    SchemaRegistryUtil.registerSchema(client, schemaToRegister, topic, subject, isKey);
  } catch (final KsqlException e) {
    throw new KsqlStatementException("Could not register schema for topic: " + e.getMessage(), statementText, e);
  }
}
Use of io.confluent.ksql.util.KsqlStatementException in project ksql by confluentinc.
The class EngineExecutor, method executeTablePullQuery.
/**
 * Evaluates a pull query by first analyzing it, then building the logical plan and finally
 * the physical plan. The execution is then done using the physical plan in a pipelined manner.
 * @param analysis The analysis of the pull query
 * @param statement The pull query
 * @param routing The object that routes the query to local and remote hosts
 * @param routingOptions Configuration parameters used for HA routing
 * @param queryPlannerOptions Configuration parameters used while planning the query
 * @param pullQueryMetrics JMX metrics
 * @param startImmediately Whether to start populating the result before returning
 * @param consistencyOffsetVector Offset vector used for consistency, if enabled
 * @return the rows that are the result of evaluating the pull query
 */
PullQueryResult executeTablePullQuery(final ImmutableAnalysis analysis, final ConfiguredStatement<Query> statement, final HARouting routing, final RoutingOptions routingOptions, final QueryPlannerOptions queryPlannerOptions, final Optional<PullQueryExecutorMetrics> pullQueryMetrics, final boolean startImmediately, final Optional<ConsistencyOffsetVector> consistencyOffsetVector) {
  if (!statement.getStatement().isPullQuery()) {
    throw new IllegalArgumentException("Executor can only handle pull queries");
  }
  final SessionConfig sessionConfig = statement.getSessionConfig();
  // If we ever change how many hops a request can do, we'll need to update this for correct
  // metrics.
  final RoutingNodeType routingNodeType = routingOptions.getIsSkipForwardRequest() ? RoutingNodeType.REMOTE_NODE : RoutingNodeType.SOURCE_NODE;
  PullPhysicalPlan plan = null;
  try {
    // Do not set sessionConfig.getConfig to true! The copying is inefficient and slows down pull
    // query performance significantly. Instead use QueryPlannerOptions which check overrides
    // deliberately.
    final KsqlConfig ksqlConfig = sessionConfig.getConfig(false);
    final LogicalPlanNode logicalPlan = buildAndValidateLogicalPlan(statement, analysis, ksqlConfig, queryPlannerOptions, false);
    // This is a cancel signal that is used to stop both local operations and requests
    final CompletableFuture<Void> shouldCancelRequests = new CompletableFuture<>();
    plan = buildPullPhysicalPlan(logicalPlan, analysis, queryPlannerOptions, shouldCancelRequests, consistencyOffsetVector);
    final PullPhysicalPlan physicalPlan = plan;
    final PullQueryQueue pullQueryQueue = new PullQueryQueue(analysis.getLimitClause());
    final PullQueryQueuePopulator populator = () -> routing.handlePullQuery(serviceContext, physicalPlan, statement, routingOptions, physicalPlan.getOutputSchema(), physicalPlan.getQueryId(), pullQueryQueue, shouldCancelRequests, consistencyOffsetVector);
    final PullQueryResult result = new PullQueryResult(physicalPlan.getOutputSchema(), populator, physicalPlan.getQueryId(), pullQueryQueue, pullQueryMetrics, physicalPlan.getSourceType(), physicalPlan.getPlanType(), routingNodeType, physicalPlan::getRowsReadFromDataSource, shouldCancelRequests, consistencyOffsetVector);
    if (startImmediately) {
      result.start();
    }
    return result;
  } catch (final Exception e) {
    if (plan == null) {
      pullQueryMetrics.ifPresent(m -> m.recordErrorRateForNoResult(1));
    } else {
      final PullPhysicalPlan physicalPlan = plan;
      pullQueryMetrics.ifPresent(metrics -> metrics.recordErrorRate(1, physicalPlan.getSourceType(), physicalPlan.getPlanType(), routingNodeType));
    }
    final String stmtLower = statement.getStatementText().toLowerCase(Locale.ROOT);
    // BUG FIX: e.getMessage() can be null (e.g. for a bare NullPointerException) — the
    // fallback at the bottom of this method already accounts for that. Guard here too, so
    // classifying the failure doesn't itself throw an NPE and mask the original error.
    final String messageLower = e.getMessage() == null ? "" : e.getMessage().toLowerCase(Locale.ROOT);
    final String stackLower = Throwables.getStackTraceAsString(e).toLowerCase(Locale.ROOT);
    // the contents of the query
    if (messageLower.contains(stmtLower) || stackLower.contains(stmtLower)) {
      final StackTraceElement loc = Iterables.getLast(Throwables.getCausalChain(e)).getStackTrace()[0];
      LOG.error("Failure to execute pull query {} {}, not logging the error message since it " + "contains the query string, which may contain sensitive information. If you " + "see this LOG message, please submit a GitHub ticket and we will scrub " + "the statement text from the error at {}", routingOptions.debugString(), queryPlannerOptions.debugString(), loc);
    } else {
      LOG.error("Failure to execute pull query. {} {}", routingOptions.debugString(), queryPlannerOptions.debugString(), e);
    }
    LOG.debug("Failed pull query text {}, {}", statement.getStatementText(), e);
    throw new KsqlStatementException(e.getMessage() == null ? "Server Error" + Arrays.toString(e.getStackTrace()) : e.getMessage(), statement.getStatementText(), e);
  }
}
Use of io.confluent.ksql.util.KsqlStatementException in project ksql by confluentinc.
The class QueryEngine, method buildQueryLogicalPlan.
/**
 * Builds the logical plan for a query: analyzes it (attaching the statement text to any
 * analysis failure) and then plans it as a persistent query.
 *
 * @param query the parsed query
 * @param sink the sink the query writes to, if any
 * @param metaStore the metastore used to resolve sources
 * @param config the current KSQL config
 * @param rowpartitionRowoffsetEnabled whether ROWPARTITION/ROWOFFSET pseudo columns are enabled
 * @param statementText original statement text, attached to any thrown exception
 * @return the output node of the logical plan
 * @throws KsqlStatementException if analysis of the query fails
 */
static OutputNode buildQueryLogicalPlan(final Query query, final Optional<Sink> sink, final MetaStore metaStore, final KsqlConfig config, final boolean rowpartitionRowoffsetEnabled, final String statementText) {
  final String outputPrefix = config.getString(KsqlConfig.KSQL_OUTPUT_TOPIC_NAME_PREFIX_CONFIG);
  // Use the primitive type: the boxed Boolean local was a needless autobox and is
  // immediately unboxed when passed to the analyzer.
  final boolean pullLimitClauseEnabled = config.getBoolean(KsqlConfig.KSQL_QUERY_PULL_LIMIT_CLAUSE_ENABLED);
  final QueryAnalyzer queryAnalyzer = new QueryAnalyzer(metaStore, outputPrefix, rowpartitionRowoffsetEnabled, pullLimitClauseEnabled);
  final Analysis analysis;
  try {
    analysis = queryAnalyzer.analyze(query, sink);
  } catch (final KsqlException e) {
    // Re-throw with the statement text so the error can be reported against the statement.
    throw new KsqlStatementException(e.getMessage(), statementText, e);
  }
  return new LogicalPlanner(config, analysis, metaStore).buildPersistentLogicalPlan();
}
Use of io.confluent.ksql.util.KsqlStatementException in project ksql by confluentinc.
The class KsqlEngine, method analyzeQueryWithNoOutputTopic.
/**
 * Analyzes a query that is known not to produce an output topic, such as a pull query.
 *
 * @param query the parsed query
 * @param queryText original query text, attached to any thrown exception
 * @param configOverrides session config overrides consulted for feature flags
 * @return the rewritten analysis of the query
 * @throws KsqlStatementException if analysis of the query fails
 */
public ImmutableAnalysis analyzeQueryWithNoOutputTopic(final Query query, final String queryText, final Map<String, Object> configOverrides) {
  final KsqlConfig ksqlConfig = this.primaryContext.getKsqlConfig();
  final boolean rowpartitionRowoffsetEnabled =
      getRowpartitionRowoffsetEnabled(ksqlConfig, configOverrides);
  final boolean pullLimitClauseEnabled =
      ksqlConfig.getBoolean(KsqlConfig.KSQL_QUERY_PULL_LIMIT_CLAUSE_ENABLED);
  // Empty output prefix: these queries never create an output topic.
  final QueryAnalyzer analyzer =
      new QueryAnalyzer(getMetaStore(), "", rowpartitionRowoffsetEnabled, pullLimitClauseEnabled);
  final Analysis rawAnalysis;
  try {
    rawAnalysis = analyzer.analyze(query, Optional.empty());
  } catch (final KsqlException e) {
    throw new KsqlStatementException(e.getMessage(), queryText, e);
  }
  return new RewrittenAnalysis(rawAnalysis, new QueryExecutionUtil.ColumnReferenceRewriter()::process);
}
Aggregations