Use of io.confluent.ksql.execution.plan.KTableHolder in project ksql by confluentinc.
The class SchemaKTable, method selectKey.
@Override
public SchemaKTable<K> selectKey(
    final FormatInfo valueFormat,
    final List<Expression> keyExpression,
    final Optional<KeyFormat> forceInternalKeyFormat,
    final Stacker contextStacker,
    final boolean forceRepartition) {
  final boolean repartitionNeeded = repartitionNeeded(keyExpression);
  final boolean keyFormatChange = forceInternalKeyFormat.isPresent()
      && !forceInternalKeyFormat.get().equals(keyFormat);

  if (!forceRepartition && !keyFormatChange && !repartitionNeeded) {
    return this;
  }

  if (schema.key().size() > 1) {
    // let's throw a better error message in the case of multi-column tables
    throw new UnsupportedOperationException("Cannot repartition a TABLE source. If this is "
        + "a join, joins on tables with multiple columns is not yet supported.");
  }
  // Table repartitioning is only supported internally, to enable joins where the key
  // is semantically unchanged but may be serialized
  // differently (thus ensuring all keys are routed to the same partitions)
  if (repartitionNeeded) {
    throw new UnsupportedOperationException("Cannot repartition a TABLE source. "
        + "If this is a join, make sure that the criteria uses the TABLE's key column "
        + Iterables.getOnlyElement(schema.key()).name().text() + " instead of " + keyExpression);
  }

  if (keyFormat.isWindowed()) {
    final String errorMsg = "Implicit repartitioning of windowed sources is not supported. "
        + "See https://github.com/confluentinc/ksql/issues/4385.";
    final String additionalMsg = forceRepartition
        ? " As a result, ksqlDB does not support joins on windowed sources with "
            + "Schema-Registry-enabled key formats (AVRO, JSON_SR, PROTOBUF) at this time. "
            + "Please repartition your sources to use a different key format before performing "
            + "the join."
        : "";
    throw new KsqlException(errorMsg + additionalMsg);
  }

  final KeyFormat newKeyFormat = SerdeFeaturesFactory.sanitizeKeyFormat(
      forceInternalKeyFormat.orElse(keyFormat),
      toSqlTypes(keyExpression),
      false); // logical schema changes are not supported

  final ExecutionStep<KTableHolder<K>> step = ExecutionStepFactory.tableSelectKey(
      contextStacker, sourceTableStep, InternalFormats.of(newKeyFormat, valueFormat), keyExpression);

  return new SchemaKTable<>(step, resolveSchema(step), newKeyFormat, ksqlConfig, functionRegistry);
}
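The short-circuit guard at the top of selectKey is easy to test in isolation. Below is a minimal, runnable sketch of just that decision; SelectKeyGuardSketch and its KeyFormat enum are hypothetical stand-ins, not ksql's io.confluent.ksql.serde.KeyFormat.

import java.util.Optional;

public class SelectKeyGuardSketch {

  enum KeyFormat { KAFKA, AVRO }

  // Mirrors the first branch of selectKey: the call is a no-op only when no
  // repartition is forced, none is needed, and the key format is unchanged.
  static boolean isNoOp(final boolean forceRepartition, final boolean repartitionNeeded,
      final KeyFormat current, final Optional<KeyFormat> forceInternalKeyFormat) {
    final boolean keyFormatChange = forceInternalKeyFormat.isPresent()
        && !forceInternalKeyFormat.get().equals(current);
    return !forceRepartition && !keyFormatChange && !repartitionNeeded;
  }

  public static void main(final String[] args) {
    // Nothing changes: selectKey would return `this`.
    System.out.println(isNoOp(false, false, KeyFormat.KAFKA, Optional.empty())); // true
    // A forced internal key format that differs from the current one trips the guard.
    System.out.println(isNoOp(false, false, KeyFormat.KAFKA, Optional.of(KeyFormat.AVRO))); // false
  }
}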
Use of io.confluent.ksql.execution.plan.KTableHolder in project ksql by confluentinc.
The class SchemaKSourceFactory, method buildWindowedTable.
private static SchemaKTable<?> buildWindowedTable(
    final PlanBuildContext buildContext, final DataSource dataSource, final Stacker contextStacker) {
  final WindowInfo windowInfo = dataSource.getKsqlTopic().getKeyFormat().getWindowInfo()
      .orElseThrow(IllegalArgumentException::new);
  final int pseudoColumnVersionToUse = determinePseudoColumnVersionToUse(buildContext);
  final SourceStep<KTableHolder<Windowed<GenericKey>>> step = ExecutionStepFactory.tableSourceWindowed(
      contextStacker, dataSource.getSchema(), dataSource.getKafkaTopicName(),
      Formats.from(dataSource.getKsqlTopic()), windowInfo, dataSource.getTimestampColumn(),
      pseudoColumnVersionToUse);
  return schemaKTable(
      buildContext, resolveSchema(buildContext, step, dataSource),
      dataSource.getKsqlTopic().getKeyFormat(), step);
}
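The orElseThrow call encodes a hard precondition: buildWindowedTable must only be reached for sources whose key format carries window metadata. A minimal sketch of that contract, using hypothetical stand-in WindowInfo and KeyFormat records (Java 16+), not ksql's classes:

import java.util.Optional;

public class WindowedSourceSketch {

  record WindowInfo(String type, long sizeMs) {}
  record KeyFormat(Optional<WindowInfo> windowInfo) {}

  // Mirrors getWindowInfo().orElseThrow(IllegalArgumentException::new): calling the
  // windowed builder on an unwindowed key format is treated as a programming error.
  static WindowInfo requireWindowInfo(final KeyFormat keyFormat) {
    return keyFormat.windowInfo().orElseThrow(IllegalArgumentException::new);
  }

  public static void main(final String[] args) {
    final KeyFormat windowed = new KeyFormat(Optional.of(new WindowInfo("TUMBLING", 30_000)));
    System.out.println(requireWindowInfo(windowed)); // WindowInfo[type=TUMBLING, sizeMs=30000]

    final KeyFormat unwindowed = new KeyFormat(Optional.empty());
    try {
      requireWindowInfo(unwindowed);
    } catch (final IllegalArgumentException e) {
      System.out.println("unwindowed key format rejected");
    }
  }
}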
Use of io.confluent.ksql.execution.plan.KTableHolder in project ksql by confluentinc.
The class QueryBuilder, method buildTransientQuery.
@SuppressWarnings("ParameterNumber")
TransientQueryMetadata buildTransientQuery(
    final String statementText, final QueryId queryId, final Set<SourceName> sources,
    final ExecutionStep<?> physicalPlan, final String planSummary, final LogicalSchema schema,
    final OptionalInt limit, final Optional<WindowInfo> windowInfo, final boolean excludeTombstones,
    final QueryMetadata.Listener listener, final StreamsBuilder streamsBuilder,
    final Optional<ImmutableMap<TopicPartition, Long>> endOffsets,
    final MetricCollectors metricCollectors) {
  final KsqlConfig ksqlConfig = config.getConfig(true);
  final String applicationId = QueryApplicationId.build(ksqlConfig, false, queryId);
  final RuntimeBuildContext runtimeBuildContext = buildContext(applicationId, queryId, streamsBuilder);
  final Map<String, Object> streamsProperties = buildStreamsProperties(
      applicationId, Optional.of(queryId), metricCollectors, config.getConfig(true), processingLogContext);
  final Object buildResult = buildQueryImplementation(physicalPlan, runtimeBuildContext);
  final TransientQueryQueue queue = buildTransientQueryQueue(buildResult, limit, excludeTombstones, endOffsets);
  final Topology topology = streamsBuilder.build(PropertiesUtil.asProperties(streamsProperties));
  final TransientQueryMetadata.ResultType resultType = buildResult instanceof KTableHolder
      ? (windowInfo.isPresent() ? ResultType.WINDOWED_TABLE : ResultType.TABLE)
      : ResultType.STREAM;
  return new TransientQueryMetadata(
      statementText, schema, sources, planSummary, queue, queryId, applicationId, topology,
      kafkaStreamsBuilder, streamsProperties, config.getOverrides(),
      ksqlConfig.getLong(KSQL_SHUTDOWN_TIMEOUT_MS_CONFIG),
      ksqlConfig.getInt(KsqlConfig.KSQL_QUERY_ERROR_MAX_QUEUE_SIZE), resultType,
      ksqlConfig.getLong(KsqlConfig.KSQL_QUERY_RETRY_BACKOFF_INITIAL_MS),
      ksqlConfig.getLong(KsqlConfig.KSQL_QUERY_RETRY_BACKOFF_MAX_MS), listener);
}
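The nested conditional that picks the result type reads best as a three-way decision: KTableHolder results are tables, windowed if the query carries window metadata, and everything else is a stream. A minimal sketch, with a hypothetical stand-in ResultType enum and a boolean in place of the `instanceof KTableHolder` check:

import java.util.Optional;

public class ResultTypeSketch {

  enum ResultType { STREAM, TABLE, WINDOWED_TABLE }
  record WindowInfo(long sizeMs) {}

  static ResultType resultType(final boolean isTableHolder, final Optional<WindowInfo> windowInfo) {
    if (!isTableHolder) {
      return ResultType.STREAM; // anything that is not a KTableHolder yields a stream
    }
    // A table result is windowed only when the query carries window metadata.
    return windowInfo.isPresent() ? ResultType.WINDOWED_TABLE : ResultType.TABLE;
  }

  public static void main(final String[] args) {
    System.out.println(resultType(false, Optional.empty()));                   // STREAM
    System.out.println(resultType(true, Optional.empty()));                    // TABLE
    System.out.println(resultType(true, Optional.of(new WindowInfo(10_000)))); // WINDOWED_TABLE
  }
}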