
Example 6 with KTableHolder

Use of io.confluent.ksql.execution.plan.KTableHolder in project ksql by confluentinc.

The class SchemaKTable, method selectKey.

@Override
public SchemaKTable<K> selectKey(final FormatInfo valueFormat, final List<Expression> keyExpression, final Optional<KeyFormat> forceInternalKeyFormat, final Stacker contextStacker, final boolean forceRepartition) {
    final boolean repartitionNeeded = repartitionNeeded(keyExpression);
    final boolean keyFormatChange = forceInternalKeyFormat.isPresent() && !forceInternalKeyFormat.get().equals(keyFormat);
    if (!forceRepartition && !keyFormatChange && !repartitionNeeded) {
        return this;
    }
    if (schema.key().size() > 1) {
        // let's throw a better error message in the case of multi-column tables
        throw new UnsupportedOperationException("Cannot repartition a TABLE source. If this is " + "a join, joins on tables with multiple columns is not yet supported.");
    }
    // only an internal key-format change is allowed past this point: the key stays
    // semantically the same but may be serialized differently (thus ensuring all keys
    // are routed to the same partitions)
    if (repartitionNeeded) {
        throw new UnsupportedOperationException("Cannot repartition a TABLE source. " + "If this is a join, make sure that the criteria uses the TABLE's key column " + Iterables.getOnlyElement(schema.key()).name().text() + " instead of " + keyExpression);
    }
    if (keyFormat.isWindowed()) {
        final String errorMsg = "Implicit repartitioning of windowed sources is not supported. " + "See https://github.com/confluentinc/ksql/issues/4385.";
        final String additionalMsg = forceRepartition ? " As a result, ksqlDB does not support joins on windowed sources with " + "Schema-Registry-enabled key formats (AVRO, JSON_SR, PROTOBUF) at this time. " + "Please repartition your sources to use a different key format before performing " + "the join." : "";
        throw new KsqlException(errorMsg + additionalMsg);
    }
    final KeyFormat newKeyFormat = SerdeFeaturesFactory.sanitizeKeyFormat(
        forceInternalKeyFormat.orElse(keyFormat),
        toSqlTypes(keyExpression),
        // logical schema changes are not supported
        false);
    final ExecutionStep<KTableHolder<K>> step = ExecutionStepFactory.tableSelectKey(contextStacker, sourceTableStep, InternalFormats.of(newKeyFormat, valueFormat), keyExpression);
    return new SchemaKTable<>(step, resolveSchema(step), newKeyFormat, ksqlConfig, functionRegistry);
}
Also used: KTableHolder(io.confluent.ksql.execution.plan.KTableHolder), KsqlException(io.confluent.ksql.util.KsqlException), KeyFormat(io.confluent.ksql.serde.KeyFormat)
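
The interesting part of this example is the early-return guard: a new TableSelectKey step is emitted only when the caller forces a repartition, the forced internal key format differs from the current one, or the key expression does not already match the table's key. The following is a minimal, self-contained sketch of that decision; SelectKeyGuardSketch, needsSelectKeyStep, and the String stand-ins for KeyFormat are illustrative and not part of the ksql API.

import java.util.Optional;

public final class SelectKeyGuardSketch {

    /**
     * Mirrors the short-circuit at the top of SchemaKTable#selectKey:
     * no new execution step is created unless at least one of the three
     * conditions holds. Purely illustrative; not ksql code.
     */
    static boolean needsSelectKeyStep(
        final boolean forceRepartition,
        final Optional<String> forcedKeyFormat, // stand-in for Optional<KeyFormat>
        final String currentKeyFormat,          // stand-in for the table's KeyFormat
        final boolean repartitionNeeded         // result of repartitionNeeded(keyExpression)
    ) {
        final boolean keyFormatChange = forcedKeyFormat.isPresent()
            && !forcedKeyFormat.get().equals(currentKeyFormat);
        return forceRepartition || keyFormatChange || repartitionNeeded;
    }

    public static void main(final String[] args) {
        // Same key format, no forced repartition, key already matches: selectKey returns `this` unchanged.
        System.out.println(needsSelectKeyStep(false, Optional.of("KAFKA"), "KAFKA", false)); // false
        // Forced internal key format differs from the current one: a TableSelectKey step is required.
        System.out.println(needsSelectKeyStep(false, Optional.of("AVRO"), "KAFKA", false));  // true
    }
}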

Example 7 with KTableHolder

Use of io.confluent.ksql.execution.plan.KTableHolder in project ksql by confluentinc.

The class SchemaKSourceFactory, method buildWindowedTable.

private static SchemaKTable<?> buildWindowedTable(final PlanBuildContext buildContext, final DataSource dataSource, final Stacker contextStacker) {
    final WindowInfo windowInfo = dataSource.getKsqlTopic().getKeyFormat().getWindowInfo().orElseThrow(IllegalArgumentException::new);
    final int pseudoColumnVersionToUse = determinePseudoColumnVersionToUse(buildContext);
    final SourceStep<KTableHolder<Windowed<GenericKey>>> step = ExecutionStepFactory.tableSourceWindowed(contextStacker, dataSource.getSchema(), dataSource.getKafkaTopicName(), Formats.from(dataSource.getKsqlTopic()), windowInfo, dataSource.getTimestampColumn(), pseudoColumnVersionToUse);
    return schemaKTable(buildContext, resolveSchema(buildContext, step, dataSource), dataSource.getKsqlTopic().getKeyFormat(), step);
}
Also used: KTableHolder(io.confluent.ksql.execution.plan.KTableHolder), GenericKey(io.confluent.ksql.GenericKey), WindowInfo(io.confluent.ksql.serde.WindowInfo)
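
buildWindowedTable assumes the source's key format carries window metadata, which is why it unwraps the Optional with orElseThrow(IllegalArgumentException::new) rather than handling the empty case. Below is a small sketch of that fail-fast pattern using plain java.util.Optional; the getWindowInfo helper and its String payload are hypothetical simplifications of dataSource.getKsqlTopic().getKeyFormat().getWindowInfo().

import java.util.Optional;

public final class WindowInfoUnwrapSketch {

    // Stand-in for KeyFormat#getWindowInfo(): windowed sources carry window
    // metadata, non-windowed sources do not. Illustrative only.
    static Optional<String> getWindowInfo(final boolean windowed) {
        return windowed ? Optional.of("TUMBLING 30 SECONDS") : Optional.empty();
    }

    public static void main(final String[] args) {
        // For a windowed source the value is present and unwrapping succeeds.
        final String windowInfo = getWindowInfo(true)
            .orElseThrow(IllegalArgumentException::new);
        System.out.println(windowInfo);

        // Reaching buildWindowedTable with a non-windowed source would fail fast here,
        // surfacing a planner bug instead of silently building the wrong source step.
        try {
            getWindowInfo(false).orElseThrow(IllegalArgumentException::new);
        } catch (final IllegalArgumentException e) {
            System.out.println("non-windowed source rejected");
        }
    }
}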

Example 8 with KTableHolder

Use of io.confluent.ksql.execution.plan.KTableHolder in project ksql by confluentinc.

The class QueryBuilder, method buildTransientQuery.

@SuppressWarnings("ParameterNumber")
TransientQueryMetadata buildTransientQuery(final String statementText, final QueryId queryId, final Set<SourceName> sources, final ExecutionStep<?> physicalPlan, final String planSummary, final LogicalSchema schema, final OptionalInt limit, final Optional<WindowInfo> windowInfo, final boolean excludeTombstones, final QueryMetadata.Listener listener, final StreamsBuilder streamsBuilder, final Optional<ImmutableMap<TopicPartition, Long>> endOffsets, final MetricCollectors metricCollectors) {
    final KsqlConfig ksqlConfig = config.getConfig(true);
    final String applicationId = QueryApplicationId.build(ksqlConfig, false, queryId);
    final RuntimeBuildContext runtimeBuildContext = buildContext(applicationId, queryId, streamsBuilder);
    final Map<String, Object> streamsProperties = buildStreamsProperties(applicationId, Optional.of(queryId), metricCollectors, config.getConfig(true), processingLogContext);
    final Object buildResult = buildQueryImplementation(physicalPlan, runtimeBuildContext);
    final TransientQueryQueue queue = buildTransientQueryQueue(buildResult, limit, excludeTombstones, endOffsets);
    final Topology topology = streamsBuilder.build(PropertiesUtil.asProperties(streamsProperties));
    final TransientQueryMetadata.ResultType resultType = buildResult instanceof KTableHolder
        ? (windowInfo.isPresent() ? ResultType.WINDOWED_TABLE : ResultType.TABLE)
        : ResultType.STREAM;
    return new TransientQueryMetadata(statementText, schema, sources, planSummary, queue, queryId, applicationId, topology, kafkaStreamsBuilder, streamsProperties, config.getOverrides(), ksqlConfig.getLong(KSQL_SHUTDOWN_TIMEOUT_MS_CONFIG), ksqlConfig.getInt(KsqlConfig.KSQL_QUERY_ERROR_MAX_QUEUE_SIZE), resultType, ksqlConfig.getLong(KsqlConfig.KSQL_QUERY_RETRY_BACKOFF_INITIAL_MS), ksqlConfig.getLong(KsqlConfig.KSQL_QUERY_RETRY_BACKOFF_MAX_MS), listener);
}
Also used: RuntimeBuildContext(io.confluent.ksql.execution.runtime.RuntimeBuildContext), KTableHolder(io.confluent.ksql.execution.plan.KTableHolder), KsqlConfig(io.confluent.ksql.util.KsqlConfig), Topology(org.apache.kafka.streams.Topology), NamedTopology(org.apache.kafka.streams.processor.internals.namedtopology.NamedTopology), TransientQueryMetadata(io.confluent.ksql.util.TransientQueryMetadata)
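
The only place KTableHolder appears in this example is the resultType expression: the transient query is classified as a windowed table or a table when the physical plan produces a KTableHolder, and as a stream otherwise. Here is a hedged sketch of that classification as a standalone method; ResultKind, classify, and the boolean/Optional parameters are illustrative stand-ins for the ksql types, not the real API.

import java.util.Optional;

public final class ResultTypeSketch {

    // Stand-in for TransientQueryMetadata.ResultType.
    enum ResultKind { WINDOWED_TABLE, TABLE, STREAM }

    /**
     * Mirrors the nested conditional in buildTransientQuery: table-shaped results
     * (a KTableHolder build result) are split by whether window info is present;
     * everything else is a stream.
     */
    static ResultKind classify(final boolean isTableHolder, final Optional<String> windowInfo) {
        if (!isTableHolder) {
            return ResultKind.STREAM;
        }
        return windowInfo.isPresent() ? ResultKind.WINDOWED_TABLE : ResultKind.TABLE;
    }

    public static void main(final String[] args) {
        System.out.println(classify(true, Optional.of("HOPPING 5 MINUTES"))); // WINDOWED_TABLE
        System.out.println(classify(true, Optional.empty()));                 // TABLE
        System.out.println(classify(false, Optional.empty()));                // STREAM
    }
}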

Aggregations

KTableHolder (io.confluent.ksql.execution.plan.KTableHolder): 8
RuntimeBuildContext (io.confluent.ksql.execution.runtime.RuntimeBuildContext): 5
GenericRow (io.confluent.ksql.GenericRow): 4
QueryContext (io.confluent.ksql.execution.context.QueryContext): 4
LogicalSchema (io.confluent.ksql.schema.ksql.LogicalSchema): 4
GenericKey (io.confluent.ksql.GenericKey): 3
Stacker (io.confluent.ksql.execution.context.QueryContext.Stacker): 3
Formats (io.confluent.ksql.execution.plan.Formats): 3
KsTransformer (io.confluent.ksql.execution.streams.transform.KsTransformer): 3
ProcessingLogger (io.confluent.ksql.logging.processing.ProcessingLogger): 3
PhysicalSchema (io.confluent.ksql.schema.ksql.PhysicalSchema): 3
KsqlConfig (io.confluent.ksql.util.KsqlConfig): 3
Serde (org.apache.kafka.common.serialization.Serde): 3
KTable (org.apache.kafka.streams.kstream.KTable): 3
KeyFormat (io.confluent.ksql.serde.KeyFormat): 2
WindowInfo (io.confluent.ksql.serde.WindowInfo): 2
Optional (java.util.Optional): 2
Bytes (org.apache.kafka.common.utils.Bytes): 2
Materialized (org.apache.kafka.streams.kstream.Materialized): 2
Named (org.apache.kafka.streams.kstream.Named): 2