use of io.confluent.ksql.schema.ksql.PhysicalSchema in project ksql by confluentinc.
the class JoinIntTest method assertExpectedResults.
private void assertExpectedResults(
    final Map<GenericKey, GenericRow> expectedResults,
    final DataSource outputSource,
    final String inputTopic,
    final Format inputKeyFormat,
    final Format inputValueFormat,
    final long timeoutMs
) {
  final PhysicalSchema schema = PhysicalSchema.from(
      outputSource.getSchema(),
      outputSource.getKsqlTopic().getKeyFormat().getFeatures(),
      outputSource.getKsqlTopic().getValueFormat().getFeatures());

  final Map<GenericKey, GenericRow> results = new HashMap<>();

  assertThatEventually("failed to complete join correctly", () -> {
    results.putAll(TEST_HARNESS.verifyAvailableUniqueRows(
        outputSource.getKafkaTopicName(),
        1,
        FormatFactory.of(outputSource.getKsqlTopic().getKeyFormat().getFormatInfo()),
        FormatFactory.of(outputSource.getKsqlTopic().getValueFormat().getFormatInfo()),
        schema));

    final boolean success = results.equals(expectedResults);
    if (!success) {
      try {
        // The join may not be triggered the first time around, due to the
        // order in which the consumer pulls the records back, so we publish
        // again to make the stream trigger the join.
        TEST_HARNESS.produceRows(inputTopic, ORDER_DATA_PROVIDER, inputKeyFormat, inputValueFormat, () -> now);
      } catch (final Exception e) {
        throw new RuntimeException(e);
      }
    }
    return success;
  }, is(true), timeoutMs, TimeUnit.MILLISECONDS);

  assertThat(results, is(expectedResults));
}
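Every snippet on this page funnels through the same three-argument factory. As a minimal sketch of that pattern (the column names and types below are illustrative, not taken from the test, which derives its schema from the output source instead), a PhysicalSchema pairs a LogicalSchema with the serde features of the key and value formats:

import io.confluent.ksql.name.ColumnName;
import io.confluent.ksql.schema.ksql.LogicalSchema;
import io.confluent.ksql.schema.ksql.PhysicalSchema;
import io.confluent.ksql.schema.ksql.types.SqlTypes;
import io.confluent.ksql.serde.SerdeFeatures;

// Illustrative logical schema built by hand.
final LogicalSchema logical = LogicalSchema.builder()
    .keyColumn(ColumnName.of("ID"), SqlTypes.BIGINT)
    .valueColumn(ColumnName.of("NAME"), SqlTypes.STRING)
    .build();

// Empty feature sets here; real sources carry features (such as
// single-value wrapping) that the serdes must honour.
final PhysicalSchema physical = PhysicalSchema.from(logical, SerdeFeatures.of(), SerdeFeatures.of());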
use of io.confluent.ksql.schema.ksql.PhysicalSchema in project ksql by confluentinc.
the class ReplaceWithSharedRuntimesIntTest method assertForSource.
private void assertForSource(
    final String sourceName,
    final String topic,
    final Map<GenericKey, GenericRow> expected
) {
  final DataSource source = ksqlContext.getMetaStore().getSource(SourceName.of(sourceName));

  final PhysicalSchema resultSchema = PhysicalSchema.from(
      source.getSchema(),
      source.getKsqlTopic().getKeyFormat().getFeatures(),
      source.getKsqlTopic().getValueFormat().getFeatures());

  assertThat(
      TEST_HARNESS.verifyAvailableUniqueRows(topic, expected.size(), FormatFactory.KAFKA, FormatFactory.JSON, resultSchema),
      is(expected));
}
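Note that this helper hard-codes FormatFactory.KAFKA and FormatFactory.JSON, whereas JoinIntTest above resolves the formats from the source's declared format info. A hedged sketch of the two ways a Format handle can be obtained (the FormatInfo literal is illustrative):

import io.confluent.ksql.serde.Format;
import io.confluent.ksql.serde.FormatFactory;
import io.confluent.ksql.serde.FormatInfo;

// Fixed constant, as in assertForSource above:
final Format valueFormat = FormatFactory.JSON;

// Resolved from a source's declared format info, as in JoinIntTest:
final Format resolved = FormatFactory.of(FormatInfo.of("JSON"));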
use of io.confluent.ksql.schema.ksql.PhysicalSchema in project ksql by confluentinc.
the class RuntimeBuildContextTest method shouldFailWhenTackingSerdeOnSchemaMismatch.
@Test
public void shouldFailWhenTackingSerdeOnSchemaMismatch() {
  // Given:
  runtimeBuildContext.buildKeySerde(FORMAT_INFO, PHYSICAL_SCHEMA, queryContext);

  final PhysicalSchema differentSchema = PhysicalSchema.from(
      LogicalSchema.builder()
          .valueColumn(ColumnName.of("f0"), SqlTypes.BOOLEAN)
          .build(),
      SerdeFeatures.of(),
      SerdeFeatures.of());

  // When:
  assertThrows(
      IllegalStateException.class,
      () -> runtimeBuildContext.buildValueSerde(FORMAT_INFO, differentSchema, queryContext));
}
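For contrast, a sketch of the happy path, reusing the same fixtures (runtimeBuildContext, FORMAT_INFO, PHYSICAL_SCHEMA, queryContext) from the test class: building both serdes against the same PhysicalSchema should satisfy the context's schema-consistency check.

// Key and value serdes built against the same PhysicalSchema; no
// IllegalStateException is expected here.
runtimeBuildContext.buildKeySerde(FORMAT_INFO, PHYSICAL_SCHEMA, queryContext);
runtimeBuildContext.buildValueSerde(FORMAT_INFO, PHYSICAL_SCHEMA, queryContext);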
use of io.confluent.ksql.schema.ksql.PhysicalSchema in project ksql by confluentinc.
the class MaterializationUtil method buildMaterialized.
static <K> Materialized<K, GenericRow, KeyValueStore<Bytes, byte[]>> buildMaterialized(
    final ExecutionStep<?> step,
    final LogicalSchema aggregateSchema,
    final Formats formats,
    final RuntimeBuildContext buildContext,
    final MaterializedFactory materializedFactory,
    final ExecutionKeyFactory<K> executionKeyFactory
) {
  final PhysicalSchema physicalAggregationSchema = PhysicalSchema.from(
      aggregateSchema,
      formats.getKeyFeatures(),
      formats.getValueFeatures());

  final QueryContext queryContext = MaterializationUtil.materializeContext(step);

  final Serde<K> keySerde = buildKeySerde(formats, physicalAggregationSchema, queryContext, executionKeyFactory);
  final Serde<GenericRow> valueSerde = buildValueSerde(formats, buildContext, physicalAggregationSchema, queryContext);

  return materializedFactory.create(keySerde, valueSerde, StreamsUtil.buildOpName(queryContext));
}
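As a hedged sketch of how a Materialized built this way is typically consumed in a Kafka Streams aggregation (the grouped stream and the pass-through aggregator are placeholders, not ksql's actual wiring):

// groupedStream is a hypothetical KGroupedStream<K, GenericRow>.
final KTable<K, GenericRow> aggregated = groupedStream.aggregate(
    GenericRow::new,        // initializer: empty row
    (key, row, agg) -> agg, // placeholder aggregator
    buildMaterialized(step, aggregateSchema, formats, buildContext, materializedFactory, executionKeyFactory));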
use of io.confluent.ksql.schema.ksql.PhysicalSchema in project ksql by confluentinc.
the class SinkBuilder method build.
public static <K> void build(
    final LogicalSchema schema,
    final Formats formats,
    final Optional<TimestampColumn> timestampColumn,
    final String topicName,
    final KStream<K, GenericRow> stream,
    final ExecutionKeyFactory<K> executionKeyFactory,
    final QueryContext queryContext,
    final RuntimeBuildContext buildContext
) {
  final PhysicalSchema physicalSchema = PhysicalSchema.from(schema, formats.getKeyFeatures(), formats.getValueFeatures());

  final Serde<K> keySerde = executionKeyFactory.buildKeySerde(formats.getKeyFormat(), physicalSchema, queryContext);
  final Serde<GenericRow> valueSerde = buildContext.buildValueSerde(formats.getValueFormat(), physicalSchema, queryContext);

  final Optional<TransformTimestamp<K>> tsTransformer = timestampTransformer(buildContext, queryContext, schema, timestampColumn);

  final KStream<K, GenericRow> transformed = tsTransformer
      .map(t -> stream.transform(t, Named.as(TIMESTAMP_TRANSFORM_NAME + StreamsUtil.buildOpName(queryContext))))
      .orElse(stream);

  transformed.to(topicName, Produced.with(keySerde, valueSerde));
}
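The tsTransformer wiring above is a compact Optional idiom worth calling out: apply an optional processing step only when it is present, otherwise pass the stream through untouched. In isolation (the transformer supplier and operation name below are placeholders):

// maybeTransformer is a hypothetical Optional<TransformerSupplier<...>>.
final KStream<K, GenericRow> result = maybeTransformer
    .map(t -> stream.transform(t, Named.as("my-op-name"))) // step present
    .orElse(stream);                                       // step absent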