Use of io.confluent.ksql.GenericKey in project ksql by confluentinc.
The class ReplaceIntTest, method shouldReplaceSimpleFilter.
@Test
public void shouldReplaceSimpleFilter() {
  // Given:
  final String outputTopic = TopicTestUtil.uniqueTopicName("");
  ksqlContext.sql(String.format(
      "CREATE STREAM project WITH(kafka_topic='%s') AS SELECT k, col1 FROM source;",
      outputTopic));
  TEST_HARNESS.produceRows(
      inputTopic, new Provider("1", "A", 1), FormatFactory.KAFKA, FormatFactory.JSON);
  assertForSource("PROJECT", outputTopic,
      ImmutableMap.of(genericKey("1"), GenericRow.genericRow("A")));

  // When:
  ksqlContext.sql(String.format(
      "CREATE OR REPLACE STREAM project WITH(kafka_topic='%s') "
          + "AS SELECT k, col1 FROM source WHERE col1 <> 'A';",
      outputTopic));
  TEST_HARNESS.produceRows(
      inputTopic,
      new Provider("2", "A", 2), // this row should be filtered out by the new query
      FormatFactory.KAFKA,
      FormatFactory.JSON);
  TEST_HARNESS.produceRows(
      inputTopic, new Provider("3", "C", 3), FormatFactory.KAFKA, FormatFactory.JSON);

  // Then:
  final Map<GenericKey, GenericRow> expected = ImmutableMap.of(
      genericKey("1"), GenericRow.genericRow("A"), // left over from the original query
      genericKey("3"), GenericRow.genericRow("C")  // produced by the new query
  );
  assertForSource("PROJECT", outputTopic, expected);
}
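The map-based assertion above only works because GenericKey compares by value. A minimal sketch of the two construction paths that appear in these examples, the varargs genericKey factory and GenericKey.fromList (those factories are real ksql API; the demo class itself is hypothetical):

import static io.confluent.ksql.GenericKey.genericKey;

import io.confluent.ksql.GenericKey;
import io.confluent.ksql.GenericRow;
import java.util.List;

// Hypothetical demo class, not part of ksql.
public final class GenericKeyDemo {
  public static void main(final String[] args) {
    final GenericKey fromVarargs = genericKey("1");                // as in the expected map above
    final GenericKey fromList = GenericKey.fromList(List.of("1")); // as in RowUtil.createRow below
    // Value equality is what lets assertForSource match rows keyed by GenericKey:
    System.out.println(fromVarargs.equals(fromList)); // true
    System.out.println(GenericRow.genericRow("A"));   // a single-column row value
  }
}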
Use of io.confluent.ksql.GenericKey in project ksql by confluentinc.
The class DataGenProducer, method populateTopic.
@SuppressWarnings("InfiniteLoopStatement")
public void populateTopic(
    final Properties props,
    final Generator generator,
    final String kafkaTopicName,
    final String key,
    final Optional<String> timestampColumnName,
    final int messageCount,
    final boolean printRows,
    final Optional<RateLimiter> rateLimiter
) {
  final Schema avroSchema = generator.schema();
  if (avroSchema.getField(key) == null) {
    throw new IllegalArgumentException("Key field does not exist: " + key);
  }
  validateTimestampColumnType(timestampColumnName, avroSchema);

  final RowGenerator rowGenerator = new RowGenerator(generator, key, timestampColumnName);
  final Serializer<GenericKey> keySerializer = getKeySerializer(rowGenerator.schema());
  final Serializer<GenericRow> valueSerializer = getValueSerializer(rowGenerator.schema());
  final KafkaProducer<GenericKey, GenericRow> producer =
      new KafkaProducer<>(props, keySerializer, valueSerializer);

  if (messageCount != -1) {
    for (int i = 0; i < messageCount; i++) {
      produceOne(rowGenerator, producer, kafkaTopicName, printRows, rateLimiter);
    }
  } else {
    while (true) {
      produceOne(rowGenerator, producer, kafkaTopicName, printRows, rateLimiter);
    }
  }

  producer.flush();
  producer.close();
}
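Note the dual role of messageCount: any non-negative value produces exactly that many rows and then flushes and closes the producer, while -1 selects the while (true) branch (the reason for the @SuppressWarnings("InfiniteLoopStatement") annotation), in which case the flush/close calls are never reached and production continues until the process is killed. A distilled, self-contained sketch of that count-or-forever control flow (class and method names here are hypothetical, not ksql API):

public final class ProduceLoopSketch {
  static void produceAll(final int messageCount, final Runnable produceOne) {
    if (messageCount != -1) {
      for (int i = 0; i < messageCount; i++) {
        produceOne.run(); // bounded run: produce exactly messageCount rows
      }
    } else {
      while (true) {
        produceOne.run(); // unbounded run: loops until the JVM is terminated
      }
    }
  }

  public static void main(final String[] args) {
    produceAll(3, () -> System.out.println("produced one row"));
  }
}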
Use of io.confluent.ksql.GenericKey in project ksql by confluentinc.
The class TestDriverPipelineTest, method shouldHandleOneOutputIsInputToTwoTopologies.
@Test
public void shouldHandleOneOutputIsInputToTwoTopologies() {
  // Given:
  final TopologyTestDriver driver1 = mock(TopologyTestDriver.class);
  final TopologyTestDriver driver2 = mock(TopologyTestDriver.class);
  final TopologyTestDriver driver3 = mock(TopologyTestDriver.class);

  final TestInputTopic<GenericKey, GenericRow> inA = givenInput(driver1, "a");
  givenOutput(driver1, "b");
  final TestInputTopic<GenericKey, GenericRow> inB2 = givenInput(driver2, "b");
  givenOutput(driver2, "c");
  final TestInputTopic<GenericKey, GenericRow> inB3 = givenInput(driver3, "b");
  givenOutput(driver3, "d");

  givenPipe(inA, "b");
  givenPipe(inB2, "c");
  givenPipe(inB3, "d");

  pipeline.addDriver(driver1, ImmutableList.of(inf("a")), inf("b"));
  pipeline.addDriver(driver2, ImmutableList.of(inf("b")), inf("c"));
  pipeline.addDriver(driver3, ImmutableList.of(inf("b")), inf("d"));

  // When:
  pipeline.pipeInput("a", KEY, ROW1, 1);

  // Then:
  assertThat(pipeline.getAllRecordsForTopic("b"),
      is(ImmutableList.of(new TestRecord<>(KEY, ROW1, Instant.ofEpochMilli(1)))));
  assertThat(pipeline.getAllRecordsForTopic("c"),
      is(ImmutableList.of(new TestRecord<>(KEY, ROW1, Instant.ofEpochMilli(1)))));
  assertThat(pipeline.getAllRecordsForTopic("d"),
      is(ImmutableList.of(new TestRecord<>(KEY, ROW1, Instant.ofEpochMilli(1)))));
}
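The mocked givenInput/givenOutput/givenPipe helpers stand in for the plain kafka-streams-test-utils primitives that TestDriverPipeline stitches together: each TopologyTestDriver owns its own test topics, and the pipeline forwards records read from one driver's output topic into the input topics of every driver that lists it as a source, which is how topic b fans out to both driver2 and driver3 above. A hedged sketch of those underlying primitives with a single driver (the trivial topology and String serdes are assumptions for illustration):

import java.time.Instant;
import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.TestInputTopic;
import org.apache.kafka.streams.TestOutputTopic;
import org.apache.kafka.streams.TopologyTestDriver;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Produced;

public final class SingleDriverSketch {
  public static void main(final String[] args) {
    // A trivial topology that copies topic "a" to topic "b".
    final StreamsBuilder builder = new StreamsBuilder();
    builder.stream("a", Consumed.with(Serdes.String(), Serdes.String()))
        .to("b", Produced.with(Serdes.String(), Serdes.String()));

    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "pipeline-sketch");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");

    try (TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
      final TestInputTopic<String, String> in = driver.createInputTopic(
          "a", Serdes.String().serializer(), Serdes.String().serializer());
      final TestOutputTopic<String, String> out = driver.createOutputTopic(
          "b", Serdes.String().deserializer(), Serdes.String().deserializer());

      in.pipeInput("k", "v", Instant.ofEpochMilli(1));
      // TestDriverPipeline would now feed these records into the next drivers.
      System.out.println(out.readRecordsToList());
    }
  }
}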
Use of io.confluent.ksql.GenericKey in project ksql by confluentinc.
The class SchemaKGroupedTable, method aggregate.
@Override
public SchemaKTable<GenericKey> aggregate(
    final List<ColumnName> nonAggregateColumns,
    final List<FunctionCall> aggregations,
    final Optional<WindowExpression> windowExpression,
    final FormatInfo valueFormat,
    final Stacker contextStacker
) {
  if (windowExpression.isPresent()) {
    throw new KsqlException("Windowing not supported for table aggregations.");
  }

  final List<String> unsupportedFunctionNames = aggregations.stream()
      .map(call -> UdafUtil.resolveAggregateFunction(functionRegistry, call, schema, ksqlConfig))
      .filter(function -> !(function instanceof TableAggregationFunction))
      .map(KsqlAggregateFunction::name)
      .map(FunctionName::text)
      .distinct()
      .collect(Collectors.toList());

  if (!unsupportedFunctionNames.isEmpty()) {
    final String postfix = unsupportedFunctionNames.size() == 1 ? "" : "s";
    throw new KsqlException("The aggregation function" + postfix + " "
        + GrammaticalJoiner.and().join(unsupportedFunctionNames)
        + " cannot be applied to a table source, only to a stream source.");
  }

  final TableAggregate step = ExecutionStepFactory.tableAggregate(
      contextStacker,
      sourceTableStep,
      InternalFormats.of(keyFormat, valueFormat),
      nonAggregateColumns,
      aggregations);

  return new SchemaKTable<>(step, resolveSchema(step), keyFormat, ksqlConfig, functionRegistry);
}
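The TableAggregationFunction check exists because updating a table row means the old value must first be retracted, so only UDAFs that can undo a prior contribution (SUM, COUNT, and the like) are valid on a table source. A hedged illustration in the ksqlContext.sql style of the first example, assuming balances is a ksqlDB TABLE and that LATEST_BY_OFFSET does not implement TableAggregationFunction (it is a stream-only aggregate):

// Hypothetical table and column names; balances is assumed to be a TABLE.
// SUM can subtract a replaced value, so a table aggregation is accepted:
ksqlContext.sql(
    "CREATE TABLE totals AS SELECT id, SUM(amount) AS total FROM balances GROUP BY id;");

// LATEST_BY_OFFSET has no way to undo a prior value, so this should fail with:
// "The aggregation function LATEST_BY_OFFSET cannot be applied to a table
// source, only to a stream source."
ksqlContext.sql(
    "CREATE TABLE latest AS SELECT id, LATEST_BY_OFFSET(amount) AS last FROM balances GROUP BY id;");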
Use of io.confluent.ksql.GenericKey in project ksql by confluentinc.
The class RowUtil, method createRow.
/**
 * Takes a raw key object read from the topic and returns the appropriate row, depending on
 * whether it's windowed or not.
 *
 * @param key The key object
 * @param value The value
 * @param timestamp The timestamp of the row
 * @param windowed If the data is known to have a windowed key
 * @param logicalSchema The logical schema of the data
 * @return The appropriate row object containing the data
 */
@SuppressWarnings("unchecked")
public static QueryRow createRow(
    final Object key,
    final GenericRow value,
    final long timestamp,
    final boolean windowed,
    final LogicalSchema logicalSchema
) {
  if (!windowed) {
    final GenericKey keyCopy = GenericKey.fromList(
        key != null ? ((GenericKey) key).values() : Collections.emptyList());
    final GenericRow valueCopy = GenericRow.fromList(value.values());
    return QueryRowImpl.of(logicalSchema, keyCopy, Optional.empty(), valueCopy, timestamp);
  } else {
    final Windowed<GenericKey> windowedKey = (Windowed<GenericKey>) key;
    final GenericKey keyCopy = GenericKey.fromList(windowedKey.key().values());
    final GenericRow valueCopy = GenericRow.fromList(value.values());
    return QueryRowImpl.of(
        logicalSchema,
        keyCopy,
        Optional.of(Window.of(windowedKey.window().startTime(), windowedKey.window().endTime())),
        valueCopy,
        timestamp);
  }
}
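A hedged sketch exercising both branches of createRow; the LogicalSchema is built inline, TimeWindow is a Kafka Streams internal class that concrete windowed keys use, all row and column values are made up, and imports for RowUtil and QueryRow are elided because their packages depend on the ksql module layout:

import static io.confluent.ksql.GenericKey.genericKey;

import io.confluent.ksql.GenericKey;
import io.confluent.ksql.GenericRow;
import io.confluent.ksql.name.ColumnName;
import io.confluent.ksql.schema.ksql.LogicalSchema;
import io.confluent.ksql.schema.ksql.types.SqlTypes;
import org.apache.kafka.streams.kstream.Windowed;
import org.apache.kafka.streams.kstream.internals.TimeWindow;

public final class RowUtilSketch {
  public static void main(final String[] args) {
    final LogicalSchema schema = LogicalSchema.builder()
        .keyColumn(ColumnName.of("K"), SqlTypes.STRING)
        .valueColumn(ColumnName.of("V"), SqlTypes.STRING)
        .build();

    // Non-windowed branch: the raw key is a plain GenericKey (or null).
    final QueryRow plain = RowUtil.createRow(
        genericKey("k1"), GenericRow.genericRow("v1"), 1000L, false, schema);

    // Windowed branch: the raw key arrives as a Kafka Streams Windowed,
    // whose bounds createRow copies into ksql's own Window type.
    final Windowed<GenericKey> windowedKey =
        new Windowed<>(genericKey("k1"), new TimeWindow(0L, 1000L));
    final QueryRow windowedRow = RowUtil.createRow(
        windowedKey, GenericRow.genericRow("v1"), 1000L, true, schema);

    System.out.println(plain);
    System.out.println(windowedRow);
  }
}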