Use of io.confluent.ksql.GenericKey in project ksql by confluentinc.
The class KsMaterializationFunctionalTest, method shouldQueryMaterializedTableForTumblingWindowed.
@Test
public void shouldQueryMaterializedTableForTumblingWindowed() {
  // Given:
  final PersistentQueryMetadata query = executeQuery("CREATE TABLE " + output + " AS"
      + " SELECT USERID, COUNT(*) AS COUNT FROM " + USER_STREAM
      + " WINDOW TUMBLING (SIZE " + WINDOW_SIZE.getSeconds() + " SECONDS)"
      + " GROUP BY USERID;");
  final LogicalSchema schema = schema("COUNT", SqlTypes.BIGINT);
  final Map<Windowed<String>, GenericRow> rows = waitForUniqueUserRows(TIME_WINDOWED_DESERIALIZER, schema);
  // When:
  final Materialization materialization = query.getMaterialization(queryId, contextStacker).get();
  // Then:
  assertThat(materialization.windowType(), is(Optional.of(WindowType.TUMBLING)));
  final MaterializedWindowedTable table = materialization.windowed();
  rows.forEach((k, v) -> {
    final Window w = Window.of(k.window().startTime(), k.window().endTime());
    final GenericKey key = genericKey(k.key());
    final List<WindowedRow> resultAtWindowStart = withRetry(() -> Lists.newArrayList(
        table.get(key, PARTITION, Range.singleton(w.start()), Range.all())));
    assertThat("at exact window start", resultAtWindowStart, hasSize(1));
    assertThat(resultAtWindowStart.get(0).schema(), is(schema));
    assertThat(resultAtWindowStart.get(0).window(), is(Optional.of(w)));
    assertThat(resultAtWindowStart.get(0).key(), is(key));
    assertThat(resultAtWindowStart.get(0).value(), is(v));
    final List<WindowedRow> resultAtWindowEnd = withRetry(() -> Lists.newArrayList(
        table.get(key, PARTITION, Range.all(), Range.singleton(w.end()))));
    assertThat("at exact window end", resultAtWindowEnd, hasSize(1));
    final List<WindowedRow> resultFromRange = withRetry(() -> Lists.newArrayList(
        table.get(key, PARTITION, Range.closed(w.start().minusMillis(1), w.start().plusMillis(1)), Range.all())));
    assertThat("range including window start", resultFromRange, is(resultAtWindowStart));
    final List<WindowedRow> resultPast = withRetry(() -> Lists.newArrayList(
        table.get(key, PARTITION, Range.closed(w.start().plusMillis(1), w.start().plusMillis(1)), Range.all())));
    assertThat("past start", resultPast, is(empty()));
  });
}
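The lookup key into the windowed materialized table pairs the GenericKey built from the GROUP BY columns with the window's time bounds. Below is a minimal sketch of that pairing, assuming only the GenericKey.genericKey factory used above; the window arithmetic is the standard epoch-aligned tumbling-window calculation, not a ksql API.

import io.confluent.ksql.GenericKey;
import java.time.Instant;

public final class TumblingKeySketch {
  public static void main(String[] args) {
    // Single key column, matching the GROUP BY USERID clause in the test.
    final GenericKey key = GenericKey.genericKey("user_0");

    // Tumbling windows are epoch-aligned: an event at time t falls into the
    // window starting at t - (t % size) and ending one window size later.
    final long sizeMs = 30_000L;
    final long eventMs = 65_000L;
    final Instant start = Instant.ofEpochMilli(eventMs - (eventMs % sizeMs));
    final Instant end = start.plusMillis(sizeMs);

    System.out.println(key + " in window [" + start + ", " + end + ")");
  }
}

This is why the test expects exactly one row at Range.singleton(w.start()) and none at a range shifted one millisecond past it: each key has at most one row per window start.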
Use of io.confluent.ksql.GenericKey in project ksql by confluentinc.
The class JsonFormatTest, method produceInitData.
private static void produceInitData() {
  TEST_HARNESS.produceRows(inputTopic, ORDER_DATA_PROVIDER, KAFKA, JSON);
  final LogicalSchema messageSchema = LogicalSchema.builder()
      .keyColumn(SystemColumns.ROWKEY_NAME, SqlTypes.STRING)
      .valueColumn(ColumnName.of("MESSAGE"), SqlTypes.STRING)
      .build();
  final GenericKey messageKey = genericKey("1");
  final GenericRow messageRow = genericRow(
      "{\"log\":{\"@timestamp\":\"2017-05-30T16:44:22.175Z\",\"@version\":\"1\","
          + "\"caasVersion\":\"0.0.2\",\"cloud\":\"aws\",\"logs\":[{\"entry\":\"first\"}],"
          + "\"clusterId\":\"cp99\",\"clusterName\":\"kafka\",\"cpComponentId\":\"kafka\","
          + "\"host\":\"kafka-1-wwl0p\",\"k8sId\":\"k8s13\",\"k8sName\":\"perf\",\"level\":\"ERROR\","
          + "\"logger\":\"kafka.server.ReplicaFetcherThread\",\"message\":\"Found invalid messages"
          + " during fetch for partition [foo512,172] offset 0 error Record is corrupt (stored crc"
          + " = 1321230880, computed crc = 1139143803)\",\"networkId\":\"vpc-d8c7a9bf\","
          + "\"region\":\"us-west-2\",\"serverId\":\"1\",\"skuId\":\"sku5\",\"source\":\"kafka\","
          + "\"tenantId\":\"t47\",\"tenantName\":\"perf-test\",\"thread\":\"ReplicaFetcherThread-0-2\","
          + "\"zone\":\"us-west-2a\"},\"stream\":\"stdout\",\"time\":2017}");
  final Map<GenericKey, GenericRow> records = new HashMap<>();
  records.put(messageKey, messageRow);
  final PhysicalSchema schema = PhysicalSchema.from(messageSchema, SerdeFeatures.of(), SerdeFeatures.of());
  TEST_HARNESS.produceRows(messageLogTopic, records.entrySet(), schema, KAFKA, JSON);
}
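The records map above works because GenericKey and GenericRow behave as value objects: keys built from equal column values compare equal, so they are safe HashMap keys. A minimal sketch, assuming only the static factories used in the test:

import static io.confluent.ksql.GenericKey.genericKey;
import static io.confluent.ksql.GenericRow.genericRow;

import io.confluent.ksql.GenericKey;
import io.confluent.ksql.GenericRow;
import java.util.HashMap;
import java.util.Map;

public final class KeyedRowSketch {
  public static void main(String[] args) {
    final Map<GenericKey, GenericRow> records = new HashMap<>();
    records.put(genericKey("1"), genericRow("payload"));

    // A separately constructed key with the same column value finds the entry,
    // because GenericKey equality is defined over the column values.
    System.out.println(records.get(genericKey("1")));
  }
}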
Use of io.confluent.ksql.GenericKey in project ksql by confluentinc.
The class ReplaceIntTest, method shouldReplaceSimpleProject.
@Test
public void shouldReplaceSimpleProject() {
  // Given:
  final String outputTopic = TopicTestUtil.uniqueTopicName("");
  ksqlContext.sql(String.format(
      "CREATE STREAM project WITH(kafka_topic='%s') AS SELECT k, col1 FROM source;", outputTopic));
  TEST_HARNESS.produceRows(inputTopic, new Provider("1", "A", 1), FormatFactory.KAFKA, FormatFactory.JSON);
  assertForSource("PROJECT", outputTopic, ImmutableMap.of(genericKey("1"), GenericRow.genericRow("A")));
  // When:
  ksqlContext.sql(String.format(
      "CREATE OR REPLACE STREAM project WITH(kafka_topic='%s') AS SELECT k, col1, col2 FROM source;", outputTopic));
  TEST_HARNESS.produceRows(inputTopic, new Provider("2", "B", 2), FormatFactory.KAFKA, FormatFactory.JSON);
  // Then:
  final Map<GenericKey, GenericRow> expected = ImmutableMap.of(
      genericKey("1"), GenericRow.genericRow("A", null), // this row is leftover from the original query
      genericKey("2"), GenericRow.genericRow("B", 2));
  assertForSource("PROJECT", outputTopic, expected);
}
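The notable detail is the expected row for key "1": it was written by the original two-column query, so when read back under the replaced three-column schema the missing trailing column surfaces as null. A short sketch of why the padded form matters, assuming only GenericRow's value-based equality:

import io.confluent.ksql.GenericRow;

public final class ReplacedSchemaSketch {
  public static void main(String[] args) {
    final GenericRow writtenByV1 = GenericRow.genericRow("A");       // SELECT k, col1
    final GenericRow readUnderV2 = GenericRow.genericRow("A", null); // SELECT k, col1, col2

    // Not equal: the value lists differ in length, so the expectation map in
    // the test must use the null-padded form after CREATE OR REPLACE.
    System.out.println(writtenByV1.equals(readUnderV2)); // false
  }
}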
Use of io.confluent.ksql.GenericKey in project ksql by confluentinc.
The class JoinIntTest, method assertExpectedResults.
private void assertExpectedResults(
    final Map<GenericKey, GenericRow> expectedResults,
    final DataSource outputSource,
    final String inputTopic,
    final Format inputKeyFormat,
    final Format inputValueFormat,
    final long timeoutMs) {
  final PhysicalSchema schema = PhysicalSchema.from(
      outputSource.getSchema(),
      outputSource.getKsqlTopic().getKeyFormat().getFeatures(),
      outputSource.getKsqlTopic().getValueFormat().getFeatures());
  final Map<GenericKey, GenericRow> results = new HashMap<>();
  assertThatEventually("failed to complete join correctly", () -> {
    results.putAll(TEST_HARNESS.verifyAvailableUniqueRows(
        outputSource.getKafkaTopicName(),
        1,
        FormatFactory.of(outputSource.getKsqlTopic().getKeyFormat().getFormatInfo()),
        FormatFactory.of(outputSource.getKsqlTopic().getValueFormat().getFormatInfo()),
        schema));
    final boolean success = results.equals(expectedResults);
    if (!success) {
      try {
        // The join may not be triggered first time around due to the order in which the
        // consumer pulls the records back. So we publish again to make the stream
        // trigger the join.
        TEST_HARNESS.produceRows(inputTopic, ORDER_DATA_PROVIDER, inputKeyFormat, inputValueFormat, () -> now);
      } catch (final Exception e) {
        throw new RuntimeException(e);
      }
    }
    return success;
  }, is(true), timeoutMs, TimeUnit.MILLISECONDS);
  assertThat(results, is(expectedResults));
}
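The shape of this check is a poll-until-equal loop with a side action on each miss: re-publish the input so the stream re-triggers the join. A generic sketch of that pattern; the class and method names here are hypothetical, not ksql test utilities:

import java.util.Map;
import java.util.function.Supplier;

final class EventualAssert {
  static <K, V> void awaitEquals(
      final Supplier<Map<K, V>> poll,
      final Map<K, V> expected,
      final Runnable onMiss,
      final long timeoutMs) throws InterruptedException {
    final long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (poll.get().equals(expected)) {
        return; // observed rows match the expected map
      }
      onMiss.run();      // e.g. produce the input rows again
      Thread.sleep(100); // back off before polling again
    }
    throw new AssertionError("failed to complete join correctly");
  }
}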
Use of io.confluent.ksql.GenericKey in project ksql by confluentinc.
The class ReplaceWithSharedRuntimesIntTest, method shouldReplaceSimpleProject.
@Test
public void shouldReplaceSimpleProject() {
  // Given:
  final String outputTopic = TopicTestUtil.uniqueTopicName("");
  ksqlContext.sql(String.format(
      "CREATE STREAM project WITH(kafka_topic='%s') AS SELECT k, col1 FROM source;", outputTopic));
  TEST_HARNESS.produceRows(inputTopic, new Provider("1", "A", 1), FormatFactory.KAFKA, FormatFactory.JSON);
  assertForSource("PROJECT", outputTopic, ImmutableMap.of(genericKey("1"), GenericRow.genericRow("A")));
  // When:
  ksqlContext.sql(String.format(
      "CREATE OR REPLACE STREAM project WITH(kafka_topic='%s') AS SELECT k, col1, col2 FROM source;", outputTopic));
  TEST_HARNESS.produceRows(inputTopic, new Provider("2", "B", 2), FormatFactory.KAFKA, FormatFactory.JSON);
  // Then:
  final Map<GenericKey, GenericRow> expected = ImmutableMap.of(
      genericKey("1"), GenericRow.genericRow("A", null), // this row is leftover from the original query
      genericKey("2"), GenericRow.genericRow("B", 2));
  assertForSource("PROJECT", outputTopic, expected);
}
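This shared-runtime variant exercises the same GenericKey-keyed expectation maps as ReplaceIntTest above. For joins or GROUP BY clauses over several columns, the same varargs factory takes one argument per key column; a small sketch, assuming positional equality over the column values:

import io.confluent.ksql.GenericKey;

public final class MultiColumnKeySketch {
  public static void main(String[] args) {
    // One argument per key column, in schema order.
    final GenericKey a = GenericKey.genericKey("user_0", 42);
    final GenericKey b = GenericKey.genericKey(42, "user_0");
    System.out.println(a.equals(b)); // false: same values, different column order
  }
}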