Use of io.confluent.ksql.util.KafkaTopicClient in project ksql by confluentinc.
From class WindowingIntTest, method shouldAggregateWithNoWindow.
@Test
public void shouldAggregateWithNoWindow() throws Exception {
  testHarness.publishTestData(topicName, dataProvider, now);
  final String streamName = "NOWINDOW_AGGTEST";
  final String queryString = String.format(
      "CREATE TABLE %s AS SELECT %s FROM ORDERS WHERE ITEMID = 'ITEM_1' GROUP BY ITEMID;",
      streamName,
      "ITEMID, COUNT(ITEMID), SUM(ORDERUNITS)");
  ksqlContext.sql(queryString);
  Schema resultSchema = ksqlContext.getMetaStore().getSource(streamName).getSchema();
  // count = 2, sum = 20.0 (2 x items)
  final GenericRow expected = new GenericRow(Arrays.asList(null, null, "ITEM_1", 2, 20.0));
  TestUtils.waitForCondition(() -> {
    final Map<String, GenericRow> aggregateResults = testHarness.consumeData(
        streamName, resultSchema, 1, new StringDeserializer(), MAX_POLL_PER_ITERATION);
    final GenericRow actual = aggregateResults.get("ITEM_1");
    return expected.equals(actual);
  }, 60000, "didn't receive correct results within timeout");
  AdminClient adminClient = AdminClient.create(testHarness.ksqlConfig.getKsqlStreamConfigProps());
  KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);
  Set<String> topicsBeforeCleanup = topicClient.listTopicNames();
  assertThat("Expected 5 topics but found: " + topicsBeforeCleanup.size(),
      topicsBeforeCleanup.size(), equalTo(5));
  QueryMetadata queryMetadata = ksqlContext.getRunningQueries().iterator().next();
  queryMetadata.close();
  Set<String> topicsAfterCleanUp = topicClient.listTopicNames();
  assertThat("Expected 3 topics after cleanup but found: " + topicsAfterCleanUp.size(),
      topicsAfterCleanUp.size(), equalTo(3));
  assertThat(topicClient.getTopicCleanupPolicy(streamName),
      equalTo(KafkaTopicClient.TopicCleanupPolicy.COMPACT));
}
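For reference, the cleanup-policy assertion above could be reproduced with the plain Kafka AdminClient alone. Below is a minimal sketch, assuming the same running cluster as the test; the describeConfigs round trip is our assumption about what KafkaTopicClientImpl.getTopicCleanupPolicy does internally, not the actual ksql implementation.

import java.util.Collections;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.config.TopicConfig;

// Read a topic's cleanup.policy with the raw AdminClient.
static String cleanupPolicy(AdminClient adminClient, String topicName) throws Exception {
  ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, topicName);
  Config config = adminClient.describeConfigs(Collections.singleton(topic))
      .all().get().get(topic);
  // A non-windowed aggregate table sink is expected to be "compact";
  // the windowed tables in the tests below are expected to be "delete".
  return config.get(TopicConfig.CLEANUP_POLICY_CONFIG).value();
}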
Use of io.confluent.ksql.util.KafkaTopicClient in project ksql by confluentinc.
From class WindowingIntTest, method shouldAggregateSessionWindow.
@Test
public void shouldAggregateSessionWindow() throws Exception {
  testHarness.publishTestData(topicName, dataProvider, now);
  final String streamName = "SESSION_AGGTEST";
  final String queryString = String.format(
      "CREATE TABLE %s AS SELECT %s FROM ORDERS WINDOW %s GROUP BY ORDERID;",
      streamName,
      "ORDERID, COUNT(*), SUM(ORDERUNITS)",
      "SESSION (10 SECONDS)");
  ksqlContext.sql(queryString);
  Schema resultSchema = ksqlContext.getMetaStore().getSource(streamName).getSchema();
  // count = 6, sum = 420.0 for key ORDER_6
  GenericRow expectedResults = new GenericRow(Arrays.asList(null, null, "ORDER_6", 6, 420.0));
  final Map<String, GenericRow> results = new HashMap<>();
  TestUtils.waitForCondition(() -> {
    final Map<Windowed<String>, GenericRow> windowedResults = testHarness.consumeData(
        streamName, resultSchema, datasetOneMetaData.size(),
        new TimeWindowedDeserializer<>(new StringDeserializer()), 1000);
    updateResults(results, windowedResults);
    final GenericRow actual = results.get("ORDER_6");
    return expectedResults.equals(actual) && results.size() == 6;
  }, 60000, "didn't receive correct results within timeout");
  AdminClient adminClient = AdminClient.create(testHarness.ksqlConfig.getKsqlStreamConfigProps());
  KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);
  Set<String> topicsBeforeCleanup = topicClient.listTopicNames();
  assertThat("Expected 5 topics but found: " + topicsBeforeCleanup.size(),
      topicsBeforeCleanup.size(), equalTo(5));
  QueryMetadata queryMetadata = ksqlContext.getRunningQueries().iterator().next();
  queryMetadata.close();
  Set<String> topicsAfterCleanUp = topicClient.listTopicNames();
  assertThat("Expected 3 topics after cleanup but found: " + topicsAfterCleanUp.size(),
      topicsAfterCleanUp.size(), equalTo(3));
  assertThat(topicClient.getTopicCleanupPolicy(streamName),
      equalTo(KafkaTopicClient.TopicCleanupPolicy.DELETE));
}
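The updateResults helper is not shown on this page. Below is a plausible minimal sketch; the name and call shape are taken from the tests above, but the body is our assumption: it presumably collapses the Windowed keys down to their underlying string key so successive polls accumulate the latest aggregate per key.

import java.util.Map;
import org.apache.kafka.streams.kstream.Windowed;
import io.confluent.ksql.GenericRow;

// Hypothetical sketch of the helper used by the windowed tests.
private static void updateResults(
    Map<String, GenericRow> results,
    Map<Windowed<String>, GenericRow> windowedResults) {
  for (Map.Entry<Windowed<String>, GenericRow> entry : windowedResults.entrySet()) {
    // Windowed#key() strips the window, leaving the plain record key,
    // so newer windows overwrite older ones for the same key.
    results.put(entry.getKey().key(), entry.getValue());
  }
}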
Use of io.confluent.ksql.util.KafkaTopicClient in project ksql by confluentinc.
From class WindowingIntTest, method shouldAggregateHoppingWindow.
@Test
public void shouldAggregateHoppingWindow() throws Exception {
  testHarness.publishTestData(topicName, dataProvider, now);
  final String streamName = "HOPPING_AGGTEST";
  final String queryString = String.format(
      "CREATE TABLE %s AS SELECT %s FROM ORDERS WINDOW %s WHERE ITEMID = 'ITEM_1' GROUP BY ITEMID;",
      streamName,
      "ITEMID, COUNT(ITEMID), SUM(ORDERUNITS)",
      "HOPPING (SIZE 10 SECONDS, ADVANCE BY 5 SECONDS)");
  ksqlContext.sql(queryString);
  Schema resultSchema = ksqlContext.getMetaStore().getSource(streamName).getSchema();
  // count = 2, sum = 20.0 (2 x items)
  final GenericRow expected = new GenericRow(Arrays.asList(null, null, "ITEM_1", 2, 20.0));
  final Map<String, GenericRow> results = new HashMap<>();
  TestUtils.waitForCondition(() -> {
    final Map<Windowed<String>, GenericRow> windowedResults = testHarness.consumeData(
        streamName, resultSchema, 1, new TimeWindowedDeserializer<>(new StringDeserializer()), 1000);
    updateResults(results, windowedResults);
    final GenericRow actual = results.get("ITEM_1");
    return expected.equals(actual);
  }, 60000, "didn't receive correct results within timeout");
  AdminClient adminClient = AdminClient.create(testHarness.ksqlConfig.getKsqlStreamConfigProps());
  KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);
  Set<String> topicsBeforeCleanup = topicClient.listTopicNames();
  assertThat("Expected 5 topics but found: " + topicsBeforeCleanup.size(),
      topicsBeforeCleanup.size(), equalTo(5));
  QueryMetadata queryMetadata = ksqlContext.getRunningQueries().iterator().next();
  queryMetadata.close();
  Set<String> topicsAfterCleanUp = topicClient.listTopicNames();
  assertThat("Expected 3 topics after cleanup but found: " + topicsAfterCleanUp.size(),
      topicsAfterCleanUp.size(), equalTo(3));
  assertThat(topicClient.getTopicCleanupPolicy(streamName),
      equalTo(KafkaTopicClient.TopicCleanupPolicy.DELETE));
}
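For comparison, the same aggregate over a tumbling window only changes the WINDOW clause. An illustrative variant follows; the TUMBLING_AGGTEST name is ours and is not part of the test class.

// Tumbling-window variant of the hopping aggregate above (illustrative).
final String tumblingQuery = String.format(
    "CREATE TABLE %s AS SELECT %s FROM ORDERS WINDOW %s WHERE ITEMID = 'ITEM_1' GROUP BY ITEMID;",
    "TUMBLING_AGGTEST",
    "ITEMID, COUNT(ITEMID), SUM(ORDERUNITS)",
    "TUMBLING (SIZE 10 SECONDS)");
ksqlContext.sql(tumblingQuery);
// Like the hopping and session cases, a windowed table's sink topic is
// expected to carry the DELETE cleanup policy rather than COMPACT.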
Use of io.confluent.ksql.util.KafkaTopicClient in project ksql by confluentinc.
From class JoinNodeTest, method shouldBuildTableNodeWithCorrectAutoCommitOffsetPolicy.
@Test
public void shouldBuildTableNodeWithCorrectAutoCommitOffsetPolicy() {
  setupTopicClientExpectations(1, 1);
  buildJoin();
  KsqlConfig ksqlConfig = mock(KsqlConfig.class);
  KafkaTopicClient kafkaTopicClient = mock(KafkaTopicClient.class);
  FunctionRegistry functionRegistry = mock(FunctionRegistry.class);
  // Local PlanNode stub that succeeds only if auto.offset.reset is "earliest".
  class RightTable extends PlanNode {
    final Schema schema;

    RightTable(final PlanNodeId id, Schema schema) {
      super(id);
      this.schema = schema;
    }

    @Override
    public Schema getSchema() {
      return schema;
    }

    @Override
    public Field getKeyField() {
      return null;
    }

    @Override
    public List<PlanNode> getSources() {
      return null;
    }

    @Override
    public SchemaKStream buildStream(
        StreamsBuilder builder,
        KsqlConfig ksqlConfig,
        KafkaTopicClient kafkaTopicClient,
        FunctionRegistry functionRegistry,
        Map<String, Object> props,
        SchemaRegistryClient schemaRegistryClient) {
      if (props.containsKey(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)
          && props.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG).toString().equalsIgnoreCase("EARLIEST")) {
        return mock(SchemaKTable.class);
      } else {
        throw new KsqlException("auto.offset.reset should be set to EARLIEST.");
      }
    }

    @Override
    protected int getPartitions(KafkaTopicClient kafkaTopicClient) {
      return 1;
    }
  }
  RightTable rightTable = new RightTable(new PlanNodeId("1"), joinNode.getRight().getSchema());
  JoinNode testJoinNode = new JoinNode(
      joinNode.getId(), joinNode.getType(), joinNode.getLeft(), rightTable,
      joinNode.getLeftKeyFieldName(), joinNode.getRightKeyFieldName(),
      joinNode.getLeftAlias(), joinNode.getRightAlias());
  testJoinNode.tableForJoin(
      builder, ksqlConfig, kafkaTopicClient, functionRegistry,
      new HashMap<>(), new MockSchemaRegistryClient());
}
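The RightTable stub only passes if tableForJoin forces the right-hand consumer onto the earliest offset. Below is a minimal sketch of the behavior the test pins down; this is our assumption about the implementation, not the actual JoinNode code, and the local names (props, right, builder, and the rest) are illustrative.

// Presumed shape of tableForJoin's property handling: copy the caller's
// props and pin auto.offset.reset to "earliest" before building the
// right-hand table, so a join always reads the table from the beginning.
Map<String, Object> joinTableProps = new HashMap<>(props);
joinTableProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
SchemaKStream table = right.buildStream(
    builder, ksqlConfig, kafkaTopicClient, functionRegistry,
    joinTableProps, schemaRegistryClient);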
Use of io.confluent.ksql.util.KafkaTopicClient in project ksql by confluentinc.
From class KsqlStructuredDataOutputNodeTest, method shouldCreateSinkWithCorrectCleanupPolicyStream.
@Test
public void shouldCreateSinkWithCorrectCleanupPolicyStream() {
  KafkaTopicClient topicClientForWindowTable = EasyMock.mock(KafkaTopicClient.class);
  StreamsBuilder streamsBuilder = new StreamsBuilder();
  // Record phase: expect the sink topic to be created with 4 partitions and RF 3.
  topicClientForWindowTable.createTopic("output", 4, (short) 3, Collections.emptyMap());
  EasyMock.replay(topicClientForWindowTable);
  SchemaKStream schemaKStream = outputNode.buildStream(
      streamsBuilder, ksqlConfig, topicClientForWindowTable,
      new FunctionRegistry(), new HashMap<>(), new MockSchemaRegistryClient());
  assertThat(schemaKStream, instanceOf(SchemaKStream.class));
  EasyMock.verify(topicClientForWindowTable);
}
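This test relies on EasyMock's record/replay/verify lifecycle, which is easy to misread in a flattened listing. A minimal self-contained illustration, mirroring the createTopic signature used above:

KafkaTopicClient client = EasyMock.mock(KafkaTopicClient.class);
// Record phase: every call made now registers an expectation, not a real call.
client.createTopic("output", 4, (short) 3, Collections.emptyMap());
// Switch the mock into replay mode; expectations are now enforced.
EasyMock.replay(client);
// The code under test runs here and must make the expected call...
client.createTopic("output", 4, (short) 3, Collections.emptyMap());
// ...otherwise verify(client) fails. Note that verify must be given the mock;
// a bare EasyMock.verify() with no arguments checks nothing.
EasyMock.verify(client);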