Example usage of io.confluent.ksql.test.model.SourceNode from the ksql project by Confluent:
the buildAndExecuteQuery method of the TestExecutor class.
/**
 * Builds the streams topologies for the supplied test case, pipes the test's input records
 * through them, and verifies the produced output against the test's expectations.
 *
 * <p>Also collects the set of known topics (with partition counts and any Schema Registry
 * key/value schemas) and known sources from the engine's metastore, runs the test case's
 * post-conditions over them, and reports them to the listener.
 *
 * @param testCase the test case to build and execute
 * @param listener callback notified of build progress and of the final topics/sources
 * @throws AssertionError if the test case expected an exception that was never thrown, or
 *     if a thrown exception does not match the expected matcher
 */
public void buildAndExecuteQuery(
    final TestCase testCase,
    final TestExecutionListener listener
) {
  topicInfoCache.clear();

  final KsqlConfig ksqlConfig = testCase.applyPersistedProperties(new KsqlConfig(config));

  try {
    // Enable serde-topic tracking so producedTopicNames() below reflects every topic a
    // serde was created for; cleared again in the finally block.
    System.setProperty(RuntimeBuildContext.KSQL_TEST_TRACK_SERDE_TOPICS, "true");

    final List<TopologyTestDriverContainer> topologyTestDrivers = topologyBuilder
        .buildStreamsTopologyTestDrivers(
            testCase, serviceContext, ksqlEngine, ksqlConfig, kafka, listener);

    writeInputIntoTopics(testCase.getInputRecords(), kafka);

    final Set<String> inputTopics = testCase.getInputRecords().stream()
        .map(Record::getTopicName)
        .collect(Collectors.toSet());

    for (final TopologyTestDriverContainer topologyTestDriverContainer : topologyTestDrivers) {
      if (validateResults) {
        verifyTopology(testCase);
      }

      // Partition each driver's source topics: those fed directly by the test's input
      // records vs. those only present in the stub Kafka (e.g. filled by upstream queries).
      final Set<String> topicsFromInput = topologyTestDriverContainer.getSourceTopicNames()
          .stream()
          .filter(inputTopics::contains)
          .collect(Collectors.toSet());
      final Set<String> topicsFromKafka = topologyTestDriverContainer.getSourceTopicNames()
          .stream()
          .filter(topicName -> !inputTopics.contains(topicName))
          .collect(Collectors.toSet());

      if (!topicsFromInput.isEmpty()) {
        pipeRecordsFromProvidedInput(testCase, topologyTestDriverContainer);
      }
      for (final String kafkaTopic : topicsFromKafka) {
        pipeRecordsFromKafka(kafkaTopic, topologyTestDriverContainer);
      }

      // Prime the topic-info cache with every topic this driver produced to.
      topologyTestDriverContainer.getTopologyTestDriver()
          .producedTopicNames()
          .forEach(topicInfoCache::get);
    }

    verifyOutput(testCase);

    // If the test expected an exception, reaching this point means it never happened.
    // (ifPresent, not map: the lambda is pure side effect — nothing to map to.)
    testCase.expectedException().ifPresent(ee -> {
      throw new AssertionError("Expected test to throw" + StringDescription.toString(ee));
    });

    // Ensure every topic in the stub Kafka — produced to or not — is in the cache.
    kafka.getAllTopics().stream()
        .map(Topic::getName)
        .forEach(topicInfoCache::get);

    final List<PostTopicNode> knownTopics = topicInfoCache.all().stream()
        .map(ti -> {
          final Topic topic = kafka.getTopic(ti.getTopicName());
          // A cached topic may not exist in the stub Kafka; report unknown partitions.
          final OptionalInt partitions = topic == null
              ? OptionalInt.empty()
              : OptionalInt.of(topic.getNumPartitions());
          final Optional<SchemaMetadata> keyMetadata = SchemaRegistryUtil.getLatestSchema(
              serviceContext.getSchemaRegistryClient(), ti.getTopicName(), true);
          final Optional<SchemaMetadata> valueMetadata = SchemaRegistryUtil.getLatestSchema(
              serviceContext.getSchemaRegistryClient(), ti.getTopicName(), false);
          return new PostTopicNode(
              ti.getTopicName(),
              ti.getKeyFormat(),
              ti.getValueFormat(),
              partitions,
              fromSchemaMetadata(keyMetadata),
              fromSchemaMetadata(valueMetadata));
        })
        .collect(Collectors.toList());

    final List<SourceNode> knownSources = ksqlEngine.getMetaStore().getAllDataSources()
        .values()
        .stream()
        .map(SourceNode::fromDataSource)
        .collect(Collectors.toList());

    if (validateResults) {
      testCase.getPostConditions().verify(ksqlEngine.getMetaStore(), knownTopics);
    }

    listener.runComplete(knownTopics, knownSources);
  } catch (final RuntimeException e) {
    // A thrown exception is only a failure if the test did not expect one (or it
    // fails to match the expected matcher).
    final Optional<Matcher<Throwable>> expectedExceptionMatcher = testCase.expectedException();
    if (!expectedExceptionMatcher.isPresent()) {
      throw e;
    }
    assertThat(e, isThrowable(expectedExceptionMatcher.get()));
  } finally {
    System.clearProperty(RuntimeBuildContext.KSQL_TEST_TRACK_SERDE_TOPICS);
  }
}
Aggregations