Example usage of io.confluent.ksql.test.tools.stubs.StubKafkaService in the ksql project by confluentinc: the TestExecutorUtil class, method buildStreamsTopologyTestDrivers.
/**
 * Builds one {@link TopologyTestDriverContainer} per persistent query in the test case.
 *
 * <p>Each container bundles a {@link TopologyTestDriver} (started at {@code Instant.EPOCH} for
 * deterministic wall-clock behavior) together with the query's source topics and optional sink
 * topic. As each query is processed, the test case's generated topology description and logger
 * schema info are recorded for later comparison against historical plans.
 *
 * @param testCase the test case whose statements produce the queries.
 * @param serviceContext provides access to the schema registry client.
 * @param ksqlEngine the engine used to build the queries and resolve sink sources.
 * @param ksqlConfig the base config; test-case persisted properties are applied on top.
 * @param stubKafkaService in-memory stand-in for Kafka; must already contain all source topics.
 * @param listener notified as queries are built.
 * @return one driver container per persistent query, in build order.
 */
static List<TopologyTestDriverContainer> buildStreamsTopologyTestDrivers(
    final TestCase testCase,
    final ServiceContext serviceContext,
    final KsqlEngine ksqlEngine,
    final KsqlConfig ksqlConfig,
    final StubKafkaService stubKafkaService,
    final TestExecutionListener listener
) {
  // Test-case level properties override the supplied base config.
  final KsqlConfig effectiveConfig = testCase.applyPersistedProperties(ksqlConfig);
  final List<PersistentQueryAndSources> builtQueries = doBuildQueries(
      testCase, serviceContext, ksqlEngine, effectiveConfig, stubKafkaService, listener);

  final List<TopologyTestDriverContainer> containers = new ArrayList<>();
  for (final PersistentQueryAndSources queryAndSources : builtQueries) {
    final PersistentQueryMetadata metadata = queryAndSources.getPersistentQueryMetadata();

    final Properties streamsProps = new Properties();
    streamsProps.putAll(metadata.getStreamsProperties());

    // EPOCH start time keeps stream-time deterministic across runs.
    final TopologyTestDriver driver =
        new TopologyTestDriver(metadata.getTopology(), streamsProps, Instant.EPOCH);

    // Resolve every source's backing topic; fail fast if one was never registered.
    final List<Topic> sourceTopics = queryAndSources.getSources().stream()
        .map(source -> {
          final String topicName = source.getKafkaTopicName();
          stubKafkaService.requireTopicExists(topicName);
          return stubKafkaService.getTopic(topicName);
        })
        .collect(Collectors.toList());

    // Not every query has a sink (e.g. tombstone-only or side-effect-free statements).
    final Optional<Topic> sinkTopic = metadata.getSinkName()
        .map(name -> buildSinkTopic(
            ksqlEngine.getMetaStore().getSource(name),
            stubKafkaService,
            serviceContext.getSchemaRegistryClient()));

    // NOTE(review): these setters run once per query, so the test case ends up holding
    // only the LAST query's topology/schemas — this mirrors upstream behavior; verify
    // against callers before relying on it for multi-query test cases.
    testCase.setGeneratedTopologies(ImmutableList.of(metadata.getTopologyDescription()));
    testCase.setGeneratedSchemas(metadata.getQuerySchemas().getLoggerSchemaInfo());

    containers.add(TopologyTestDriverContainer.of(driver, sourceTopics, sinkTopic));
  }
  return containers;
}
Example usage of io.confluent.ksql.test.tools.stubs.StubKafkaService in the ksql project by confluentinc: the TestExecutor class, method create.
/**
 * Create instance.
 *
 * <p>If {@code validateResults} is {@code true} the test will fail if the results are not as
 * expected. This is the norm. If {@code false} the test will not fail if the results differ. This
 * is useful when re-writing the historical plans.
 *
 * @param validateResults flag to indicate if results should be validated.
 * @param extensionDir Optional extension directory.
 * @return the executor.
 */
public static TestExecutor create(final boolean validateResults, final Optional<String> extensionDir) {
  // In-memory Kafka stand-ins so tests run without a real cluster.
  final StubKafkaService kafkaService = StubKafkaService.create();
  final StubKafkaClientSupplier kafkaClientSupplier = new StubKafkaClientSupplier();
  final ServiceContext serviceContext = getServiceContext(kafkaClientSupplier);
  return new TestExecutor(
      kafkaService,
      serviceContext,
      getKsqlEngine(serviceContext, extensionDir),
      TestExecutorUtil::buildStreamsTopologyTestDrivers,
      validateResults);
}
Aggregations