Use of io.confluent.ksql.services.ServiceContext in project ksql by confluentinc.
The class KsqlContext, method create.
/**
 * Create a KSQL context object with the given properties. A KSQL context has its own metastore,
 * valid for the lifetime of the object.
 */
public static KsqlContext create(final KsqlConfig ksqlConfig,
    final ProcessingLogContext processingLogContext, final MetricCollectors metricCollectors) {
  Objects.requireNonNull(ksqlConfig, "ksqlConfig cannot be null.");
  final ServiceContext serviceContext =
      ServiceContextFactory.create(ksqlConfig, DisabledKsqlClient::instance);
  final MutableFunctionRegistry functionRegistry = new InternalFunctionRegistry();
  UserFunctionLoader.newInstance(ksqlConfig, functionRegistry, ".", metricCollectors.getMetrics())
      .load();
  final ServiceInfo serviceInfo = ServiceInfo.create(ksqlConfig);
  final KsqlEngine engine = new KsqlEngine(serviceContext, processingLogContext,
      functionRegistry, serviceInfo, new SequentialQueryIdGenerator(), ksqlConfig,
      Collections.emptyList(), metricCollectors);
  return new KsqlContext(serviceContext, ksqlConfig, engine, Injectors.DEFAULT);
}
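For context, a minimal usage sketch of this factory. The property values below are illustrative assumptions, not values taken from the snippet above:

// Hedged sketch: build a KsqlConfig from assumed properties and create a
// context whose metastore lives only as long as the object.
final Map<String, Object> props = new HashMap<>();
props.put("bootstrap.servers", "localhost:9092"); // assumed local broker
final KsqlConfig config = new KsqlConfig(props);
final KsqlContext context = KsqlContext.create(
    config, ProcessingLogContext.create(), new MetricCollectors());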
Use of io.confluent.ksql.services.ServiceContext in project ksql by confluentinc.
The class KsqlEngineTestUtil, method execute.
@SuppressWarnings({"rawtypes", "unchecked"})
private static ExecuteResult execute(
    final ServiceContext serviceContext,
    final KsqlExecutionContext executionContext,
    final ParsedStatement stmt,
    final KsqlConfig ksqlConfig,
    final Map<String, Object> overriddenProperties,
    final Optional<DefaultSchemaInjector> schemaInjector) {
  final PreparedStatement<?> prepared = executionContext.prepare(stmt);
  final ConfiguredStatement<?> configured =
      ConfiguredStatement.of(prepared, SessionConfig.of(ksqlConfig, overriddenProperties));
  final ConfiguredStatement<?> withFormats = new DefaultFormatInjector().inject(configured);
  final ConfiguredStatement<?> withSchema = schemaInjector
      .map(injector -> injector.inject(withFormats))
      .orElse((ConfiguredStatement) withFormats);
  final ConfiguredStatement<?> reformatted =
      new SqlFormatInjector(executionContext).inject(withSchema);
  try {
    return executionContext.execute(serviceContext, reformatted);
  } catch (final KsqlStatementException e) {
    // Rethrow against the original statement text so callers can easily check
    // that the failed statement is the input statement.
    throw new KsqlStatementException(e.getRawMessage(), stmt.getStatementText(), e.getCause());
  }
}
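The interesting pattern here is the injector chain: each injector returns a new ConfiguredStatement, so format, schema, and formatting problems surface before execute() reaches the engine. A hedged sketch of driving this through a public test entry point; the exact overload signature is an assumption to verify against the test sources:

// Assumed public overload of KsqlEngineTestUtil.execute; the SQL text is illustrative.
final ExecuteResult result = KsqlEngineTestUtil.execute(
    serviceContext,
    ksqlEngine,
    "CREATE STREAM clicks (id INT) WITH (kafka_topic='clicks', value_format='JSON');",
    ksqlConfig,
    Collections.emptyMap());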
Use of io.confluent.ksql.services.ServiceContext in project ksql by confluentinc.
The class KsqlContextTestUtil, method create.
public static KsqlContext create(final KsqlConfig ksqlConfig,
    final SchemaRegistryClient schemaRegistryClient, final FunctionRegistry functionRegistry) {
  final KafkaClientSupplier clientSupplier = new DefaultKafkaClientSupplier();
  final Admin adminClient = clientSupplier.getAdmin(ksqlConfig.getKsqlAdminClientConfigProps());
  final KafkaTopicClient kafkaTopicClient = new KafkaTopicClientImpl(() -> adminClient);
  final ServiceContext serviceContext = TestServiceContext.create(
      clientSupplier, adminClient, kafkaTopicClient, () -> schemaRegistryClient,
      new DefaultConnectClientFactory(ksqlConfig)
          .get(Optional.empty(), Collections.emptyList(), Optional.empty()));
  final String metricsPrefix = "instance-" + COUNTER.getAndIncrement() + "-";
  final KsqlEngine engine = new KsqlEngine(serviceContext, ProcessingLogContext.create(),
      functionRegistry, ServiceInfo.create(ksqlConfig, metricsPrefix),
      new SequentialQueryIdGenerator(), ksqlConfig, Collections.emptyList(),
      new MetricCollectors());
  return new KsqlContext(serviceContext, ksqlConfig, engine, Injectors.DEFAULT);
}
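A hedged sketch of how a test might use this factory with an in-memory Schema Registry. MockSchemaRegistryClient comes from the Confluent schema-registry client library; the SQL text is illustrative:

final KsqlContext context = KsqlContextTestUtil.create(
    ksqlConfig,
    new MockSchemaRegistryClient(), // in-memory stand-in for a real registry
    new InternalFunctionRegistry());
try {
  context.sql("CREATE STREAM users (id INT) WITH (kafka_topic='users', value_format='JSON');");
} finally {
  context.close(); // also closes the underlying ServiceContext and engine
}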
Use of io.confluent.ksql.services.ServiceContext in project ksql by confluentinc.
The class TestExecutorUtil, method buildStreamsTopologyTestDrivers.
static List<TopologyTestDriverContainer> buildStreamsTopologyTestDrivers(
    final TestCase testCase, final ServiceContext serviceContext, final KsqlEngine ksqlEngine,
    final KsqlConfig ksqlConfig, final StubKafkaService stubKafkaService,
    final TestExecutionListener listener) {
  final KsqlConfig maybeUpdatedConfigs = testCase.applyPersistedProperties(ksqlConfig);
  final List<PersistentQueryAndSources> queryMetadataList = doBuildQueries(
      testCase, serviceContext, ksqlEngine, maybeUpdatedConfigs, stubKafkaService, listener);
  final List<TopologyTestDriverContainer> topologyTestDrivers = new ArrayList<>();
  for (final PersistentQueryAndSources persistentQueryAndSources : queryMetadataList) {
    final PersistentQueryMetadata persistentQueryMetadata =
        persistentQueryAndSources.getPersistentQueryMetadata();
    final Properties streamsProperties = new Properties();
    streamsProperties.putAll(persistentQueryMetadata.getStreamsProperties());
    final Topology topology = persistentQueryMetadata.getTopology();
    final TopologyTestDriver topologyTestDriver =
        new TopologyTestDriver(topology, streamsProperties, Instant.EPOCH);
    final List<Topic> sourceTopics = persistentQueryAndSources.getSources().stream()
        .map(dataSource -> {
          stubKafkaService.requireTopicExists(dataSource.getKafkaTopicName());
          return stubKafkaService.getTopic(dataSource.getKafkaTopicName());
        })
        .collect(Collectors.toList());
    final Optional<Topic> sinkTopic = persistentQueryMetadata.getSinkName()
        .map(name -> buildSinkTopic(
            ksqlEngine.getMetaStore().getSource(name),
            stubKafkaService,
            serviceContext.getSchemaRegistryClient()));
    testCase.setGeneratedTopologies(
        ImmutableList.of(persistentQueryMetadata.getTopologyDescription()));
    testCase.setGeneratedSchemas(persistentQueryMetadata.getQuerySchemas().getLoggerSchemaInfo());
    topologyTestDrivers.add(
        TopologyTestDriverContainer.of(topologyTestDriver, sourceTopics, sinkTopic));
  }
  return topologyTestDrivers;
}
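Each container wraps a standard kafka-streams-test-utils TopologyTestDriver. A minimal sketch of how such a driver is typically fed and read; the topic names and serdes here are assumptions, not values from the method above:

final TestInputTopic<String, String> input = topologyTestDriver.createInputTopic(
    "source-topic", new StringSerializer(), new StringSerializer()); // assumed topic and serdes
input.pipeInput("key", "{\"id\": 1}");
final TestOutputTopic<String, String> output = topologyTestDriver.createOutputTopic(
    "sink-topic", new StringDeserializer(), new StringDeserializer());
// output.readKeyValue() would now return whatever the query topology produced.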
Use of io.confluent.ksql.services.ServiceContext in project ksql by confluentinc.
The class LocalCommandsTest, method shouldWriteAppIdToCommandFile.
@Test
public void shouldWriteAppIdToCommandFile() throws IOException {
  // Given
  final File dir = commandsDir.newFolder();
  LocalCommands localCommands = LocalCommands.open(ksqlEngine, dir);
  final File processedFile = localCommands.getCurrentLocalCommandsFile();

  // When
  localCommands.write(metadata1);
  localCommands.write(metadata2);

  // Then
  // Open a fresh LocalCommands so processing does not skip the "current" file we just wrote.
  localCommands = LocalCommands.open(ksqlEngine, dir);
  localCommands.write(metadata3);
  localCommands.processLocalCommandFiles(serviceContext);
  verify(ksqlEngine).cleanupOrphanedInternalTopics(
      any(), eq(ImmutableSet.of(QUERY_APP_ID1, QUERY_APP_ID2)));
  final List<Path> paths = Files.list(dir.toPath()).collect(Collectors.toList());
  final String expectedProcessedFileName =
      processedFile.getAbsolutePath() + LOCAL_COMMANDS_PROCESSED_SUFFIX;
  assertThat(paths.size(), is(2));
  assertThat(
      paths.stream().anyMatch(
          path -> path.toFile().getAbsolutePath().equals(expectedProcessedFileName)),
      is(true));
}
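This test leans on Mockito fixtures declared elsewhere in the class. A hedged sketch of what that setup plausibly looks like; the stubbed accessor name is an assumption inferred from the application ids the verification expects:

// Assumed fixture: mocks whose application ids drive the expected cleanup set.
@Mock private KsqlEngine ksqlEngine;
@Mock private ServiceContext serviceContext;
@Mock private PersistentQueryMetadata metadata1; // metadata2 and metadata3 declared the same way

@Before
public void setUp() {
  // Assumed accessor; LocalCommands presumably records each query's application id.
  when(metadata1.getQueryApplicationId()).thenReturn(QUERY_APP_ID1);
  when(metadata2.getQueryApplicationId()).thenReturn(QUERY_APP_ID2);
}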