Use of io.confluent.ksql.services.ServiceContext in project ksql by confluentinc.
From class RequestValidatorTest, method shouldThrowIfServiceContextIsNotSandbox.
@Test
public void shouldThrowIfServiceContextIsNotSandbox() {
  // Given:
  serviceContext = mock(ServiceContext.class);
  givenRequestValidator(ImmutableMap.of());

  // When:
  final Exception e = assertThrows(
      IllegalArgumentException.class,
      () -> validator.validate(serviceContext, ImmutableList.of(), sessionProperties, "sql"));

  // Then:
  assertThat(e.getMessage(), containsString("Expected sandbox"));
}
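The test passes a plain mock so that the validator's sandbox check fails. For context, a minimal sketch of how a sandboxed context is typically obtained in this codebase, assuming the SandboxedServiceContext.create factory and the TestServiceContext test helper (both taken here to live in io.confluent.ksql.services; adjust to your test fixtures):

  // Sketch: wrap a real context so validation runs against a sandbox.
  // SandboxedServiceContext.create and TestServiceContext.create are assumptions,
  // not shown in the snippet above.
  final ServiceContext real = TestServiceContext.create();
  final ServiceContext sandbox = SandboxedServiceContext.create(real);
  // validator.validate(sandbox, ...) would pass the check that the plain mock
  // above deliberately fails.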
Use of io.confluent.ksql.services.ServiceContext in project ksql by confluentinc.
From class ListSourceExecutorTest, method shouldAddWarningsOnClientExceptionForTopicListing.
@Test
public void shouldAddWarningsOnClientExceptionForTopicListing() {
  // Given:
  final KsqlTable<?> table1 = engine.givenSource(DataSourceType.KTABLE, "table1");
  final KsqlTable<?> table2 = engine.givenSource(DataSourceType.KTABLE, "table2");
  final ServiceContext serviceContext = engine.getServiceContext();
  serviceContext.getTopicClient().deleteTopics(ImmutableList.of("table1", "table2"));

  // When:
  final KsqlEntity entity = CUSTOM_EXECUTORS.listTables()
      .execute(
          (ConfiguredStatement<ListTables>) engine.configure("SHOW TABLES EXTENDED;"),
          SESSION_PROPERTIES,
          engine.getEngine(),
          serviceContext)
      .getEntity()
      .orElseThrow(IllegalStateException::new);

  // Then:
  assertSourceListWithWarning(entity, table1, table2);
}
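Deleting the backing topics up front means the executor's later topic lookups fail, and ListSourceExecutor surfaces those failures as warnings on the returned entity rather than failing the whole request. A hedged sketch of pinning down that precondition directly against the topic client, assuming KafkaTopicClient#isTopicExists and hamcrest's is matcher in scope:

  // Sketch: confirm the topics are really gone before executing.
  // KafkaTopicClient#isTopicExists is an assumption about the client API.
  final KafkaTopicClient topicClient = serviceContext.getTopicClient();
  assertThat(topicClient.isTopicExists("table1"), is(false));
  assertThat(topicClient.isTopicExists("table2"), is(false));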
Use of io.confluent.ksql.services.ServiceContext in project ksql by confluentinc.
From class ListSourceExecutorTest, method shouldAddWarningOnClientExceptionForDescription.
@Test
public void shouldAddWarningOnClientExceptionForDescription() {
  // Given:
  final KsqlStream<?> stream1 = engine.givenSource(DataSourceType.KSTREAM, "STREAM1");
  final ServiceContext serviceContext = engine.getServiceContext();
  serviceContext.getTopicClient().deleteTopics(ImmutableList.of("STREAM1"));

  // When:
  final KsqlEntity entity = CUSTOM_EXECUTORS.showColumns()
      .execute(
          (ConfiguredStatement<ShowColumns>) engine.configure("DESCRIBE STREAM1 EXTENDED;"),
          SESSION_PROPERTIES,
          engine.getEngine(),
          serviceContext)
      .getEntity()
      .orElseThrow(IllegalStateException::new);

  // Then:
  assertThat(entity, instanceOf(SourceDescriptionEntity.class));
  final SourceDescriptionEntity description = (SourceDescriptionEntity) entity;
  assertThat(description.getSourceDescription(), equalTo(SourceDescriptionFactory.create(
      stream1,
      true,
      ImmutableList.of(),
      ImmutableList.of(),
      Optional.empty(),
      ImmutableList.of(),
      ImmutableList.of(),
      new MetricCollectors())));
  assertThat(description.getWarnings(), contains(new KsqlWarning("Error from Kafka: unknown topic: STREAM1")));
}
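The final assertion pins the exact warning text. If the message format is not itself the contract under test, a looser check on just the topic name is less brittle; a sketch, assuming KsqlWarning exposes getMessage:

  // Sketch: assert only that the warning names the missing topic.
  // KsqlWarning#getMessage is an assumption; containsString is the same
  // hamcrest matcher used elsewhere in these tests.
  assertThat(description.getWarnings().get(0).getMessage(), containsString("STREAM1"));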
Use of io.confluent.ksql.services.ServiceContext in project ksql by confluentinc.
From class KsqlEngineTest, method shouldHandleMultipleStatements.
@SuppressWarnings("unchecked")
@Test
public void shouldHandleMultipleStatements() {
  // Given:
  final String sql = ""
      + "-- single line comment\n"
      + "/*\n" + " Multi-line comment\n" + "*/\n"
      + "CREATE STREAM S0 (a INT, b VARCHAR) "
      + " WITH (kafka_topic='s0_topic', value_format='DELIMITED', key_format='KAFKA');\n" + "\n"
      + "CREATE TABLE T1 (f0 BIGINT PRIMARY KEY, f1 DOUBLE) "
      + " WITH (kafka_topic='t1_topic', value_format='JSON', key_format='KAFKA');\n" + "\n"
      + "CREATE STREAM S1 AS SELECT * FROM S0;\n" + "\n"
      + "CREATE STREAM S2 AS SELECT * FROM S0;\n" + "\n"
      + "DROP TABLE T1;";
  givenTopicsExist("s0_topic", "t1_topic");
  final List<QueryMetadata> queries = new ArrayList<>();

  // When:
  final List<PreparedStatement<?>> preparedStatements = ksqlEngine.parse(sql).stream()
      .map(stmt -> {
        final PreparedStatement<?> prepared = ksqlEngine.prepare(stmt);
        final ExecuteResult result = ksqlEngine.execute(
            serviceContext,
            ConfiguredStatement.of(prepared, SessionConfig.of(ksqlConfig, new HashMap<>())));
        result.getQuery().ifPresent(queries::add);
        return prepared;
      })
      .collect(Collectors.toList());

  // Then:
  final List<?> statements = preparedStatements.stream()
      .map(PreparedStatement::getStatement)
      .collect(Collectors.toList());
  assertThat(statements, contains(
      instanceOf(CreateStream.class),
      instanceOf(CreateTable.class),
      instanceOf(CreateStreamAsSelect.class),
      instanceOf(CreateStreamAsSelect.class),
      instanceOf(DropTable.class)));
  assertThat(queries, hasSize(2));
}
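The test exercises the engine's standard three-step pipeline: parse splits the script into statements, prepare resolves each one against the metastore, and execute runs it under the given ServiceContext. A minimal sketch of the same pipeline for a single statement, outside the stream/collect plumbing (ksqlEngine, serviceContext, and ksqlConfig assumed configured as in the test; ParsedStatement from the parser package and QueryMetadata#start are assumptions not shown above):

  // Sketch: parse -> prepare -> execute for one statement.
  final ParsedStatement parsed = ksqlEngine.parse("CREATE STREAM S3 AS SELECT * FROM S0;").get(0);
  final PreparedStatement<?> prepared = ksqlEngine.prepare(parsed);
  final ExecuteResult result = ksqlEngine.execute(
      serviceContext,
      ConfiguredStatement.of(prepared, SessionConfig.of(ksqlConfig, new HashMap<>())));
  // Persistent queries come back via ExecuteResult#getQuery and are started
  // explicitly; the two CSAS statements in the test produce the two queries asserted.
  result.getQuery().ifPresent(QueryMetadata::start);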
Use of io.confluent.ksql.services.ServiceContext in project ksql by confluentinc.
From class TestExecutor, method buildAndExecuteQuery.
public void buildAndExecuteQuery(final TestCase testCase, final TestExecutionListener listener) {
  topicInfoCache.clear();
  final KsqlConfig ksqlConfig = testCase.applyPersistedProperties(new KsqlConfig(config));
  try {
    System.setProperty(RuntimeBuildContext.KSQL_TEST_TRACK_SERDE_TOPICS, "true");
    final List<TopologyTestDriverContainer> topologyTestDrivers =
        topologyBuilder.buildStreamsTopologyTestDrivers(
            testCase, serviceContext, ksqlEngine, ksqlConfig, kafka, listener);
    writeInputIntoTopics(testCase.getInputRecords(), kafka);
    final Set<String> inputTopics = testCase.getInputRecords().stream()
        .map(Record::getTopicName)
        .collect(Collectors.toSet());
    for (final TopologyTestDriverContainer topologyTestDriverContainer : topologyTestDrivers) {
      if (validateResults) {
        verifyTopology(testCase);
      }
      final Set<String> topicsFromInput = topologyTestDriverContainer.getSourceTopicNames().stream()
          .filter(inputTopics::contains)
          .collect(Collectors.toSet());
      final Set<String> topicsFromKafka = topologyTestDriverContainer.getSourceTopicNames().stream()
          .filter(topicName -> !inputTopics.contains(topicName))
          .collect(Collectors.toSet());
      if (!topicsFromInput.isEmpty()) {
        pipeRecordsFromProvidedInput(testCase, topologyTestDriverContainer);
      }
      for (final String kafkaTopic : topicsFromKafka) {
        pipeRecordsFromKafka(kafkaTopic, topologyTestDriverContainer);
      }
      topologyTestDriverContainer.getTopologyTestDriver()
          .producedTopicNames()
          .forEach(topicInfoCache::get);
    }
    verifyOutput(testCase);
    testCase.expectedException().ifPresent(ee -> {
      throw new AssertionError("Expected test to throw: " + StringDescription.toString(ee));
    });
    kafka.getAllTopics().stream().map(Topic::getName).forEach(topicInfoCache::get);
    final List<PostTopicNode> knownTopics = topicInfoCache.all().stream()
        .map(ti -> {
          final Topic topic = kafka.getTopic(ti.getTopicName());
          final OptionalInt partitions = topic == null
              ? OptionalInt.empty()
              : OptionalInt.of(topic.getNumPartitions());
          final Optional<SchemaMetadata> keyMetadata = SchemaRegistryUtil.getLatestSchema(
              serviceContext.getSchemaRegistryClient(), ti.getTopicName(), true);
          final Optional<SchemaMetadata> valueMetadata = SchemaRegistryUtil.getLatestSchema(
              serviceContext.getSchemaRegistryClient(), ti.getTopicName(), false);
          return new PostTopicNode(
              ti.getTopicName(),
              ti.getKeyFormat(),
              ti.getValueFormat(),
              partitions,
              fromSchemaMetadata(keyMetadata),
              fromSchemaMetadata(valueMetadata));
        })
        .collect(Collectors.toList());
    final List<SourceNode> knownSources = ksqlEngine.getMetaStore().getAllDataSources().values().stream()
        .map(SourceNode::fromDataSource)
        .collect(Collectors.toList());
    if (validateResults) {
      testCase.getPostConditions().verify(ksqlEngine.getMetaStore(), knownTopics);
    }
    listener.runComplete(knownTopics, knownSources);
  } catch (final RuntimeException e) {
    final Optional<Matcher<Throwable>> expectedExceptionMatcher = testCase.expectedException();
    if (!expectedExceptionMatcher.isPresent()) {
      throw e;
    }
    assertThat(e, isThrowable(expectedExceptionMatcher.get()));
  } finally {
    System.clearProperty(RuntimeBuildContext.KSQL_TEST_TRACK_SERDE_TOPICS);
  }
}
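One detail worth isolating: the serde-topic tracking flag is a JVM-wide system property, so it is set inside the try and cleared in the finally to avoid leaking into other tests, while expected exceptions are matched in the catch block and rethrown when none was expected. A reduced sketch of the set/clear guard on its own (the property name is the real RuntimeBuildContext constant from above; the body is elided):

  // Sketch: always clear a JVM-wide flag, even when the run throws.
  System.setProperty(RuntimeBuildContext.KSQL_TEST_TRACK_SERDE_TOPICS, "true");
  try {
    // ... build topology test drivers, pipe records, verify output ...
  } finally {
    System.clearProperty(RuntimeBuildContext.KSQL_TEST_TRACK_SERDE_TOPICS);
  }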