Use of io.confluent.ksql.engine.KsqlEngine in project ksql by confluentinc.
From the class ListQueriesExecutorTest, method shouldIncludeUnresponsiveIfShowQueriesExtendedErrorResponse:
@Test
public void shouldIncludeUnresponsiveIfShowQueriesExtendedErrorResponse() {
    // Given
    when(sessionProperties.getInternalRequest()).thenReturn(false);
    // `engine` here is still the test's engine fixture; the local mock declared
    // below shadows it for the rest of the method.
    final ConfiguredStatement<ListQueries> showQueries =
        (ConfiguredStatement<ListQueries>) engine.configure("SHOW QUERIES EXTENDED;");
    final PersistentQueryMetadata metadata = givenPersistentQuery("id", RUNNING_QUERY_STATE);
    final KsqlEngine engine = mock(KsqlEngine.class);
    when(engine.getAllLiveQueries()).thenReturn(ImmutableList.of(metadata));
    when(engine.getPersistentQueries()).thenReturn(ImmutableList.of(metadata));
    when(response.isErroneous()).thenReturn(true);
    when(response.getErrorMessage()).thenReturn(new KsqlErrorMessage(10000, "error"));
    final Map<KsqlHostInfoEntity, KsqlQueryStatus> map = new HashMap<>();
    map.put(LOCAL_KSQL_HOST_INFO_ENTITY, KsqlQueryStatus.RUNNING);
    map.put(REMOTE_KSQL_HOST_INFO_ENTITY, KsqlQueryStatus.UNRESPONSIVE);

    // When
    final QueryDescriptionList queries = (QueryDescriptionList) CUSTOM_EXECUTORS.listQueries()
        .execute(showQueries, sessionProperties, engine, serviceContext)
        .getEntity()
        .orElseThrow(IllegalStateException::new);

    // Then
    assertThat(
        queries.getQueryDescriptions(),
        containsInAnyOrder(QueryDescriptionFactory.forQueryMetadata(metadata, map)));
}
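The test above hinges on stubbing the engine's two query-listing accessors so the executor sees a known query. A minimal, self-contained sketch of just that stubbing, assuming only the Mockito API and the KsqlEngine accessors already used in the test (the helper class and method names are illustrative):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import com.google.common.collect.ImmutableList;
import io.confluent.ksql.engine.KsqlEngine;
import io.confluent.ksql.util.PersistentQueryMetadata;

final class EngineStubs {

    // Returns a mocked engine that reports exactly one live, persistent query.
    static KsqlEngine engineWithSingleQuery(final PersistentQueryMetadata metadata) {
        final KsqlEngine engine = mock(KsqlEngine.class);
        when(engine.getAllLiveQueries()).thenReturn(ImmutableList.of(metadata));
        when(engine.getPersistentQueries()).thenReturn(ImmutableList.of(metadata));
        return engine;
    }
}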
Use of io.confluent.ksql.engine.KsqlEngine in project ksql by confluentinc.
From the class ApiIntegrationTest, method shouldExecutePushQueryFromLatestOffset:
@Test
public void shouldExecutePushQueryFromLatestOffset() {
    KsqlEngine engine = (KsqlEngine) REST_APP.getEngine();
    // One persistent query for the agg table
    assertThatEventually(engine::numberOfLiveQueries, is(1));

    // Given:
    String sql = "SELECT * from " + TEST_STREAM + " EMIT CHANGES LIMIT 1;";
    // Create a write stream to capture the incomplete response
    ReceiveStream writeStream = new ReceiveStream(vertx);

    // Make the request to stream a query
    JsonObject queryProperties = new JsonObject().put("auto.offset.reset", "latest");
    JsonObject queryRequestBody = new JsonObject()
        .put("sql", sql)
        .put("properties", queryProperties);
    VertxCompletableFuture<HttpResponse<Void>> responseFuture = new VertxCompletableFuture<>();
    client.post("/query-stream")
        .as(BodyCodec.pipe(writeStream))
        .sendJsonObject(queryRequestBody, responseFuture);
    assertThatEventually(engine::numberOfLiveQueries, is(2));

    // New row to insert
    JsonObject row = new JsonObject()
        .put("K", new JsonObject()
            .put("F1", new JsonArray().add("my_key_shouldExecutePushQueryFromLatestOffset")))
        .put("STR", "Value_shouldExecutePushQueryFromLatestOffset")
        .put("LONG", 2000L)
        // JsonObject does not accept BigDecimal
        .put("DEC", 12.34)
        .put("BYTES_", new byte[] { 0, 1, 2 })
        .put("ARRAY", new JsonArray().add("a_shouldExecutePushQueryFromLatestOffset"))
        .put("MAP", new JsonObject().put("k1", "v1_shouldExecutePushQueryFromLatestOffset"))
        .put("STRUCT", new JsonObject().put("F1", 3))
        .put("COMPLEX", COMPLEX_FIELD_VALUE);

    // Insert a new row and wait for it to arrive
    assertThatEventually(() -> {
        try {
            // Attempt the insert multiple times, in case the query hasn't started yet
            shouldInsert(row);
            Buffer buff = writeStream.getBody();
            QueryResponse queryResponse = new QueryResponse(buff.toString());
            return queryResponse.rows.size();
        } catch (Throwable t) {
            return Integer.MAX_VALUE;
        }
    }, is(1));

    // Verify that the received row is the expected one
    Buffer buff = writeStream.getBody();
    QueryResponse queryResponse = new QueryResponse(buff.toString());
    assertThat(queryResponse.rows.get(0).getJsonObject(0),
        is(new JsonObject().put("F1", new JsonArray().add("my_key_shouldExecutePushQueryFromLatestOffset"))));
    assertThat(queryResponse.rows.get(0).getString(1), is("Value_shouldExecutePushQueryFromLatestOffset"));
    assertThat(queryResponse.rows.get(0).getLong(2), is(2000L));
    assertThat(queryResponse.rows.get(0).getDouble(3), is(12.34));
    assertThat(queryResponse.rows.get(0).getBinary(4), is(new byte[] { 0, 1, 2 }));
    assertThat(queryResponse.rows.get(0).getJsonArray(5), is(new JsonArray().add("a_shouldExecutePushQueryFromLatestOffset")));
    assertThat(queryResponse.rows.get(0).getJsonObject(6), is(new JsonObject().put("k1", "v1_shouldExecutePushQueryFromLatestOffset")));
    assertThat(queryResponse.rows.get(0).getJsonObject(7), is(new JsonObject().put("F1", 3)));
    assertThat(queryResponse.rows.get(0).getJsonObject(8), is(COMPLEX_FIELD_VALUE));

    // Check that the query is cleaned up on the server
    assertThatEventually(engine::numberOfLiveQueries, is(1));
}
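The shape of the /query-stream request body is what makes this work: a "sql" string plus a "properties" map. A small sketch of just that construction, with the stream name parameterized; only Vert.x's JsonObject is assumed, and the helper class and method names are illustrative:

import io.vertx.core.json.JsonObject;

final class PushQueryRequests {

    // Builds a /query-stream body; "auto.offset.reset" = "latest" makes the
    // push query see only rows produced after it starts.
    static JsonObject latestOffsetPushQuery(final String streamName) {
        final JsonObject properties = new JsonObject().put("auto.offset.reset", "latest");
        return new JsonObject()
            .put("sql", "SELECT * FROM " + streamName + " EMIT CHANGES LIMIT 1;")
            .put("properties", properties);
    }
}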
Use of io.confluent.ksql.engine.KsqlEngine in project ksql by confluentinc.
From the class TestExecutor, method buildAndExecuteQuery:
public void buildAndExecuteQuery(final TestCase testCase, final TestExecutionListener listener) {
    topicInfoCache.clear();
    final KsqlConfig ksqlConfig = testCase.applyPersistedProperties(new KsqlConfig(config));
    try {
        System.setProperty(RuntimeBuildContext.KSQL_TEST_TRACK_SERDE_TOPICS, "true");
        final List<TopologyTestDriverContainer> topologyTestDrivers = topologyBuilder
            .buildStreamsTopologyTestDrivers(testCase, serviceContext, ksqlEngine, ksqlConfig, kafka, listener);
        writeInputIntoTopics(testCase.getInputRecords(), kafka);
        final Set<String> inputTopics = testCase.getInputRecords().stream()
            .map(Record::getTopicName)
            .collect(Collectors.toSet());
        for (final TopologyTestDriverContainer topologyTestDriverContainer : topologyTestDrivers) {
            if (validateResults) {
                verifyTopology(testCase);
            }
            // Partition the driver's source topics into those fed from the test case's
            // input records and those fed from the stubbed Kafka service.
            final Set<String> topicsFromInput = topologyTestDriverContainer.getSourceTopicNames().stream()
                .filter(inputTopics::contains)
                .collect(Collectors.toSet());
            final Set<String> topicsFromKafka = topologyTestDriverContainer.getSourceTopicNames().stream()
                .filter(topicName -> !inputTopics.contains(topicName))
                .collect(Collectors.toSet());
            if (!topicsFromInput.isEmpty()) {
                pipeRecordsFromProvidedInput(testCase, topologyTestDriverContainer);
            }
            for (final String kafkaTopic : topicsFromKafka) {
                pipeRecordsFromKafka(kafkaTopic, topologyTestDriverContainer);
            }
            topologyTestDriverContainer.getTopologyTestDriver()
                .producedTopicNames()
                .forEach(topicInfoCache::get);
        }
        verifyOutput(testCase);
        testCase.expectedException().map(ee -> {
            throw new AssertionError("Expected test to throw " + StringDescription.toString(ee));
        });
        kafka.getAllTopics().stream().map(Topic::getName).forEach(topicInfoCache::get);
        final List<PostTopicNode> knownTopics = topicInfoCache.all().stream()
            .map(ti -> {
                final Topic topic = kafka.getTopic(ti.getTopicName());
                final OptionalInt partitions = topic == null
                    ? OptionalInt.empty()
                    : OptionalInt.of(topic.getNumPartitions());
                final Optional<SchemaMetadata> keyMetadata = SchemaRegistryUtil.getLatestSchema(
                    serviceContext.getSchemaRegistryClient(), ti.getTopicName(), true);
                final Optional<SchemaMetadata> valueMetadata = SchemaRegistryUtil.getLatestSchema(
                    serviceContext.getSchemaRegistryClient(), ti.getTopicName(), false);
                return new PostTopicNode(ti.getTopicName(), ti.getKeyFormat(), ti.getValueFormat(),
                    partitions, fromSchemaMetadata(keyMetadata), fromSchemaMetadata(valueMetadata));
            })
            .collect(Collectors.toList());
        final List<SourceNode> knownSources = ksqlEngine.getMetaStore().getAllDataSources().values().stream()
            .map(SourceNode::fromDataSource)
            .collect(Collectors.toList());
        if (validateResults) {
            testCase.getPostConditions().verify(ksqlEngine.getMetaStore(), knownTopics);
        }
        listener.runComplete(knownTopics, knownSources);
    } catch (final RuntimeException e) {
        final Optional<Matcher<Throwable>> expectedExceptionMatcher = testCase.expectedException();
        if (!expectedExceptionMatcher.isPresent()) {
            throw e;
        }
        assertThat(e, isThrowable(expectedExceptionMatcher.get()));
    } finally {
        System.clearProperty(RuntimeBuildContext.KSQL_TEST_TRACK_SERDE_TOPICS);
    }
}
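One detail worth isolating is the serde-topic tracking flag: it is a JVM-wide system property, so it is set before the run and cleared in a finally block to keep one test case from leaking state into the next. A generic sketch of that set-then-restore pattern; the constant's literal value here is an assumption standing in for RuntimeBuildContext.KSQL_TEST_TRACK_SERDE_TOPICS, and the helper name is illustrative:

import java.util.function.Supplier;

final class SerdeTopicTracking {

    // Placeholder value; the executor above reads RuntimeBuildContext.KSQL_TEST_TRACK_SERDE_TOPICS.
    private static final String TRACK_SERDE_TOPICS = "ksql.test.track.serde.topics";

    static <T> T withSerdeTopicTracking(final Supplier<T> body) {
        System.setProperty(TRACK_SERDE_TOPICS, "true");
        try {
            return body.get();                        // run the test body with tracking enabled
        } finally {
            System.clearProperty(TRACK_SERDE_TOPICS); // always restore global JVM state
        }
    }
}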
Use of io.confluent.ksql.engine.KsqlEngine in project ksql by confluentinc.
From the class InteractiveStatementExecutorTest, method setUp:
@Before
public void setUp() {
    ksqlConfig = KsqlConfigTestUtil.create(
        CLUSTER, ImmutableMap.of(StreamsConfig.APPLICATION_SERVER_CONFIG, "http://host:1234"));
    final FakeKafkaTopicClient fakeKafkaTopicClient = new FakeKafkaTopicClient();
    fakeKafkaTopicClient.createTopic("pageview_topic", 1, (short) 1, emptyMap());
    fakeKafkaTopicClient.createTopic("foo", 1, (short) 1, emptyMap());
    fakeKafkaTopicClient.createTopic("pageview_topic_json", 1, (short) 1, emptyMap());
    serviceContext = TestServiceContext.create(fakeKafkaTopicClient);
    final SpecificQueryIdGenerator hybridQueryIdGenerator = new SpecificQueryIdGenerator();
    final MetricCollectors metricCollectors = new MetricCollectors();
    ksqlEngine = KsqlEngineTestUtil.createKsqlEngine(
        serviceContext,
        new MetaStoreImpl(new InternalFunctionRegistry()),
        (engine) -> new KsqlEngineMetrics("", engine, Collections.emptyMap(), Optional.empty(), metricCollectors),
        hybridQueryIdGenerator,
        ksqlConfig,
        metricCollectors);
    statementParser = new StatementParser(ksqlEngine);
    statementExecutor = new InteractiveStatementExecutor(
        serviceContext,
        ksqlEngine,
        statementParser,
        hybridQueryIdGenerator,
        InternalTopicSerdes.deserializer(Command.class));
    statementExecutorWithMocks = new InteractiveStatementExecutor(
        serviceContext, mockEngine, mockParser, mockQueryIdGenerator, commandDeserializer);
    statementExecutor.configure(ksqlConfig);
    statementExecutorWithMocks.configure(ksqlConfig);
    plannedCommand = new Command(
        CREATE_STREAM_FOO_STATEMENT,
        emptyMap(),
        ksqlConfig.getAllConfigPropsWithSecretsObfuscated(),
        Optional.of(plan));
}
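The three createTopic calls above differ only in the topic name. A small helper sketch of that repetition, assuming the same FakeKafkaTopicClient.createTopic(name, partitions, replicationFactor, configs) signature used above; the import path, helper class, and method name are assumptions:

import static java.util.Collections.emptyMap;

import io.confluent.ksql.services.FakeKafkaTopicClient;

final class TopicFixtures {

    // Creates each named topic with 1 partition, replication factor 1, and no extra configs.
    static void createSinglePartitionTopics(final FakeKafkaTopicClient client, final String... names) {
        for (final String name : names) {
            client.createTopic(name, 1, (short) 1, emptyMap());
        }
    }
}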
Use of io.confluent.ksql.engine.KsqlEngine in project ksql by confluentinc.
From the class ExplainExecutorTest, method shouldExplainQueryId:
@Test
public void shouldExplainQueryId() {
    // Given:
    final ConfiguredStatement<Explain> explain =
        (ConfiguredStatement<Explain>) engine.configure("EXPLAIN id;");
    final PersistentQueryMetadata metadata = givenPersistentQuery("id");
    // The local mock shadows the test's engine fixture used on the lines above;
    // the fixture is still reachable as this.engine below.
    final KsqlEngine engine = mock(KsqlEngine.class);
    when(engine.getPersistentQuery(metadata.getQueryId())).thenReturn(Optional.of(metadata));

    // When:
    final QueryDescriptionEntity query = (QueryDescriptionEntity) customExecutors.explain()
        .execute(explain, sessionProperties, engine, this.engine.getServiceContext())
        .getEntity()
        .orElseThrow(IllegalStateException::new);

    // Then:
    assertThat(
        query.getQueryDescription(),
        equalTo(QueryDescriptionFactory.forQueryMetadata(
            metadata,
            Collections.singletonMap(new KsqlHostInfoEntity(LOCAL_HOST), STATE))));
}
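The executor resolves the id in "EXPLAIN id" through KsqlEngine.getPersistentQuery, which returns an Optional. A sketch of the lookup-or-fail shape that behavior implies; the helper class, method name, and error message are illustrative, not the executor's actual code:

import io.confluent.ksql.engine.KsqlEngine;
import io.confluent.ksql.query.QueryId;
import io.confluent.ksql.util.KsqlException;
import io.confluent.ksql.util.PersistentQueryMetadata;

final class QueryLookups {

    // Fails with a KsqlException when the engine knows no query with the given id.
    static PersistentQueryMetadata getQueryOrThrow(final KsqlEngine engine, final QueryId queryId) {
        return engine.getPersistentQuery(queryId)
            .orElseThrow(() -> new KsqlException("No query with id: " + queryId));
    }
}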