Use of io.confluent.ksql.api.client.SourceDescription in project ksql by confluentinc.
The class DestroyMigrationsCommand, method deleteMigrationsTable:
private boolean deleteMigrationsTable(final Client ksqlClient, final String tableName) {
  try {
    // Nothing to clean up if the metadata table was never created.
    if (!sourceExists(ksqlClient, tableName, true)) {
      LOGGER.info("Metadata table does not exist. Skipping cleanup.");
      return true;
    }

    // Terminate the query populating the table before dropping the source.
    final SourceDescription tableInfo = getSourceInfo(ksqlClient, tableName, true);
    terminateQueryForTable(ksqlClient, tableInfo);
    dropSource(ksqlClient, tableName, true);

    return true;
  } catch (MigrationException e) {
    LOGGER.error(e.getMessage());
    return false;
  }
}
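The helpers called above (sourceExists, getSourceInfo, terminateQueryForTable, dropSource) are defined elsewhere in the migrations tool and are not shown here. As a rough, hypothetical sketch of the terminate step, not the project's actual implementation, a helper could use the write queries reported by SourceDescription and terminate each one by ID via the client:

// Hypothetical sketch: stop every persistent query writing into the metadata table.
// Assumes Client.executeStatement() and QueryInfo.getId() from the Java client API,
// and a MigrationException(String) constructor.
private static void terminateWriteQueries(final Client ksqlClient, final SourceDescription tableInfo) {
  for (final QueryInfo query : tableInfo.writeQueries()) {
    try {
      // TERMINATE <queryId>; stops the persistent query before the table is dropped.
      ksqlClient.executeStatement("TERMINATE " + query.getId() + ";").get();
    } catch (InterruptedException | ExecutionException e) {
      throw new MigrationException(
          "Failed to terminate query " + query.getId() + ": " + e.getMessage());
    }
  }
}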
Use of io.confluent.ksql.api.client.SourceDescription in project ksql by confluentinc.
The class ClientImpl, method describeSource:
@Override
public CompletableFuture<SourceDescription> describeSource(final String sourceName) {
  final CompletableFuture<SourceDescription> cf = new CompletableFuture<>();

  // Issue a "describe <source>;" statement against the /ksql endpoint and parse
  // the single-entity response into a SourceDescription.
  makePostRequest(
      KSQL_ENDPOINT,
      new JsonObject()
          .put("ksql", "describe " + sourceName + ";")
          .put("sessionVariables", sessionVariables),
      cf,
      response -> handleSingleEntityResponse(
          response, cf, AdminResponseHandlers::handleDescribeSourceResponse));

  return cf;
}
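A short usage sketch of this API from an application (host, port, and stream name are illustrative):

// Minimal sketch: connect to a ksqlDB server and describe a source by name.
import io.confluent.ksql.api.client.Client;
import io.confluent.ksql.api.client.ClientOptions;
import io.confluent.ksql.api.client.SourceDescription;

public final class DescribeSourceExample {
  public static void main(final String[] args) throws Exception {
    final ClientOptions options = ClientOptions.create()
        .setHost("localhost")   // illustrative host
        .setPort(8088);         // illustrative port
    final Client client = Client.create(options);

    final SourceDescription description = client.describeSource("MY_STREAM").get();
    System.out.println(description.name() + " is a " + description.type()
        + " backed by topic " + description.topic());

    client.close();
  }
}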
Use of io.confluent.ksql.api.client.SourceDescription in project ksql by confluentinc.
The class ClientIntegrationTest, method shouldDescribeSource:
@Test
public void shouldDescribeSource() throws Exception {
  // When
  final SourceDescription description = client.describeSource(TEST_STREAM).get();

  // Then
  assertThat(description.name(), is(TEST_STREAM));
  assertThat(description.type(), is("STREAM"));
  assertThat(description.fields(), hasSize(TEST_COLUMN_NAMES.size()));
  for (int i = 0; i < TEST_COLUMN_NAMES.size(); i++) {
    assertThat(description.fields().get(i).name(), is(TEST_COLUMN_NAMES.get(i)));
    assertThat(description.fields().get(i).type().getType(), is(TEST_COLUMN_TYPES.get(i).getType()));
    final boolean isKey = TEST_COLUMN_NAMES.get(i).equals(TEST_DATA_PROVIDER.key());
    assertThat(description.fields().get(i).isKey(), is(isKey));
  }
  assertThat(description.topic(), is(TEST_TOPIC));
  assertThat(description.keyFormat(), is("JSON"));
  assertThat(description.valueFormat(), is("JSON"));
  assertThat(description.readQueries(), hasSize(1));
  assertThat(description.readQueries().get(0).getQueryType(), is(QueryType.PERSISTENT));
  assertThat(description.readQueries().get(0).getId(), startsWith("CTAS_" + AGG_TABLE));
  assertThat(description.readQueries().get(0).getSql(), is(
      "CREATE TABLE " + AGG_TABLE + " WITH (KAFKA_TOPIC='" + AGG_TABLE + "', PARTITIONS=1, REPLICAS=1) AS SELECT\n"
          + " " + TEST_STREAM + ".K K,\n"
          + " LATEST_BY_OFFSET(" + TEST_STREAM + ".LONG) LONG\n"
          + "FROM " + TEST_STREAM + " " + TEST_STREAM + "\n"
          + "GROUP BY " + TEST_STREAM + ".K\n"
          + "EMIT CHANGES;"));
  assertThat(description.readQueries().get(0).getSink(), is(Optional.of(AGG_TABLE)));
  assertThat(description.readQueries().get(0).getSinkTopic(), is(Optional.of(AGG_TABLE)));
  assertThat(description.writeQueries(), hasSize(0));
  assertThat(description.timestampColumn(), is(Optional.empty()));
  assertThat(description.windowType(), is(Optional.empty()));
  assertThat(description.sqlStatement(), is(
      "CREATE STREAM " + TEST_STREAM + " (K STRUCT<F1 ARRAY<STRING>> KEY, STR STRING, LONG BIGINT, DEC DECIMAL(4, 2), "
          + "BYTES_ BYTES, ARRAY ARRAY<STRING>, MAP MAP<STRING, STRING>, STRUCT STRUCT<F1 INTEGER>, "
          + "COMPLEX STRUCT<`DECIMAL` DECIMAL(2, 1), STRUCT STRUCT<F1 STRING, F2 INTEGER>, "
          + "ARRAY_ARRAY ARRAY<ARRAY<STRING>>, ARRAY_STRUCT ARRAY<STRUCT<F1 STRING>>, "
          + "ARRAY_MAP ARRAY<MAP<STRING, INTEGER>>, MAP_ARRAY MAP<STRING, ARRAY<STRING>>, "
          + "MAP_MAP MAP<STRING, MAP<STRING, INTEGER>>, MAP_STRUCT MAP<STRING, STRUCT<F1 STRING>>>, "
          + "TIMESTAMP TIMESTAMP, DATE DATE, TIME TIME, HEAD BYTES HEADER('h0')) "
          + "WITH (KAFKA_TOPIC='STRUCTURED_TYPES_TOPIC', KEY_FORMAT='JSON', VALUE_FORMAT='JSON');"));
}
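The accessors asserted on above can also be used to inspect a schema programmatically; a minimal sketch (the method name is illustrative, assuming io.confluent.ksql.api.client.FieldInfo) that prints each column of a SourceDescription:

// Print every column's name, SQL type, and whether it is part of the key.
static void printSchema(final SourceDescription description) {
  for (final FieldInfo field : description.fields()) {
    System.out.printf("%s %s%s%n",
        field.name(),
        field.type().getType(),
        field.isKey() ? " (key)" : "");
  }
}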