Use of io.confluent.ksql.schema.ksql.LogicalSchema in project ksql by confluentinc.
The class HARouting, method handlePullQuery.
public CompletableFuture<Void> handlePullQuery(
    final ServiceContext serviceContext,
    final PullPhysicalPlan pullPhysicalPlan,
    final ConfiguredStatement<Query> statement,
    final RoutingOptions routingOptions,
    final LogicalSchema outputSchema,
    final QueryId queryId,
    final PullQueryQueue pullQueryQueue,
    final CompletableFuture<Void> shouldCancelRequests,
    final Optional<ConsistencyOffsetVector> consistencyOffsetVector
) {
  final List<KsqlPartitionLocation> allLocations = pullPhysicalPlan.getMaterialization()
      .locator()
      .locate(
          pullPhysicalPlan.getKeys(),
          routingOptions,
          routingFilterFactory,
          pullPhysicalPlan.getPlanType() == PullPhysicalPlanType.RANGE_SCAN);

  // Partitions for which no candidate host passed the routing filters.
  final Map<Integer, List<Host>> emptyPartitions = allLocations.stream()
      .filter(loc -> loc.getNodes().stream().noneMatch(node -> node.getHost().isSelected()))
      .collect(Collectors.toMap(
          KsqlPartitionLocation::getPartition,
          loc -> loc.getNodes().stream().map(KsqlNode::getHost).collect(Collectors.toList())));

  if (!emptyPartitions.isEmpty()) {
    final MaterializationException materializationException = new MaterializationException(
        "Unable to execute pull query. " + emptyPartitions.entrySet().stream()
            .map(kv -> String.format(
                "Partition %s failed to find valid host. Hosts scanned: %s",
                kv.getKey(), kv.getValue()))
            .collect(Collectors.joining(", ", "[", "]")));
    LOG.debug(materializationException.getMessage());
    throw materializationException;
  }

  // At this point, filter out the hosts that we should not route to.
  final List<KsqlPartitionLocation> locations = allLocations.stream()
      .map(KsqlPartitionLocation::removeFilteredHosts)
      .collect(Collectors.toList());

  // Run the routing rounds asynchronously so the caller can compose on the future.
  final CompletableFuture<Void> completableFuture = new CompletableFuture<>();
  coordinatorExecutorService.submit(() -> {
    try {
      executeRounds(serviceContext, pullPhysicalPlan, statement, routingOptions, outputSchema,
          queryId, locations, pullQueryQueue, shouldCancelRequests, consistencyOffsetVector);
      completableFuture.complete(null);
    } catch (Throwable t) {
      completableFuture.completeExceptionally(t);
    }
  });
  return completableFuture;
}
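The outputSchema passed to handlePullQuery is an ordinary LogicalSchema describing the key and value columns of the pull-query result. For reference, a minimal sketch of building one with the public LogicalSchema builder; the column names here are illustrative, not taken from this snippet:

import io.confluent.ksql.name.ColumnName;
import io.confluent.ksql.schema.ksql.LogicalSchema;
import io.confluent.ksql.schema.ksql.types.SqlTypes;

// Illustrative schema: one STRING key column and two value columns.
final LogicalSchema outputSchema = LogicalSchema.builder()
    .keyColumn(ColumnName.of("USERID"), SqlTypes.STRING)
    .valueColumn(ColumnName.of("KSQL_COL_0"), SqlTypes.BIGINT)
    .valueColumn(ColumnName.of("USERID_2"), SqlTypes.STRING)
    .build();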
Use of io.confluent.ksql.schema.ksql.LogicalSchema in project ksql by confluentinc.
The class HARouting, method streamedRowsHandler.
private static Consumer<List<StreamedRow>> streamedRowsHandler(
    final KsqlNode owner,
    final PullQueryQueue pullQueryQueue,
    final BiFunction<List<?>, LogicalSchema, PullQueryRow> rowFactory,
    final LogicalSchema outputSchema,
    final Optional<ConsistencyOffsetVector> consistencyOffsetVector
) {
  final AtomicInteger processedRows = new AtomicInteger(0);
  final AtomicReference<Header> header = new AtomicReference<>();
  return streamedRows -> {
    try {
      if (streamedRows == null || streamedRows.isEmpty()) {
        return;
      }
      final List<PullQueryRow> rows = new ArrayList<>();
      // If this is the first row overall, skip the header.
      final int previousProcessedRows = processedRows.getAndAdd(streamedRows.size());
      for (int i = 0; i < streamedRows.size(); i++) {
        final StreamedRow row = streamedRows.get(i);
        if (i == 0 && previousProcessedRows == 0) {
          final Optional<Header> optionalHeader = row.getHeader();
          optionalHeader.ifPresent(h -> validateSchema(outputSchema, h.getSchema(), owner));
          optionalHeader.ifPresent(header::set);
          continue;
        }
        if (row.getErrorMessage().isPresent()) {
          // If we receive an error that's not a network error, we let that bubble up.
          throw new KsqlException(row.getErrorMessage().get().getMessage());
        }
        if (!row.getRow().isPresent()) {
          parseNonDataRows(row, i, consistencyOffsetVector);
          continue;
        }
        final List<?> r = row.getRow().get().getColumns();
        Preconditions.checkNotNull(header.get());
        rows.add(rowFactory.apply(r, header.get().getSchema()));
      }
      if (!pullQueryQueue.acceptRows(rows)) {
        LOG.error("Failed to queue all rows");
      }
    } catch (Exception e) {
      throw new KsqlException(e.getMessage(), e);
    }
  };
}
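The validateSchema call above guards against a remote host streaming back rows whose schema differs from the one this node planned the query with. A minimal sketch of such a check, assuming a simple equality comparison; the actual signature and comparison in ksql may differ:

// Sketch only: fail fast if the remote host's schema does not match
// the schema the query was planned with.
private static void validateSchema(
    final LogicalSchema expectedSchema,
    final LogicalSchema actualSchema,
    final KsqlNode forHost
) {
  if (!actualSchema.equals(expectedSchema)) {
    throw new KsqlException(String.format(
        "Schemas do not match on host %s: expected %s, got %s",
        forHost, expectedSchema, actualSchema));
  }
}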
Use of io.confluent.ksql.schema.ksql.LogicalSchema in project ksql by confluentinc.
The class PlanSummary, method summarize.
private StepSummary summarize(final ExecutionStep<?> step, final String indent) {
  final StringBuilder stringBuilder = new StringBuilder();
  // Recursively summarize the source steps first, one indent level deeper.
  final List<StepSummary> sourceSummaries = step.getSources().stream()
      .map(s -> summarize(s, indent + "\t"))
      .collect(Collectors.toList());
  final String opName = OP_NAME.get(step.getClass());
  if (opName == null) {
    throw new UnsupportedOperationException(
        "Unsupported step type: " + step.getClass() + ", please add a step type");
  }
  final LogicalSchema schema = getSchema(step, sourceSummaries);
  stringBuilder.append(indent)
      .append(" > [ ").append(opName).append(" ]")
      .append(" | Schema: ").append(schema.toString(FORMAT_OPTIONS))
      .append(" | Logger: ")
      .append(QueryLoggerUtil.queryLoggerName(queryId, step.getProperties().getQueryContext()))
      .append("\n");
  for (final StepSummary sourceSummary : sourceSummaries) {
    stringBuilder.append("\t").append(indent).append(sourceSummary.summary);
  }
  return new StepSummary(schema, stringBuilder.toString());
}
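StepSummary itself is just a small holder pairing the schema computed for a step with that step's rendered summary text. A minimal sketch consistent with the usage above; the field names are inferred from the code, not confirmed against the source:

// Value class implied by the usage above: constructed as
// new StepSummary(schema, text) and read via sourceSummary.summary.
private static final class StepSummary {

  final LogicalSchema schema;
  final String summary;

  StepSummary(final LogicalSchema schema, final String summary) {
    this.schema = schema;
    this.summary = summary;
  }
}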
Use of io.confluent.ksql.schema.ksql.LogicalSchema in project ksql by confluentinc.
The class KsMaterializationFunctionalTest, method shouldQueryMaterializedTableWithKeyFieldsInProjection.
@Test
public void shouldQueryMaterializedTableWithKeyFieldsInProjection() {
  // Given:
  final PersistentQueryMetadata query = executeQuery(
      "CREATE TABLE " + output + " AS"
          + " SELECT USERID, COUNT(*), AS_VALUE(USERID) AS USERID_2 FROM " + USER_TABLE
          + " GROUP BY USERID;");
  final LogicalSchema schema = schema("KSQL_COL_0", SqlTypes.BIGINT, "USERID_2", SqlTypes.STRING);
  final Map<String, GenericRow> rows = waitForUniqueUserRows(STRING_DESERIALIZER, schema);

  // When:
  final Materialization materialization = query.getMaterialization(queryId, contextStacker).get();

  // Then:
  assertThat(materialization.windowType(), is(Optional.empty()));
  final MaterializedTable table = materialization.nonWindowed();
  rows.forEach((rowKey, value) -> {
    final GenericKey key = genericKey(rowKey);
    final List<Row> rowList = withRetry(() -> Lists.newArrayList(table.get(key, PARTITION)));
    assertThat(rowList.size(), is(1));
    assertThat(rowList.get(0).schema(), is(schema));
    assertThat(rowList.get(0).key(), is(key));
    assertThat(rowList.get(0).value(), is(value));
  });
}
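The schema(...) helper builds the LogicalSchema the test expects the materialized table to have. A plausible sketch, assuming the key column is always USERID STRING (the GROUP BY key) and the varargs alternate value-column names and SQL types; the real helper in the test class may differ:

import io.confluent.ksql.schema.ksql.types.SqlType;

// Assumed helper: key column is USERID; varargs alternate between a
// value column's name and its SqlType.
private static LogicalSchema schema(final Object... nameTypePairs) {
  final LogicalSchema.Builder builder = LogicalSchema.builder()
      .keyColumn(ColumnName.of("USERID"), SqlTypes.STRING);
  for (int i = 0; i < nameTypePairs.length; i += 2) {
    builder.valueColumn(
        ColumnName.of((String) nameTypePairs[i]),
        (SqlType) nameTypePairs[i + 1]);
  }
  return builder.build();
}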
Use of io.confluent.ksql.schema.ksql.LogicalSchema in project ksql by confluentinc.
The class KsMaterializationFunctionalTest, method shouldQueryMaterializedTableForAggregatedTable.
@Test
public void shouldQueryMaterializedTableForAggregatedTable() {
  // Given:
  final PersistentQueryMetadata query = executeQuery(
      "CREATE TABLE " + output + " AS"
          + " SELECT USERID, COUNT(*) FROM " + USER_TABLE
          + " GROUP BY USERID;");
  final LogicalSchema schema = schema("KSQL_COL_0", SqlTypes.BIGINT);
  final Map<String, GenericRow> rows = waitForUniqueUserRows(STRING_DESERIALIZER, schema);

  // When:
  final Materialization materialization = query.getMaterialization(queryId, contextStacker).get();

  // Then:
  assertThat(materialization.windowType(), is(Optional.empty()));
  final MaterializedTable table = materialization.nonWindowed();
  rows.forEach((rowKey, value) -> {
    final GenericKey key = genericKey(rowKey);
    final Iterator<Row> rowIterator = withRetry(() -> table.get(key, PARTITION));
    assertThat(rowIterator.hasNext(), is(true));
    final Row row = rowIterator.next();
    assertThat(row.schema(), is(schema));
    assertThat(row.key(), is(key));
    assertThat(row.value(), is(value));
  });

  final GenericKey key = genericKey("Won't find me");
  assertThat("unknown key", withRetry(() -> table.get(key, PARTITION).hasNext()), is(false));
}
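The withRetry helper tolerates the short window after a query starts during which the underlying state store is not yet queryable. A minimal sketch of such a helper; the retry count and backoff are illustrative, not taken from the test class:

import java.util.function.Supplier;

// Illustrative retry wrapper: retry briefly before letting the last
// failure propagate.
private static <T> T withRetry(final Supplier<T> supplier) {
  RuntimeException last = null;
  for (int attempt = 0; attempt < 10; attempt++) {
    try {
      return supplier.get();
    } catch (final RuntimeException e) {
      last = e;
      try {
        Thread.sleep(100L);
      } catch (final InterruptedException ie) {
        Thread.currentThread().interrupt();
        throw new RuntimeException(ie);
      }
    }
  }
  throw last;
}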