Use of io.confluent.ksql.execution.streams.materialization.MaterializationException in project ksql by confluentinc.
The class KsStateStore, method store.
<T> T store(final QueryableStoreType<T> queryableStoreType, final int partition) {
  try {
    final boolean enableStaleStores =
        ksqlConfig.getBoolean(KsqlConfig.KSQL_QUERY_PULL_ENABLE_STANDBY_READS);
    final boolean sharedRuntime = kafkaStreams instanceof KafkaStreamsNamedTopologyWrapper;
    // Shared runtimes scope the lookup to the query's named topology;
    // dedicated runtimes look the store up by name alone.
    final StoreQueryParameters<T> parameters = sharedRuntime
        ? NamedTopologyStoreQueryParameters
            .fromNamedTopologyAndStoreNameAndType(queryId, stateStoreName, queryableStoreType)
            .withPartition(partition)
        : StoreQueryParameters
            .fromNameAndType(stateStoreName, queryableStoreType)
            .withPartition(partition);
    return enableStaleStores
        ? kafkaStreams.store(parameters.enableStaleStores())
        : kafkaStreams.store(parameters);
  } catch (final Exception e) {
    final State state = kafkaStreams.state();
    if (state != State.RUNNING) {
      throw new NotRunningException("The query was not in a running state. state: " + state);
    }
    throw new MaterializationException("State store currently unavailable: " + stateStoreName, e);
  }
}
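For context, a caller would pass a standard Kafka Streams store type to obtain a read-only view of one partition. A minimal sketch, assuming a ksStateStore instance of the class above plus key and partition variables (none of which appear in the excerpt); QueryableStoreTypes.timestampedKeyValueStore() is the stock Kafka Streams factory:

// Hypothetical caller: look up a single key in one partition of the store.
// "ksStateStore", "key", and "partition" are assumed names for this sketch.
final ReadOnlyKeyValueStore<GenericKey, ValueAndTimestamp<GenericRow>> store =
    ksStateStore.store(QueryableStoreTypes.timestampedKeyValueStore(), partition);
final ValueAndTimestamp<GenericRow> value = store.get(key);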
Use of io.confluent.ksql.execution.streams.materialization.MaterializationException in project ksql by confluentinc.
The class KsMaterializedSessionTableIQv2, method get.
@Override
public KsMaterializedQueryResult<WindowedRow> get(
    final GenericKey key, final int partition,
    final Range<Instant> windowStart, final Range<Instant> windowEnd,
    final Optional<Position> position) {
  try {
    final WindowRangeQuery<GenericKey, GenericRow> query = WindowRangeQuery.withKey(key);
    StateQueryRequest<KeyValueIterator<Windowed<GenericKey>, GenericRow>> request =
        inStore(stateStore.getStateStoreName()).withQuery(query);
    if (position.isPresent()) {
      request = request.withPositionBound(PositionBound.at(position.get()));
    }
    final StateQueryResult<KeyValueIterator<Windowed<GenericKey>, GenericRow>> result =
        stateStore.getKafkaStreams().query(request);
    final QueryResult<KeyValueIterator<Windowed<GenericKey>, GenericRow>> queryResult =
        result.getPartitionResults().get(partition);
    if (queryResult.isFailure()) {
      throw failedQueryException(queryResult);
    }
    try (KeyValueIterator<Windowed<GenericKey>, GenericRow> it = queryResult.getResult()) {
      final Builder<WindowedRow> builder = ImmutableList.builder();
      while (it.hasNext()) {
        final KeyValue<Windowed<GenericKey>, GenericRow> next = it.next();
        final Window wnd = next.key.window();
        // WindowRangeQuery.withKey returns every session window for the key,
        // so both time bounds are applied client-side here.
        if (!windowStart.contains(wnd.startTime())) {
          continue;
        }
        if (!windowEnd.contains(wnd.endTime())) {
          continue;
        }
        final long rowTime = wnd.end();
        final WindowedRow row = WindowedRow.of(stateStore.schema(), next.key, next.value, rowTime);
        builder.add(row);
      }
      return KsMaterializedQueryResult.rowIteratorWithPosition(
          builder.build().iterator(), queryResult.getPosition());
    }
  } catch (final NotUpToBoundException | MaterializationException e) {
    throw e;
  } catch (final Exception e) {
    throw new MaterializationException("Failed to get value from materialized table", e);
  }
}
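The window bounds arrive as Guava Ranges, which lets callers express open, closed, or unbounded intervals uniformly. A hedged invocation sketch, assuming a "table" instance of the class above and illustrative bounds (Duration and the variable names are not from the excerpt):

// Hypothetical call: session windows that started within the last hour,
// any end time, no IQv2 position bound.
final Instant now = Instant.now();
final KsMaterializedQueryResult<WindowedRow> rows = table.get(
    key,
    partition,
    Range.closed(now.minus(Duration.ofHours(1)), now),  // windowStart bound
    Range.all(),                                        // windowEnd unbounded
    Optional.empty());                                  // no position bound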
Use of io.confluent.ksql.execution.streams.materialization.MaterializationException in project ksql by confluentinc.
The class KsMaterializedWindowTableIQv2, method get.
@Override
public KsMaterializedQueryResult<WindowedRow> get(
    final GenericKey key, final int partition,
    final Range<Instant> windowStartBounds, final Range<Instant> windowEndBounds,
    final Optional<Position> position) {
  try {
    final Instant lower = calculateLowerBound(windowStartBounds, windowEndBounds);
    final Instant upper = calculateUpperBound(windowStartBounds, windowEndBounds);
    final WindowKeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query =
        WindowKeyQuery.withKeyAndWindowStartRange(key, lower, upper);
    StateQueryRequest<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> request =
        inStore(stateStore.getStateStoreName()).withQuery(query);
    if (position.isPresent()) {
      request = request.withPositionBound(PositionBound.at(position.get()));
    }
    final KafkaStreams streams = stateStore.getKafkaStreams();
    final StateQueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> result =
        streams.query(request);
    final QueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> queryResult =
        result.getPartitionResults().get(partition);
    if (queryResult.isFailure()) {
      throw failedQueryException(queryResult);
    }
    if (queryResult.getResult() == null) {
      return KsMaterializedQueryResult.rowIteratorWithPosition(
          Collections.emptyIterator(), queryResult.getPosition());
    }
    try (WindowStoreIterator<ValueAndTimestamp<GenericRow>> it = queryResult.getResult()) {
      final Builder<WindowedRow> builder = ImmutableList.builder();
      while (it.hasNext()) {
        final KeyValue<Long, ValueAndTimestamp<GenericRow>> next = it.next();
        final Instant windowStart = Instant.ofEpochMilli(next.key);
        if (!windowStartBounds.contains(windowStart)) {
          continue;
        }
        final Instant windowEnd = windowStart.plus(windowSize);
        if (!windowEndBounds.contains(windowEnd)) {
          continue;
        }
        final TimeWindow window =
            new TimeWindow(windowStart.toEpochMilli(), windowEnd.toEpochMilli());
        final WindowedRow row = WindowedRow.of(
            stateStore.schema(), new Windowed<>(key, window),
            next.value.value(), next.value.timestamp());
        builder.add(row);
      }
      return KsMaterializedQueryResult.rowIteratorWithPosition(
          builder.build().iterator(), queryResult.getPosition());
    }
  } catch (final NotUpToBoundException | MaterializationException e) {
    throw e;
  } catch (final Exception e) {
    throw new MaterializationException("Failed to get value from materialized table", e);
  }
}
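The calculateLowerBound and calculateUpperBound helpers are not shown in this excerpt. Because a time window's end is always start plus windowSize, an end-time bound can be translated into an equivalent start-time bound and intersected with the explicit one; a plausible sketch of the lower-bound helper under that assumption (not the verified ksql implementation):

// Assumed sketch only: tightest start-time lower bound satisfying both ranges.
private Instant calculateLowerBound(
    final Range<Instant> startBounds, final Range<Instant> endBounds) {
  final Instant fromStart = startBounds.hasLowerBound()
      ? startBounds.lowerEndpoint() : Instant.ofEpochMilli(0);
  final Instant fromEnd = endBounds.hasLowerBound()
      ? endBounds.lowerEndpoint().minus(windowSize) : Instant.ofEpochMilli(0);
  return fromStart.isAfter(fromEnd) ? fromStart : fromEnd;  // take the tighter bound
}

The upper-bound helper would mirror this, taking the smaller of the two candidate upper endpoints.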
Use of io.confluent.ksql.execution.streams.materialization.MaterializationException in project ksql by confluentinc.
The class HARouting, method executeRounds.
private void executeRounds(
    final ServiceContext serviceContext, final PullPhysicalPlan pullPhysicalPlan,
    final ConfiguredStatement<Query> statement, final RoutingOptions routingOptions,
    final LogicalSchema outputSchema, final QueryId queryId,
    final List<KsqlPartitionLocation> locations, final PullQueryQueue pullQueryQueue,
    final CompletableFuture<Void> shouldCancelRequests,
    final Optional<ConsistencyOffsetVector> consistencyOffsetVector) throws InterruptedException {
  final ExecutorCompletionService<PartitionFetchResult> completionService =
      new ExecutorCompletionService<>(routerExecutorService);
  final int totalPartitions = locations.size();
  int processedPartitions = 0;
  final Map<Integer, List<Exception>> exceptionsPerPartition = new HashMap<>();
  // Round one: fetch every partition from its first candidate node.
  for (final KsqlPartitionLocation partition : locations) {
    final KsqlNode node = getNodeForRound(partition, routingOptions);
    pullQueryMetrics.ifPresent(queryExecutorMetrics ->
        queryExecutorMetrics.recordPartitionFetchRequest(1));
    completionService.submit(() -> routeQuery.routeQuery(
        node, partition, statement, serviceContext, routingOptions, pullQueryMetrics,
        pullPhysicalPlan, outputSchema, queryId, pullQueryQueue,
        shouldCancelRequests, consistencyOffsetVector));
  }
  while (processedPartitions < totalPartitions) {
    final Future<PartitionFetchResult> future = completionService.take();
    try {
      final PartitionFetchResult fetchResult = future.get();
      if (fetchResult.isError()) {
        // Record the failure, then resubmit the partition against the next
        // candidate node in its location list.
        exceptionsPerPartition
            .computeIfAbsent(fetchResult.location.getPartition(), v -> new ArrayList<>())
            .add(fetchResult.exception.get());
        final KsqlPartitionLocation nextRoundPartition = nextNode(fetchResult.getLocation());
        final KsqlNode node = getNodeForRound(nextRoundPartition, routingOptions);
        pullQueryMetrics.ifPresent(queryExecutorMetrics ->
            queryExecutorMetrics.recordResubmissionRequest(1));
        completionService.submit(() -> routeQuery.routeQuery(
            node, nextRoundPartition, statement, serviceContext, routingOptions,
            pullQueryMetrics, pullPhysicalPlan, outputSchema, queryId, pullQueryQueue,
            shouldCancelRequests, consistencyOffsetVector));
      } else {
        Preconditions.checkState(fetchResult.getResult() == RoutingResult.SUCCESS);
        processedPartitions++;
      }
    } catch (final Exception e) {
      // Give up entirely, attaching every per-partition failure seen so far.
      final MaterializationException exception =
          new MaterializationException("Unable to execute pull query: " + e.getMessage());
      for (final Entry<Integer, List<Exception>> entry : exceptionsPerPartition.entrySet()) {
        for (final Exception excp : entry.getValue()) {
          exception.addSuppressed(excp);
        }
      }
      throw exception;
    }
  }
  pullQueryQueue.close();
}
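The retry loop follows the standard ExecutorCompletionService discipline: every submit is eventually matched by exactly one take, and a failed attempt is resubmitted rather than counted as done. A stripped-down sketch of the same shape, where Partition, FetchResult, attempt, and executor are placeholder names and exception handling is elided:

// Generic submit/take/resubmit loop mirroring executeRounds above.
final ExecutorCompletionService<FetchResult> ecs =
    new ExecutorCompletionService<>(executor);
for (final Partition p : partitions) {
  ecs.submit(() -> attempt(p));                   // round one: one task per partition
}
int done = 0;
while (done < partitions.size()) {
  final FetchResult r = ecs.take().get();         // blocks until any attempt completes
  if (r.isError()) {
    ecs.submit(() -> attempt(r.nextCandidate())); // retry keeps submit/take balanced
  } else {
    done++;
  }
}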
Use of io.confluent.ksql.execution.streams.materialization.MaterializationException in project ksql by confluentinc.
The class HARouting, method handlePullQuery.
public CompletableFuture<Void> handlePullQuery(
    final ServiceContext serviceContext, final PullPhysicalPlan pullPhysicalPlan,
    final ConfiguredStatement<Query> statement, final RoutingOptions routingOptions,
    final LogicalSchema outputSchema, final QueryId queryId,
    final PullQueryQueue pullQueryQueue, final CompletableFuture<Void> shouldCancelRequests,
    final Optional<ConsistencyOffsetVector> consistencyOffsetVector) {
  final List<KsqlPartitionLocation> allLocations = pullPhysicalPlan.getMaterialization()
      .locator().locate(pullPhysicalPlan.getKeys(), routingOptions, routingFilterFactory,
          pullPhysicalPlan.getPlanType() == PullPhysicalPlanType.RANGE_SCAN);
  // Fail fast if any partition has no live host that passes the routing filters.
  final Map<Integer, List<Host>> emptyPartitions = allLocations.stream()
      .filter(loc -> loc.getNodes().stream().noneMatch(node -> node.getHost().isSelected()))
      .collect(Collectors.toMap(
          KsqlPartitionLocation::getPartition,
          loc -> loc.getNodes().stream().map(KsqlNode::getHost).collect(Collectors.toList())));
  if (!emptyPartitions.isEmpty()) {
    final MaterializationException materializationException = new MaterializationException(
        "Unable to execute pull query. " + emptyPartitions.entrySet().stream()
            .map(kv -> String.format("Partition %s failed to find valid host. Hosts scanned: %s",
                kv.getKey(), kv.getValue()))
            .collect(Collectors.joining(", ", "[", "]")));
    LOG.debug(materializationException.getMessage());
    throw materializationException;
  }
  // At this point we should filter out the hosts that we should not route to.
  final List<KsqlPartitionLocation> locations = allLocations.stream()
      .map(KsqlPartitionLocation::removeFilteredHosts)
      .collect(Collectors.toList());
  final CompletableFuture<Void> completableFuture = new CompletableFuture<>();
  coordinatorExecutorService.submit(() -> {
    try {
      executeRounds(serviceContext, pullPhysicalPlan, statement, routingOptions, outputSchema,
          queryId, locations, pullQueryQueue, shouldCancelRequests, consistencyOffsetVector);
      completableFuture.complete(null);
    } catch (final Throwable t) {
      completableFuture.completeExceptionally(t);
    }
  });
  return completableFuture;
}
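handlePullQuery returns immediately: rows stream into the PullQueryQueue while the returned future signals completion or failure of the routing rounds. A hypothetical consumer, with haRouting, plan, and shouldCancel as assumed names:

// Sketch of a caller observing the async result; any MaterializationException
// thrown by executeRounds surfaces through the future.
final CompletableFuture<Void> done = haRouting.handlePullQuery(
    serviceContext, plan, statement, routingOptions,
    outputSchema, queryId, pullQueryQueue, shouldCancel, Optional.empty());
done.whenComplete((v, t) -> {
  if (t != null) {
    LOG.error("Pull query {} failed", queryId, t);
  }
});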