Use of io.confluent.ksql.execution.streams.materialization.Locator.KsqlPartitionLocation in project ksql by confluentinc.
From the class KsLocatorTest, method shouldReturnLocalOwnerIfSameAsSuppliedLocalHost.
@Test
public void shouldReturnLocalOwnerIfSameAsSuppliedLocalHost() {
  // Given:
  final HostInfo localHostInfo = new HostInfo(LOCAL_HOST_URL.getHost(), LOCAL_HOST_URL.getPort());
  final KsqlHostInfo localHost = locator.asKsqlHost(localHostInfo);
  getActiveAndStandbyMetadata(localHostInfo);
  when(activeFilter.filter(eq(localHost))).thenReturn(Host.include(localHost));
  when(livenessFilter.filter(eq(localHost))).thenReturn(Host.include(localHost));

  // When:
  final List<KsqlPartitionLocation> result = locator.locate(
      ImmutableList.of(KEY), routingOptions, routingFilterFactoryActive, false);

  // Then:
  List<KsqlNode> nodeList = result.get(0).getNodes();
  assertThat(nodeList.stream().findFirst().map(KsqlNode::isLocal), is(Optional.of(true)));
}
Use of io.confluent.ksql.execution.streams.materialization.Locator.KsqlPartitionLocation in project ksql by confluentinc.
From the class KsLocatorTest, method shouldReturnRemoteOwnerForDifferentHost.
@Test
public void shouldReturnRemoteOwnerForDifferentHost() {
  // Given:
  final HostInfo localHostInfo = new HostInfo("different", LOCAL_HOST_URL.getPort());
  final KsqlHostInfo localHost = locator.asKsqlHost(localHostInfo);
  getActiveAndStandbyMetadata(localHostInfo);
  when(activeFilter.filter(eq(localHost))).thenReturn(Host.include(localHost));
  when(livenessFilter.filter(eq(localHost))).thenReturn(Host.include(localHost));

  // When:
  final List<KsqlPartitionLocation> result = locator.locate(
      ImmutableList.of(KEY), routingOptions, routingFilterFactoryActive, false);

  // Then:
  List<KsqlNode> nodeList = result.get(0).getNodes();
  assertThat(nodeList.stream().findFirst().map(KsqlNode::isLocal), is(Optional.of(false)));
}
Use of io.confluent.ksql.execution.streams.materialization.Locator.KsqlPartitionLocation in project ksql by confluentinc.
From the class KsLocatorTest, method shouldReturnRemoteOwnerForDifferentPortOnLocalHost.
@Test
public void shouldReturnRemoteOwnerForDifferentPortOnLocalHost() {
  // Given:
  final HostInfo localHostInfo = new HostInfo("LOCALhost", LOCAL_HOST_URL.getPort() + 1);
  final KsqlHostInfo localHost = locator.asKsqlHost(localHostInfo);
  getActiveAndStandbyMetadata(localHostInfo);
  when(activeFilter.filter(eq(localHost))).thenReturn(Host.include(localHost));
  when(livenessFilter.filter(eq(localHost))).thenReturn(Host.include(localHost));

  // When:
  final List<KsqlPartitionLocation> result = locator.locate(
      ImmutableList.of(KEY), routingOptions, routingFilterFactoryActive, false);

  // Then:
  List<KsqlNode> nodeList = result.get(0).getNodes();
  assertThat(nodeList.stream().findFirst().map(KsqlNode::isLocal), is(Optional.of(false)));
}
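The three tests above all exercise the same decision: is the host that owns a partition the same node that is serving the query? Taken together they suggest that the host name is compared case-insensitively ("LOCALhost" still counts as the local host) while the port must match exactly. A minimal, self-contained sketch of that comparison, assuming it reduces to host-plus-port equality; the LocalityCheck class and isLocalHost helper are illustrative only, not ksql's actual implementation:

import org.apache.kafka.streams.state.HostInfo;

final class LocalityCheck {

  // Hypothetical helper: a node is "local" only if both the host name
  // (compared case-insensitively, so "LOCALhost" matches "localhost")
  // and the port match the server's own advertised listener.
  static boolean isLocalHost(final HostInfo candidate, final HostInfo local) {
    return candidate.host().equalsIgnoreCase(local.host())
        && candidate.port() == local.port();
  }

  public static void main(final String[] args) {
    final HostInfo local = new HostInfo("localhost", 8088);
    System.out.println(isLocalHost(new HostInfo("localhost", 8088), local)); // true
    System.out.println(isLocalHost(new HostInfo("LOCALhost", 8089), local)); // false (different port)
    System.out.println(isLocalHost(new HostInfo("different", 8088), local)); // false (different host)
  }
}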
Use of io.confluent.ksql.execution.streams.materialization.Locator.KsqlPartitionLocation in project ksql by confluentinc.
From the class HARouting, method executeRounds.
private void executeRounds(
    final ServiceContext serviceContext,
    final PullPhysicalPlan pullPhysicalPlan,
    final ConfiguredStatement<Query> statement,
    final RoutingOptions routingOptions,
    final LogicalSchema outputSchema,
    final QueryId queryId,
    final List<KsqlPartitionLocation> locations,
    final PullQueryQueue pullQueryQueue,
    final CompletableFuture<Void> shouldCancelRequests,
    final Optional<ConsistencyOffsetVector> consistencyOffsetVector) throws InterruptedException {
  final ExecutorCompletionService<PartitionFetchResult> completionService =
      new ExecutorCompletionService<>(routerExecutorService);
  final int totalPartitions = locations.size();
  int processedPartitions = 0;
  final Map<Integer, List<Exception>> exceptionsPerPartition = new HashMap<>();

  // Submit one fetch per partition, each routed to the node chosen for this round.
  for (final KsqlPartitionLocation partition : locations) {
    final KsqlNode node = getNodeForRound(partition, routingOptions);
    pullQueryMetrics.ifPresent(queryExecutorMetrics ->
        queryExecutorMetrics.recordPartitionFetchRequest(1));
    completionService.submit(() -> routeQuery.routeQuery(
        node, partition, statement, serviceContext, routingOptions, pullQueryMetrics,
        pullPhysicalPlan, outputSchema, queryId, pullQueryQueue, shouldCancelRequests,
        consistencyOffsetVector));
  }

  // Collect results as they complete; a failed partition is resubmitted against the
  // next candidate node until every partition succeeds or an error aborts the query.
  while (processedPartitions < totalPartitions) {
    final Future<PartitionFetchResult> future = completionService.take();
    try {
      final PartitionFetchResult fetchResult = future.get();
      if (fetchResult.isError()) {
        exceptionsPerPartition.computeIfAbsent(
            fetchResult.location.getPartition(), v -> new ArrayList<>())
            .add(fetchResult.exception.get());
        final KsqlPartitionLocation nextRoundPartition = nextNode(fetchResult.getLocation());
        final KsqlNode node = getNodeForRound(nextRoundPartition, routingOptions);
        pullQueryMetrics.ifPresent(queryExecutorMetrics ->
            queryExecutorMetrics.recordResubmissionRequest(1));
        completionService.submit(() -> routeQuery.routeQuery(
            node, nextRoundPartition, statement, serviceContext, routingOptions,
            pullQueryMetrics, pullPhysicalPlan, outputSchema, queryId, pullQueryQueue,
            shouldCancelRequests, consistencyOffsetVector));
      } else {
        Preconditions.checkState(fetchResult.getResult() == RoutingResult.SUCCESS);
        processedPartitions++;
      }
    } catch (final Exception e) {
      final MaterializationException exception =
          new MaterializationException("Unable to execute pull query: " + e.getMessage());
      for (Entry<Integer, List<Exception>> entry : exceptionsPerPartition.entrySet()) {
        for (Exception excp : entry.getValue()) {
          exception.addSuppressed(excp);
        }
      }
      throw exception;
    }
  }
  pullQueryQueue.close();
}
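executeRounds is built around a standard ExecutorCompletionService pattern: submit one fetch task per partition, take() results as they complete, and resubmit any failed partition until every partition has succeeded or an unrecoverable error aborts the query. A stripped-down sketch of that retry loop with plain integers standing in for partitions; RetryRoundsSketch, Result and fetchOnce are illustrative stand-ins, not ksql classes:

import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

final class RetryRoundsSketch {

  // Minimal stand-in for PartitionFetchResult: which partition, and whether it succeeded.
  static final class Result {
    final int partition;
    final boolean ok;
    Result(final int partition, final boolean ok) {
      this.partition = partition;
      this.ok = ok;
    }
  }

  // Illustrative stand-in for routeQuery.routeQuery: the first attempt at odd
  // partitions "fails", forcing a resubmission, much like a standby fallback.
  static Result fetchOnce(final int partition, final int attempt) {
    return new Result(partition, attempt > 0 || partition % 2 == 0);
  }

  public static void main(final String[] args) throws InterruptedException, ExecutionException {
    final int totalPartitions = 4;
    final ExecutorService pool = Executors.newFixedThreadPool(totalPartitions);
    final ExecutorCompletionService<Result> completionService =
        new ExecutorCompletionService<>(pool);
    final int[] attempts = new int[totalPartitions];

    // Round 1: submit one fetch per partition, as executeRounds does.
    for (int p = 0; p < totalPartitions; p++) {
      final int partition = p;
      completionService.submit(() -> fetchOnce(partition, attempts[partition]));
    }

    // Drain completions; failed partitions are resubmitted until all succeed.
    int processed = 0;
    while (processed < totalPartitions) {
      final Result result = completionService.take().get();
      if (result.ok) {
        processed++;
      } else {
        attempts[result.partition]++;
        completionService.submit(() -> fetchOnce(result.partition, attempts[result.partition]));
      }
    }
    pool.shutdown();
    System.out.println("Fetched all " + processed + " partitions");
  }
}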
Use of io.confluent.ksql.execution.streams.materialization.Locator.KsqlPartitionLocation in project ksql by confluentinc.
From the class HARouting, method executeOrRouteQuery.
@SuppressWarnings("ParameterNumber")
@VisibleForTesting
static PartitionFetchResult executeOrRouteQuery(
    final KsqlNode node, final KsqlPartitionLocation location,
    final ConfiguredStatement<Query> statement, final ServiceContext serviceContext,
    final RoutingOptions routingOptions, final Optional<PullQueryExecutorMetrics> pullQueryMetrics,
    final PullPhysicalPlan pullPhysicalPlan, final LogicalSchema outputSchema,
    final QueryId queryId, final PullQueryQueue pullQueryQueue,
    final CompletableFuture<Void> shouldCancelRequests,
    final Optional<ConsistencyOffsetVector> consistencyOffsetVector) {
  final BiFunction<List<?>, LogicalSchema, PullQueryRow> rowFactory = (rawRow, schema) ->
      new PullQueryRow(rawRow, schema,
          Optional.ofNullable(routingOptions.getIsDebugRequest() ? node : null),
          Optional.empty());
  if (node.isLocal()) {
    try {
      LOG.debug("Query {} executed locally at host {} at timestamp {}.",
          statement.getStatementText(), node.location(), System.currentTimeMillis());
      pullQueryMetrics.ifPresent(queryExecutorMetrics -> queryExecutorMetrics.recordLocalRequests(1));
      synchronized (pullPhysicalPlan) {
        pullPhysicalPlan.execute(ImmutableList.of(location), pullQueryQueue, rowFactory);
        return new PartitionFetchResult(RoutingResult.SUCCESS, location, Optional.empty());
      }
    } catch (StandbyFallbackException | NotUpToBoundException e) {
      LOG.warn("Error executing query locally at node {}. Falling back to standby state which "
          + "may return stale results. Cause {}", node, e.getMessage());
      return new PartitionFetchResult(RoutingResult.STANDBY_FALLBACK, location, Optional.of(e));
    } catch (Exception e) {
      throw new KsqlException(String.format(
          "Error executing query locally at node %s: %s", node.location(), e.getMessage()), e);
    }
  } else {
    try {
      LOG.debug("Query {} routed to host {} at timestamp {}.",
          statement.getStatementText(), node.location(), System.currentTimeMillis());
      pullQueryMetrics.ifPresent(queryExecutorMetrics -> queryExecutorMetrics.recordRemoteRequests(1));
      forwardTo(node, ImmutableList.of(location), statement, serviceContext, pullQueryQueue,
          rowFactory, outputSchema, shouldCancelRequests, consistencyOffsetVector);
      return new PartitionFetchResult(RoutingResult.SUCCESS, location, Optional.empty());
    } catch (StandbyFallbackException e) {
      LOG.warn("Error forwarding query to node {}. Falling back to standby state which may "
          + "return stale results", node.location(), e.getCause());
      return new PartitionFetchResult(RoutingResult.STANDBY_FALLBACK, location, Optional.of(e));
    } catch (Exception e) {
      throw new KsqlException(String.format(
          "Error forwarding query to node %s: %s", node.location(), e.getMessage()), e);
    }
  }
}
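executeOrRouteQuery separates recoverable failures from fatal ones: StandbyFallbackException (and NotUpToBoundException on the local path) becomes a STANDBY_FALLBACK result so executeRounds retries the partition on another host, while any other exception is wrapped in a KsqlException and fails the whole pull query. A small sketch of that classification; RoutingResult is re-declared locally here and RecoverableFetchException is a hypothetical stand-in for ksql's fallback exceptions:

final class FallbackSketch {

  enum RoutingResult { SUCCESS, STANDBY_FALLBACK }

  // Hypothetical marker for "this host cannot serve the partition right now".
  static final class RecoverableFetchException extends RuntimeException {
    RecoverableFetchException(final String message) {
      super(message);
    }
  }

  static RoutingResult fetchPartition(final Runnable fetch) {
    try {
      fetch.run();
      return RoutingResult.SUCCESS;
    } catch (final RecoverableFetchException e) {
      // Analogous to catching StandbyFallbackException: let the caller retry elsewhere.
      return RoutingResult.STANDBY_FALLBACK;
    } catch (final Exception e) {
      // Everything else aborts the query, mirroring the KsqlException rethrow above.
      throw new IllegalStateException("Error executing pull query fetch: " + e.getMessage(), e);
    }
  }

  public static void main(final String[] args) {
    System.out.println(fetchPartition(() -> { }));                // SUCCESS
    System.out.println(fetchPartition(() -> {
      throw new RecoverableFetchException("state store not ready");
    }));                                                          // STANDBY_FALLBACK
  }
}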