Usage example of io.confluent.ksql.query.PullQueryQueue in the confluentinc/ksql project:
the HARoutingTest method shouldCallRouteQuery_partitionFailure.
@Test
public void shouldCallRouteQuery_partitionFailure() throws InterruptedException, ExecutionException {
  // Given: all four partitions can be located. The local fetch for partition 1 fails with a
  // standby-fallback error, so it must be resubmitted to the remote node; partition 3 succeeds
  // locally and pushes PQ_ROW3 straight onto the queue.
  locate(location1, location2, location3, location4);
  doThrow(new StandbyFallbackException("Error"))
      .when(pullPhysicalPlan).execute(eq(ImmutableList.of(location1)), any(), any());
  doAnswer(inv -> {
    final PullQueryQueue localQueue = inv.getArgument(1);
    localQueue.acceptRow(PQ_ROW3);
    return null;
  }).when(pullPhysicalPlan).execute(eq(ImmutableList.of(location3)), any(), any());

  // Remote requests are expected in order: partition 2, partition 4, then the resubmitted
  // partition 1. The counter lives in a single-element array so the lambda can mutate it.
  final int[] remoteCalls = {0};
  when(ksqlClient.makeQueryRequest(eq(node2.location()), any(), any(), any(), any(), any()))
      .thenAnswer(inv -> {
        final Map<String, ?> reqProps = inv.getArgument(3);
        final Consumer<List<StreamedRow>> rowSink = inv.getArgument(4);
        final String partitions =
            reqProps.get(KsqlRequestConfig.KSQL_REQUEST_QUERY_PULL_PARTITIONS).toString();
        if (partitions.equalsIgnoreCase("2")) {
          assertThat(remoteCalls[0], is(0));
          rowSink.accept(ImmutableList.of(
              StreamedRow.header(queryId, logicalSchema),
              StreamedRow.pullRow(GenericRow.fromList(ROW2), Optional.empty())));
        }
        if (partitions.equalsIgnoreCase("4")) {
          assertThat(remoteCalls[0], is(1));
          rowSink.accept(ImmutableList.of(
              StreamedRow.header(queryId, logicalSchema),
              StreamedRow.pullRow(GenericRow.fromList(ROW4), Optional.empty())));
        }
        if (partitions.equalsIgnoreCase("1")) {
          assertThat(remoteCalls[0], is(2));
          rowSink.accept(ImmutableList.of(
              StreamedRow.header(queryId, logicalSchema),
              StreamedRow.pullRow(GenericRow.fromList(ROW1), Optional.empty())));
        }
        remoteCalls[0]++;
        return RestResponse.successful(200, 2);
      });

  // When: the pull query is routed and we block until routing completes.
  final CompletableFuture<Void> future = haRouting.handlePullQuery(
      serviceContext, pullPhysicalPlan, statement, routingOptions, logicalSchema, queryId,
      pullQueryQueue, disconnect, Optional.empty());
  future.get();

  // Then: each local partition was executed exactly once, the remote node was hit three times
  // (2, 4, and the resubmitted 1), and the queue holds the rows in completion order.
  verify(pullPhysicalPlan).execute(eq(ImmutableList.of(location1)), any(), any());
  verify(pullPhysicalPlan).execute(eq(ImmutableList.of(location3)), any(), any());
  verify(ksqlClient, times(3)).makeQueryRequest(eq(node2.location()), any(), any(), any(), any(), any());

  assertThat(pullQueryQueue.size(), is(4));
  assertThat(pullQueryQueue.pollRow(1, TimeUnit.SECONDS).getRow(), is(ROW2));
  assertThat(pullQueryQueue.pollRow(1, TimeUnit.SECONDS).getRow(), is(ROW3));
  assertThat(pullQueryQueue.pollRow(1, TimeUnit.SECONDS).getRow(), is(ROW4));
  assertThat(pullQueryQueue.pollRow(1, TimeUnit.SECONDS).getRow(), is(ROW1));

  // Metrics: 4 initial fetches + 1 resubmission of the failed partition.
  final double fetchCount = getMetricValue("-partition-fetch-count");
  final double resubmissionCount = getMetricValue("-partition-fetch-resubmission-count");
  assertThat(fetchCount, is(5.0));
  assertThat(resubmissionCount, is(1.0));
}
Usage example of io.confluent.ksql.query.PullQueryQueue in the confluentinc/ksql project:
the HARoutingTest method shouldCallRouteQuery_success.
@Test
public void shouldCallRouteQuery_success() throws InterruptedException, ExecutionException {
  // Given: all four partitions can be located. Partition 1 succeeds locally and pushes PQ_ROW1
  // onto the queue; partition 3 succeeds locally but yields no rows.
  locate(location1, location2, location3, location4);
  doAnswer(inv -> {
    final PullQueryQueue localQueue = inv.getArgument(1);
    localQueue.acceptRow(PQ_ROW1);
    return null;
  }).when(pullPhysicalPlan).execute(eq(ImmutableList.of(location1)), any(), any());
  doNothing().when(pullPhysicalPlan).execute(eq(ImmutableList.of(location3)), any(), any());

  // Remote requests are expected in order: partition 2 (no rows), then partition 4 (ROW2).
  // The counter lives in a single-element array so the lambda can mutate it.
  final int[] remoteCalls = {0};
  when(ksqlClient.makeQueryRequest(eq(node2.location()), any(), any(), any(), any(), any()))
      .thenAnswer(inv -> {
        final Map<String, ?> reqProps = inv.getArgument(3);
        final Consumer<List<StreamedRow>> rowSink = inv.getArgument(4);
        final String partitions =
            reqProps.get(KsqlRequestConfig.KSQL_REQUEST_QUERY_PULL_PARTITIONS).toString();
        if (partitions.equalsIgnoreCase("2")) {
          assertThat(remoteCalls[0], is(0));
        }
        if (partitions.equalsIgnoreCase("4")) {
          assertThat(remoteCalls[0], is(1));
          rowSink.accept(ImmutableList.of(
              StreamedRow.header(queryId, logicalSchema),
              StreamedRow.pullRow(GenericRow.fromList(ROW2), Optional.empty())));
        }
        remoteCalls[0]++;
        return RestResponse.successful(200, 2);
      });

  // When: the pull query is routed and we block until routing completes.
  final CompletableFuture<Void> future = haRouting.handlePullQuery(
      serviceContext, pullPhysicalPlan, statement, routingOptions, logicalSchema, queryId,
      pullQueryQueue, disconnect, Optional.empty());
  future.get();

  // Then: each local partition was executed exactly once, the remote node was hit twice,
  // and the queue holds exactly the two produced rows in order.
  verify(pullPhysicalPlan).execute(eq(ImmutableList.of(location1)), any(), any());
  verify(pullPhysicalPlan).execute(eq(ImmutableList.of(location3)), any(), any());
  verify(ksqlClient, times(2)).makeQueryRequest(eq(node2.location()), any(), any(), any(), any(), any());

  assertThat(pullQueryQueue.size(), is(2));
  assertThat(pullQueryQueue.pollRow(1, TimeUnit.SECONDS).getRow(), is(ROW1));
  assertThat(pullQueryQueue.pollRow(1, TimeUnit.SECONDS).getRow(), is(ROW2));

  // Metrics: 4 fetches (one per partition) and no resubmissions.
  final double fetchCount = getMetricValue("-partition-fetch-count");
  final double resubmissionCount = getMetricValue("-partition-fetch-resubmission-count");
  assertThat(fetchCount, is(4.0));
  assertThat(resubmissionCount, is(0.0));
}
Usage example of io.confluent.ksql.query.PullQueryQueue in the confluentinc/ksql project:
the EngineExecutor method executeTablePullQuery.
/**
 * Evaluates a pull query by first analyzing it, then building the logical plan and finally
 * the physical plan. The execution is then done using the physical plan in a pipelined manner.
 *
 * @param analysis the immutable analysis of the pull query
 * @param statement The pull query
 * @param routing HA routing implementation used to execute the plan locally and remotely
 * @param routingOptions Configuration parameters used for HA routing
 * @param queryPlannerOptions planner overrides checked instead of copying the session config
 * @param pullQueryMetrics JMX metrics
 * @param startImmediately whether to begin populating the result queue before returning
 * @param consistencyOffsetVector offset vector used for consistency guarantees, if enabled
 * @return the rows that are the result of evaluating the pull query
 * @throws IllegalArgumentException if the statement is not a pull query
 * @throws KsqlStatementException if planning or execution setup fails
 */
PullQueryResult executeTablePullQuery(final ImmutableAnalysis analysis, final ConfiguredStatement<Query> statement, final HARouting routing, final RoutingOptions routingOptions, final QueryPlannerOptions queryPlannerOptions, final Optional<PullQueryExecutorMetrics> pullQueryMetrics, final boolean startImmediately, final Optional<ConsistencyOffsetVector> consistencyOffsetVector) {
  if (!statement.getStatement().isPullQuery()) {
    throw new IllegalArgumentException("Executor can only handle pull queries");
  }
  final SessionConfig sessionConfig = statement.getSessionConfig();

  // If we ever change how many hops a request can do, we'll need to update this for correct
  // metrics.
  final RoutingNodeType routingNodeType = routingOptions.getIsSkipForwardRequest()
      ? RoutingNodeType.REMOTE_NODE
      : RoutingNodeType.SOURCE_NODE;

  PullPhysicalPlan plan = null;
  try {
    // Do not set sessionConfig.getConfig to true! The copying is inefficient and slows down pull
    // query performance significantly. Instead use QueryPlannerOptions which check overrides
    // deliberately.
    final KsqlConfig ksqlConfig = sessionConfig.getConfig(false);
    final LogicalPlanNode logicalPlan =
        buildAndValidateLogicalPlan(statement, analysis, ksqlConfig, queryPlannerOptions, false);

    // This is a cancel signal that is used to stop both local operations and requests
    final CompletableFuture<Void> shouldCancelRequests = new CompletableFuture<>();

    plan = buildPullPhysicalPlan(
        logicalPlan, analysis, queryPlannerOptions, shouldCancelRequests, consistencyOffsetVector);
    final PullPhysicalPlan physicalPlan = plan;

    final PullQueryQueue pullQueryQueue = new PullQueryQueue(analysis.getLimitClause());
    final PullQueryQueuePopulator populator = () -> routing.handlePullQuery(
        serviceContext, physicalPlan, statement, routingOptions, physicalPlan.getOutputSchema(),
        physicalPlan.getQueryId(), pullQueryQueue, shouldCancelRequests, consistencyOffsetVector);
    final PullQueryResult result = new PullQueryResult(
        physicalPlan.getOutputSchema(), populator, physicalPlan.getQueryId(), pullQueryQueue,
        pullQueryMetrics, physicalPlan.getSourceType(), physicalPlan.getPlanType(),
        routingNodeType, physicalPlan::getRowsReadFromDataSource, shouldCancelRequests,
        consistencyOffsetVector);
    if (startImmediately) {
      result.start();
    }
    return result;
  } catch (final Exception e) {
    // If we failed before a physical plan existed, we cannot attribute the error to a
    // source/plan type, so record it against the generic "no result" error rate.
    if (plan == null) {
      pullQueryMetrics.ifPresent(m -> m.recordErrorRateForNoResult(1));
    } else {
      final PullPhysicalPlan physicalPlan = plan;
      pullQueryMetrics.ifPresent(metrics -> metrics.recordErrorRate(
          1, physicalPlan.getSourceType(), physicalPlan.getPlanType(), routingNodeType));
    }

    final String stmtLower = statement.getStatementText().toLowerCase(Locale.ROOT);
    // BUGFIX: e.getMessage() may be null (see the null check when building the
    // KsqlStatementException below); calling toLowerCase() on it directly would throw an
    // NPE here and mask the original failure.
    final String messageLower =
        e.getMessage() == null ? "" : e.getMessage().toLowerCase(Locale.ROOT);
    final String stackLower = Throwables.getStackTraceAsString(e).toLowerCase(Locale.ROOT);

    // Do not log the error message verbatim if it may contain the statement text, since
    // the contents of the query may be sensitive.
    if (messageLower.contains(stmtLower) || stackLower.contains(stmtLower)) {
      final StackTraceElement loc =
          Iterables.getLast(Throwables.getCausalChain(e)).getStackTrace()[0];
      LOG.error("Failure to execute pull query {} {}, not logging the error message since it "
          + "contains the query string, which may contain sensitive information. If you "
          + "see this LOG message, please submit a GitHub ticket and we will scrub "
          + "the statement text from the error at {}",
          routingOptions.debugString(), queryPlannerOptions.debugString(), loc);
    } else {
      LOG.error("Failure to execute pull query. {} {}",
          routingOptions.debugString(), queryPlannerOptions.debugString(), e);
    }
    LOG.debug("Failed pull query text {}, {}", statement.getStatementText(), e);

    throw new KsqlStatementException(
        e.getMessage() == null ? "Server Error" + Arrays.toString(e.getStackTrace()) : e.getMessage(),
        statement.getStatementText(),
        e);
  }
}
Aggregations