Use of io.confluent.ksql.util.PushOffsetRange in project ksql by confluentinc.
From the class CatchupConsumerTest, method shouldRunConsumer_success_waitForLatestAssignment.
@Test
public void shouldRunConsumer_success_waitForLatestAssignment() {
  // Given:
  PushOffsetRange offsetRange = new PushOffsetRange(
      Optional.empty(), new PushOffsetVector(ImmutableList.of(1L, 2L)));
  when(latestConsumer.getAssignment()).thenReturn(null);
  AtomicReference<CatchupConsumer> cRef = new AtomicReference<>();
  // Rather than wait, simulate the latest consumer getting an assignment
  final BiConsumer<Object, Long> waitFn =
      (o, wait) -> cRef.get().newAssignment(ImmutableSet.of(TP0, TP1));
  try (CatchupConsumer consumer = new CatchupConsumer(
      TOPIC, false, SCHEMA, kafkaConsumer, () -> latestConsumer, catchupCoordinator,
      offsetRange, clock, sleepFn, waitFn, 0, pq -> caughtUp = true)) {
    cRef.set(consumer);
    runSuccessfulTest(consumer);
  }
}
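For context, the PushOffsetRange built in this test pairs an optional start vector with an end vector of per-partition offsets. A minimal round-trip sketch using only the calls that appear in the snippets on this page (the constructor, serialize, deserialize, getEndOffsets, getDenseRepresentation), and assuming PushOffsetVector lives alongside PushOffsetRange in io.confluent.ksql.util:

import com.google.common.collect.ImmutableList;
import io.confluent.ksql.util.PushOffsetRange;
import io.confluent.ksql.util.PushOffsetVector;
import java.util.Optional;

public final class PushOffsetRangeRoundTrip {
  public static void main(String[] args) {
    // End offsets 1 and 2 for a two-partition topic; no explicit start vector.
    PushOffsetRange range = new PushOffsetRange(
        Optional.empty(), new PushOffsetVector(ImmutableList.of(1L, 2L)));

    // The range travels between nodes as a continuation-token string.
    String token = range.serialize();
    PushOffsetRange restored = PushOffsetRange.deserialize(token);

    // Expected to print [1, 2], matching the original end offsets.
    System.out.println(restored.getEndOffsets().getDenseRepresentation());
  }
}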
Use of io.confluent.ksql.util.PushOffsetRange in project ksql by confluentinc.
From the class CatchupConsumerTest, method shouldRunConsumer_queueIsAtLimit.
@Test
public void shouldRunConsumer_queueIsAtLimit() {
  // Given:
  PushOffsetRange offsetRange = new PushOffsetRange(
      Optional.empty(), new PushOffsetVector(ImmutableList.of(1L, 2L)));
  when(queue.isAtLimit()).thenReturn(false, true, true, false);
  try (CatchupConsumer consumer = new CatchupConsumer(
      TOPIC, false, SCHEMA, kafkaConsumer, () -> latestConsumer, catchupCoordinator,
      offsetRange, clock, sleepFn, waitFn, 0, pq -> caughtUp = true)) {
    // When:
    consumer.register(queue);
    runSuccessfulTest(consumer);
    // Then: the consumer slept once for each check that reported the queue at its limit
    verify(sleepFn, times(2)).accept(any());
  }
}
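The stubbing above answers isAtLimit() with false, true, true, false across successive calls, and the test then expects exactly two sleeps. A condensed sketch of that back-pressure contract, with hypothetical names mirroring the test's mocks; this is not the real CatchupConsumer loop, only the behavior the test pins down:

import java.util.function.Consumer;

final class BackPressureSketch {
  interface BoundedQueue { boolean isAtLimit(); }

  // Before handing more rows to the queue, sleep once per check in which the
  // queue reports it is full, then proceed when room frees up.
  static void waitForRoom(BoundedQueue queue, Consumer<Long> sleepFn) {
    while (queue.isAtLimit()) {
      sleepFn.accept(100L); // hypothetical 100 ms pause between checks
    }
    // ... poll Kafka and enqueue rows here ...
  }
}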
Use of io.confluent.ksql.util.PushOffsetRange in project ksql by confluentinc.
From the class EngineExecutor, method executeScalablePushQuery.
ScalablePushQueryMetadata executeScalablePushQuery(
    final ImmutableAnalysis analysis,
    final ConfiguredStatement<Query> statement,
    final PushRouting pushRouting,
    final PushRoutingOptions pushRoutingOptions,
    final QueryPlannerOptions queryPlannerOptions,
    final Context context,
    final Optional<ScalablePushQueryMetrics> scalablePushQueryMetrics) {
  final SessionConfig sessionConfig = statement.getSessionConfig();

  // If we ever change how many hops a request can do, we'll need to update this for correct
  // metrics.
  final RoutingNodeType routingNodeType = pushRoutingOptions.getHasBeenForwarded()
      ? RoutingNodeType.REMOTE_NODE
      : RoutingNodeType.SOURCE_NODE;

  PushPhysicalPlan plan = null;
  try {
    final KsqlConfig ksqlConfig = sessionConfig.getConfig(false);
    final LogicalPlanNode logicalPlan = buildAndValidateLogicalPlan(
        statement, analysis, ksqlConfig, queryPlannerOptions, true);
    final PushPhysicalPlanCreator pushPhysicalPlanCreator = (offsetRange, catchupConsumerGroup) ->
        buildScalablePushPhysicalPlan(
            logicalPlan, analysis, context, offsetRange, catchupConsumerGroup);
    final Optional<PushOffsetRange> offsetRange = pushRoutingOptions.getContinuationToken()
        .map(PushOffsetRange::deserialize);
    final Optional<String> catchupConsumerGroup = pushRoutingOptions.getCatchupConsumerGroup();
    final PushPhysicalPlanManager physicalPlanManager = new PushPhysicalPlanManager(
        pushPhysicalPlanCreator, catchupConsumerGroup, offsetRange);
    final PushPhysicalPlan physicalPlan = physicalPlanManager.getPhysicalPlan();
    plan = physicalPlan;

    final TransientQueryQueue transientQueryQueue =
        new TransientQueryQueue(analysis.getLimitClause());
    final PushQueryMetadata.ResultType resultType =
        physicalPlan.getScalablePushRegistry().isTable()
            ? physicalPlan.getScalablePushRegistry().isWindowed()
                ? ResultType.WINDOWED_TABLE
                : ResultType.TABLE
            : ResultType.STREAM;

    final PushQueryQueuePopulator populator = () -> pushRouting.handlePushQuery(
        serviceContext, physicalPlanManager, statement, pushRoutingOptions,
        physicalPlan.getOutputSchema(), transientQueryQueue, scalablePushQueryMetrics,
        offsetRange);
    final PushQueryPreparer preparer = () ->
        pushRouting.preparePushQuery(physicalPlanManager, statement, pushRoutingOptions);
    final ScalablePushQueryMetadata metadata = new ScalablePushQueryMetadata(
        physicalPlan.getOutputSchema(),
        physicalPlan.getQueryId(),
        transientQueryQueue,
        scalablePushQueryMetrics,
        resultType,
        populator,
        preparer,
        physicalPlan.getSourceType(),
        routingNodeType,
        physicalPlan::getRowsReadFromDataSource);
    return metadata;
  } catch (final Exception e) {
    if (plan == null) {
      scalablePushQueryMetrics.ifPresent(m -> m.recordErrorRateForNoResult(1));
    } else {
      final PushPhysicalPlan pushPhysicalPlan = plan;
      scalablePushQueryMetrics.ifPresent(metrics ->
          metrics.recordErrorRate(1, pushPhysicalPlan.getSourceType(), routingNodeType));
    }

    final String stmtLower = statement.getStatementText().toLowerCase(Locale.ROOT);
    final String messageLower = e.getMessage().toLowerCase(Locale.ROOT);
    final String stackLower = Throwables.getStackTraceAsString(e).toLowerCase(Locale.ROOT);
    // Do not log the error message itself if it contains the statement text, since the
    // statement may contain sensitive information; the exception returned to the caller
    // below will still contain the contents of the query.
    if (messageLower.contains(stmtLower) || stackLower.contains(stmtLower)) {
      final StackTraceElement loc = Iterables.getLast(Throwables.getCausalChain(e))
          .getStackTrace()[0];
      LOG.error("Failure to execute push query V2 {} {}, not logging the error message since it "
          + "contains the query string, which may contain sensitive information."
          + " If you see this LOG message, please submit a GitHub ticket and"
          + " we will scrub the statement text from the error at {}",
          pushRoutingOptions.debugString(), queryPlannerOptions.debugString(), loc);
    } else {
      LOG.error("Failure to execute push query V2. {} {}",
          pushRoutingOptions.debugString(), queryPlannerOptions.debugString(), e);
    }
    LOG.debug("Failed push query V2 text {}, {}", statement.getStatementText(), e);

    throw new KsqlStatementException(
        e.getMessage() == null
            ? "Server Error" + Arrays.toString(e.getStackTrace())
            : e.getMessage(),
        statement.getStatementText(),
        e);
  }
}
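The only PushOffsetRange-specific step in this method is turning an optional continuation token back into an offset range before building the physical plan. A minimal sketch of that resume decision, assuming only the deserialize method shown above; the TOKEN_KEY literal is illustrative, standing in for KsqlRequestConfig.KSQL_REQUEST_QUERY_PUSH_CONTINUATION_TOKEN rather than copied from the ksql source:

import io.confluent.ksql.util.PushOffsetRange;
import java.util.Map;
import java.util.Optional;

final class ContinuationTokens {
  // Illustrative stand-in for KsqlRequestConfig.KSQL_REQUEST_QUERY_PUSH_CONTINUATION_TOKEN.
  static final String TOKEN_KEY = "ksql.query.push.continuation.token";

  // No token means a fresh query starting from the configured offsets; a token
  // means resume from the serialized offset range, which is exactly the
  // map(PushOffsetRange::deserialize) step in the method above.
  static Optional<PushOffsetRange> resumePoint(Map<String, ?> requestProperties) {
    return Optional.ofNullable((String) requestProperties.get(TOKEN_KEY))
        .map(PushOffsetRange::deserialize);
  }
}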
Use of io.confluent.ksql.util.PushOffsetRange in project ksql by confluentinc.
From the class ScalablePushQueryFunctionalTest, method shouldCatchupFromSomeToken.
@Test
public void shouldCatchupFromSomeToken() throws ExecutionException, InterruptedException {
  assertAllPersistentQueriesRunning(true);
  TEST_HARNESS.produceRows(pageViewDataProvider.topicName(), pageViewDataProvider,
      FormatFactory.KAFKA, FormatFactory.JSON);
  final CompletableFuture<StreamedRow> header = new CompletableFuture<>();
  final CompletableFuture<List<StreamedRow>> complete = new CompletableFuture<>();
  final PushOffsetRange range = new PushOffsetRange(
      Optional.empty(), new PushOffsetVector(ImmutableList.of(0L, 0L)));
  makeRequestAndSetupSubscriber(
      "SELECT USERID, PAGEID, VIEWTIME from " + streamName + " EMIT CHANGES;",
      ImmutableMap.of("auto.offset.reset", "latest"),
      ImmutableMap.of(KsqlRequestConfig.KSQL_REQUEST_QUERY_PUSH_CONTINUATION_TOKEN,
          range.serialize()),
      header, complete);
  header.get();
  assertThatEventually(() -> subscriber.getUniqueRows().size(),
      is(pageViewDataProvider.data().size() + 1));
  List<StreamedRow> orderedRows = subscriber.getUniqueRows().stream()
      .sorted(this::compareByTimestamp)
      .collect(Collectors.toList());
  assertFirstBatchOfRows(orderedRows);
}
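Here the test hand-builds a "replay from the beginning" token: zero offsets for both partitions, serialized and passed as the continuation-token request property. A small factory sketch of the same idea, assuming (as in the test) that PushOffsetVector takes a dense per-partition offset list; the class and method names are hypothetical:

import com.google.common.collect.ImmutableList;
import io.confluent.ksql.util.PushOffsetRange;
import io.confluent.ksql.util.PushOffsetVector;
import java.util.Collections;
import java.util.Optional;

final class CatchupTokens {
  // Builds a token asking the server to replay from offset 0 on every
  // partition, as the test above does for its two-partition page-view topic.
  static String fromBeginning(int partitions) {
    PushOffsetVector zeros = new PushOffsetVector(
        ImmutableList.copyOf(Collections.nCopies(partitions, 0L)));
    return new PushOffsetRange(Optional.empty(), zeros).serialize();
  }
}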
Use of io.confluent.ksql.util.PushOffsetRange in project ksql by confluentinc.
From the class PushRoutingTest, method shouldSucceed_gapDetectedRemote_retry.
@Test
public void shouldSucceed_gapDetectedRemote_retry() throws ExecutionException, InterruptedException {
  // Given:
  final AtomicReference<Set<KsqlNode>> nodes =
      new AtomicReference<>(ImmutableSet.of(ksqlNodeLocal, ksqlNodeRemote));
  final PushRouting routing = new PushRouting(sqr -> nodes.get(), 50, true);
  AtomicReference<TestRemotePublisher> remotePublisher = new AtomicReference<>();
  AtomicInteger remoteCount = new AtomicInteger(0);
  when(simpleKsqlClient.makeQueryRequestStreamed(any(), any(), any(), any())).thenAnswer(a -> {
    remotePublisher.set(new TestRemotePublisher(context));
    remoteCount.incrementAndGet();
    final Map<String, ?> requestProperties = a.getArgument(3);
    String continuationToken = (String) requestProperties.get(
        KsqlRequestConfig.KSQL_REQUEST_QUERY_PUSH_CONTINUATION_TOKEN);
    if (remoteCount.get() == 1) {
      // The first request starts fresh, so it must not carry a continuation token.
      assertThat(continuationToken, nullValue());
    } else if (remoteCount.get() == 2) {
      // The retry after the detected gap must resume from the last good offsets.
      assertThat(continuationToken, notNullValue());
      final PushOffsetRange range = PushOffsetRange.deserialize(continuationToken);
      assertThat(range.getEndOffsets().getDenseRepresentation(), is(ImmutableList.of(0L, 3L)));
      remotePublisher.get().accept(REMOTE_ROW2);
    }
    return createFuture(RestResponse.successful(200, remotePublisher.get()));
  });

  // When:
  final PushConnectionsHandle handle = handlePushRouting(routing);
  final AtomicReference<Throwable> exception = new AtomicReference<>(null);
  handle.onException(exception::set);
  context.runOnContext(v -> {
    remotePublisher.get().accept(REMOTE_CONTINUATION_TOKEN1);
    remotePublisher.get().accept(REMOTE_ROW1);
    remotePublisher.get().accept(REMOTE_CONTINUATION_TOKEN_GAP);
  });
  Set<List<?>> rows = waitOnRows(2);
  handle.close();

  // Then:
  verify(simpleKsqlClient, times(2)).makeQueryRequestStreamed(any(), any(), any(), any());
  assertThat(rows.contains(REMOTE_ROW1.getRow().get().getColumns()), is(true));
  assertThat(rows.contains(REMOTE_ROW2.getRow().get().getColumns()), is(true));
}
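The contract this test pins down: the first streamed request carries no continuation token, and after a gap in the token sequence is detected, the retried request carries the last complete offset range. A condensed sketch of that client-side bookkeeping with hypothetical names; the real gap detection and reconnect logic lives inside PushRouting:

import io.confluent.ksql.util.PushOffsetRange;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicReference;

final class GapRetrySketch {
  private final AtomicReference<PushOffsetRange> lastComplete = new AtomicReference<>();

  // Record the offset range carried by each continuation token on the stream.
  void onContinuationToken(PushOffsetRange range) {
    lastComplete.set(range);
  }

  // When a gap forces a reconnect, the retried request resends the last good
  // range as a serialized token; a first attempt, with nothing recorded yet,
  // sends none. This matches the nullValue()/notNullValue() assertions on the
  // two requests in the test above.
  Optional<String> tokenForRetry() {
    return Optional.ofNullable(lastComplete.get()).map(PushOffsetRange::serialize);
  }
}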