Use of io.confluent.ksql.util.ConsistencyOffsetVector in project ksql by confluentinc.
From the class RestApiTest, method verifyConsistencyVector.
/**
 * Verifies that the consistency vector embedded in a streamed response line matches the
 * expected vector.
 *
 * <p>The format of the json string is
 * {@code {"consistencyToken":{"consistencyToken":"<CT>"}}}
 *
 * @param consistencyText the raw json line containing the serialized consistency token
 * @param consistencyOffsetVector the expected offset vector
 */
private static void verifyConsistencyVector(final String consistencyText, final ConsistencyOffsetVector consistencyOffsetVector) {
String serializedCV = consistencyText.split(":\"")[1];
// Strip the trailing "\"}}" plus closing character from the json wrapper.
serializedCV = serializedCV.substring(0, serializedCV.length() - 4);
final ConsistencyOffsetVector cvResponse = ConsistencyOffsetVector.deserialize(serializedCV);
// Match the object directly (not equals()+is(true)) so a failure prints both vectors;
// this is also consistent with the sibling assertion style in
// PullQueryConsistencyFunctionalTest.verifyConsistencyVector.
assertThat(cvResponse, is(consistencyOffsetVector));
}
Use of io.confluent.ksql.util.ConsistencyOffsetVector in project ksql by confluentinc.
From the class PullQueryConsistencyFunctionalTest, method verifyConsistencyVector.
/**
 * Asserts that the consistency vector serialized inside a response line equals the expected one.
 *
 * @param consistencyText the raw json line containing the serialized consistency token
 * @param consistencyOffsetVector the expected offset vector
 */
private static void verifyConsistencyVector(final String consistencyText, final ConsistencyOffsetVector consistencyOffsetVector) {
// The token sits after the ":\"" delimiter; drop the 4 trailing wrapper characters.
final String afterDelimiter = consistencyText.split(":\"")[1];
final String token = afterDelimiter.substring(0, afterDelimiter.length() - 4);
assertThat(ConsistencyOffsetVector.deserialize(token), is(consistencyOffsetVector));
}
Use of io.confluent.ksql.util.ConsistencyOffsetVector in project ksql by confluentinc.
From the class PullQueryConsistencyFunctionalTest, method shouldExecuteThenFailPullQueryWithBound.
/**
 * Integration test: a pull query with a consistency-vector bound succeeds while the active
 * host is reachable, then fails with NOT_UP_TO_BOUND once only a lagging standby remains.
 * Assumes a 3-node cluster formation (active, standby, router) — TODO confirm against the
 * test fixture setup elsewhere in this class.
 */
@Test
public void shouldExecuteThenFailPullQueryWithBound() throws Exception {
// Given:
ClusterFormation clusterFormation = findClusterFormation(TEST_APP_0, TEST_APP_1, TEST_APP_2);
waitForClusterToBeDiscovered(clusterFormation.router.getApp(), 3, USER_CREDS);
waitForRemoteServerToChangeStatus(clusterFormation.router.getApp(), clusterFormation.router.getHost(), HighAvailabilityTestUtil.lagsReported(3), USER_CREDS);
waitForRemoteServerToChangeStatus(clusterFormation.router.getApp(), clusterFormation.active.getHost(), HighAvailabilityTestUtil::remoteServerIsUp, USER_CREDS);
waitForRemoteServerToChangeStatus(clusterFormation.router.getApp(), clusterFormation.standBy.getHost(), HighAvailabilityTestUtil::remoteServerIsUp, USER_CREDS);
waitForRemoteServerToChangeStatus(clusterFormation.router.getApp(), clusterFormation.router.getHost(), HighAvailabilityTestUtil.lagsReported(clusterFormation.standBy.getHost(), Optional.of(5L), 5), USER_CREDS);
// Cut off standby from Kafka to simulate lag
clusterFormation.standBy.getShutoffs().setKafkaPauseOffset(0);
// Allow time for the pause to take effect before producing new rows.
Thread.sleep(2000);
// Produce more data that will now only be available on active since standby is cut off
TEST_HARNESS.produceRows(topic, USER_PROVIDER, FormatFactory.KAFKA, FormatFactory.JSON, timestampSupplier::getAndIncrement);
// Make sure that the lags get reported before issuing query
waitForRemoteServerToChangeStatus(clusterFormation.router.getApp(), clusterFormation.router.getHost(), HighAvailabilityTestUtil.lagsReported(clusterFormation.active.getHost(), Optional.of(10L), 10), USER_CREDS);
final KsqlRestClient restClient = clusterFormation.router.getApp().buildKsqlClient(USER_CREDS, ConsistencyLevel.MONOTONIC_SESSION);
// Request that the server returns a consistency offset vector with the response.
final ImmutableMap<String, Object> requestProperties = ImmutableMap.of(KSQL_QUERY_PULL_CONSISTENCY_OFFSET_VECTOR_ENABLED, true);
// When:
RestResponse<List<StreamedRow>> res = restClient.makeQueryRequest(sql, 1L, null, requestProperties);
// Then:
// Expect header rows, one data row, and a trailing consistency-token row.
List<StreamedRow> rows = res.getResponse();
assertThat(rows, hasSize(HEADER + 2));
assertThat(rows.get(1).getRow(), is(not(Optional.empty())));
assertThat(rows.get(1).getRow().get().getColumns(), is(ImmutableList.of(KEY1, 2)));
assertThat(rows.get(2).getConsistencyToken(), is(not(Optional.empty())));
ConsistencyOffsetVector cvResponse = ConsistencyOffsetVector.deserialize(rows.get(2).getConsistencyToken().get().getConsistencyToken());
assertThat(cvResponse, is(CONSISTENCY_OFFSET_VECTOR_AFTER_10));
// Given:
// Partition active off
clusterFormation.active.getShutoffs().shutOffAll();
waitForRemoteServerToChangeStatus(clusterFormation.router.getApp(), clusterFormation.standBy.getHost(), HighAvailabilityTestUtil::remoteServerIsUp, USER_CREDS);
waitForRemoteServerToChangeStatus(clusterFormation.router.getApp(), clusterFormation.active.getHost(), HighAvailabilityTestUtil::remoteServerIsDown, USER_CREDS);
// Re-issue the query with the previously returned vector as the consistency bound;
// only the lagging standby can serve it, so it cannot meet the bound.
final ImmutableMap.Builder<String, Object> builder = new ImmutableMap.Builder<String, Object>().put(KSQL_QUERY_PULL_CONSISTENCY_OFFSET_VECTOR_ENABLED, true).put(KsqlRequestConfig.KSQL_REQUEST_QUERY_PULL_CONSISTENCY_OFFSET_VECTOR, cvResponse.serialize());
final Map<String, Object> requestProperties2 = builder.build();
// When:
final Supplier<List<String>> call = () -> {
final String response = rawRestQueryRequest(clusterFormation.router.getApp(), sql, MediaType.APPLICATION_JSON, Collections.emptyMap(), requestProperties2);
return Arrays.asList(response.split(System.lineSeparator()));
};
// Then:
// Retry until the standby rejects the bounded query; only header + error line expected.
final List<String> messages = assertThatEventually(call, hasSize(HEADER + 1));
assertThat(messages, hasSize(HEADER + 1));
assertThat(messages.get(1), containsString("Failed to get value from materialized table, " + "reason: NOT_UP_TO_BOUND"));
}
Use of io.confluent.ksql.util.ConsistencyOffsetVector in project ksql by confluentinc.
From the class PullQueryIQv2FunctionalTest, method verifyConsistencyVector.
/**
 * Deserializes the given token and asserts it equals the expected offset vector.
 *
 * @param serializedCV the serialized consistency vector string
 * @param offsetVector the expected offset vector
 */
private static void verifyConsistencyVector(final String serializedCV, final ConsistencyOffsetVector offsetVector) {
assertThat(ConsistencyOffsetVector.deserialize(serializedCV), is(offsetVector));
}
Use of io.confluent.ksql.util.ConsistencyOffsetVector in project ksql by confluentinc.
From the class HARouting, method executeRounds.
/**
 * Routes the pull query to each partition's current node and retries failed partitions on
 * alternate nodes until every partition succeeds, or surfaces an error.
 *
 * <p>One task per partition is submitted to a completion service; on a per-partition failure
 * the exception is recorded and the partition is resubmitted against the next candidate node
 * (via {@code nextNode}). The loop exits once all partitions report SUCCESS.
 *
 * @param serviceContext service context used by the per-node route calls
 * @param pullPhysicalPlan the physical plan being executed
 * @param statement the configured pull query statement
 * @param routingOptions options controlling node selection
 * @param outputSchema schema of rows placed on the queue
 * @param queryId id of the query, for metrics/errors
 * @param locations partition locations to fetch, one task each
 * @param pullQueryQueue queue receiving result rows; closed when all partitions complete
 * @param shouldCancelRequests future used to signal cancellation to in-flight requests
 * @param consistencyOffsetVector optional vector updated with observed offsets
 * @throws InterruptedException if interrupted while waiting on completed tasks
 */
private void executeRounds(final ServiceContext serviceContext, final PullPhysicalPlan pullPhysicalPlan, final ConfiguredStatement<Query> statement, final RoutingOptions routingOptions, final LogicalSchema outputSchema, final QueryId queryId, final List<KsqlPartitionLocation> locations, final PullQueryQueue pullQueryQueue, final CompletableFuture<Void> shouldCancelRequests, final Optional<ConsistencyOffsetVector> consistencyOffsetVector) throws InterruptedException {
final ExecutorCompletionService<PartitionFetchResult> completionService = new ExecutorCompletionService<>(routerExecutorService);
final int totalPartitions = locations.size();
int processedPartitions = 0;
// Per-partition history of failures, attached as suppressed exceptions on a fatal error.
final Map<Integer, List<Exception>> exceptionsPerPartition = new HashMap<>();
// Submit the first round: one fetch task per partition.
for (final KsqlPartitionLocation partition : locations) {
final KsqlNode node = getNodeForRound(partition, routingOptions);
pullQueryMetrics.ifPresent(queryExecutorMetrics -> queryExecutorMetrics.recordPartitionFetchRequest(1));
completionService.submit(() -> routeQuery.routeQuery(node, partition, statement, serviceContext, routingOptions, pullQueryMetrics, pullPhysicalPlan, outputSchema, queryId, pullQueryQueue, shouldCancelRequests, consistencyOffsetVector));
}
// Drain completions; failed partitions are resubmitted, so the loop counts only successes.
while (processedPartitions < totalPartitions) {
final Future<PartitionFetchResult> future = completionService.take();
try {
final PartitionFetchResult fetchResult = future.get();
if (fetchResult.isError()) {
// Record this node's failure, then retry the partition on the next candidate node.
exceptionsPerPartition.computeIfAbsent(fetchResult.location.getPartition(), v -> new ArrayList<>()).add(fetchResult.exception.get());
final KsqlPartitionLocation nextRoundPartition = nextNode(fetchResult.getLocation());
final KsqlNode node = getNodeForRound(nextRoundPartition, routingOptions);
pullQueryMetrics.ifPresent(queryExecutorMetrics -> queryExecutorMetrics.recordResubmissionRequest(1));
completionService.submit(() -> routeQuery.routeQuery(node, nextRoundPartition, statement, serviceContext, routingOptions, pullQueryMetrics, pullPhysicalPlan, outputSchema, queryId, pullQueryQueue, shouldCancelRequests, consistencyOffsetVector));
} else {
Preconditions.checkState(fetchResult.getResult() == RoutingResult.SUCCESS);
processedPartitions++;
}
} catch (final Exception e) {
// NOTE(review): the triggering exception `e` is not chained as the cause — only its
// message is kept, with prior per-partition failures attached as suppressed
// exceptions. Confirm whether MaterializationException has a (String, Throwable)
// constructor so the cause could be preserved.
final MaterializationException exception = new MaterializationException("Unable to execute pull query: " + e.getMessage());
for (Entry<Integer, List<Exception>> entry : exceptionsPerPartition.entrySet()) {
for (Exception excp : entry.getValue()) {
exception.addSuppressed(excp);
}
}
throw exception;
}
}
// All partitions succeeded; signal consumers that no more rows will arrive.
pullQueryQueue.close();
}
Aggregations