use of io.confluent.ksql.util.PushOffsetVector in project ksql by confluentinc.
In the class ScalablePushConsumer, the method getOffsetVector:
/**
 * Builds an offset vector with one entry per partition of {@code topic}, in
 * partition order. Partitions with no recorded position map to {@code -1L}.
 *
 * @param offsets known positions keyed by topic-partition
 * @param topic the topic whose partitions are enumerated
 * @param numPartitions the total partition count of the topic
 * @return a {@link PushOffsetVector} covering partitions {@code 0..numPartitions-1}
 */
private static PushOffsetVector getOffsetVector(
    final Map<TopicPartition, Long> offsets, final String topic, final int numPartitions) {
  // Presize since the final length is known up front.
  final List<Long> perPartitionOffsets = new ArrayList<>(numPartitions);
  for (int partition = 0; partition < numPartitions; partition++) {
    final Long position = offsets.get(new TopicPartition(topic, partition));
    // -1 is the sentinel for "no position recorded for this partition".
    perPartitionOffsets.add(position == null ? -1L : position);
  }
  return new PushOffsetVector(perPartitionOffsets);
}
use of io.confluent.ksql.util.PushOffsetVector in project ksql by confluentinc.
In the class CatchupConsumerTest, the method shouldRunConsumer_success_waitForLatestAssignment:
@Test
public void shouldRunConsumer_success_waitForLatestAssignment() {
// Given:
PushOffsetRange offsetRange = new PushOffsetRange(Optional.empty(), new PushOffsetVector(ImmutableList.of(1L, 2L)));
when(latestConsumer.getAssignment()).thenReturn(null);
AtomicReference<CatchupConsumer> cRef = new AtomicReference<>();
// Rather than wait, simulate the latest getting an assignment
final BiConsumer<Object, Long> waitFn = (o, wait) -> cRef.get().newAssignment(ImmutableSet.of(TP0, TP1));
try (CatchupConsumer consumer = new CatchupConsumer(TOPIC, false, SCHEMA, kafkaConsumer, () -> latestConsumer, catchupCoordinator, offsetRange, clock, sleepFn, waitFn, 0, pq -> caughtUp = true)) {
cRef.set(consumer);
runSuccessfulTest(consumer);
}
}
use of io.confluent.ksql.util.PushOffsetVector in project ksql by confluentinc.
In the class CatchupConsumerTest, the method shouldRunConsumer_queueIsAtLimit:
@Test
public void shouldRunConsumer_queueIsAtLimit() {
// Given:
PushOffsetRange offsetRange = new PushOffsetRange(Optional.empty(), new PushOffsetVector(ImmutableList.of(1L, 2L)));
when(queue.isAtLimit()).thenReturn(false, true, true, false);
try (CatchupConsumer consumer = new CatchupConsumer(TOPIC, false, SCHEMA, kafkaConsumer, () -> latestConsumer, catchupCoordinator, offsetRange, clock, sleepFn, waitFn, 0, pq -> caughtUp = true)) {
// When:
consumer.register(queue);
runSuccessfulTest(consumer);
verify(sleepFn, times(2)).accept(any());
}
}
use of io.confluent.ksql.util.PushOffsetVector in project ksql by confluentinc.
In the class ScalablePushQueryFunctionalTest, the method shouldCatchupFromSomeToken:
@Test
public void shouldCatchupFromSomeToken() throws ExecutionException, InterruptedException {
  // Given: all queries running and fresh rows produced to the source topic
  assertAllPersistentQueriesRunning(true);
  TEST_HARNESS.produceRows(pageViewDataProvider.topicName(), pageViewDataProvider,
      FormatFactory.KAFKA, FormatFactory.JSON);
  final CompletableFuture<StreamedRow> headerFuture = new CompletableFuture<>();
  final CompletableFuture<List<StreamedRow>> completionFuture = new CompletableFuture<>();
  // A token starting at offset 0 for both partitions forces a catchup from the beginning.
  final PushOffsetRange continuationToken =
      new PushOffsetRange(Optional.empty(), new PushOffsetVector(ImmutableList.of(0L, 0L)));
  // When: a push query is issued carrying the continuation token
  makeRequestAndSetupSubscriber(
      "SELECT USERID, PAGEID, VIEWTIME from " + streamName + " EMIT CHANGES;",
      ImmutableMap.of("auto.offset.reset", "latest"),
      ImmutableMap.of(KsqlRequestConfig.KSQL_REQUEST_QUERY_PUSH_CONTINUATION_TOKEN,
          continuationToken.serialize()),
      headerFuture, completionFuture);
  headerFuture.get();
  // Then: every produced row (plus the header) arrives, in timestamp order
  assertThatEventually(() -> subscriber.getUniqueRows().size(),
      is(pageViewDataProvider.data().size() + 1));
  final List<StreamedRow> sortedRows = subscriber.getUniqueRows().stream()
      .sorted(this::compareByTimestamp)
      .collect(Collectors.toList());
  assertFirstBatchOfRows(sortedRows);
}
use of io.confluent.ksql.util.PushOffsetVector in project ksql by confluentinc.
In the class ScalablePushConsumer, the method computeProgressToken:
/**
 * Computes the progress token for the current consumer positions and hands it to
 * {@code handleProgressToken}.
 *
 * @param givenStartOffsetVector explicit start offsets; when absent, the current
 *        end offsets are used for both ends (an empty range)
 */
private void computeProgressToken(final Optional<PushOffsetVector> givenStartOffsetVector) {
  // The end of the range is always where the consumer currently sits.
  final PushOffsetVector end = getOffsetVector(currentPositions.get(), topicName, partitions);
  handleProgressToken(givenStartOffsetVector.orElse(end), end);
}
Aggregations