Example 1 with PushOffsetVector

Use of io.confluent.ksql.util.PushOffsetVector in the project ksql by confluentinc.

From the class ScalablePushConsumer, method getOffsetVector:

private static PushOffsetVector getOffsetVector(final Map<TopicPartition, Long> offsets, final String topic, final int numPartitions) {
    final List<Long> offsetList = new ArrayList<>();
    for (int i = 0; i < numPartitions; i++) {
        final TopicPartition tp = new TopicPartition(topic, i);
        offsetList.add(offsets.getOrDefault(tp, -1L));
    }
    return new PushOffsetVector(offsetList);
}
Also used : PushOffsetVector(io.confluent.ksql.util.PushOffsetVector) TopicPartition(org.apache.kafka.common.TopicPartition) AtomicLong(java.util.concurrent.atomic.AtomicLong) ArrayList(java.util.ArrayList)
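Below is a minimal sketch of how this helper behaves: any partition with no recorded offset comes back as -1L in the resulting vector. The wrapper class OffsetVectorSketch and its main method are illustrative only; the snippet assumes nothing beyond the PushOffsetVector(List<Long>) constructor used above.

import io.confluent.ksql.util.PushOffsetVector;
import org.apache.kafka.common.TopicPartition;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class OffsetVectorSketch {

    // Illustrative stand-in for the private ScalablePushConsumer.getOffsetVector above.
    static PushOffsetVector offsetVector(final Map<TopicPartition, Long> offsets, final String topic, final int numPartitions) {
        final List<Long> offsetList = new ArrayList<>();
        for (int i = 0; i < numPartitions; i++) {
            // Any partition without an entry in the map defaults to -1L.
            offsetList.add(offsets.getOrDefault(new TopicPartition(topic, i), -1L));
        }
        return new PushOffsetVector(offsetList);
    }

    public static void main(final String[] args) {
        final Map<TopicPartition, Long> offsets = new HashMap<>();
        offsets.put(new TopicPartition("pageviews", 0), 42L);
        // Two-partition topic, but only partition 0 has an offset: the vector holds 42 and -1.
        final PushOffsetVector vector = offsetVector(offsets, "pageviews", 2);
        System.out.println(vector);
    }
}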

Example 2 with PushOffsetVector

Use of io.confluent.ksql.util.PushOffsetVector in the project ksql by confluentinc.

From the class CatchupConsumerTest, method shouldRunConsumer_success_waitForLatestAssignment:

@Test
public void shouldRunConsumer_success_waitForLatestAssignment() {
    // Given:
    PushOffsetRange offsetRange = new PushOffsetRange(Optional.empty(), new PushOffsetVector(ImmutableList.of(1L, 2L)));
    when(latestConsumer.getAssignment()).thenReturn(null);
    AtomicReference<CatchupConsumer> cRef = new AtomicReference<>();
    // Rather than wait, simulate the latest getting an assignment
    final BiConsumer<Object, Long> waitFn = (o, wait) -> cRef.get().newAssignment(ImmutableSet.of(TP0, TP1));
    try (CatchupConsumer consumer = new CatchupConsumer(TOPIC, false, SCHEMA, kafkaConsumer, () -> latestConsumer, catchupCoordinator, offsetRange, clock, sleepFn, waitFn, 0, pq -> caughtUp = true)) {
        cRef.set(consumer);
        runSuccessfulTest(consumer);
    }
}
Also used : ArgumentMatchers.any(org.mockito.ArgumentMatchers.any) Mock(org.mockito.Mock) Assert.assertThrows(org.junit.Assert.assertThrows) RunWith(org.junit.runner.RunWith) RECORD0_1(io.confluent.ksql.physical.scalablepush.consumer.CommonTestUtil.RECORD0_1) QR0_2(io.confluent.ksql.physical.scalablepush.consumer.CommonTestUtil.QR0_2) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) QR1_2(io.confluent.ksql.physical.scalablepush.consumer.CommonTestUtil.QR1_2) AtomicReference(java.util.concurrent.atomic.AtomicReference) QR1_3(io.confluent.ksql.physical.scalablepush.consumer.CommonTestUtil.QR1_3) CommonTestUtil.verifyQueryRows(io.confluent.ksql.physical.scalablepush.consumer.CommonTestUtil.verifyQueryRows) QR0_1(io.confluent.ksql.physical.scalablepush.consumer.CommonTestUtil.QR0_1) PushOffsetRange(io.confluent.ksql.util.PushOffsetRange) ImmutableList(com.google.common.collect.ImmutableList) BiConsumer(java.util.function.BiConsumer) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) QueryId(io.confluent.ksql.query.QueryId) CommonTestUtil.expectPoll(io.confluent.ksql.physical.scalablepush.consumer.CommonTestUtil.expectPoll) Before(org.junit.Before) ImmutableSet(com.google.common.collect.ImmutableSet) ProcessingQueue(io.confluent.ksql.physical.scalablepush.ProcessingQueue) ImmutableMap(com.google.common.collect.ImmutableMap) CommonTestUtil.offsetsRow(io.confluent.ksql.physical.scalablepush.consumer.CommonTestUtil.offsetsRow) TP1(io.confluent.ksql.physical.scalablepush.consumer.CommonTestUtil.TP1) EMPTY_RECORDS(io.confluent.ksql.physical.scalablepush.consumer.CommonTestUtil.EMPTY_RECORDS) TP0(io.confluent.ksql.physical.scalablepush.consumer.CommonTestUtil.TP0) Mockito.times(org.mockito.Mockito.times) Test(org.junit.Test) Mockito.when(org.mockito.Mockito.when) PartitionInfo(org.apache.kafka.common.PartitionInfo) Mockito.verify(org.mockito.Mockito.verify) Consumer(java.util.function.Consumer) WAIT_FOR_ASSIGNMENT_MS(io.confluent.ksql.physical.scalablepush.consumer.CatchupConsumer.WAIT_FOR_ASSIGNMENT_MS) RECORD0_2(io.confluent.ksql.physical.scalablepush.consumer.CommonTestUtil.RECORD0_2) RECORD1_2(io.confluent.ksql.physical.scalablepush.consumer.CommonTestUtil.RECORD1_2) RECORD1_3(io.confluent.ksql.physical.scalablepush.consumer.CommonTestUtil.RECORD1_3) GenericRow(io.confluent.ksql.GenericRow) TOPIC(io.confluent.ksql.physical.scalablepush.consumer.CommonTestUtil.TOPIC) KsqlException(io.confluent.ksql.util.KsqlException) Clock(java.time.Clock) Optional(java.util.Optional) Matchers.is(org.hamcrest.Matchers.is) SCHEMA(io.confluent.ksql.physical.scalablepush.consumer.CommonTestUtil.SCHEMA) Matchers.containsString(org.hamcrest.Matchers.containsString) PushOffsetVector(io.confluent.ksql.util.PushOffsetVector) MockitoJUnitRunner(org.mockito.junit.MockitoJUnitRunner) Mockito.mock(org.mockito.Mockito.mock) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) PushOffsetVector(io.confluent.ksql.util.PushOffsetVector) AtomicReference(java.util.concurrent.atomic.AtomicReference) PushOffsetRange(io.confluent.ksql.util.PushOffsetRange) Test(org.junit.Test)
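The interesting bit in this test is the AtomicReference: the waitFn callback must refer to the CatchupConsumer that is only constructed a few lines later, so the reference is populated after construction and dereferenced only when the callback fires. A self-contained sketch of that pattern in plain Java (no ksql types; the names here are illustrative):

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiConsumer;

public class DeferredCallbackSketch {

    public static void main(final String[] args) {
        final AtomicReference<Runnable> ref = new AtomicReference<>();

        // Defined before the target object exists; only dereferenced when actually invoked.
        final BiConsumer<Object, Long> waitFn = (o, waitMs) -> ref.get().run();

        final Runnable consumer = () -> System.out.println("simulated new assignment");
        ref.set(consumer);        // populate the reference after construction
        waitFn.accept(null, 0L);  // prints "simulated new assignment"
    }
}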

Example 3 with PushOffsetVector

Use of io.confluent.ksql.util.PushOffsetVector in the project ksql by confluentinc.

From the class CatchupConsumerTest, method shouldRunConsumer_queueIsAtLimit:

@Test
public void shouldRunConsumer_queueIsAtLimit() {
    // Given:
    PushOffsetRange offsetRange = new PushOffsetRange(Optional.empty(), new PushOffsetVector(ImmutableList.of(1L, 2L)));
    when(queue.isAtLimit()).thenReturn(false, true, true, false);
    try (CatchupConsumer consumer = new CatchupConsumer(TOPIC, false, SCHEMA, kafkaConsumer, () -> latestConsumer, catchupCoordinator, offsetRange, clock, sleepFn, waitFn, 0, pq -> caughtUp = true)) {
        // When:
        consumer.register(queue);
        runSuccessfulTest(consumer);
        verify(sleepFn, times(2)).accept(any());
    }
}
Also used : PushOffsetVector(io.confluent.ksql.util.PushOffsetVector) PushOffsetRange(io.confluent.ksql.util.PushOffsetRange) Test(org.junit.Test)
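Here the stubbed queue.isAtLimit() sequence (false, true, true, false) is what forces exactly two back-off sleeps. A rough sketch of that back-pressure idea, using a hypothetical BooleanSupplier in place of the real ProcessingQueue:

import java.util.Iterator;
import java.util.List;
import java.util.function.BooleanSupplier;
import java.util.function.Consumer;

public class BackPressureSketch {

    public static void main(final String[] args) {
        // Simulates the mocked isAtLimit() answers: false, true, true, false.
        final Iterator<Boolean> answers = List.of(false, true, true, false).iterator();
        final BooleanSupplier isAtLimit = answers::next;

        final Consumer<Long> sleepFn = ms -> { /* real code would pause here */ };

        int sleeps = 0;
        isAtLimit.getAsBoolean();            // first check: not at limit, keep consuming
        while (isAtLimit.getAsBoolean()) {   // back off while the queue reports it is at its limit
            sleepFn.accept(10L);
            sleeps++;
        }
        System.out.println("sleeps = " + sleeps); // 2, matching verify(sleepFn, times(2))
    }
}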

Example 4 with PushOffsetVector

Use of io.confluent.ksql.util.PushOffsetVector in the project ksql by confluentinc.

From the class ScalablePushQueryFunctionalTest, method shouldCatchupFromSomeToken:

@Test
public void shouldCatchupFromSomeToken() throws ExecutionException, InterruptedException {
    assertAllPersistentQueriesRunning(true);
    TEST_HARNESS.produceRows(pageViewDataProvider.topicName(), pageViewDataProvider, FormatFactory.KAFKA, FormatFactory.JSON);
    final CompletableFuture<StreamedRow> header = new CompletableFuture<>();
    final CompletableFuture<List<StreamedRow>> complete = new CompletableFuture<>();
    final PushOffsetRange range = new PushOffsetRange(Optional.empty(), new PushOffsetVector(ImmutableList.of(0L, 0L)));
    makeRequestAndSetupSubscriber("SELECT USERID, PAGEID, VIEWTIME from " + streamName + " EMIT CHANGES;", ImmutableMap.of("auto.offset.reset", "latest"), ImmutableMap.of(KsqlRequestConfig.KSQL_REQUEST_QUERY_PUSH_CONTINUATION_TOKEN, range.serialize()), header, complete);
    header.get();
    assertThatEventually(() -> subscriber.getUniqueRows().size(), is(pageViewDataProvider.data().size() + 1));
    List<StreamedRow> orderedRows = subscriber.getUniqueRows().stream().sorted(this::compareByTimestamp).collect(Collectors.toList());
    assertFirstBatchOfRows(orderedRows);
}
Also used : CompletableFuture(java.util.concurrent.CompletableFuture) StreamedRow(io.confluent.ksql.rest.entity.StreamedRow) PushOffsetVector(io.confluent.ksql.util.PushOffsetVector) List(java.util.List) ArrayList(java.util.ArrayList) ImmutableList(com.google.common.collect.ImmutableList) PushOffsetRange(io.confluent.ksql.util.PushOffsetRange) IntegrationTest(io.confluent.common.utils.IntegrationTest) Test(org.junit.Test)
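For context, the continuation token sent with the request is just a serialized PushOffsetRange with an empty start and an all-zero offset vector, so the query resumes from the beginning of both partitions. A minimal sketch of building such a token, assuming only the constructors and the serialize() call used above:

import com.google.common.collect.ImmutableList;
import io.confluent.ksql.util.PushOffsetRange;
import io.confluent.ksql.util.PushOffsetVector;
import java.util.Optional;

public class ContinuationTokenSketch {

    public static void main(final String[] args) {
        // Resume from offset 0 on each of the topic's two partitions.
        final PushOffsetRange range = new PushOffsetRange(
            Optional.empty(),
            new PushOffsetVector(ImmutableList.of(0L, 0L)));

        // This string is what the test passes as the
        // KSQL_REQUEST_QUERY_PUSH_CONTINUATION_TOKEN request property.
        final String token = range.serialize();
        System.out.println(token);
    }
}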

Example 5 with PushOffsetVector

Use of io.confluent.ksql.util.PushOffsetVector in the project ksql by confluentinc.

From the class ScalablePushConsumer, method computeProgressToken:

private void computeProgressToken(final Optional<PushOffsetVector> givenStartOffsetVector) {
    final PushOffsetVector endOffsetVector = getOffsetVector(currentPositions.get(), topicName, partitions);
    final PushOffsetVector startOffsetVector = givenStartOffsetVector.orElse(endOffsetVector);
    handleProgressToken(startOffsetVector, endOffsetVector);
}
Also used : PushOffsetVector(io.confluent.ksql.util.PushOffsetVector)
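When the caller supplies no start vector, the start simply collapses to the freshly computed end vector, so the resulting progress token covers a zero-width range. A small sketch of that fallback, using only Optional.orElse and the PushOffsetVector constructor seen in the earlier examples:

import com.google.common.collect.ImmutableList;
import io.confluent.ksql.util.PushOffsetVector;
import java.util.Optional;

public class ProgressTokenSketch {

    public static void main(final String[] args) {
        final PushOffsetVector endOffsetVector = new PushOffsetVector(ImmutableList.of(5L, 7L));

        // No explicit start vector given: fall back to the current end vector.
        final Optional<PushOffsetVector> givenStartOffsetVector = Optional.empty();
        final PushOffsetVector startOffsetVector = givenStartOffsetVector.orElse(endOffsetVector);

        System.out.println(startOffsetVector == endOffsetVector); // true: same instance
    }
}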

Aggregations

PushOffsetVector (io.confluent.ksql.util.PushOffsetVector) 7
PushOffsetRange (io.confluent.ksql.util.PushOffsetRange) 4
Test (org.junit.Test) 4
ImmutableList (com.google.common.collect.ImmutableList) 2
GenericRow (io.confluent.ksql.GenericRow) 2
KsqlException (io.confluent.ksql.util.KsqlException) 2
ArrayList (java.util.ArrayList) 2
ImmutableMap (com.google.common.collect.ImmutableMap) 1
ImmutableSet (com.google.common.collect.ImmutableSet) 1
IntegrationTest (io.confluent.common.utils.IntegrationTest) 1
ProcessingQueue (io.confluent.ksql.physical.scalablepush.ProcessingQueue) 1
WAIT_FOR_ASSIGNMENT_MS (io.confluent.ksql.physical.scalablepush.consumer.CatchupConsumer.WAIT_FOR_ASSIGNMENT_MS) 1
EMPTY_RECORDS (io.confluent.ksql.physical.scalablepush.consumer.CommonTestUtil.EMPTY_RECORDS) 1
QR0_1 (io.confluent.ksql.physical.scalablepush.consumer.CommonTestUtil.QR0_1) 1
QR0_2 (io.confluent.ksql.physical.scalablepush.consumer.CommonTestUtil.QR0_2) 1
QR1_2 (io.confluent.ksql.physical.scalablepush.consumer.CommonTestUtil.QR1_2) 1
QR1_3 (io.confluent.ksql.physical.scalablepush.consumer.CommonTestUtil.QR1_3) 1
RECORD0_1 (io.confluent.ksql.physical.scalablepush.consumer.CommonTestUtil.RECORD0_1) 1
RECORD0_2 (io.confluent.ksql.physical.scalablepush.consumer.CommonTestUtil.RECORD0_2) 1
RECORD1_2 (io.confluent.ksql.physical.scalablepush.consumer.CommonTestUtil.RECORD1_2) 1