Example 36 with TestProcessingTimeService

Use of org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService in project flink by apache.

From class StreamSourceOperatorLatencyMetricsTest, method testLatencyMarkEmission.

private void testLatencyMarkEmission(int numberLatencyMarkers, OperatorSetupOperation operatorSetup) throws Exception {
    final List<StreamElement> output = new ArrayList<>();
    final TestProcessingTimeService testProcessingTimeService = new TestProcessingTimeService();
    testProcessingTimeService.setCurrentTime(0L);
    final List<Long> processingTimes = Arrays.asList(1L, 10L, 11L, 21L, maxProcessingTime);
    // regular stream source operator
    final StreamSource<Long, ProcessingTimeServiceSource> operator = new StreamSource<>(new ProcessingTimeServiceSource(testProcessingTimeService, processingTimes));
    operatorSetup.setupSourceOperator(operator, testProcessingTimeService);
    // run and wait to be stopped
    OperatorChain<?, ?> operatorChain = new RegularOperatorChain<>(operator.getContainingTask(), StreamTask.createRecordWriterDelegate(operator.getOperatorConfig(), new MockEnvironmentBuilder().build()));
    try {
        operator.run(new Object(), new CollectorOutput<>(output), operatorChain);
        operator.finish();
    } finally {
        operatorChain.close();
    }
    assertEquals(numberLatencyMarkers, output.size());
    long timestamp = 0L;
    int expectedLatencyIndex = 0;
    int i = 0;
    // verify that the output contains only latency markers
    for (; i < numberLatencyMarkers; i++) {
        StreamElement se = output.get(i);
        Assert.assertTrue(se.isLatencyMarker());
        Assert.assertEquals(operator.getOperatorID(), se.asLatencyMarker().getOperatorId());
        Assert.assertEquals(0, se.asLatencyMarker().getSubtaskIndex());
        // determine the next latency mark that should have been emitted:
        // latency marks are emitted once per latencyMarkInterval,
        // as a result of which we never emit both 10 and 11
        while (timestamp > processingTimes.get(expectedLatencyIndex)) {
            expectedLatencyIndex++;
        }
        Assert.assertEquals(processingTimes.get(expectedLatencyIndex).longValue(), se.asLatencyMarker().getMarkedTime());
        timestamp += latencyMarkInterval;
    }
}
Also used : MockEnvironmentBuilder(org.apache.flink.runtime.operators.testutils.MockEnvironmentBuilder) StreamSource(org.apache.flink.streaming.api.operators.StreamSource) ArrayList(java.util.ArrayList) StreamElement(org.apache.flink.streaming.runtime.streamrecord.StreamElement) TestProcessingTimeService(org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService) RegularOperatorChain(org.apache.flink.streaming.runtime.tasks.RegularOperatorChain)
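
The mechanism behind these assertions is that the source registers a periodic timer for latency markers, and TestProcessingTimeService fires due timers synchronously whenever the mocked clock advances. A minimal sketch of that mechanism, using scheduleAtFixedRate from the ProcessingTimeService interface; this is an illustration of the clock-driven firing, not the StreamSource implementation:

import org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService;

public class PeriodicTimerSketch {
    public static void main(String[] args) throws Exception {
        TestProcessingTimeService timeService = new TestProcessingTimeService();
        timeService.setCurrentTime(0L);
        // periodic callback, playing the role of the latency-marker timer;
        // the period of 5 stands in for latencyMarkInterval
        timeService.scheduleAtFixedRate(
                timestamp -> System.out.println("latency mark at " + timestamp),
                0L, /* initial delay */
                5L /* period */);
        // advancing the mocked clock synchronously fires every due callback
        timeService.setCurrentTime(10L);
        timeService.shutdownService();
    }
}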

Example 37 with TestProcessingTimeService

Use of org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService in project flink by apache.

From class InputProcessorUtilTest, method testCreateCheckpointedMultipleInputGate.

@Test
public void testCreateCheckpointedMultipleInputGate() throws Exception {
    try (CloseableRegistry registry = new CloseableRegistry()) {
        MockEnvironment environment = new MockEnvironmentBuilder().build();
        MockStreamTask streamTask = new MockStreamTaskBuilder(environment).build();
        StreamConfig streamConfig = new StreamConfig(environment.getJobConfiguration());
        streamConfig.setCheckpointMode(CheckpointingMode.EXACTLY_ONCE);
        streamConfig.setUnalignedCheckpointsEnabled(true);
        // First input gate has index larger than the second
        List<IndexedInputGate>[] inputGates = new List[] { Collections.singletonList(getGate(1, 4)), Collections.singletonList(getGate(0, 2)) };
        CheckpointBarrierHandler barrierHandler = InputProcessorUtil.createCheckpointBarrierHandler(streamTask, streamConfig, new TestSubtaskCheckpointCoordinator(new MockChannelStateWriter()), streamTask.getName(), inputGates, Collections.emptyList(), new SyncMailboxExecutor(), new TestProcessingTimeService());
        CheckpointedInputGate[] checkpointedMultipleInputGate = InputProcessorUtil.createCheckpointedMultipleInputGate(new SyncMailboxExecutor(), inputGates, environment.getMetricGroup().getIOMetricGroup(), barrierHandler, streamConfig);
        for (CheckpointedInputGate checkpointedInputGate : checkpointedMultipleInputGate) {
            registry.registerCloseable(checkpointedInputGate);
        }
        List<IndexedInputGate> allInputGates = Arrays.stream(inputGates).flatMap(gates -> gates.stream()).collect(Collectors.toList());
        for (IndexedInputGate inputGate : allInputGates) {
            for (int channelId = 0; channelId < inputGate.getNumberOfInputChannels(); channelId++) {
                barrierHandler.processBarrier(new CheckpointBarrier(1, 42, CheckpointOptions.unaligned(CheckpointType.CHECKPOINT, CheckpointStorageLocationReference.getDefault())), new InputChannelInfo(inputGate.getGateIndex(), channelId), false);
            }
        }
        assertTrue(barrierHandler.getAllBarriersReceivedFuture(1).isDone());
    }
}
Also used : MockStreamTaskBuilder(org.apache.flink.streaming.util.MockStreamTaskBuilder) MockChannelStateWriter(org.apache.flink.runtime.checkpoint.channel.MockChannelStateWriter) Arrays(java.util.Arrays) StreamConfig(org.apache.flink.streaming.api.graph.StreamConfig) CheckpointingMode(org.apache.flink.streaming.api.CheckpointingMode) CloseableRegistry(org.apache.flink.core.fs.CloseableRegistry) TestSubtaskCheckpointCoordinator(org.apache.flink.streaming.runtime.tasks.TestSubtaskCheckpointCoordinator) MockStreamTask(org.apache.flink.streaming.util.MockStreamTask) MockEnvironment(org.apache.flink.runtime.operators.testutils.MockEnvironment) InputChannelInfo(org.apache.flink.runtime.checkpoint.channel.InputChannelInfo) SingleInputGate(org.apache.flink.runtime.io.network.partition.consumer.SingleInputGate) SyncMailboxExecutor(org.apache.flink.runtime.mailbox.SyncMailboxExecutor) CheckpointStorageLocationReference(org.apache.flink.runtime.state.CheckpointStorageLocationReference) CheckpointType(org.apache.flink.runtime.checkpoint.CheckpointType) SingleInputGateBuilder(org.apache.flink.runtime.io.network.partition.consumer.SingleInputGateBuilder) InputChannelBuilder(org.apache.flink.runtime.io.network.partition.consumer.InputChannelBuilder) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) CheckpointOptions(org.apache.flink.runtime.checkpoint.CheckpointOptions) MockEnvironmentBuilder(org.apache.flink.runtime.operators.testutils.MockEnvironmentBuilder) TestProcessingTimeService(org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService) Collectors(java.util.stream.Collectors) List(java.util.List) CheckpointBarrier(org.apache.flink.runtime.io.network.api.CheckpointBarrier) IndexedInputGate(org.apache.flink.runtime.io.network.partition.consumer.IndexedInputGate) Collections(java.util.Collections)
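
The getGate(int, int) helper is a private method of the test class that is not reproduced on this page. Judging from the builders in the import list, a hypothetical reconstruction could look like the following; both setter names are assumptions about the SingleInputGateBuilder API, and the real helper may wire its channels differently:

private static IndexedInputGate getGate(int gateIndex, int numberOfChannels) {
    // assumed builder methods, inferred from the imports above
    return new SingleInputGateBuilder()
            .setSingleInputGateIndex(gateIndex)
            .setNumberOfChannels(numberOfChannels)
            .build();
}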

Example 38 with TestProcessingTimeService

Use of org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService in project flink by apache.

From class TestProcessingTimeServiceTest, method testCustomTimeServiceProvider.

@Test
public void testCustomTimeServiceProvider() throws Throwable {
    final TestProcessingTimeService tp = new TestProcessingTimeService();
    final OneInputStreamTaskTestHarness<String, String> testHarness = new OneInputStreamTaskTestHarness<>((env) -> new OneInputStreamTask<>(env, tp), BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO);
    testHarness.setupOutputForSingletonOperatorChain();
    StreamConfig streamConfig = testHarness.getStreamConfig();
    StreamMap<String, String> mapOperator = new StreamMap<>(new StreamTaskTimerTest.DummyMapFunction<>());
    streamConfig.setStreamOperator(mapOperator);
    streamConfig.setOperatorID(new OperatorID());
    testHarness.invoke();
    testHarness.waitForTaskRunning();
    ProcessingTimeService processingTimeService = ((StreamMap<?, ?>) testHarness.getHeadOperator()).getProcessingTimeService();
    assertEquals(Long.MIN_VALUE, processingTimeService.getCurrentProcessingTime());
    tp.setCurrentTime(11);
    assertEquals(11, processingTimeService.getCurrentProcessingTime());
    tp.setCurrentTime(15);
    tp.setCurrentTime(16);
    assertEquals(16, processingTimeService.getCurrentProcessingTime());
    // register two one-shot timers
    processingTimeService.registerTimer(30, timestamp -> {
    });
    processingTimeService.registerTimer(40, timestamp -> {
    });
    assertEquals(2, tp.getNumActiveTimers());
    tp.setCurrentTime(35);
    assertEquals(1, tp.getNumActiveTimers());
    tp.setCurrentTime(40);
    assertEquals(0, tp.getNumActiveTimers());
    tp.shutdownService();
}
Also used : ProcessingTimeService(org.apache.flink.streaming.runtime.tasks.ProcessingTimeService) TestProcessingTimeService(org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService) OneInputStreamTaskTestHarness(org.apache.flink.streaming.runtime.tasks.OneInputStreamTaskTestHarness) StreamConfig(org.apache.flink.streaming.api.graph.StreamConfig) OperatorID(org.apache.flink.runtime.jobgraph.OperatorID) StreamMap(org.apache.flink.streaming.api.operators.StreamMap) Test(org.junit.Test)
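
A short follow-up on the semantics exercised above: timers fire synchronously inside setCurrentTime, so by the time the call returns, getNumActiveTimers() already reflects the drop from 2 to 1 to 0. A minimal sketch; the claim that the callback receives the timer's scheduled timestamp rather than the new clock value is an assumption, since the assertions above only pin down the timer counts:

import org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService;

public class OneShotTimerSketch {
    public static void main(String[] args) throws Exception {
        TestProcessingTimeService tp = new TestProcessingTimeService();
        tp.setCurrentTime(0L);
        // one-shot timer; assumed to fire with its scheduled timestamp (30)
        tp.registerTimer(30, timestamp -> System.out.println("fired at " + timestamp));
        tp.setCurrentTime(35L); // fires the timer before this call returns
        System.out.println(tp.getNumActiveTimers()); // prints 0
        tp.shutdownService();
    }
}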

Example 39 with TestProcessingTimeService

Use of org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService in project flink by apache.

From class Kafka09FetcherTest, method ensureOffsetsGetCommitted.

@Test
public void ensureOffsetsGetCommitted() throws Exception {
    // test data
    final KafkaTopicPartition testPartition1 = new KafkaTopicPartition("test", 42);
    final KafkaTopicPartition testPartition2 = new KafkaTopicPartition("another", 99);
    final Map<KafkaTopicPartition, Long> testCommitData1 = new HashMap<>();
    testCommitData1.put(testPartition1, 11L);
    testCommitData1.put(testPartition2, 18L);
    final Map<KafkaTopicPartition, Long> testCommitData2 = new HashMap<>();
    testCommitData2.put(testPartition1, 19L);
    testCommitData2.put(testPartition2, 28L);
    final BlockingQueue<Map<TopicPartition, OffsetAndMetadata>> commitStore = new LinkedBlockingQueue<>();
    // ----- the mock consumer with poll(), wakeup(), and commit(A)sync calls ----
    final MultiShotLatch blockerLatch = new MultiShotLatch();
    KafkaConsumer<?, ?> mockConsumer = mock(KafkaConsumer.class);
    when(mockConsumer.poll(anyLong())).thenAnswer(new Answer<ConsumerRecords<?, ?>>() {

        @Override
        public ConsumerRecords<?, ?> answer(InvocationOnMock invocation) throws InterruptedException {
            blockerLatch.await();
            return ConsumerRecords.empty();
        }
    });
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) {
            blockerLatch.trigger();
            return null;
        }
    }).when(mockConsumer).wakeup();
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) {
            @SuppressWarnings("unchecked") Map<TopicPartition, OffsetAndMetadata> offsets = (Map<TopicPartition, OffsetAndMetadata>) invocation.getArguments()[0];
            OffsetCommitCallback callback = (OffsetCommitCallback) invocation.getArguments()[1];
            commitStore.add(offsets);
            callback.onComplete(offsets, null);
            return null;
        }
    }).when(mockConsumer).commitAsync(Mockito.<Map<TopicPartition, OffsetAndMetadata>>any(), any(OffsetCommitCallback.class));
    // make sure the fetcher creates the mock consumer
    whenNew(KafkaConsumer.class).withAnyArguments().thenReturn(mockConsumer);
    // ----- create the test fetcher -----
    @SuppressWarnings("unchecked") SourceContext<String> sourceContext = mock(SourceContext.class);
    Map<KafkaTopicPartition, Long> partitionsWithInitialOffsets = Collections.singletonMap(new KafkaTopicPartition("test", 42), KafkaTopicPartitionStateSentinel.GROUP_OFFSET);
    KeyedDeserializationSchema<String> schema = new KeyedDeserializationSchemaWrapper<>(new SimpleStringSchema());
    final Kafka09Fetcher<String> fetcher = new Kafka09Fetcher<>(
            sourceContext,
            partitionsWithInitialOffsets,
            null, /* periodic watermark extractor */
            null, /* punctuated watermark extractor */
            new TestProcessingTimeService(),
            10, /* watermark interval */
            this.getClass().getClassLoader(),
            "task_name",
            new UnregisteredMetricsGroup(),
            schema,
            new Properties(),
            0L,
            false);
    // ----- run the fetcher -----
    final AtomicReference<Throwable> error = new AtomicReference<>();
    final Thread fetcherRunner = new Thread("fetcher runner") {

        @Override
        public void run() {
            try {
                fetcher.runFetchLoop();
            } catch (Throwable t) {
                error.set(t);
            }
        }
    };
    fetcherRunner.start();
    // ----- trigger the first offset commit -----
    fetcher.commitInternalOffsetsToKafka(testCommitData1);
    Map<TopicPartition, OffsetAndMetadata> result1 = commitStore.take();
    for (Entry<TopicPartition, OffsetAndMetadata> entry : result1.entrySet()) {
        TopicPartition partition = entry.getKey();
        if (partition.topic().equals("test")) {
            assertEquals(42, partition.partition());
            assertEquals(12L, entry.getValue().offset());
        } else if (partition.topic().equals("another")) {
            assertEquals(99, partition.partition());
            assertEquals(17L, entry.getValue().offset());
        }
    }
    // ----- trigger the second offset commit -----
    fetcher.commitInternalOffsetsToKafka(testCommitData2);
    Map<TopicPartition, OffsetAndMetadata> result2 = commitStore.take();
    for (Entry<TopicPartition, OffsetAndMetadata> entry : result2.entrySet()) {
        TopicPartition partition = entry.getKey();
        if (partition.topic().equals("test")) {
            assertEquals(42, partition.partition());
            assertEquals(20L, entry.getValue().offset());
        } else if (partition.topic().equals("another")) {
            assertEquals(99, partition.partition());
            assertEquals(27L, entry.getValue().offset());
        }
    }
    // ----- test done, wait till the fetcher is done for a clean shutdown -----
    fetcher.cancel();
    fetcherRunner.join();
    // check that there were no errors in the fetcher
    final Throwable caughtError = error.get();
    if (caughtError != null && !(caughtError instanceof Handover.ClosedException)) {
        throw new Exception("Exception in the fetcher", caughtError);
    }
}
Also used : UnregisteredMetricsGroup(org.apache.flink.metrics.groups.UnregisteredMetricsGroup) HashMap(java.util.HashMap) MultiShotLatch(org.apache.flink.core.testutils.MultiShotLatch) KafkaTopicPartition(org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) Properties(java.util.Properties) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) KeyedDeserializationSchemaWrapper(org.apache.flink.streaming.util.serialization.KeyedDeserializationSchemaWrapper) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) Handover(org.apache.flink.streaming.connectors.kafka.internal.Handover) AtomicReference(java.util.concurrent.atomic.AtomicReference) KafkaConsumerThread(org.apache.flink.streaming.connectors.kafka.internal.KafkaConsumerThread) Kafka09Fetcher(org.apache.flink.streaming.connectors.kafka.internal.Kafka09Fetcher) InvocationOnMock(org.mockito.invocation.InvocationOnMock) TopicPartition(org.apache.kafka.common.TopicPartition) Mockito.anyLong(org.mockito.Mockito.anyLong) SimpleStringSchema(org.apache.flink.streaming.util.serialization.SimpleStringSchema) TestProcessingTimeService(org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService) Map(java.util.Map) OffsetCommitCallback(org.apache.kafka.clients.consumer.OffsetCommitCallback) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
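
Note the off-by-one between what the test commits and what it asserts for the subscribed "test" partition: committing internal offset 11 yields a Kafka commit of 12, and 19 yields 20. This reflects Kafka's convention that the committed offset names the next record to read, while Flink's internal state tracks the last record it processed. A sketch of the convention, not the fetcher's actual code:

import org.apache.kafka.clients.consumer.OffsetAndMetadata;

public class OffsetConventionSketch {
    public static void main(String[] args) {
        long lastProcessedOffset = 11L; // Flink-internal: last record handled
        // Kafka-side: first offset that still has to be read
        OffsetAndMetadata committed = new OffsetAndMetadata(lastProcessedOffset + 1);
        System.out.println(committed.offset()); // prints 12, as asserted above
    }
}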

Example 40 with TestProcessingTimeService

Use of org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService in project flink by apache.

From class AbstractFetcherTest, method testPunctuatedWatermarks.

// ------------------------------------------------------------------------
//   Timestamps & watermarks tests
// ------------------------------------------------------------------------
@Test
public void testPunctuatedWatermarks() throws Exception {
    final String testTopic = "test topic name";
    Map<KafkaTopicPartition, Long> originalPartitions = new HashMap<>();
    originalPartitions.put(new KafkaTopicPartition(testTopic, 7), KafkaTopicPartitionStateSentinel.LATEST_OFFSET);
    originalPartitions.put(new KafkaTopicPartition(testTopic, 13), KafkaTopicPartitionStateSentinel.LATEST_OFFSET);
    originalPartitions.put(new KafkaTopicPartition(testTopic, 21), KafkaTopicPartitionStateSentinel.LATEST_OFFSET);
    TestSourceContext<Long> sourceContext = new TestSourceContext<>();
    TestProcessingTimeService processingTimeProvider = new TestProcessingTimeService();
    TestFetcher<Long> fetcher = new TestFetcher<>(
            sourceContext,
            originalPartitions,
            null, /* periodic watermark assigner */
            new SerializedValue<AssignerWithPunctuatedWatermarks<Long>>(new PunctuatedTestExtractor()),
            processingTimeProvider,
            0);
    final KafkaTopicPartitionState<Object> part1 = fetcher.subscribedPartitionStates()[0];
    final KafkaTopicPartitionState<Object> part2 = fetcher.subscribedPartitionStates()[1];
    final KafkaTopicPartitionState<Object> part3 = fetcher.subscribedPartitionStates()[2];
    // elements generate a watermark if the timestamp is a multiple of three
    // elements for partition 1
    fetcher.emitRecord(1L, part1, 1L);
    fetcher.emitRecord(2L, part1, 2L);
    fetcher.emitRecord(3L, part1, 3L);
    assertEquals(3L, sourceContext.getLatestElement().getValue().longValue());
    assertEquals(3L, sourceContext.getLatestElement().getTimestamp());
    assertFalse(sourceContext.hasWatermark());
    // elements for partition 2
    fetcher.emitRecord(12L, part2, 1L);
    assertEquals(12L, sourceContext.getLatestElement().getValue().longValue());
    assertEquals(12L, sourceContext.getLatestElement().getTimestamp());
    assertFalse(sourceContext.hasWatermark());
    // elements for partition 3
    fetcher.emitRecord(101L, part3, 1L);
    fetcher.emitRecord(102L, part3, 2L);
    assertEquals(102L, sourceContext.getLatestElement().getValue().longValue());
    assertEquals(102L, sourceContext.getLatestElement().getTimestamp());
    // now, we should have a watermark
    assertTrue(sourceContext.hasWatermark());
    assertEquals(3L, sourceContext.getLatestWatermark().getTimestamp());
    // advance partition 3
    fetcher.emitRecord(1003L, part3, 3L);
    fetcher.emitRecord(1004L, part3, 4L);
    fetcher.emitRecord(1005L, part3, 5L);
    assertEquals(1005L, sourceContext.getLatestElement().getValue().longValue());
    assertEquals(1005L, sourceContext.getLatestElement().getTimestamp());
    // advance partition 1 beyond partition 2 - this bumps the watermark
    fetcher.emitRecord(30L, part1, 4L);
    assertEquals(30L, sourceContext.getLatestElement().getValue().longValue());
    assertEquals(30L, sourceContext.getLatestElement().getTimestamp());
    assertTrue(sourceContext.hasWatermark());
    assertEquals(12L, sourceContext.getLatestWatermark().getTimestamp());
    // advance partition 2 again - this bumps the watermark
    fetcher.emitRecord(13L, part2, 2L);
    assertFalse(sourceContext.hasWatermark());
    fetcher.emitRecord(14L, part2, 3L);
    assertFalse(sourceContext.hasWatermark());
    fetcher.emitRecord(15L, part2, 3L);
    assertTrue(sourceContext.hasWatermark());
    assertEquals(15L, sourceContext.getLatestWatermark().getTimestamp());
}
Also used : HashMap(java.util.HashMap) AssignerWithPunctuatedWatermarks(org.apache.flink.streaming.api.functions.AssignerWithPunctuatedWatermarks) TestProcessingTimeService(org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService) Test(org.junit.Test)
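
PunctuatedTestExtractor itself is not shown on this page. Below is a sketch consistent with the comment "elements generate a watermark if the timestamp is a multiple of three"; it is an assumption about the real test class. The assertions then follow because the fetcher emits the minimum of the per-partition watermarks, which is why the first watermark is 3 even though partition 3 has already reached 102:

import org.apache.flink.streaming.api.functions.AssignerWithPunctuatedWatermarks;
import org.apache.flink.streaming.api.watermark.Watermark;

class PunctuatedTestExtractorSketch implements AssignerWithPunctuatedWatermarks<Long> {

    @Override
    public long extractTimestamp(Long element, long previousElementTimestamp) {
        // the element value doubles as its event timestamp in this test
        return element;
    }

    @Override
    public Watermark checkAndGetNextWatermark(Long lastElement, long extractedTimestamp) {
        // punctuated watermark whenever the timestamp is a multiple of three
        return extractedTimestamp % 3 == 0 ? new Watermark(extractedTimestamp) : null;
    }
}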

Aggregations

TestProcessingTimeService (org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService) 78
Test (org.junit.Test) 66
KeyGroupRange (org.apache.flink.runtime.state.KeyGroupRange) 17
HashMap (java.util.HashMap) 16
UnregisteredMetricsGroup (org.apache.flink.metrics.groups.UnregisteredMetricsGroup) 15
ArrayList (java.util.ArrayList) 14
CloseableRegistry (org.apache.flink.core.fs.CloseableRegistry) 11
MemoryStateBackend (org.apache.flink.runtime.state.memory.MemoryStateBackend) 10
InvocationOnMock (org.mockito.invocation.InvocationOnMock) 10
Watermark (org.apache.flink.streaming.api.watermark.Watermark) 9
List (java.util.List) 8
JobID (org.apache.flink.api.common.JobID) 8
MockEnvironment (org.apache.flink.runtime.operators.testutils.MockEnvironment) 8
VoidNamespace (org.apache.flink.runtime.state.VoidNamespace) 8
VoidNamespaceSerializer (org.apache.flink.runtime.state.VoidNamespaceSerializer) 8
Arrays (java.util.Arrays) 6
Collections (java.util.Collections) 6
Properties (java.util.Properties) 5
AtomicReference (java.util.concurrent.atomic.AtomicReference) 5
BiConsumer (java.util.function.BiConsumer) 5
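
Across the 78 usages aggregated above, the pattern is the same: substitute a TestProcessingTimeService wherever the component under test expects a ProcessingTimeService, then drive the clock from the test. A minimal sketch; the Component class and its injection point are hypothetical, standing in for the various hooks seen in the examples (constructor argument, test harness, or setup method):

import org.apache.flink.streaming.runtime.tasks.ProcessingTimeService;
import org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService;

public class InjectionPatternSketch {
    // hypothetical component that schedules work on a ProcessingTimeService
    static class Component {
        void start(ProcessingTimeService timeService) {
            timeService.registerTimer(100L, ts -> System.out.println("work at " + ts));
        }
    }

    public static void main(String[] args) throws Exception {
        TestProcessingTimeService time = new TestProcessingTimeService();
        time.setCurrentTime(0L);
        new Component().start(time); // inject the test clock instead of system time
        time.setCurrentTime(100L);   // deterministically fires the timer
        time.shutdownService();
    }
}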