
Example 91 with Watermark

Use of org.apache.flink.streaming.api.watermark.Watermark in project flink by apache.

From the class WindowOperatorTest, method testDropDueToLatenessSessionWithLatenessPurgingTrigger.

@Test
public void testDropDueToLatenessSessionWithLatenessPurgingTrigger() throws Exception {
    // this has the same output as testSideOutputDueToLatenessSessionZeroLateness() because
    // the allowed lateness is too small to make a difference
    final int gapSize = 3;
    final long lateness = 10;
    ReducingStateDescriptor<Tuple2<String, Integer>> stateDesc =
            new ReducingStateDescriptor<>(
                    "window-contents",
                    new SumReducer(),
                    STRING_INT_TUPLE.createSerializer(new ExecutionConfig()));
    WindowOperator<String, Tuple2<String, Integer>, Tuple2<String, Integer>, Tuple3<String, Long, Long>, TimeWindow> operator =
            new WindowOperator<>(
                    EventTimeSessionWindows.withGap(Time.seconds(gapSize)),
                    new TimeWindow.Serializer(),
                    new TupleKeySelector(),
                    BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()),
                    stateDesc,
                    new InternalSingleValueWindowFunction<>(new ReducedSessionWindowFunction()),
                    PurgingTrigger.of(EventTimeTrigger.create()),
                    lateness,
                    lateOutputTag);
    OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Tuple3<String, Long, Long>> testHarness = createTestHarness(operator);
    testHarness.open();
    ConcurrentLinkedQueue<Object> expected = new ConcurrentLinkedQueue<>();
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 1000));
    testHarness.processWatermark(new Watermark(1999));
    expected.add(new Watermark(1999));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 2000));
    testHarness.processWatermark(new Watermark(4998));
    expected.add(new Watermark(4998));
    // this will not be dropped because the session we're adding to has a maxTimestamp
    // after the current watermark
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 4500));
    // new session
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 8500));
    testHarness.processWatermark(new Watermark(7400));
    expected.add(new Watermark(7400));
    // this will merge the two sessions into one
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 7000));
    testHarness.processWatermark(new Watermark(11501));
    expected.add(new StreamRecord<>(new Tuple3<>("key2-5", 1000L, 11500L), 11499));
    expected.add(new Watermark(11501));
    // new session
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 11600));
    testHarness.processWatermark(new Watermark(14600));
    expected.add(new StreamRecord<>(new Tuple3<>("key2-1", 11600L, 14600L), 14599));
    expected.add(new Watermark(14600));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 10000));
    expected.add(new StreamRecord<>(new Tuple3<>("key2-1", 10000L, 14600L), 14599));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 14500));
    testHarness.processWatermark(new Watermark(20000));
    expected.add(new StreamRecord<>(new Tuple3<>("key2-1", 10000L, 17500L), 17499));
    expected.add(new Watermark(20000));
    testHarness.processWatermark(new Watermark(100000));
    expected.add(new Watermark(100000));
    ConcurrentLinkedQueue<Object> actual = testHarness.getOutput();
    TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expected, actual, new Tuple3ResultSortComparator());
    testHarness.close();
}
Also used : ReducingStateDescriptor(org.apache.flink.api.common.state.ReducingStateDescriptor) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) TimeWindow(org.apache.flink.streaming.api.windowing.windows.TimeWindow) TypeHint(org.apache.flink.api.common.typeinfo.TypeHint) Tuple2(org.apache.flink.api.java.tuple.Tuple2) Tuple3(org.apache.flink.api.java.tuple.Tuple3) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) Watermark(org.apache.flink.streaming.api.watermark.Watermark) Test(org.junit.Test)
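
For context, the same combination that this test wires up directly on the operator (event-time session windows with a 3 s gap, a PurgingTrigger around the event-time trigger, 10 ms of allowed lateness, and a late-data side output) can be configured through the public DataStream API. The following is a minimal sketch under that assumption; the class name, key selector, and reduce function are illustrative and not part of the test above.

import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.windowing.assigners.EventTimeSessionWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.triggers.EventTimeTrigger;
import org.apache.flink.streaming.api.windowing.triggers.PurgingTrigger;
import org.apache.flink.util.OutputTag;

public class SessionWithLatenessSketch {

    // Late records are routed here instead of being dropped silently.
    static final OutputTag<Tuple2<String, Integer>> LATE_TAG =
            new OutputTag<Tuple2<String, Integer>>("late-data") {};

    static SingleOutputStreamOperator<Tuple2<String, Integer>> apply(DataStream<Tuple2<String, Integer>> input) {
        return input
                .keyBy(new KeySelector<Tuple2<String, Integer>, String>() {
                    @Override
                    public String getKey(Tuple2<String, Integer> value) {
                        return value.f0;
                    }
                })
                .window(EventTimeSessionWindows.withGap(Time.seconds(3)))
                .trigger(PurgingTrigger.of(EventTimeTrigger.create()))
                .allowedLateness(Time.milliseconds(10))
                .sideOutputLateData(LATE_TAG)
                .reduce(new ReduceFunction<Tuple2<String, Integer>>() {
                    @Override
                    public Tuple2<String, Integer> reduce(Tuple2<String, Integer> a, Tuple2<String, Integer> b) {
                        return new Tuple2<>(a.f0, a.f1 + b.f1);
                    }
                });
    }
}

Downstream, the late records would be read with result.getSideOutput(LATE_TAG); the harness-based equivalent of that is shown in Examples 93 and 94 below.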

Example 92 with Watermark

Use of org.apache.flink.streaming.api.watermark.Watermark in project flink by apache.

From the class WindowOperatorTest, method testCleanupTimeOverflow.

@Test
public void testCleanupTimeOverflow() throws Exception {
    final int windowSize = 1000;
    final long lateness = 2000;
    ReducingStateDescriptor<Tuple2<String, Integer>> stateDesc =
            new ReducingStateDescriptor<>(
                    "window-contents",
                    new SumReducer(),
                    STRING_INT_TUPLE.createSerializer(new ExecutionConfig()));
    TumblingEventTimeWindows windowAssigner = TumblingEventTimeWindows.of(Time.milliseconds(windowSize));
    final WindowOperator<String, Tuple2<String, Integer>, Tuple2<String, Integer>, Tuple2<String, Integer>, TimeWindow> operator =
            new WindowOperator<>(
                    windowAssigner,
                    new TimeWindow.Serializer(),
                    new TupleKeySelector(),
                    BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()),
                    stateDesc,
                    new InternalSingleValueWindowFunction<>(new PassThroughWindowFunction<String, TimeWindow, Tuple2<String, Integer>>()),
                    EventTimeTrigger.create(),
                    lateness,
                    null);
    OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Tuple2<String, Integer>> testHarness = createTestHarness(operator);
    testHarness.open();
    ConcurrentLinkedQueue<Object> expected = new ConcurrentLinkedQueue<>();
    long timestamp = Long.MAX_VALUE - 1750;
    Collection<TimeWindow> windows = windowAssigner.assignWindows(new Tuple2<>("key2", 1), timestamp, new WindowAssigner.WindowAssignerContext() {

        @Override
        public long getCurrentProcessingTime() {
            return operator.windowAssignerContext.getCurrentProcessingTime();
        }
    });
    TimeWindow window = Iterables.getOnlyElement(windows);
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), timestamp));
    // the garbage collection timer would wrap around
    Assert.assertTrue(window.maxTimestamp() + lateness < window.maxTimestamp());
    // and it would prematurely fire with watermark (Long.MAX_VALUE - 1500)
    Assert.assertTrue(window.maxTimestamp() + lateness < Long.MAX_VALUE - 1500);
    // if we don't correctly prevent wrap-around in the garbage collection
    // timers this watermark will clean our window state for the just-added
    // element/window
    testHarness.processWatermark(new Watermark(Long.MAX_VALUE - 1500));
    // this watermark is before the end timestamp of our only window
    Assert.assertTrue(Long.MAX_VALUE - 1500 < window.maxTimestamp());
    Assert.assertTrue(window.maxTimestamp() < Long.MAX_VALUE);
    // push in a watermark that will trigger computation of our window
    testHarness.processWatermark(new Watermark(window.maxTimestamp()));
    expected.add(new Watermark(Long.MAX_VALUE - 1500));
    expected.add(new StreamRecord<>(new Tuple2<>("key2", 1), window.maxTimestamp()));
    expected.add(new Watermark(window.maxTimestamp()));
    TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expected, testHarness.getOutput(), new Tuple2ResultSortComparator());
    testHarness.close();
}
Also used : ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) TumblingEventTimeWindows(org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows) ReducingStateDescriptor(org.apache.flink.api.common.state.ReducingStateDescriptor) TimeWindow(org.apache.flink.streaming.api.windowing.windows.TimeWindow) TypeHint(org.apache.flink.api.common.typeinfo.TypeHint) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) WindowAssigner(org.apache.flink.streaming.api.windowing.assigners.WindowAssigner) PassThroughWindowFunction(org.apache.flink.streaming.api.functions.windowing.PassThroughWindowFunction) Tuple2(org.apache.flink.api.java.tuple.Tuple2) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) Watermark(org.apache.flink.streaming.api.watermark.Watermark) Test(org.junit.Test)
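
The wrap-around this test guards against comes down to a single addition overflowing. Below is a simplified, self-contained sketch of the kind of guard being exercised; the names are illustrative and this is not a verbatim copy of WindowOperator's cleanup-time logic.

public class CleanupTimeOverflowSketch {

    // Cleanup (GC) time for an event-time window: maxTimestamp plus allowed lateness,
    // falling back to Long.MAX_VALUE when the addition overflows, so the timer never
    // wraps around to an early timestamp and fires prematurely.
    static long cleanupTime(long windowMaxTimestamp, long allowedLateness) {
        long cleanupTime = windowMaxTimestamp + allowedLateness;
        return cleanupTime >= windowMaxTimestamp ? cleanupTime : Long.MAX_VALUE;
    }

    public static void main(String[] args) {
        long windowMax = Long.MAX_VALUE - 1000; // window close to the end of time, as in the test
        long lateness = 2000;                   // pushes the naive sum past Long.MAX_VALUE
        // Without the guard the sum would be a large negative number, and a watermark of
        // Long.MAX_VALUE - 1500 would immediately clean the freshly added window state.
        System.out.println(cleanupTime(windowMax, lateness)); // 9223372036854775807 (Long.MAX_VALUE)
    }
}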

Example 93 with Watermark

Use of org.apache.flink.streaming.api.watermark.Watermark in project flink by apache.

From the class WindowOperatorTest, method testSideOutputDueToLatenessSessionZeroLatenessPurgingTrigger.

@Test
public void testSideOutputDueToLatenessSessionZeroLatenessPurgingTrigger() throws Exception {
    final int gapSize = 3;
    final long lateness = 0;
    ReducingStateDescriptor<Tuple2<String, Integer>> stateDesc =
            new ReducingStateDescriptor<>(
                    "window-contents",
                    new SumReducer(),
                    STRING_INT_TUPLE.createSerializer(new ExecutionConfig()));
    WindowOperator<String, Tuple2<String, Integer>, Tuple2<String, Integer>, Tuple3<String, Long, Long>, TimeWindow> operator =
            new WindowOperator<>(
                    EventTimeSessionWindows.withGap(Time.seconds(gapSize)),
                    new TimeWindow.Serializer(),
                    new TupleKeySelector(),
                    BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()),
                    stateDesc,
                    new InternalSingleValueWindowFunction<>(new ReducedSessionWindowFunction()),
                    PurgingTrigger.of(EventTimeTrigger.create()),
                    lateness,
                    lateOutputTag);
    OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Tuple3<String, Long, Long>> testHarness = createTestHarness(operator);
    testHarness.open();
    ConcurrentLinkedQueue<Object> expected = new ConcurrentLinkedQueue<>();
    ConcurrentLinkedQueue<Object> sideExpected = new ConcurrentLinkedQueue<>();
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 1000));
    testHarness.processWatermark(new Watermark(1999));
    expected.add(new Watermark(1999));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 2000));
    testHarness.processWatermark(new Watermark(4998));
    expected.add(new Watermark(4998));
    // this will not be dropped because the session we're adding to has a maxTimestamp
    // after the current watermark
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 4500));
    // new session
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 8500));
    testHarness.processWatermark(new Watermark(7400));
    expected.add(new Watermark(7400));
    // this will merge the two sessions into one
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 7000));
    testHarness.processWatermark(new Watermark(11501));
    expected.add(new StreamRecord<>(new Tuple3<>("key2-5", 1000L, 11500L), 11499));
    expected.add(new Watermark(11501));
    // new session
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 11600));
    testHarness.processWatermark(new Watermark(14600));
    expected.add(new StreamRecord<>(new Tuple3<>("key2-1", 11600L, 14600L), 14599));
    expected.add(new Watermark(14600));
    // this is side output as late
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 10000));
    sideExpected.add(new StreamRecord<>(new Tuple2<>("key2", 1), 10000));
    // this is also side output as late (we test that they are not accidentally merged)
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 10100));
    sideExpected.add(new StreamRecord<>(new Tuple2<>("key2", 1), 10100));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 14500));
    testHarness.processWatermark(new Watermark(20000));
    expected.add(new StreamRecord<>(new Tuple3<>("key2-1", 14500L, 17500L), 17499));
    expected.add(new Watermark(20000));
    testHarness.processWatermark(new Watermark(100000));
    expected.add(new Watermark(100000));
    ConcurrentLinkedQueue<Object> actual = testHarness.getOutput();
    ConcurrentLinkedQueue<StreamRecord<Tuple2<String, Integer>>> sideActual = testHarness.getSideOutput(lateOutputTag);
    TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expected, actual, new Tuple2ResultSortComparator());
    TestHarnessUtil.assertOutputEqualsSorted("SideOutput was not correct.", sideExpected, (Iterable) sideActual, new Tuple2ResultSortComparator());
    testHarness.close();
}
Also used : ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) ReducingStateDescriptor(org.apache.flink.api.common.state.ReducingStateDescriptor) StreamRecord(org.apache.flink.streaming.runtime.streamrecord.StreamRecord) TimeWindow(org.apache.flink.streaming.api.windowing.windows.TimeWindow) TypeHint(org.apache.flink.api.common.typeinfo.TypeHint) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Tuple2(org.apache.flink.api.java.tuple.Tuple2) Tuple3(org.apache.flink.api.java.tuple.Tuple3) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) Watermark(org.apache.flink.streaming.api.watermark.Watermark) Test(org.junit.Test)
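
What sends the records stamped 10000 and 10100 to the side output is, in essence, a window-level lateness check: with zero allowed lateness, the session they would open is already entirely behind the watermark. The sketch below reproduces that check with the test's numbers; it is an illustration, not the operator's full routing logic (which also handles session merging first).

import org.apache.flink.streaming.api.windowing.windows.TimeWindow;

public class WindowLatenessSketch {

    // A window is "late" once its maxTimestamp plus the allowed lateness is at or
    // behind the current watermark; elements that only fit into late windows are
    // emitted to the late-data side output (or dropped if no tag is configured).
    static boolean isWindowLate(TimeWindow window, long allowedLateness, long currentWatermark) {
        return window.maxTimestamp() + allowedLateness <= currentWatermark;
    }

    public static void main(String[] args) {
        long watermark = 14600;
        TimeWindow lateSession = new TimeWindow(10000, 13000);   // session the record at 10000 would open
        TimeWindow freshSession = new TimeWindow(14500, 17500);  // session the record at 14500 opens
        System.out.println(isWindowLate(lateSession, 0, watermark));  // true  -> record goes to the side output
        System.out.println(isWindowLate(freshSession, 0, watermark)); // false -> record starts a new session
    }
}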

Example 94 with Watermark

Use of org.apache.flink.streaming.api.watermark.Watermark in project flink by apache.

From the class WindowOperatorTest, method testSideOutputDueToLatenessSessionZeroLateness.

@Test
public void testSideOutputDueToLatenessSessionZeroLateness() throws Exception {
    final int gapSize = 3;
    final long lateness = 0;
    ReducingStateDescriptor<Tuple2<String, Integer>> stateDesc =
            new ReducingStateDescriptor<>(
                    "window-contents",
                    new SumReducer(),
                    STRING_INT_TUPLE.createSerializer(new ExecutionConfig()));
    WindowOperator<String, Tuple2<String, Integer>, Tuple2<String, Integer>, Tuple3<String, Long, Long>, TimeWindow> operator =
            new WindowOperator<>(
                    EventTimeSessionWindows.withGap(Time.seconds(gapSize)),
                    new TimeWindow.Serializer(),
                    new TupleKeySelector(),
                    BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()),
                    stateDesc,
                    new InternalSingleValueWindowFunction<>(new ReducedSessionWindowFunction()),
                    EventTimeTrigger.create(),
                    lateness,
                    lateOutputTag);
    OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Tuple3<String, Long, Long>> testHarness = createTestHarness(operator);
    testHarness.open();
    ConcurrentLinkedQueue<Object> expected = new ConcurrentLinkedQueue<>();
    ConcurrentLinkedQueue<Object> sideExpected = new ConcurrentLinkedQueue<>();
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 1000));
    testHarness.processWatermark(new Watermark(1999));
    expected.add(new Watermark(1999));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 2000));
    testHarness.processWatermark(new Watermark(4998));
    expected.add(new Watermark(4998));
    // this will not be dropped because the session we're adding to has a maxTimestamp
    // after the current watermark
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 4500));
    // new session
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 8500));
    testHarness.processWatermark(new Watermark(7400));
    expected.add(new Watermark(7400));
    // this will merge the two sessions into one
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 7000));
    testHarness.processWatermark(new Watermark(11501));
    expected.add(new StreamRecord<>(new Tuple3<>("key2-5", 1000L, 11500L), 11499));
    expected.add(new Watermark(11501));
    // new session
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 11600));
    testHarness.processWatermark(new Watermark(14600));
    expected.add(new StreamRecord<>(new Tuple3<>("key2-1", 11600L, 14600L), 14599));
    expected.add(new Watermark(14600));
    // this is side output as late; reuse last timestamp
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 10000));
    sideExpected.add(new StreamRecord<>(new Tuple2<>("key2", 1), 10000));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 14500));
    testHarness.processWatermark(new Watermark(20000));
    expected.add(new StreamRecord<>(new Tuple3<>("key2-1", 14500L, 17500L), 17499));
    expected.add(new Watermark(20000));
    testHarness.processWatermark(new Watermark(100000));
    expected.add(new Watermark(100000));
    ConcurrentLinkedQueue<Object> actual = testHarness.getOutput();
    ConcurrentLinkedQueue<StreamRecord<Tuple2<String, Integer>>> sideActual = testHarness.getSideOutput(lateOutputTag);
    TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expected, actual, new Tuple2ResultSortComparator());
    TestHarnessUtil.assertOutputEqualsSorted("SideOutput was not correct.", sideExpected, (Iterable) sideActual, new Tuple2ResultSortComparator());
    testHarness.close();
}
Also used : ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) ReducingStateDescriptor(org.apache.flink.api.common.state.ReducingStateDescriptor) StreamRecord(org.apache.flink.streaming.runtime.streamrecord.StreamRecord) TimeWindow(org.apache.flink.streaming.api.windowing.windows.TimeWindow) TypeHint(org.apache.flink.api.common.typeinfo.TypeHint) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Tuple2(org.apache.flink.api.java.tuple.Tuple2) Tuple3(org.apache.flink.api.java.tuple.Tuple3) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) Watermark(org.apache.flink.streaming.api.watermark.Watermark) Test(org.junit.Test)
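
The expected record for the element stamped 14500, Tuple3<>("key2-1", 14500L, 17500L) at timestamp 17499, follows directly from how the session assigner creates windows and how window results are stamped. The snippet below shows that arithmetic; the context stub is illustrative, since an event-time assigner never consults processing time.

import java.util.Collection;

import org.apache.flink.streaming.api.windowing.assigners.EventTimeSessionWindows;
import org.apache.flink.streaming.api.windowing.assigners.WindowAssigner;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;

public class SessionBoundsSketch {

    public static void main(String[] args) {
        EventTimeSessionWindows assigner = EventTimeSessionWindows.withGap(Time.seconds(3));
        WindowAssigner.WindowAssignerContext ctx = new WindowAssigner.WindowAssignerContext() {
            @Override
            public long getCurrentProcessingTime() {
                return Long.MIN_VALUE; // never consulted by an event-time assigner
            }
        };
        // A lone element at 14500 initially gets the session [14500, 14500 + gap).
        Collection<TimeWindow> windows = assigner.assignWindows("key2", 14500L, ctx);
        TimeWindow session = windows.iterator().next();
        System.out.println(session.getStart());      // 14500
        System.out.println(session.getEnd());        // 17500
        System.out.println(session.maxTimestamp());  // 17499 -> timestamp of the emitted window result
    }
}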

Example 95 with Watermark

Use of org.apache.flink.streaming.api.watermark.Watermark in project flink by apache.

From the class WindowOperatorContractTest, method testProcessingElementsWithinAllowedLateness.

@Test
public void testProcessingElementsWithinAllowedLateness() throws Exception {
    WindowAssigner<Integer, TimeWindow> mockAssigner = mockTimeWindowAssigner();
    Trigger<Integer, TimeWindow> mockTrigger = mockTrigger();
    InternalWindowFunction<Iterable<Integer>, Void, Integer, TimeWindow> mockWindowFunction = mockWindowFunction();
    KeyedOneInputStreamOperatorTestHarness<Integer, Integer, Void> testHarness = createWindowOperator(mockAssigner, mockTrigger, 20L, mockWindowFunction);
    testHarness.open();
    when(mockAssigner.assignWindows(anyInt(), anyLong(), anyAssignerContext())).thenReturn(Arrays.asList(new TimeWindow(0, 2)));
    assertEquals(0, testHarness.getOutput().size());
    assertEquals(0, testHarness.numKeyedStateEntries());
    shouldFireOnElement(mockTrigger);
    // 20 is just at the limit: window.maxTimestamp() is 1 and the allowed lateness is 20
    testHarness.processWatermark(new Watermark(20));
    testHarness.processElement(new StreamRecord<>(0, 0L));
    verify(mockWindowFunction, times(1)).process(eq(0), eq(new TimeWindow(0, 2)), anyInternalWindowContext(), intIterable(0), WindowOperatorContractTest.<Void>anyCollector());
    // clear is only called at cleanup time/GC time
    verify(mockTrigger, never()).clear(anyTimeWindow(), anyTriggerContext());
    // FIRE should not purge contents
    // window contents plus trigger state
    assertEquals(1, testHarness.numKeyedStateEntries());
    // just the GC timer
    assertEquals(1, testHarness.numEventTimeTimers());
}
Also used : TimeWindow(org.apache.flink.streaming.api.windowing.windows.TimeWindow) Watermark(org.apache.flink.streaming.api.watermark.Watermark) Test(org.junit.Test)
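
The final assertion, assertEquals(1, testHarness.numEventTimeTimers()), counts only the garbage-collection timer the operator registers when it first sees the window. Below is a simplified sketch of that registration; the method and parameter names are illustrative, not the exact WindowOperator code.

import org.apache.flink.streaming.api.operators.InternalTimerService;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;

public class CleanupTimerSketch {

    // Register the event-time cleanup ("GC") timer for a window. For TimeWindow(0, 2)
    // and an allowed lateness of 20 this is a single timer at 1 + 20 = 21, which is
    // the one event-time timer the assertion above counts.
    static void registerCleanupTimer(TimeWindow window, long allowedLateness,
                                     InternalTimerService<TimeWindow> timerService) {
        long cleanupTime = window.maxTimestamp() + allowedLateness;
        if (cleanupTime != Long.MAX_VALUE) { // "end of time" needs no explicit cleanup
            timerService.registerEventTimeTimer(window, cleanupTime);
        }
    }
}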

Aggregations

Classes most frequently used together with Watermark (org.apache.flink.streaming.api.watermark.Watermark), with their usage counts:

Watermark (org.apache.flink.streaming.api.watermark.Watermark): 318
Test (org.junit.Test): 258
ConcurrentLinkedQueue (java.util.concurrent.ConcurrentLinkedQueue): 124
RowData (org.apache.flink.table.data.RowData): 83
ArrayList (java.util.ArrayList): 62
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 51
OperatorSubtaskState (org.apache.flink.runtime.checkpoint.OperatorSubtaskState): 51
ExecutionConfig (org.apache.flink.api.common.ExecutionConfig): 45
StreamRecord (org.apache.flink.streaming.runtime.streamrecord.StreamRecord): 39
KeyedOneInputStreamOperatorTestHarness (org.apache.flink.streaming.util.KeyedOneInputStreamOperatorTestHarness): 39
TimeWindow (org.apache.flink.streaming.api.windowing.windows.TimeWindow): 36
TypeHint (org.apache.flink.api.common.typeinfo.TypeHint): 30
List (java.util.List): 26
Map (java.util.Map): 26
Configuration (org.apache.flink.configuration.Configuration): 25
GenericRowData (org.apache.flink.table.data.GenericRowData): 25
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 24
ReducingStateDescriptor (org.apache.flink.api.common.state.ReducingStateDescriptor): 20
Event (org.apache.flink.cep.Event): 20
SubEvent (org.apache.flink.cep.SubEvent): 20