Search in sources:

Example 51 with OperatorSubtaskState

Use of org.apache.flink.runtime.checkpoint.OperatorSubtaskState in the Apache Flink project.

Source: class WindowOperatorTest, method testPointSessions.

/**
 * Verifies a custom session window assigner that maps some elements to "point windows" —
 * windows whose start and end timestamps are identical.
 *
 * <p>Here, every element whose second tuple field equals 33 is assigned to a point window.
 * A snapshot/restore cycle is performed mid-stream to check that session state survives
 * recovery.
 */
@Test
@SuppressWarnings("unchecked")
public void testPointSessions() throws Exception {
    closeCalled.set(0);
    WindowOperator windowOperator = WindowOperatorBuilder.builder().withInputFields(inputFieldTypes).withShiftTimezone(shiftTimeZone).assigner(new PointSessionWindowAssigner(3000)).withEventTime(2).aggregateAndBuild(getTimeWindowAggFunction(), equaliser, accTypes, aggResultTypes, windowTypes);
    OneInputStreamOperatorTestHarness<RowData, RowData> harness = createTestHarness(windowOperator);
    ConcurrentLinkedQueue<Object> expected = new ConcurrentLinkedQueue<>();
    harness.open();
    // feed elements out of order before taking a checkpoint
    harness.processElement(insertRecord("key2", 1, 0L));
    harness.processElement(insertRecord("key2", 33, 1000L));
    // snapshot the operator state, tear the harness down, then restore from the snapshot
    OperatorSubtaskState savedState = harness.snapshot(0L, 0);
    harness.close();
    harness = createTestHarness(windowOperator);
    harness.setup();
    harness.initializeState(savedState);
    harness.open();
    // remaining elements arrive after the restore
    harness.processElement(insertRecord("key2", 33, 2500L));
    harness.processElement(insertRecord("key1", 1, 10L));
    harness.processElement(insertRecord("key1", 2, 1000L));
    harness.processElement(insertRecord("key1", 33, 2500L));
    harness.processWatermark(new Watermark(12000));
    // expected per-key session results once the watermark passes the session ends
    expected.addAll(doubleRecord(isTableAggregate, insertRecord("key1", 36L, 3L, localMills(10L), localMills(4000L), localMills(3999L))));
    expected.addAll(doubleRecord(isTableAggregate, insertRecord("key2", 67L, 3L, localMills(0L), localMills(3000L), localMills(2999L))));
    expected.add(new Watermark(12000));
    assertor.assertOutputEqualsSorted("Output was not correct.", expected, harness.getOutput());
    harness.close();
    // the operator is also closed once during the snapshot/restore above, hence 2 in total
    assertEquals("Close was not called.", 2, closeCalled.get());
}
Also used : JoinedRowData(org.apache.flink.table.data.utils.JoinedRowData) GenericRowData(org.apache.flink.table.data.GenericRowData) RowData(org.apache.flink.table.data.RowData) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) Watermark(org.apache.flink.streaming.api.watermark.Watermark) OperatorSubtaskState(org.apache.flink.runtime.checkpoint.OperatorSubtaskState) Test(org.junit.Test)

Example 52 with OperatorSubtaskState

Use of org.apache.flink.runtime.checkpoint.OperatorSubtaskState in the Apache Flink project.

Source: class WindowOperatorTest, method testTumblingCountWindow.

/**
 * Tests tumbling count windows of size 3, including a snapshot/restore cycle:
 * partially filled windows (pending per-key element counts) must survive the
 * restore so that they fire once the remaining elements arrive.
 */
@Test
public void testTumblingCountWindow() throws Exception {
    // NOTE(review): count windows appear timezone-independent, so this test only
    // runs for the UTC shift timezone — confirm against the parameterized setup.
    if (!UTC_ZONE_ID.equals(shiftTimeZone)) {
        return;
    }
    closeCalled.set(0);
    final int windowSize = 3;
    LogicalType[] windowTypes = new LogicalType[] { new BigIntType() };
    WindowOperator operator = WindowOperatorBuilder.builder().withInputFields(inputFieldTypes).withShiftTimezone(shiftTimeZone).countWindow(windowSize).aggregateAndBuild(getCountWindowAggFunction(), equaliser, accTypes, aggResultTypes, windowTypes);
    OneInputStreamOperatorTestHarness<RowData, RowData> testHarness = createTestHarness(operator);
    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
    testHarness.open();
    testHarness.processElement(insertRecord("key2", 1, 0L));
    testHarness.processElement(insertRecord("key2", 2, 1000L));
    testHarness.processElement(insertRecord("key2", 3, 2500L));
    testHarness.processElement(insertRecord("key1", 1, 10L));
    testHarness.processElement(insertRecord("key1", 2, 1000L));
    testHarness.processWatermark(new Watermark(12000));
    testHarness.setProcessingTime(12000L);
    // key2 has 3 elements, so its first window (id 0) fires: sum 1+2+3 = 6.
    // key1 has only 2 buffered elements and does not fire yet.
    expectedOutput.addAll(doubleRecord(isTableAggregate, insertRecord("key2", 6L, 3L, 0L)));
    expectedOutput.add(new Watermark(12000));
    assertor.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput());
    // do a snapshot, close and restore again
    OperatorSubtaskState snapshotV2 = testHarness.snapshot(0L, 0);
    testHarness.close();
    expectedOutput.clear();
    testHarness = createTestHarness(operator);
    testHarness.setup();
    testHarness.initializeState(snapshotV2);
    testHarness.open();
    // key1 had 2 buffered elements in the snapshot; one more completes its first
    // window (id 0, sum 1+2+2 = 5) — proving the pending count was restored.
    testHarness.processElement(insertRecord("key1", 2, 2500L));
    expectedOutput.addAll(doubleRecord(isTableAggregate, insertRecord("key1", 5L, 3L, 0L)));
    assertor.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput());
    testHarness.processElement(insertRecord("key2", 4, 5501L));
    testHarness.processElement(insertRecord("key2", 5, 6000L));
    testHarness.processElement(insertRecord("key2", 5, 6000L));
    // second key2 window (id 1) fires after three more elements: 4+5+5 = 14
    testHarness.processElement(insertRecord("key2", 6, 6050L));
    expectedOutput.addAll(doubleRecord(isTableAggregate, insertRecord("key2", 14L, 3L, 1L)));
    assertor.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput());
    testHarness.processElement(insertRecord("key1", 3, 4000L));
    testHarness.processElement(insertRecord("key2", 10, 15000L));
    // third key2 window (id 2): 6+10+20 = 36
    testHarness.processElement(insertRecord("key2", 20, 15000L));
    expectedOutput.addAll(doubleRecord(isTableAggregate, insertRecord("key2", 36L, 3L, 2L)));
    assertor.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput());
    testHarness.processElement(insertRecord("key1", 2, 2500L));
    // second key1 window (id 1): 3+2+2 = 7
    testHarness.processElement(insertRecord("key1", 2, 2500L));
    expectedOutput.addAll(doubleRecord(isTableAggregate, insertRecord("key1", 7L, 3L, 1L)));
    assertor.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput());
    testHarness.close();
    // the operator is also closed once during the snapshot/restore above, hence 2 in total
    assertEquals("Close was not called.", 2, closeCalled.get());
}
Also used : JoinedRowData(org.apache.flink.table.data.utils.JoinedRowData) GenericRowData(org.apache.flink.table.data.GenericRowData) RowData(org.apache.flink.table.data.RowData) LogicalType(org.apache.flink.table.types.logical.LogicalType) BigIntType(org.apache.flink.table.types.logical.BigIntType) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) Watermark(org.apache.flink.streaming.api.watermark.Watermark) OperatorSubtaskState(org.apache.flink.runtime.checkpoint.OperatorSubtaskState) Test(org.junit.Test)

Example 53 with OperatorSubtaskState

Use of org.apache.flink.runtime.checkpoint.OperatorSubtaskState in the Apache Flink project.

Source: class FastTop1FunctionTest, method testConstantRankRangeWithoutOffset.

/**
 * Tests a constant rank range [1, 1] (Top-1) without an offset, including state
 * recovery: after a snapshot/restore cycle the function must still retract the
 * previously emitted top record when a better one arrives.
 */
@Override
public void testConstantRankRangeWithoutOffset() throws Exception {
    AbstractTopNFunction function = createFunction(RankType.ROW_NUMBER, new ConstantRankRange(1, 1), true, false);
    OneInputStreamOperatorTestHarness<RowData, RowData> harness = createTestHarness(function);
    harness.open();
    // per-key inputs; the smallest third field wins the Top-1 slot
    harness.processElement(insertRecord("book", 1L, 12));
    harness.processElement(insertRecord("book", 2L, 19));
    harness.processElement(insertRecord("book", 4L, 11));
    harness.processElement(insertRecord("fruit", 4L, 33));
    harness.processElement(insertRecord("fruit", 3L, 44));
    harness.processElement(insertRecord("fruit", 5L, 22));
    List<Object> expected = new ArrayList<>();
    // each new leader retracts the previous one via UPDATE_BEFORE / UPDATE_AFTER
    expected.add(insertRecord("book", 1L, 12));
    expected.add(updateBeforeRecord("book", 1L, 12));
    expected.add(updateAfterRecord("book", 4L, 11));
    expected.add(insertRecord("fruit", 4L, 33));
    expected.add(updateBeforeRecord("fruit", 4L, 33));
    expected.add(updateAfterRecord("fruit", 5L, 22));
    assertorWithoutRowNumber.assertOutputEquals("output wrong.", expected, harness.getOutput());
    // do a snapshot, data could be recovered from state
    OperatorSubtaskState savedState = harness.snapshot(0L, 0);
    harness.close();
    expected.clear();
    // build a fresh function and harness, then restore from the snapshot
    function = createFunction(RankType.ROW_NUMBER, new ConstantRankRange(1, 1), true, false);
    harness = createTestHarness(function);
    harness.setup();
    harness.initializeState(savedState);
    harness.open();
    harness.processElement(insertRecord("book", 5L, 10));
    harness.close();
    // the restored state must remember "book" 4L/11 so it can be retracted
    expected.add(updateBeforeRecord("book", 4L, 11));
    expected.add(updateAfterRecord("book", 5L, 10));
    assertorWithoutRowNumber.assertOutputEquals("output wrong.", expected, harness.getOutput());
}
Also used : RowData(org.apache.flink.table.data.RowData) ArrayList(java.util.ArrayList) OperatorSubtaskState(org.apache.flink.runtime.checkpoint.OperatorSubtaskState)

Example 54 with OperatorSubtaskState

Use of org.apache.flink.runtime.checkpoint.OperatorSubtaskState in the Apache Flink project.

Source: class WindowOperatorTest, method testReduceSessionWindowsWithProcessFunction.

/**
 * Tests event-time session windows (3 s gap) backed by a reducing state and a
 * process window function, including a snapshot/restore cycle: the in-flight
 * "key2" session created before the snapshot must survive the restore and later
 * fire together with post-restore elements.
 */
@Test
@SuppressWarnings("unchecked")
public void testReduceSessionWindowsWithProcessFunction() throws Exception {
    closeCalled.set(0);
    final int sessionSize = 3;
    // incremental aggregation: elements of a session are summed by SumReducer
    ReducingStateDescriptor<Tuple2<String, Integer>> stateDesc = new ReducingStateDescriptor<>("window-contents", new SumReducer(), STRING_INT_TUPLE.createSerializer(new ExecutionConfig()));
    WindowOperator<String, Tuple2<String, Integer>, Tuple2<String, Integer>, Tuple3<String, Long, Long>, TimeWindow> operator = new WindowOperator<>(EventTimeSessionWindows.withGap(Time.seconds(sessionSize)), new TimeWindow.Serializer(), new TupleKeySelector(), BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()), stateDesc, new InternalSingleValueProcessWindowFunction<>(new ReducedProcessSessionWindowFunction()), EventTimeTrigger.create(), 0, null);
    OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Tuple3<String, Long, Long>> testHarness = createTestHarness(operator);
    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
    testHarness.open();
    // add elements out-of-order
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 0));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 2), 1000));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 3), 2500));
    // do a snapshot, close and restore again
    OperatorSubtaskState snapshot = testHarness.snapshot(0L, 0L);
    testHarness.close();
    testHarness = createTestHarness(operator);
    testHarness.setup();
    testHarness.initializeState(snapshot);
    testHarness.open();
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 1), 10));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 2), 1000));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 3), 2500));
    // these key2 elements start a second session: 5501 is > 3 s after 2500
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 4), 5501));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 5), 6000));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 5), 6000));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 6), 6050));
    testHarness.processWatermark(new Watermark(12000));
    // key1-6 = 1+2+3; key2-6 = the restored pre-snapshot session (1+2+3);
    // key2-20 = 4+5+5+6 from the second session
    expectedOutput.add(new StreamRecord<>(new Tuple3<>("key1-6", 10L, 5500L), 5499));
    expectedOutput.add(new StreamRecord<>(new Tuple3<>("key2-6", 0L, 5500L), 5499));
    expectedOutput.add(new StreamRecord<>(new Tuple3<>("key2-20", 5501L, 9050L), 9049));
    expectedOutput.add(new Watermark(12000));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 10), 15000));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 20), 15000));
    testHarness.processWatermark(new Watermark(17999));
    // a third key2 session (10+20 = 30) fires when the watermark passes 17999
    expectedOutput.add(new StreamRecord<>(new Tuple3<>("key2-30", 15000L, 18000L), 17999));
    expectedOutput.add(new Watermark(17999));
    TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple3ResultSortComparator());
    testHarness.close();
}
Also used : ReducingStateDescriptor(org.apache.flink.api.common.state.ReducingStateDescriptor) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) TimeWindow(org.apache.flink.streaming.api.windowing.windows.TimeWindow) TypeHint(org.apache.flink.api.common.typeinfo.TypeHint) OperatorSubtaskState(org.apache.flink.runtime.checkpoint.OperatorSubtaskState) Tuple2(org.apache.flink.api.java.tuple.Tuple2) Tuple3(org.apache.flink.api.java.tuple.Tuple3) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) Watermark(org.apache.flink.streaming.api.watermark.Watermark) Test(org.junit.Test)

Example 55 with OperatorSubtaskState

Use of org.apache.flink.runtime.checkpoint.OperatorSubtaskState in the Apache Flink project.

Source: class WindowOperatorTest, method testSessionWindowsWithCountTrigger.

/**
 * Tests that session-window merging works correctly with a purging
 * {@code CountTrigger} (fires and purges every 4 elements), including a
 * snapshot/restore cycle in the middle of the stream.
 */
@Test
public void testSessionWindowsWithCountTrigger() throws Exception {
    closeCalled.set(0);
    final int sessionSize = 3;
    ListStateDescriptor<Tuple2<String, Integer>> stateDesc = new ListStateDescriptor<>("window-contents", STRING_INT_TUPLE.createSerializer(new ExecutionConfig()));
    WindowOperator<String, Tuple2<String, Integer>, Iterable<Tuple2<String, Integer>>, Tuple3<String, Long, Long>, TimeWindow> operator = new WindowOperator<>(EventTimeSessionWindows.withGap(Time.seconds(sessionSize)), new TimeWindow.Serializer(), new TupleKeySelector(), BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()), stateDesc, new InternalIterableWindowFunction<>(new SessionWindowFunction()), PurgingTrigger.of(CountTrigger.of(4)), 0, null);
    OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Tuple3<String, Long, Long>> testHarness = createTestHarness(operator);
    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
    testHarness.open();
    // add elements out-of-order
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 0));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 2), 1000));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 3), 2500));
    // 4th key2 element reaches the count trigger -> key2 fires and purges
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 4), 3500));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 1), 10));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 2), 1000));
    // do a snapshot, close and restore again
    OperatorSubtaskState snapshot = testHarness.snapshot(0L, 0L);
    testHarness.close();
    // key2-10 = 1+2+3+4, emitted before the close above
    expectedOutput.add(new StreamRecord<>(new Tuple3<>("key2-10", 0L, 6500L), 6499));
    TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple3ResultSortComparator());
    expectedOutput.clear();
    testHarness = createTestHarness(operator);
    testHarness.setup();
    testHarness.initializeState(snapshot);
    testHarness.open();
    // key1 now has two separate sessions of 3 elements each (2500 -> 6000 exceeds
    // the 3 s gap); neither reaches the trigger count of 4, so no output yet
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 3), 2500));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 1), 6000));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 2), 6500));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 3), 7000));
    TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple3ResultSortComparator());
    // add an element that merges the two "key1" sessions; the merged window holds
    // 7 elements (3 + 3 + the merging one), exceeding the trigger count of 4, and
    // therefore fires: key1-22 = 1+2+3+1+2+3+10
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 10), 4500));
    expectedOutput.add(new StreamRecord<>(new Tuple3<>("key1-22", 10L, 10000L), 9999L));
    TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple3ResultSortComparator());
    testHarness.close();
}
Also used : ListStateDescriptor(org.apache.flink.api.common.state.ListStateDescriptor) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) TimeWindow(org.apache.flink.streaming.api.windowing.windows.TimeWindow) TypeHint(org.apache.flink.api.common.typeinfo.TypeHint) OperatorSubtaskState(org.apache.flink.runtime.checkpoint.OperatorSubtaskState) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Tuple2(org.apache.flink.api.java.tuple.Tuple2) Tuple3(org.apache.flink.api.java.tuple.Tuple3) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) Test(org.junit.Test)

Aggregations

OperatorSubtaskState (org.apache.flink.runtime.checkpoint.OperatorSubtaskState)178 Test (org.junit.Test)142 Watermark (org.apache.flink.streaming.api.watermark.Watermark)52 ConcurrentLinkedQueue (java.util.concurrent.ConcurrentLinkedQueue)37 RowData (org.apache.flink.table.data.RowData)31 ArrayList (java.util.ArrayList)28 KeyedOneInputStreamOperatorTestHarness (org.apache.flink.streaming.util.KeyedOneInputStreamOperatorTestHarness)25 Tuple2 (org.apache.flink.api.java.tuple.Tuple2)23 Map (java.util.Map)22 OperatorID (org.apache.flink.runtime.jobgraph.OperatorID)21 OneInputStreamOperatorTestHarness (org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness)19 HashMap (java.util.HashMap)18 ExecutionConfig (org.apache.flink.api.common.ExecutionConfig)18 TypeHint (org.apache.flink.api.common.typeinfo.TypeHint)16 Event (org.apache.flink.cep.Event)16 SubEvent (org.apache.flink.cep.SubEvent)16 TimeWindow (org.apache.flink.streaming.api.windowing.windows.TimeWindow)15 GenericRowData (org.apache.flink.table.data.GenericRowData)15 Ignore (org.junit.Ignore)15 TaskStateSnapshot (org.apache.flink.runtime.checkpoint.TaskStateSnapshot)14