Use of org.apache.flink.streaming.runtime.tasks.OperatorStateHandles in project flink by apache.
From the class ListCheckpointedTest, method testUDFReturningEmpty:
@Test
public void testUDFReturningEmpty() throws Exception {
    TestUserFunction userFunction = new TestUserFunction(Collections.<Integer>emptyList());

    AbstractStreamOperatorTestHarness<Integer> testHarness =
            new AbstractStreamOperatorTestHarness<>(new StreamMap<>(userFunction), 1, 1, 0);
    testHarness.open();

    // snapshot the operator even though the user function reports an empty state list
    OperatorStateHandles snapshot = testHarness.snapshot(0L, 0L);
    testHarness.initializeState(snapshot);

    // the user function must still be notified of the restore, even for an empty snapshot
    Assert.assertTrue(userFunction.isRestored());
}
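TestUserFunction itself is not shown on this page. A minimal sketch of what such a fixture could look like, assuming it is a RichMapFunction that implements ListCheckpointed<Integer> and merely records whether restoreState() was invoked (the class body below is an assumption, not the actual Flink test code):

// Hypothetical fixture, sketched from how the test uses it; not the actual Flink source.
private static class TestUserFunction extends RichMapFunction<Integer, Integer>
        implements ListCheckpointed<Integer> {

    private final List<Integer> expectedState;
    private boolean restored = false;

    TestUserFunction(List<Integer> expectedState) {
        this.expectedState = expectedState;
    }

    @Override
    public Integer map(Integer value) {
        return value;
    }

    @Override
    public List<Integer> snapshotState(long checkpointId, long timestamp) {
        return expectedState;
    }

    @Override
    public void restoreState(List<Integer> state) {
        // called during initializeState(); with an empty snapshot this still runs, with an empty list
        restored = true;
    }

    public boolean isRestored() {
        return restored;
    }
}

With this shape, restoring from the empty snapshot still invokes restoreState(), which is exactly what the assertion at the end of the test verifies.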
Use of org.apache.flink.streaming.runtime.tasks.OperatorStateHandles in project flink by apache.
From the class AbstractStreamOperatorTest, method testWatermarkCallbackServiceScalingUp:
@Test
public void testWatermarkCallbackServiceScalingUp() throws Exception {
    final int MAX_PARALLELISM = 10;

    KeySelector<Tuple2<Integer, String>, Integer> keySelector = new TestKeySelector();

    Tuple2<Integer, String> element1 = new Tuple2<>(7, "first");
    Tuple2<Integer, String> element2 = new Tuple2<>(10, "start");

    int keygroup = KeyGroupRangeAssignment.assignToKeyGroup(keySelector.getKey(element1), MAX_PARALLELISM);
    assertEquals(1, keygroup);
    assertEquals(0, KeyGroupRangeAssignment.computeOperatorIndexForKeyGroup(MAX_PARALLELISM, 2, keygroup));

    keygroup = KeyGroupRangeAssignment.assignToKeyGroup(keySelector.getKey(element2), MAX_PARALLELISM);
    assertEquals(9, keygroup);
    assertEquals(1, KeyGroupRangeAssignment.computeOperatorIndexForKeyGroup(MAX_PARALLELISM, 2, keygroup));

    // now we start the test, we go from parallelism 1 to 2.
    KeyedOneInputStreamOperatorTestHarness<Integer, Tuple2<Integer, String>, Integer> testHarness1 =
            getTestHarness(MAX_PARALLELISM, 1, 0);
    testHarness1.open();

    testHarness1.processElement(new StreamRecord<>(element1));
    testHarness1.processElement(new StreamRecord<>(element2));

    assertEquals(0, testHarness1.getOutput().size());

    // take a snapshot with some elements in internal sorting queue
    OperatorStateHandles snapshot = testHarness1.snapshot(0, 0);
    testHarness1.close();

    // initialize two sub-tasks with the previously snapshotted state to simulate scaling up
    KeyedOneInputStreamOperatorTestHarness<Integer, Tuple2<Integer, String>, Integer> testHarness2 =
            getTestHarness(MAX_PARALLELISM, 2, 0);
    testHarness2.setup();
    testHarness2.initializeState(snapshot);
    testHarness2.open();

    KeyedOneInputStreamOperatorTestHarness<Integer, Tuple2<Integer, String>, Integer> testHarness3 =
            getTestHarness(MAX_PARALLELISM, 2, 1);
    testHarness3.setup();
    testHarness3.initializeState(snapshot);
    testHarness3.open();

    testHarness2.processWatermark(new Watermark(10));
    testHarness3.processWatermark(new Watermark(10));

    assertEquals(2, testHarness2.getOutput().size());
    verifyElement(testHarness2.getOutput().poll(), 7);
    verifyWatermark(testHarness2.getOutput().poll(), 10);

    assertEquals(2, testHarness3.getOutput().size());
    verifyElement(testHarness3.getOutput().poll(), 10);
    verifyWatermark(testHarness3.getOutput().poll(), 10);

    testHarness1.close();
    testHarness2.close();
    testHarness3.close();
}
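getTestHarness, verifyElement, and verifyWatermark are private helpers of AbstractStreamOperatorTest that are not reproduced on this page. A minimal sketch of what the two verification helpers could look like, inferred only from how they are called above (names, signatures, and the cast to Integer are assumptions):

// Hypothetical sketches inferred from the calls above; the actual Flink test code may differ.
@SuppressWarnings("unchecked")
private static void verifyElement(Object output, int expectedValue) {
    assertTrue(output instanceof StreamRecord);
    assertEquals(expectedValue, ((StreamRecord<Integer>) output).getValue().intValue());
}

private static void verifyWatermark(Object output, long expectedTimestamp) {
    assertTrue(output instanceof Watermark);
    assertEquals(expectedTimestamp, ((Watermark) output).getTimestamp());
}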
Use of org.apache.flink.streaming.runtime.tasks.OperatorStateHandles in project flink by apache.
From the class AbstractStreamOperatorTest, method testStateAndTimerStateShufflingScalingUp:
/**
* Verify that state and timers are checkpointed per key group and that they are correctly
* assigned to operator subtasks when restoring.
*/
@Test
public void testStateAndTimerStateShufflingScalingUp() throws Exception {
    final int MAX_PARALLELISM = 10;

    // first get two keys that will fall into different key-group ranges that go
    // to different operator subtasks when we restore

    // get two sub key-ranges so that we can restore two ranges separately
    KeyGroupRange subKeyGroupRange1 = new KeyGroupRange(0, (MAX_PARALLELISM / 2) - 1);
    KeyGroupRange subKeyGroupRange2 = new KeyGroupRange(subKeyGroupRange1.getEndKeyGroup() + 1, MAX_PARALLELISM - 1);

    // get two different keys, one per sub range
    int key1 = getKeyInKeyGroupRange(subKeyGroupRange1, MAX_PARALLELISM);
    int key2 = getKeyInKeyGroupRange(subKeyGroupRange2, MAX_PARALLELISM);

    TestOperator testOperator = new TestOperator();

    KeyedOneInputStreamOperatorTestHarness<Integer, Tuple2<Integer, String>, String> testHarness =
            new KeyedOneInputStreamOperatorTestHarness<>(
                    testOperator,
                    new TestKeySelector(),
                    BasicTypeInfo.INT_TYPE_INFO,
                    MAX_PARALLELISM,
                    1, /* num subtasks */
                    0);

    testHarness.open();

    testHarness.processWatermark(0L);
    testHarness.setProcessingTime(0L);

    testHarness.processElement(new Tuple2<>(key1, "SET_EVENT_TIME_TIMER:10"), 0);
    testHarness.processElement(new Tuple2<>(key2, "SET_EVENT_TIME_TIMER:20"), 0);

    testHarness.processElement(new Tuple2<>(key1, "SET_PROC_TIME_TIMER:10"), 0);
    testHarness.processElement(new Tuple2<>(key2, "SET_PROC_TIME_TIMER:20"), 0);

    testHarness.processElement(new Tuple2<>(key1, "SET_STATE:HELLO"), 0);
    testHarness.processElement(new Tuple2<>(key2, "SET_STATE:CIAO"), 0);

    assertTrue(extractResult(testHarness).isEmpty());

    OperatorStateHandles snapshot = testHarness.snapshot(0, 0);

    // now, restore in two operators, first operator 1
    TestOperator testOperator1 = new TestOperator();
    KeyedOneInputStreamOperatorTestHarness<Integer, Tuple2<Integer, String>, String> testHarness1 =
            new KeyedOneInputStreamOperatorTestHarness<>(
                    testOperator1,
                    new TestKeySelector(),
                    BasicTypeInfo.INT_TYPE_INFO,
                    MAX_PARALLELISM,
                    2, /* num subtasks */
                    0);

    testHarness1.setup();
    testHarness1.initializeState(snapshot);
    testHarness1.open();

    testHarness1.processWatermark(10L);
    assertThat(extractResult(testHarness1), contains("ON_EVENT_TIME:HELLO"));
    assertTrue(extractResult(testHarness1).isEmpty());

    // this should not trigger anything, the trigger for WM=20 should sit in the
    // other operator subtask
    testHarness1.processWatermark(20L);
    assertTrue(extractResult(testHarness1).isEmpty());

    testHarness1.setProcessingTime(10L);
    assertThat(extractResult(testHarness1), contains("ON_PROC_TIME:HELLO"));
    assertTrue(extractResult(testHarness1).isEmpty());

    // this should not trigger anything, the trigger for TIME=20 should sit in the
    // other operator subtask
    testHarness1.setProcessingTime(20L);
    assertTrue(extractResult(testHarness1).isEmpty());

    // now, for the second operator
    TestOperator testOperator2 = new TestOperator();
    KeyedOneInputStreamOperatorTestHarness<Integer, Tuple2<Integer, String>, String> testHarness2 =
            new KeyedOneInputStreamOperatorTestHarness<>(
                    testOperator2,
                    new TestKeySelector(),
                    BasicTypeInfo.INT_TYPE_INFO,
                    MAX_PARALLELISM,
                    2, /* num subtasks */
                    1);

    testHarness2.setup();
    testHarness2.initializeState(snapshot);
    testHarness2.open();

    testHarness2.processWatermark(10L);
    // nothing should happen because this timer is in the other subtask
    assertTrue(extractResult(testHarness2).isEmpty());

    testHarness2.processWatermark(20L);
    assertThat(extractResult(testHarness2), contains("ON_EVENT_TIME:CIAO"));

    testHarness2.setProcessingTime(10L);
    // nothing should happen because this timer is in the other subtask
    assertTrue(extractResult(testHarness2).isEmpty());

    testHarness2.setProcessingTime(20L);
    assertThat(extractResult(testHarness2), contains("ON_PROC_TIME:CIAO"));
    assertTrue(extractResult(testHarness2).isEmpty());
}
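TestOperator and TestKeySelector are fixtures of AbstractStreamOperatorTest that this page does not show. The command strings above ("SET_STATE:...", "SET_EVENT_TIME_TIMER:...", "SET_PROC_TIME_TIMER:...") and the outputs ("ON_EVENT_TIME:...", "ON_PROC_TIME:...") suggest an operator roughly like the following sketch; it illustrates the pattern of keyed value state plus an internal timer service, and is an assumption, not the actual Flink test operator:

// Rough, hypothetical sketch of a TestOperator-like fixture; details are assumptions.
private static class TestOperator
        extends AbstractStreamOperator<String>
        implements OneInputStreamOperator<Tuple2<Integer, String>, String>, Triggerable<Integer, VoidNamespace> {

    private final ValueStateDescriptor<String> stateDescriptor =
            new ValueStateDescriptor<>("state", StringSerializer.INSTANCE);

    private transient InternalTimerService<VoidNamespace> timerService;

    @Override
    public void open() throws Exception {
        super.open();
        // timers registered here are scoped to the current key and checkpointed per key group
        this.timerService = getInternalTimerService("test-timers", VoidNamespaceSerializer.INSTANCE, this);
    }

    @Override
    public void processElement(StreamRecord<Tuple2<Integer, String>> element) throws Exception {
        String[] command = element.getValue().f1.split(":");
        switch (command[0]) {
            case "SET_STATE":
                getPartitionedState(stateDescriptor).update(command[1]);
                break;
            case "SET_EVENT_TIME_TIMER":
                timerService.registerEventTimeTimer(VoidNamespace.INSTANCE, Long.parseLong(command[1]));
                break;
            case "SET_PROC_TIME_TIMER":
                timerService.registerProcessingTimeTimer(VoidNamespace.INSTANCE, Long.parseLong(command[1]));
                break;
            default:
                throw new IllegalArgumentException("Unknown command: " + element.getValue().f1);
        }
    }

    @Override
    public void onEventTime(InternalTimer<Integer, VoidNamespace> timer) throws Exception {
        // emits the state of the timer's key, which is what the assertions above check
        output.collect(new StreamRecord<>("ON_EVENT_TIME:" + getPartitionedState(stateDescriptor).value()));
    }

    @Override
    public void onProcessingTime(InternalTimer<Integer, VoidNamespace> timer) throws Exception {
        output.collect(new StreamRecord<>("ON_PROC_TIME:" + getPartitionedState(stateDescriptor).value()));
    }
}

Because both the value state and the registered timers are keyed, they travel with their key group when the snapshot is redistributed across two subtasks, which is what the test asserts.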
Use of org.apache.flink.streaming.runtime.tasks.OperatorStateHandles in project flink by apache.
From the class AbstractStreamOperatorTest, method testStateAndTimerStateShufflingScalingDown:
@Test
public void testStateAndTimerStateShufflingScalingDown() throws Exception {
    final int MAX_PARALLELISM = 10;

    // first get two keys that will fall into different key-group ranges that go
    // to different operator subtasks when we restore

    // get two sub key-ranges so that we can restore two ranges separately
    KeyGroupRange subKeyGroupRange1 = new KeyGroupRange(0, (MAX_PARALLELISM / 2) - 1);
    KeyGroupRange subKeyGroupRange2 = new KeyGroupRange(subKeyGroupRange1.getEndKeyGroup() + 1, MAX_PARALLELISM - 1);

    // get two different keys, one per sub range
    int key1 = getKeyInKeyGroupRange(subKeyGroupRange1, MAX_PARALLELISM);
    int key2 = getKeyInKeyGroupRange(subKeyGroupRange2, MAX_PARALLELISM);

    TestOperator testOperator1 = new TestOperator();
    KeyedOneInputStreamOperatorTestHarness<Integer, Tuple2<Integer, String>, String> testHarness1 =
            new KeyedOneInputStreamOperatorTestHarness<>(
                    testOperator1,
                    new TestKeySelector(),
                    BasicTypeInfo.INT_TYPE_INFO,
                    MAX_PARALLELISM,
                    2, /* num subtasks */
                    0);

    testHarness1.setup();
    testHarness1.open();

    testHarness1.processWatermark(0L);
    testHarness1.setProcessingTime(0L);

    TestOperator testOperator2 = new TestOperator();
    KeyedOneInputStreamOperatorTestHarness<Integer, Tuple2<Integer, String>, String> testHarness2 =
            new KeyedOneInputStreamOperatorTestHarness<>(
                    testOperator2,
                    new TestKeySelector(),
                    BasicTypeInfo.INT_TYPE_INFO,
                    MAX_PARALLELISM,
                    2, /* num subtasks */
                    1);

    testHarness2.setup();
    testHarness2.open();

    testHarness2.processWatermark(0L);
    testHarness2.setProcessingTime(0L);

    // register some state with both instances and scale down to parallelism 1
    testHarness1.processElement(new Tuple2<>(key1, "SET_EVENT_TIME_TIMER:30"), 0);
    testHarness1.processElement(new Tuple2<>(key1, "SET_PROC_TIME_TIMER:30"), 0);
    testHarness1.processElement(new Tuple2<>(key1, "SET_STATE:HELLO"), 0);

    testHarness2.processElement(new Tuple2<>(key2, "SET_EVENT_TIME_TIMER:40"), 0);
    testHarness2.processElement(new Tuple2<>(key2, "SET_PROC_TIME_TIMER:40"), 0);
    testHarness2.processElement(new Tuple2<>(key2, "SET_STATE:CIAO"), 0);

    // take a snapshot from each one of the "parallel" instances of the operator
    // and combine them into one so that we can scale down
    OperatorStateHandles repackagedState =
            AbstractStreamOperatorTestHarness.repackageState(testHarness1.snapshot(0, 0), testHarness2.snapshot(0, 0));

    // now, for the third operator that scales down from parallelism of 2 to 1
    TestOperator testOperator3 = new TestOperator();
    KeyedOneInputStreamOperatorTestHarness<Integer, Tuple2<Integer, String>, String> testHarness3 =
            new KeyedOneInputStreamOperatorTestHarness<>(
                    testOperator3,
                    new TestKeySelector(),
                    BasicTypeInfo.INT_TYPE_INFO,
                    MAX_PARALLELISM,
                    1, /* num subtasks */
                    0);

    testHarness3.setup();
    testHarness3.initializeState(repackagedState);
    testHarness3.open();

    testHarness3.processWatermark(30L);
    assertThat(extractResult(testHarness3), contains("ON_EVENT_TIME:HELLO"));
    assertTrue(extractResult(testHarness3).isEmpty());

    testHarness3.processWatermark(40L);
    assertThat(extractResult(testHarness3), contains("ON_EVENT_TIME:CIAO"));
    assertTrue(extractResult(testHarness3).isEmpty());

    testHarness3.setProcessingTime(30L);
    assertThat(extractResult(testHarness3), contains("ON_PROC_TIME:HELLO"));
    assertTrue(extractResult(testHarness3).isEmpty());

    testHarness3.setProcessingTime(40L);
    assertThat(extractResult(testHarness3), contains("ON_PROC_TIME:CIAO"));
    assertTrue(extractResult(testHarness3).isEmpty());
}
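getKeyInKeyGroupRange and extractResult are small helpers of the test class that the page omits. Plausible sketches follow, assuming getKeyInKeyGroupRange draws random keys until one maps into the wanted key-group range, and extractResult drains the string records currently sitting in the harness output (which is why calling it twice in a row yields an empty list); both bodies are assumptions, not the actual Flink test code:

// Hypothetical sketches of the omitted helpers; the real Flink test code may differ.
private static int getKeyInKeyGroupRange(KeyGroupRange range, int maxParallelism) {
    Random rand = new Random();
    int result = rand.nextInt();
    while (!range.contains(KeyGroupRangeAssignment.assignToKeyGroup(result, maxParallelism))) {
        result = rand.nextInt();
    }
    return result;
}

private static List<String> extractResult(OneInputStreamOperatorTestHarness<?, String> testHarness) {
    List<String> result = new ArrayList<>();
    Object record;
    while ((record = testHarness.getOutput().poll()) != null) {
        if (record instanceof StreamRecord) {
            result.add((String) ((StreamRecord<?>) record).getValue());
        }
    }
    return result;
}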
Use of org.apache.flink.streaming.runtime.tasks.OperatorStateHandles in project flink by apache.
From the class KeyedCoProcessOperatorTest, method testSnapshotAndRestore:
@Test
public void testSnapshotAndRestore() throws Exception {
    KeyedCoProcessOperator<String, Integer, String, String> operator =
            new KeyedCoProcessOperator<>(new BothTriggeringProcessFunction());

    TwoInputStreamOperatorTestHarness<Integer, String, String> testHarness =
            new KeyedTwoInputStreamOperatorTestHarness<>(
                    operator, new IntToStringKeySelector<>(), new IdentityKeySelector<String>(), BasicTypeInfo.STRING_TYPE_INFO);

    testHarness.setup();
    testHarness.open();

    testHarness.processElement1(new StreamRecord<>(5, 12L));
    testHarness.processElement2(new StreamRecord<>("5", 12L));

    // snapshot and restore from scratch
    OperatorStateHandles snapshot = testHarness.snapshot(0, 0);
    testHarness.close();

    operator = new KeyedCoProcessOperator<>(new BothTriggeringProcessFunction());

    testHarness = new KeyedTwoInputStreamOperatorTestHarness<>(
            operator, new IntToStringKeySelector<>(), new IdentityKeySelector<String>(), BasicTypeInfo.STRING_TYPE_INFO);

    testHarness.setup();
    testHarness.initializeState(snapshot);
    testHarness.open();

    testHarness.setProcessingTime(5);
    testHarness.processWatermark1(new Watermark(6));
    testHarness.processWatermark2(new Watermark(6));

    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
    expectedOutput.add(new StreamRecord<>("PROC:1777", 5L));
    expectedOutput.add(new StreamRecord<>("EVENT:1777", 6L));
    expectedOutput.add(new Watermark(6));

    TestHarnessUtil.assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput());

    testHarness.close();
}
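BothTriggeringProcessFunction, IntToStringKeySelector, and IdentityKeySelector are fixtures of the test class and are not shown here. A rough sketch of what the process function could look like, inferred only from the expected output above; the timer timestamps and the "1777" payload are assumptions about the fixture, not confirmed Flink source:

// Hypothetical sketch of BothTriggeringProcessFunction; details inferred from the expected output.
private static class BothTriggeringProcessFunction extends CoProcessFunction<Integer, String, String> {

    @Override
    public void processElement1(Integer value, Context ctx, Collector<String> out) throws Exception {
        // the first input registers a processing-time timer (assumed here to fire at time 5)
        ctx.timerService().registerProcessingTimeTimer(5);
    }

    @Override
    public void processElement2(String value, Context ctx, Collector<String> out) throws Exception {
        // the second input registers an event-time timer (assumed here to fire at time 6)
        ctx.timerService().registerEventTimeTimer(6);
    }

    @Override
    public void onTimer(long timestamp, OnTimerContext ctx, Collector<String> out) throws Exception {
        // both timers survive the snapshot/restore and fire in the restored operator
        if (ctx.timeDomain() == TimeDomain.PROCESSING_TIME) {
            out.collect("PROC:1777");
        } else {
            out.collect("EVENT:1777");
        }
    }
}

The point of the test is that both registered timers are part of the snapshot, so after initializeState() the restored operator fires them when processing time advances to 5 and the watermark reaches 6.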