Use of org.apache.flink.api.common.state.ReducingStateDescriptor in project flink by apache.
From class AbstractQueryableStateTestBase, method testReducingState.
/**
 * Tests a simple queryable state instance backed by reducing state. Each source emits
 * (subtaskIndex, 0)..(subtaskIndex, numElements) tuples, which are then queried. The reducing
 * state instance sums these up. The test succeeds after each subtask index is queried with
 * result n*(n+1)/2.
 */
@Test
public void testReducingState() throws Exception {
    final Deadline deadline = Deadline.now().plus(TEST_TIMEOUT);
    final long numElements = 1024L;

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStateBackend(stateBackend);
    env.setParallelism(maxParallelism);
    // Very important, because the cluster is shared between tests and we don't
    // explicitly check that all slots are available before submitting.
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000L));

    DataStream<Tuple2<Integer, Long>> source =
            env.addSource(new TestAscendingValueSource(numElements));

    ReducingStateDescriptor<Tuple2<Integer, Long>> reducingState =
            new ReducingStateDescriptor<>("any", new SumReduce(), source.getType());

    source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {

        private static final long serialVersionUID = 8470749712274833552L;

        @Override
        public Integer getKey(Tuple2<Integer, Long> value) {
            return value.f0;
        }
    }).asQueryableState("jungle", reducingState);

    try (AutoCancellableJob autoCancellableJob =
            new AutoCancellableJob(deadline, clusterClient, env)) {
        final JobID jobId = autoCancellableJob.getJobId();
        final JobGraph jobGraph = autoCancellableJob.getJobGraph();

        clusterClient.submitJob(jobGraph).get();

        final long expected = numElements * (numElements + 1L) / 2L;

        for (int key = 0; key < maxParallelism; key++) {
            boolean success = false;
            while (deadline.hasTimeLeft() && !success) {
                CompletableFuture<ReducingState<Tuple2<Integer, Long>>> future = getKvState(
                        deadline, client, jobId, "jungle", key,
                        BasicTypeInfo.INT_TYPE_INFO, reducingState, false, executor);
                Tuple2<Integer, Long> value =
                        future.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS).get();

                assertEquals("Key mismatch", key, value.f0.intValue());
                if (expected == value.f1) {
                    success = true;
                } else {
                    // Retry
                    Thread.sleep(RETRY_TIMEOUT);
                }
            }
            assertTrue("Did not succeed query", success);
        }
    }
}
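The SumReduce function passed to the descriptor above is defined elsewhere in the test base and is not part of this snippet. Given the expected n*(n+1)/2 result, it plausibly keeps the key and adds the running values; a hedged minimal sketch:

// Hedged sketch, not the verbatim Flink test class: a ReduceFunction that keeps
// the key (f0) and sums the values (f1) of two tuples for the same key.
// Requires org.apache.flink.api.common.functions.ReduceFunction.
private static class SumReduce implements ReduceFunction<Tuple2<Integer, Long>> {

    private static final long serialVersionUID = 1L;

    @Override
    public Tuple2<Integer, Long> reduce(Tuple2<Integer, Long> value1, Tuple2<Integer, Long> value2) {
        return new Tuple2<>(value1.f0, value1.f1 + value2.f1);
    }
}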
Use of org.apache.flink.api.common.state.ReducingStateDescriptor in project flink by apache.
From class WindowOperatorMigrationTest, method testRestoreReducingProcessingTimeWindows.
@Test
public void testRestoreReducingProcessingTimeWindows() throws Exception {
    final int windowSize = 3;

    ReducingStateDescriptor<Tuple2<String, Integer>> stateDesc = new ReducingStateDescriptor<>(
            "window-contents", new SumReducer<>(), STRING_INT_TUPLE.createSerializer(new ExecutionConfig()));

    WindowOperator<String, Tuple2<String, Integer>, Tuple2<String, Integer>, Tuple2<String, Integer>, TimeWindow> operator =
            new WindowOperator<>(
                    TumblingProcessingTimeWindows.of(Time.of(windowSize, TimeUnit.SECONDS)),
                    new TimeWindow.Serializer(),
                    new TupleKeySelector<>(),
                    BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()),
                    stateDesc,
                    new InternalSingleValueWindowFunction<>(new PassThroughWindowFunction<String, TimeWindow, Tuple2<String, Integer>>()),
                    ProcessingTimeTrigger.create(),
                    0,
                    null);

    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
    OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Tuple2<String, Integer>> testHarness =
            new KeyedOneInputStreamOperatorTestHarness<>(operator, new TupleKeySelector<>(), BasicTypeInfo.STRING_TYPE_INFO);

    testHarness.setup();
    testHarness.initializeState(OperatorSnapshotUtil.getResourceFilename(
            "win-op-migration-test-reduce-processing-time-flink" + testMigrateVersion + "-snapshot"));
    testHarness.open();

    testHarness.setProcessingTime(3020);
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 3)));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 3)));

    testHarness.setProcessingTime(6000);
    expectedOutput.add(new StreamRecord<>(new Tuple2<>("key1", 3), 5999));
    expectedOutput.add(new StreamRecord<>(new Tuple2<>("key2", 4), 5999));
    expectedOutput.add(new StreamRecord<>(new Tuple2<>("key3", 1), 5999));

    TestHarnessUtil.assertOutputEqualsSorted(
            "Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator<>());

    testHarness.close();
}
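The SumReducer used by this and the following migration tests is likewise defined elsewhere in WindowOperatorMigrationTest. Assuming it reduces same-key tuples by summing their Integer field (consistent with key2 accumulating to 4 from the restored snapshot plus the new element above), a minimal sketch could look like this:

// Hedged sketch, not the verbatim test class; generic in the key type so it
// also works with non-String keys. Requires
// org.apache.flink.api.common.functions.ReduceFunction.
private static class SumReducer<K> implements ReduceFunction<Tuple2<K, Integer>> {

    private static final long serialVersionUID = 1L;

    @Override
    public Tuple2<K, Integer> reduce(Tuple2<K, Integer> value1, Tuple2<K, Integer> value2) {
        return new Tuple2<>(value1.f0, value1.f1 + value2.f1);
    }
}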
Use of org.apache.flink.api.common.state.ReducingStateDescriptor in project flink by apache.
From class WindowOperatorMigrationTest, method writeWindowsWithKryoSerializedKeysSnapshot.
/**
 * Manually run this to write binary snapshot data.
 */
@Ignore
@Test
public void writeWindowsWithKryoSerializedKeysSnapshot() throws Exception {
    final int windowSize = 3;

    TypeInformation<Tuple2<NonPojoType, Integer>> inputType =
            new TypeHint<Tuple2<NonPojoType, Integer>>() {}.getTypeInfo();

    ReducingStateDescriptor<Tuple2<NonPojoType, Integer>> stateDesc = new ReducingStateDescriptor<>(
            "window-contents", new SumReducer<>(), inputType.createSerializer(new ExecutionConfig()));

    TypeSerializer<NonPojoType> keySerializer =
            TypeInformation.of(NonPojoType.class).createSerializer(new ExecutionConfig());
    assertTrue(keySerializer instanceof KryoSerializer);

    WindowOperator<NonPojoType, Tuple2<NonPojoType, Integer>, Tuple2<NonPojoType, Integer>, Tuple2<NonPojoType, Integer>, TimeWindow> operator =
            new WindowOperator<>(
                    TumblingEventTimeWindows.of(Time.of(windowSize, TimeUnit.SECONDS)),
                    new TimeWindow.Serializer(),
                    new TupleKeySelector<>(),
                    keySerializer,
                    stateDesc,
                    new InternalSingleValueWindowFunction<>(new PassThroughWindowFunction<>()),
                    EventTimeTrigger.create(),
                    0,
                    null);

    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
    OneInputStreamOperatorTestHarness<Tuple2<NonPojoType, Integer>, Tuple2<NonPojoType, Integer>> testHarness =
            new KeyedOneInputStreamOperatorTestHarness<>(operator, new TupleKeySelector<>(), TypeInformation.of(NonPojoType.class));

    testHarness.setup();
    testHarness.open();

    // add elements out-of-order
    testHarness.processElement(new StreamRecord<>(new Tuple2<>(new NonPojoType("key2"), 1), 3999));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>(new NonPojoType("key2"), 1), 3000));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>(new NonPojoType("key1"), 1), 20));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>(new NonPojoType("key1"), 1), 0));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>(new NonPojoType("key1"), 1), 999));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>(new NonPojoType("key2"), 1), 1998));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>(new NonPojoType("key2"), 1), 1999));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>(new NonPojoType("key2"), 1), 1000));

    testHarness.processWatermark(new Watermark(999));
    expectedOutput.add(new Watermark(999));
    TestHarnessUtil.assertOutputEqualsSorted(
            "Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator<>());

    testHarness.processWatermark(new Watermark(1999));
    expectedOutput.add(new Watermark(1999));
    TestHarnessUtil.assertOutputEqualsSorted(
            "Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator<>());

    // do snapshot and save to file
    OperatorSubtaskState snapshot = testHarness.snapshot(0, 0);
    OperatorSnapshotUtil.writeStateHandle(
            snapshot,
            "src/test/resources/win-op-migration-test-kryo-serialized-key-flink"
                    + flinkGenerateSavepointVersion + "-snapshot");

    testHarness.close();
}
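The assertTrue(keySerializer instanceof KryoSerializer) check only passes if NonPojoType fails Flink's POJO analysis, so that type extraction falls back to the Kryo-backed generic serializer. The class itself is not shown in this snippet; a hypothetical sketch of such a type is below (the field name and structure are illustrative, not taken from the Flink sources):

// Hypothetical sketch: the private final field with no no-arg constructor and
// no getters/setters prevents Flink from treating this as a POJO, forcing the
// Kryo fallback. equals/hashCode matter because instances serve as keys.
public class NonPojoType {

    private final String value;

    public NonPojoType(String value) {
        this.value = value;
    }

    @Override
    public boolean equals(Object o) {
        return o instanceof NonPojoType && ((NonPojoType) o).value.equals(value);
    }

    @Override
    public int hashCode() {
        return value.hashCode();
    }
}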
Use of org.apache.flink.api.common.state.ReducingStateDescriptor in project flink by apache.
From class WindowOperatorMigrationTest, method writeReducingEventTimeWindowsSnapshot.
/**
 * Manually run this to write binary snapshot data.
 */
@Ignore
@Test
public void writeReducingEventTimeWindowsSnapshot() throws Exception {
    final int windowSize = 3;

    ReducingStateDescriptor<Tuple2<String, Integer>> stateDesc = new ReducingStateDescriptor<>(
            "window-contents", new SumReducer<>(), STRING_INT_TUPLE.createSerializer(new ExecutionConfig()));

    WindowOperator<String, Tuple2<String, Integer>, Tuple2<String, Integer>, Tuple2<String, Integer>, TimeWindow> operator =
            new WindowOperator<>(
                    TumblingEventTimeWindows.of(Time.of(windowSize, TimeUnit.SECONDS)),
                    new TimeWindow.Serializer(),
                    new TupleKeySelector<>(),
                    BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()),
                    stateDesc,
                    new InternalSingleValueWindowFunction<>(new PassThroughWindowFunction<String, TimeWindow, Tuple2<String, Integer>>()),
                    EventTimeTrigger.create(),
                    0,
                    null);

    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
    OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Tuple2<String, Integer>> testHarness =
            new KeyedOneInputStreamOperatorTestHarness<>(operator, new TupleKeySelector<>(), BasicTypeInfo.STRING_TYPE_INFO);

    testHarness.setup();
    testHarness.open();

    // add elements out-of-order
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 3999));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 3000));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 1), 20));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 1), 0));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 1), 999));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 1998));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 1999));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 1000));

    testHarness.processWatermark(new Watermark(999));
    expectedOutput.add(new Watermark(999));
    TestHarnessUtil.assertOutputEqualsSorted(
            "Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator<>());

    testHarness.processWatermark(new Watermark(1999));
    expectedOutput.add(new Watermark(1999));
    TestHarnessUtil.assertOutputEqualsSorted(
            "Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator<>());

    // do snapshot and save to file
    OperatorSubtaskState snapshot = testHarness.snapshot(0, 0);
    OperatorSnapshotUtil.writeStateHandle(
            snapshot,
            "src/test/resources/win-op-migration-test-reduce-event-time-flink"
                    + flinkGenerateSavepointVersion + "-snapshot");

    testHarness.close();
}
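All of these window tests key the stream with a TupleKeySelector that is not shown in the snippet. Assuming it extracts the first tuple field as the key (the same pattern the queryable state test spells out inline at the top of this page), a minimal sketch is:

// Hedged sketch of the key selector, assuming it keys records by f0; requires
// org.apache.flink.api.java.functions.KeySelector.
private static class TupleKeySelector<K> implements KeySelector<Tuple2<K, Integer>, K> {

    private static final long serialVersionUID = 1L;

    @Override
    public K getKey(Tuple2<K, Integer> value) {
        return value.f0;
    }
}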
Use of org.apache.flink.api.common.state.ReducingStateDescriptor in project flink by apache.
From class WindowOperatorMigrationTest, method writeReducingProcessingTimeWindowsSnapshot.
/**
 * Manually run this to write binary snapshot data.
 */
@Ignore
@Test
public void writeReducingProcessingTimeWindowsSnapshot() throws Exception {
    final int windowSize = 3;

    ReducingStateDescriptor<Tuple2<String, Integer>> stateDesc = new ReducingStateDescriptor<>(
            "window-contents", new SumReducer<>(), STRING_INT_TUPLE.createSerializer(new ExecutionConfig()));

    WindowOperator<String, Tuple2<String, Integer>, Tuple2<String, Integer>, Tuple2<String, Integer>, TimeWindow> operator =
            new WindowOperator<>(
                    TumblingProcessingTimeWindows.of(Time.of(windowSize, TimeUnit.SECONDS)),
                    new TimeWindow.Serializer(),
                    new TupleKeySelector<>(),
                    BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()),
                    stateDesc,
                    new InternalSingleValueWindowFunction<>(new PassThroughWindowFunction<String, TimeWindow, Tuple2<String, Integer>>()),
                    ProcessingTimeTrigger.create(),
                    0,
                    null);

    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
    OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Tuple2<String, Integer>> testHarness =
            new KeyedOneInputStreamOperatorTestHarness<>(operator, new TupleKeySelector<>(), BasicTypeInfo.STRING_TYPE_INFO);

    testHarness.setup();
    testHarness.open();

    testHarness.setProcessingTime(10);
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1)));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 1)));

    testHarness.setProcessingTime(3010);
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1)));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key3", 1)));

    expectedOutput.add(new StreamRecord<>(new Tuple2<>("key1", 1), 2999));
    expectedOutput.add(new StreamRecord<>(new Tuple2<>("key2", 1), 2999));
    TestHarnessUtil.assertOutputEqualsSorted(
            "Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator<>());

    // do snapshot and save to file
    OperatorSubtaskState snapshot = testHarness.snapshot(0, 0);
    OperatorSnapshotUtil.writeStateHandle(
            snapshot,
            "src/test/resources/win-op-migration-test-reduce-processing-time-flink"
                    + flinkGenerateSavepointVersion + "-snapshot");

    testHarness.close();
}
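Beyond these operator tests, the same descriptor is what user code registers to obtain a per-key ReducingState. A minimal standalone sketch of a keyed function maintaining a running sum (class and field names here are illustrative, not from the Flink sources):

// Illustrative sketch: a RichFlatMapFunction on a keyed stream that folds every
// incoming value into ReducingState via the descriptor's ReduceFunction.
// Requires org.apache.flink.api.common.functions.RichFlatMapFunction,
// org.apache.flink.configuration.Configuration, and org.apache.flink.util.Collector.
public class RunningSum extends RichFlatMapFunction<Tuple2<String, Long>, Tuple2<String, Long>> {

    private transient ReducingState<Long> sum;

    @Override
    public void open(Configuration parameters) {
        ReducingStateDescriptor<Long> descriptor =
                new ReducingStateDescriptor<>("sum", (a, b) -> a + b, Long.class);
        sum = getRuntimeContext().getReducingState(descriptor);
    }

    @Override
    public void flatMap(Tuple2<String, Long> input, Collector<Tuple2<String, Long>> out) throws Exception {
        sum.add(input.f1);  // applies the reduce function against the stored value
        out.collect(new Tuple2<>(input.f0, sum.get()));
    }
}

Applied as stream.keyBy(t -> t.f0).flatMap(new RunningSum()), the state is scoped per key, which is the same per-key accumulation the window operators above rely on for their "window-contents" state.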