Use of org.apache.flink.runtime.state.memory.MemoryStateBackend in project flink by apache.
The class ManualWindowSpeedITCase, method testAlignedProcessingTimeWindows.
@Test
public void testAlignedProcessingTimeWindows() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime);
    env.setParallelism(1);
    env.setStateBackend(new RocksDBStateBackend(new MemoryStateBackend()));
    env.addSource(new InfiniteTupleSource(10_000))
        .keyBy(0)
        .timeWindow(Time.seconds(3))
        .reduce(new ReduceFunction<Tuple2<String, Integer>>() {

            private static final long serialVersionUID = 1L;

            @Override
            public Tuple2<String, Integer> reduce(Tuple2<String, Integer> value1, Tuple2<String, Integer> value2) throws Exception {
                return Tuple2.of(value1.f0, value1.f1 + value2.f1);
            }
        })
        .filter(new FilterFunction<Tuple2<String, Integer>>() {

            private static final long serialVersionUID = 1L;

            @Override
            public boolean filter(Tuple2<String, Integer> value) throws Exception {
                return value.f0.startsWith("Tuple 0");
            }
        })
        .print();
    env.execute();
}
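The test above wraps a MemoryStateBackend inside a RocksDBStateBackend, so RocksDB holds the working state while checkpoint data is streamed to the JobManager heap. For comparison, a minimal sketch (not taken from the test) of configuring the plain MemoryStateBackend with an explicit size cap; the 5 MB figure is an illustrative assumption:
// Illustrative only: the constructor argument limits the size of the serialized
// state that a checkpoint may place on the JobManager heap.
final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setStateBackend(new MemoryStateBackend(5 * 1024 * 1024));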
Use of org.apache.flink.runtime.state.memory.MemoryStateBackend in project flink by apache.
The class StreamingRuntimeContextTest, method createListPlainMockOp.
@SuppressWarnings("unchecked")
private static AbstractStreamOperator<?> createListPlainMockOp() throws Exception {
    AbstractStreamOperator<?> operatorMock = mock(AbstractStreamOperator.class);
    ExecutionConfig config = new ExecutionConfig();
    KeyedStateBackend keyedStateBackend = mock(KeyedStateBackend.class);
    DefaultKeyedStateStore keyedStateStore = new DefaultKeyedStateStore(keyedStateBackend, config);
    when(operatorMock.getExecutionConfig()).thenReturn(config);
    doAnswer(new Answer<ListState<String>>() {

        @Override
        public ListState<String> answer(InvocationOnMock invocationOnMock) throws Throwable {
            ListStateDescriptor<String> descr = (ListStateDescriptor<String>) invocationOnMock.getArguments()[2];
            AbstractKeyedStateBackend<Integer> backend = new MemoryStateBackend().createKeyedStateBackend(
                    new DummyEnvironment("test_task", 1, 0),
                    new JobID(),
                    "test_op",
                    IntSerializer.INSTANCE,
                    1,
                    new KeyGroupRange(0, 0),
                    new KvStateRegistry().createTaskRegistry(new JobID(), new JobVertexID()));
            backend.setCurrentKey(0);
            return backend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, descr);
        }
    }).when(keyedStateBackend).getPartitionedState(Matchers.any(), any(TypeSerializer.class), any(ListStateDescriptor.class));
    when(operatorMock.getKeyedStateStore()).thenReturn(keyedStateStore);
    return operatorMock;
}
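The Answer above builds a real heap-based keyed state backend from a MemoryStateBackend, so the mocked KeyedStateBackend hands back genuine ListState objects. A minimal standalone sketch of the same pattern (inside a method that declares throws Exception), reusing the createKeyedStateBackend call from the mock; the state name "my-value", the key 42, and the switch to ValueState are illustrative assumptions:
// Build a heap keyed state backend directly from a MemoryStateBackend
// (same call as in the mock answer above).
AbstractKeyedStateBackend<Integer> backend = new MemoryStateBackend().createKeyedStateBackend(
        new DummyEnvironment("test_task", 1, 0),
        new JobID(),
        "test_op",
        IntSerializer.INSTANCE,
        1,
        new KeyGroupRange(0, 0),
        new KvStateRegistry().createTaskRegistry(new JobID(), new JobVertexID()));
// Set the current key, then read and write partitioned state for that key.
backend.setCurrentKey(42);
ValueState<String> state = backend.getPartitionedState(
        VoidNamespace.INSTANCE,
        VoidNamespaceSerializer.INSTANCE,
        new ValueStateDescriptor<>("my-value", String.class));
state.update("hello");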
Use of org.apache.flink.runtime.state.memory.MemoryStateBackend in project flink by apache.
The class StreamSourceOperatorTest, method setupSourceOperator.
@SuppressWarnings("unchecked")
private static <T> void setupSourceOperator(
        StreamSource<T, ?> operator,
        TimeCharacteristic timeChar,
        long watermarkInterval,
        long latencyMarkInterval,
        final ProcessingTimeService timeProvider) {
    ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setAutoWatermarkInterval(watermarkInterval);
    executionConfig.setLatencyTrackingInterval(latencyMarkInterval);
    StreamConfig cfg = new StreamConfig(new Configuration());
    cfg.setStateBackend(new MemoryStateBackend());
    cfg.setTimeCharacteristic(timeChar);
    Environment env = new DummyEnvironment("MockTwoInputTask", 1, 0);
    StreamStatusMaintainer streamStatusMaintainer = mock(StreamStatusMaintainer.class);
    when(streamStatusMaintainer.getStreamStatus()).thenReturn(StreamStatus.ACTIVE);
    StreamTask<?, ?> mockTask = mock(StreamTask.class);
    when(mockTask.getName()).thenReturn("Mock Task");
    when(mockTask.getCheckpointLock()).thenReturn(new Object());
    when(mockTask.getConfiguration()).thenReturn(cfg);
    when(mockTask.getEnvironment()).thenReturn(env);
    when(mockTask.getExecutionConfig()).thenReturn(executionConfig);
    when(mockTask.getAccumulatorMap()).thenReturn(Collections.<String, Accumulator<?, ?>>emptyMap());
    when(mockTask.getStreamStatusMaintainer()).thenReturn(streamStatusMaintainer);
    doAnswer(new Answer<ProcessingTimeService>() {

        @Override
        public ProcessingTimeService answer(InvocationOnMock invocation) throws Throwable {
            if (timeProvider == null) {
                throw new RuntimeException("The time provider is null.");
            }
            return timeProvider;
        }
    }).when(mockTask).getProcessingTimeService();
    operator.setup(mockTask, cfg, (Output<StreamRecord<T>>) mock(Output.class));
}
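setupSourceOperator only wires the operator to a mocked StreamTask and its StreamConfig; a hypothetical call site could look like the fragment below, inside a test method that declares throws Exception. The FromElementsFunction source, its elements, and the TestProcessingTimeService are assumptions for illustration, not quoted from the test class:
// Create a source operator over a simple bounded source function and wire it up
// with processing time, no automatic watermarks and no latency markers.
StreamSource<String, FromElementsFunction<String>> sourceOperator =
        new StreamSource<>(new FromElementsFunction<>(StringSerializer.INSTANCE, "a", "b", "c"));
setupSourceOperator(sourceOperator, TimeCharacteristic.ProcessingTime, 0, 0, new TestProcessingTimeService());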
Use of org.apache.flink.runtime.state.memory.MemoryStateBackend in project flink by apache.
The class StatefulUDFSavepointMigrationITCase, method testSavepointRestoreFromFlink11.
@Test
public void testSavepointRestoreFromFlink11() throws Exception {
    final int EXPECTED_SUCCESSFUL_CHECKS = 21;
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
    // we only test the memory state backend for now
    env.setStateBackend(new MemoryStateBackend());
    env.enableCheckpointing(500);
    env.setParallelism(4);
    env.setMaxParallelism(4);
    // create source
    env.addSource(new RestoringCheckingSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
        .flatMap(new RestoringCheckingFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
        .keyBy(0)
        .flatMap(new RestoringCheckingFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
        .keyBy(0)
        .flatMap(new KeyedStateCheckingFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
        .keyBy(0)
        .transform("custom_operator",
            new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
            new RestoringCheckingUdfOperator(new RestoringCheckingFlatMapWithKeyedState()))
        .uid("LegacyCheckpointedOperator")
        .addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>(EXPECTED_ELEMENTS_ACCUMULATOR));
    restoreAndExecute(
        env,
        getResourceFilename("stateful-udf-migration-itcase-flink1.1-savepoint"),
        new Tuple2<>(SUCCESSFUL_CHECK_ACCUMULATOR, EXPECTED_SUCCESSFUL_CHECKS));
}
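The uid() calls in the pipeline above are what makes the restore work: each stateful operator is given a stable identifier so the state stored in the Flink 1.1 savepoint can be mapped back onto the matching operator in the new job graph. An illustrative fragment only; the uid string must be the one that was set when the savepoint was taken:
env.addSource(new RestoringCheckingSource(NUM_SOURCE_ELEMENTS))
    // without an explicit uid, Flink assigns auto-generated operator ids that can
    // change whenever the topology changes, which breaks savepoint compatibility
    .uid("LegacyCheckpointedSource");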
Use of org.apache.flink.runtime.state.memory.MemoryStateBackend in project flink by apache.
The class StatefulUDFSavepointMigrationITCase, method testSavepointRestoreFromFlink11FromRocksDB.
@Test
public void testSavepointRestoreFromFlink11FromRocksDB() throws Exception {
    final int EXPECTED_SUCCESSFUL_CHECKS = 21;
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
    // this variant tests the RocksDB state backend, with checkpoints going to a MemoryStateBackend
    env.setStateBackend(new RocksDBStateBackend(new MemoryStateBackend()));
    env.enableCheckpointing(500);
    env.setParallelism(4);
    env.setMaxParallelism(4);
    // create source
    env.addSource(new RestoringCheckingSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
        .flatMap(new RestoringCheckingFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
        .keyBy(0)
        .flatMap(new RestoringCheckingFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
        .keyBy(0)
        .flatMap(new KeyedStateCheckingFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
        .keyBy(0)
        .transform("custom_operator",
            new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
            new RestoringCheckingUdfOperator(new RestoringCheckingFlatMapWithKeyedState()))
        .uid("LegacyCheckpointedOperator")
        .addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>(EXPECTED_ELEMENTS_ACCUMULATOR));
    restoreAndExecute(
        env,
        getResourceFilename("stateful-udf-migration-itcase-flink1.1-savepoint-rocksdb"),
        new Tuple2<>(SUCCESSFUL_CHECK_ACCUMULATOR, EXPECTED_SUCCESSFUL_CHECKS));
}
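This variant differs from the previous test only in the state backend and the savepoint resource name. Outside of tests, the RocksDB backend is normally backed by a durable filesystem URI rather than a MemoryStateBackend; a minimal sketch, where the checkpoint path is a placeholder and the constructor declares IOException:
// Placeholder URI, not taken from the test; in production this would point at a
// shared filesystem such as HDFS or S3 so checkpoint data survives process failures.
env.setStateBackend(new RocksDBStateBackend("file:///tmp/flink-checkpoints"));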