Use of org.apache.flink.contrib.streaming.state.RocksDBStateBackend in project flink by apache.
The class StatefulUDFSavepointMigrationITCase, method testSavepointRestoreFromFlink11FromRocksDB.
@Test
public void testSavepointRestoreFromFlink11FromRocksDB() throws Exception {
    final int EXPECTED_SUCCESSFUL_CHECKS = 21;
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
    // RocksDB holds the working state; the wrapped MemoryStateBackend only stores the checkpoint streams
    env.setStateBackend(new RocksDBStateBackend(new MemoryStateBackend()));
    env.enableCheckpointing(500);
    env.setParallelism(4);
    env.setMaxParallelism(4);
    // create the source and the operator chain whose state is restored from the Flink 1.1 savepoint
    env.addSource(new RestoringCheckingSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
        .flatMap(new RestoringCheckingFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
        .keyBy(0)
        .flatMap(new RestoringCheckingFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
        .keyBy(0)
        .flatMap(new KeyedStateCheckingFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
        .keyBy(0)
        .transform(
            "custom_operator",
            new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
            new RestoringCheckingUdfOperator(new RestoringCheckingFlatMapWithKeyedState()))
        .uid("LegacyCheckpointedOperator")
        .addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>(EXPECTED_ELEMENTS_ACCUMULATOR));
    restoreAndExecute(
        env,
        getResourceFilename("stateful-udf-migration-itcase-flink1.1-savepoint-rocksdb"),
        new Tuple2<>(SUCCESSFUL_CHECK_ACCUMULATOR, EXPECTED_SUCCESSFUL_CHECKS));
}
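For orientation: with the constructor used above, RocksDB keeps the operator's working state on local disk while the wrapped MemoryStateBackend only receives the checkpoint streams. A minimal configuration sketch showing the two common constructor variants (the checkpoint URI and class name below are illustrative placeholders, not taken from the test):

import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.runtime.state.memory.MemoryStateBackend;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class RocksDBBackendSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Variant 1: checkpoint to a filesystem URI; the boolean enables incremental checkpoints.
        env.setStateBackend(new RocksDBStateBackend("file:///tmp/checkpoints", true));
        // Variant 2 (as in the test above): delegate checkpoint streams to another backend.
        env.setStateBackend(new RocksDBStateBackend(new MemoryStateBackend()));
    }
}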
Use of org.apache.flink.contrib.streaming.state.RocksDBStateBackend in project flink by apache.
The class ManualWindowSpeedITCase, method testAlignedProcessingTimeWindows.
@Test
public void testAlignedProcessingTimeWindows() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime);
    env.setParallelism(1);
    env.setStateBackend(new RocksDBStateBackend(new MemoryStateBackend()));
    env.addSource(new InfiniteTupleSource(10_000))
        .keyBy(0)
        .timeWindow(Time.seconds(3))
        .reduce(new ReduceFunction<Tuple2<String, Integer>>() {
            private static final long serialVersionUID = 1L;

            @Override
            public Tuple2<String, Integer> reduce(Tuple2<String, Integer> value1, Tuple2<String, Integer> value2) throws Exception {
                return Tuple2.of(value1.f0, value1.f1 + value2.f1);
            }
        })
        .filter(new FilterFunction<Tuple2<String, Integer>>() {
            private static final long serialVersionUID = 1L;

            @Override
            public boolean filter(Tuple2<String, Integer> value) throws Exception {
                return value.f0.startsWith("Tuple 0");
            }
        })
        .print();
    env.execute();
}
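Note that timeWindow(Time.seconds(3)) is shorthand: because the environment uses the ProcessingTime characteristic, it assigns 3-second tumbling processing-time windows. A sketch of the explicit equivalent, assuming the same env and InfiniteTupleSource as in the test, plus imports for DataStream, TumblingProcessingTimeWindows, and Time:

// Same pipeline with the window assigner spelled out; the semantics are unchanged.
DataStream<Tuple2<String, Integer>> summed = env
    .addSource(new InfiniteTupleSource(10_000))
    .keyBy(0)
    .window(TumblingProcessingTimeWindows.of(Time.seconds(3)))
    .reduce((a, b) -> Tuple2.of(a.f0, a.f1 + b.f1));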
Use of org.apache.flink.contrib.streaming.state.RocksDBStateBackend in project flink by apache.
The class QsStateProducer, method main.
public static void main(final String[] args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    ParameterTool tool = ParameterTool.fromArgs(args);
    String tmpPath = tool.getRequired("tmp-dir");
    String stateBackendType = tool.getRequired("state-backend");
    StateBackend stateBackend;
    switch (stateBackendType) {
        case "rocksdb":
            stateBackend = new RocksDBStateBackend(tmpPath);
            break;
        case "fs":
            stateBackend = new FsStateBackend(tmpPath);
            break;
        case "memory":
            stateBackend = new MemoryStateBackend();
            break;
        default:
            throw new RuntimeException("Unsupported state backend " + stateBackendType);
    }
    env.setStateBackend(stateBackend);
    env.enableCheckpointing(1000L);
    env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
    env.getCheckpointConfig().setMinPauseBetweenCheckpoints(0);
    env.addSource(new EmailSource())
        .keyBy(new KeySelector<Email, String>() {
            private static final long serialVersionUID = -1480525724620425363L;

            @Override
            public String getKey(Email value) throws Exception {
                return QsConstants.KEY;
            }
        })
        .flatMap(new TestFlatMap());
    env.execute();
}
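The backend choice here is driven entirely by command-line flags, so the flag-to-value mapping matters. A minimal sketch of how ParameterTool.fromArgs parses them (the paths are illustrative placeholders):

// "--key value" pairs become entries retrievable via getRequired(key);
// getRequired throws if the flag is missing.
String[] exampleArgs = {"--tmp-dir", "/tmp/qs-state", "--state-backend", "rocksdb"};
ParameterTool tool = ParameterTool.fromArgs(exampleArgs);
String tmpDir = tool.getRequired("tmp-dir"); // "/tmp/qs-state"
String backendType = tool.getRequired("state-backend"); // "rocksdb"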
Use of org.apache.flink.contrib.streaming.state.RocksDBStateBackend in project flink by apache.
The class CEPOperatorTest, method testKeyedCEPOperatorNFAUpdateWithRocksDB.
@Test
public void testKeyedCEPOperatorNFAUpdateWithRocksDB() throws Exception {
    String rocksDbPath = tempFolder.newFolder().getAbsolutePath();
    RocksDBStateBackend rocksDBStateBackend =
        new RocksDBStateBackend(new MemoryStateBackend(), TernaryBoolean.FALSE);
    rocksDBStateBackend.setDbStoragePath(rocksDbPath);
    CepOperator<Event, Integer, Map<String, List<Event>>> operator =
        CepOperatorTestUtilities.getKeyedCepOperator(true, new SimpleNFAFactory());
    OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
        CepOperatorTestUtilities.getCepTestHarness(operator);
    try {
        harness.setStateBackend(rocksDBStateBackend);
        harness.open();
        Event startEvent = new Event(42, "c", 1.0);
        SubEvent middleEvent = new SubEvent(42, "a", 1.0, 10.0);
        Event endEvent = new Event(42, "b", 1.0);
        harness.processElement(new StreamRecord<>(startEvent, 1L));
        // simulate snapshot/restore with some elements in internal sorting queue
        OperatorSubtaskState snapshot = harness.snapshot(0L, 0L);
        harness.close();
        operator = CepOperatorTestUtilities.getKeyedCepOperator(true, new SimpleNFAFactory());
        harness = CepOperatorTestUtilities.getCepTestHarness(operator);
        rocksDBStateBackend = new RocksDBStateBackend(new MemoryStateBackend());
        rocksDBStateBackend.setDbStoragePath(rocksDbPath);
        harness.setStateBackend(rocksDBStateBackend);
        harness.setup();
        harness.initializeState(snapshot);
        harness.open();
        harness.processElement(new StreamRecord<>(new Event(42, "d", 1.0), 4L));
        OperatorSubtaskState snapshot2 = harness.snapshot(0L, 0L);
        harness.close();
        operator = CepOperatorTestUtilities.getKeyedCepOperator(true, new SimpleNFAFactory());
        harness = CepOperatorTestUtilities.getCepTestHarness(operator);
        rocksDBStateBackend = new RocksDBStateBackend(new MemoryStateBackend());
        rocksDBStateBackend.setDbStoragePath(rocksDbPath);
        harness.setStateBackend(rocksDBStateBackend);
        harness.setup();
        harness.initializeState(snapshot2);
        harness.open();
        harness.processElement(new StreamRecord<Event>(middleEvent, 4L));
        harness.processElement(new StreamRecord<>(endEvent, 4L));
        // get and verify the output
        Queue<Object> result = harness.getOutput();
        assertEquals(1, result.size());
        verifyPattern(result.poll(), startEvent, middleEvent, endEvent);
    } finally {
        harness.close();
    }
}
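The test tears the harness down and restores it twice, each time pointing a fresh RocksDBStateBackend at the same local storage path. A hedged sketch of that cycle factored into a helper method (the helper name is illustrative, not part of the Flink test):

// Rebuilds the operator and harness, re-attaches RocksDB at the same path,
// and restores from the given snapshot.
private static OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> restoreHarness(
        OperatorSubtaskState snapshot, String rocksDbPath) throws Exception {
    CepOperator<Event, Integer, Map<String, List<Event>>> operator =
        CepOperatorTestUtilities.getKeyedCepOperator(true, new SimpleNFAFactory());
    OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
        CepOperatorTestUtilities.getCepTestHarness(operator);
    RocksDBStateBackend backend = new RocksDBStateBackend(new MemoryStateBackend());
    backend.setDbStoragePath(rocksDbPath);
    harness.setStateBackend(backend);
    harness.setup();
    harness.initializeState(snapshot);
    harness.open();
    return harness;
}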
Use of org.apache.flink.contrib.streaming.state.RocksDBStateBackend in project flink by apache.
The class CEPOperatorTest, method testKeyedAdvancingTimeWithoutElements.
/**
 * Tests that the internal time of a CEP operator advances only when it receives watermarks. See FLINK-5033.
 */
@Test
public void testKeyedAdvancingTimeWithoutElements() throws Exception {
    final Event startEvent = new Event(42, "start", 1.0);
    final long watermarkTimestamp1 = 5L;
    final long watermarkTimestamp2 = 13L;
    final Map<String, List<Event>> expectedSequence = new HashMap<>(2);
    expectedSequence.put("start", Collections.<Event>singletonList(startEvent));
    final OutputTag<Tuple2<Map<String, List<Event>>, Long>> timedOut =
        new OutputTag<Tuple2<Map<String, List<Event>>, Long>>("timedOut") {};
    final KeyedOneInputStreamOperatorTestHarness<Integer, Event, Map<String, List<Event>>> harness =
        new KeyedOneInputStreamOperatorTestHarness<>(
            new CepOperator<>(
                Event.createTypeSerializer(),
                false,
                new NFAFactory(true),
                null,
                null,
                new TimedOutProcessFunction(timedOut),
                null),
            new KeySelector<Event, Integer>() {
                private static final long serialVersionUID = 7219185117566268366L;

                @Override
                public Integer getKey(Event value) throws Exception {
                    return value.getId();
                }
            },
            BasicTypeInfo.INT_TYPE_INFO);
    try {
        String rocksDbPath = tempFolder.newFolder().getAbsolutePath();
        RocksDBStateBackend rocksDBStateBackend = new RocksDBStateBackend(new MemoryStateBackend());
        rocksDBStateBackend.setDbStoragePath(rocksDbPath);
        harness.setStateBackend(rocksDBStateBackend);
        harness.setup(new KryoSerializer<>((Class<Map<String, List<Event>>>) (Object) Map.class, new ExecutionConfig()));
        harness.open();
        harness.processElement(new StreamRecord<>(startEvent, 3L));
        harness.processWatermark(new Watermark(watermarkTimestamp1));
        harness.processWatermark(new Watermark(watermarkTimestamp2));
        Queue<Object> result = harness.getOutput();
        Queue<StreamRecord<Tuple2<Map<String, List<Event>>, Long>>> sideOutput = harness.getSideOutput(timedOut);
        assertEquals(2L, result.size());
        assertEquals(1L, sideOutput.size());
        Object watermark1 = result.poll();
        assertTrue(watermark1 instanceof Watermark);
        assertEquals(watermarkTimestamp1, ((Watermark) watermark1).getTimestamp());
        Tuple2<Map<String, List<Event>>, Long> leftResult = sideOutput.poll().getValue();
        assertEquals(watermarkTimestamp2, (long) leftResult.f1);
        assertEquals(expectedSequence, leftResult.f0);
        Object watermark2 = result.poll();
        assertTrue(watermark2 instanceof Watermark);
        assertEquals(watermarkTimestamp2, ((Watermark) watermark2).getTimestamp());
    } finally {
        harness.close();
    }
}
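A detail worth noting in the OutputTag declaration above: the trailing {} creates an anonymous subclass, which is what lets Flink capture the generic element type despite Java's type erasure. A sketch of the two equivalent declarations (the second passes the TypeInformation explicitly instead):

// Anonymous subclass captures Tuple2<Map<String, List<Event>>, Long> at runtime:
OutputTag<Tuple2<Map<String, List<Event>>, Long>> viaSubclass =
    new OutputTag<Tuple2<Map<String, List<Event>>, Long>>("timedOut") {};
// Supplying the TypeInformation directly avoids the subclassing idiom:
OutputTag<Tuple2<Map<String, List<Event>>, Long>> viaTypeInfo = new OutputTag<>(
    "timedOut",
    TypeInformation.of(new TypeHint<Tuple2<Map<String, List<Event>>, Long>>() {}));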