Use of org.apache.flink.api.common.typeinfo.TypeHint in project flink by apache.
From class StatefulUDFSavepointMigrationITCase, method testSavepointRestoreFromFlink11.
@Test
public void testSavepointRestoreFromFlink11() throws Exception {
    final int EXPECTED_SUCCESSFUL_CHECKS = 21;
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
    // we only test the memory state backend here
    env.setStateBackend(new MemoryStateBackend());
    env.enableCheckpointing(500);
    env.setParallelism(4);
    env.setMaxParallelism(4);
    // create source
    env.addSource(new RestoringCheckingSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
        .flatMap(new RestoringCheckingFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
        .keyBy(0).flatMap(new RestoringCheckingFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
        .keyBy(0).flatMap(new KeyedStateCheckingFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
        .keyBy(0).transform("custom_operator", new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
            new RestoringCheckingUdfOperator(new RestoringCheckingFlatMapWithKeyedState()))
        .uid("LegacyCheckpointedOperator")
        .addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>(EXPECTED_ELEMENTS_ACCUMULATOR));
    restoreAndExecute(env, getResourceFilename("stateful-udf-migration-itcase-flink1.1-savepoint"),
        new Tuple2<>(SUCCESSFUL_CHECK_ACCUMULATOR, EXPECTED_SUCCESSFUL_CHECKS));
}
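The new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo() expression above works because the anonymous subclass pins the generic parameter, so Flink's type extraction can recover it despite erasure. A minimal standalone sketch of the same pattern (the class and field names are illustrative, not part of the test):
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;

public class TypeHintSketch {
    // The anonymous subclass of TypeHint keeps Tuple2<Long, Long> visible to type extraction;
    // the two forms below should produce equivalent TypeInformation.
    static final TypeInformation<Tuple2<Long, Long>> VIA_FACTORY =
        TypeInformation.of(new TypeHint<Tuple2<Long, Long>>() {});
    static final TypeInformation<Tuple2<Long, Long>> VIA_GET_TYPE_INFO =
        new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo();
}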
Use of org.apache.flink.api.common.typeinfo.TypeHint in project flink by apache.
From class StatefulUDFSavepointMigrationITCase, method testSavepointRestoreFromFlink11FromRocksDB.
@Test
public void testSavepointRestoreFromFlink11FromRocksDB() throws Exception {
    final int EXPECTED_SUCCESSFUL_CHECKS = 21;
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
    // use the RocksDB state backend, backed by the memory state backend
    env.setStateBackend(new RocksDBStateBackend(new MemoryStateBackend()));
    env.enableCheckpointing(500);
    env.setParallelism(4);
    env.setMaxParallelism(4);
    // create source
    env.addSource(new RestoringCheckingSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
        .flatMap(new RestoringCheckingFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
        .keyBy(0).flatMap(new RestoringCheckingFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
        .keyBy(0).flatMap(new KeyedStateCheckingFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
        .keyBy(0).transform("custom_operator", new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
            new RestoringCheckingUdfOperator(new RestoringCheckingFlatMapWithKeyedState()))
        .uid("LegacyCheckpointedOperator")
        .addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>(EXPECTED_ELEMENTS_ACCUMULATOR));
    restoreAndExecute(env, getResourceFilename("stateful-udf-migration-itcase-flink1.1-savepoint-rocksdb"),
        new Tuple2<>(SUCCESSFUL_CHECK_ACCUMULATOR, EXPECTED_SUCCESSFUL_CHECKS));
}
Use of org.apache.flink.api.common.typeinfo.TypeHint in project flink by apache.
From class CassandraConnectorITCase, method testCassandraBatchFormats.
@Test
public void testCassandraBatchFormats() throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    DataSet<Tuple3<String, Integer, Integer>> dataSet = env.fromCollection(collection);
    dataSet.output(new CassandraOutputFormat<Tuple3<String, Integer, Integer>>(INSERT_DATA_QUERY, builder));
    env.execute("Write data");
    DataSet<Tuple3<String, Integer, Integer>> inputDS = env.createInput(
        new CassandraInputFormat<Tuple3<String, Integer, Integer>>(SELECT_DATA_QUERY, builder),
        TypeInformation.of(new TypeHint<Tuple3<String, Integer, Integer>>() {}));
    long count = inputDS.count();
    Assert.assertEquals(20L, count);
}
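Both the write side (CassandraOutputFormat) and the read side (CassandraInputFormat) of this test work with the same Tuple3 row shape, so the TypeInformation built from the TypeHint could be hoisted into a reusable constant. A hedged sketch, with a hypothetical holder class name:
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple3;

public class CassandraRowTypes {
    // Hypothetical helper: build the Tuple3<String, Integer, Integer> TypeInformation once
    // and reuse it wherever that row shape is needed, e.g. as the second argument to env.createInput.
    public static final TypeInformation<Tuple3<String, Integer, Integer>> ROW_TYPE =
        TypeInformation.of(new TypeHint<Tuple3<String, Integer, Integer>>() {});
}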
Use of org.apache.flink.api.common.typeinfo.TypeHint in project flink by apache.
From class BucketingSinkTest, method testNonRollingSequenceFileWithoutCompressionWriter.
/**
* This tests {@link SequenceFileWriter}
* with non-rolling output and without compression.
*/
@Test
public void testNonRollingSequenceFileWithoutCompressionWriter() throws Exception {
    final String outPath = hdfsURI + "/seq-no-comp-non-rolling-out";
    final int numElements = 20;
    BucketingSink<Tuple2<IntWritable, Text>> sink = new BucketingSink<Tuple2<IntWritable, Text>>(outPath)
        .setWriter(new SequenceFileWriter<IntWritable, Text>())
        .setBucketer(new BasePathBucketer<Tuple2<IntWritable, Text>>())
        .setPartPrefix(PART_PREFIX)
        .setPendingPrefix("")
        .setPendingSuffix("");
    sink.setInputType(TypeInformation.of(new TypeHint<Tuple2<IntWritable, Text>>() {}), new ExecutionConfig());
    OneInputStreamOperatorTestHarness<Tuple2<IntWritable, Text>, Object> testHarness = createTestSink(sink, 1, 0);
    testHarness.setProcessingTime(0L);
    testHarness.setup();
    testHarness.open();
    for (int i = 0; i < numElements; i++) {
        testHarness.processElement(new StreamRecord<>(Tuple2.of(new IntWritable(i), new Text("message #" + Integer.toString(i)))));
    }
    testHarness.close();
    FSDataInputStream inStream = dfs.open(new Path(outPath + "/" + PART_PREFIX + "-0-0"));
    SequenceFile.Reader reader = new SequenceFile.Reader(inStream, 1000, 0, 100000, new Configuration());
    IntWritable intWritable = new IntWritable();
    Text txt = new Text();
    for (int i = 0; i < numElements; i++) {
        reader.next(intWritable, txt);
        Assert.assertEquals(i, intWritable.get());
        Assert.assertEquals("message #" + i, txt.toString());
    }
    reader.close();
    inStream.close();
}
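The explicit sink.setInputType(...) call is only needed because the test wires the sink into a OneInputStreamOperatorTestHarness by hand; in a regular job, addSink can forward the stream's type to a type-configurable sink such as BucketingSink. A hedged sketch of that ordinary wiring (the output path and element values are illustrative):
import java.util.Collections;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.fs.SequenceFileWriter;
import org.apache.flink.streaming.connectors.fs.bucketing.BucketingSink;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;

public class SequenceFileSinkSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // The TypeHint makes the Tuple2<IntWritable, Text> element type explicit up front;
        // addSink can then hand that type to the BucketingSink without a manual setInputType call.
        env.fromCollection(
                Collections.singletonList(Tuple2.of(new IntWritable(1), new Text("message #1"))),
                TypeInformation.of(new TypeHint<Tuple2<IntWritable, Text>>() {}))
            .addSink(new BucketingSink<Tuple2<IntWritable, Text>>("/tmp/seq-out")
                .setWriter(new SequenceFileWriter<IntWritable, Text>()));
        env.execute("sequence-file-sink-sketch");
    }
}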
Use of org.apache.flink.api.common.typeinfo.TypeHint in project flink by apache.
From class BatchExample, method main.
/*
* table script: "CREATE TABLE test.batches (number int, strings text, PRIMARY KEY(number, strings));"
*/
public static void main(String[] args) throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    ArrayList<Tuple2<Integer, String>> collection = new ArrayList<>(20);
    for (int i = 0; i < 20; i++) {
        collection.add(new Tuple2<>(i, "string " + i));
    }
    DataSet<Tuple2<Integer, String>> dataSet = env.fromCollection(collection);
    dataSet.output(new CassandraOutputFormat<Tuple2<Integer, String>>(INSERT_QUERY, new ClusterBuilder() {
        @Override
        protected Cluster buildCluster(Builder builder) {
            return builder.addContactPoints("127.0.0.1").build();
        }
    }));
    env.execute("Write");
    DataSet<Tuple2<Integer, String>> inputDS = env.createInput(new CassandraInputFormat<Tuple2<Integer, String>>(SELECT_QUERY, new ClusterBuilder() {
        @Override
        protected Cluster buildCluster(Builder builder) {
            return builder.addContactPoints("127.0.0.1").build();
        }
    }), TupleTypeInfo.of(new TypeHint<Tuple2<Integer, String>>() {}));
    inputDS.print();
}
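On the read side, TupleTypeInfo.of(...) could equally be written with the generic TypeInformation.of(...) factory used in the other examples. A hedged sketch of the alternative, meant as a drop-in fragment for the main method above, where clusterBuilder stands in for the anonymous ClusterBuilder:
// Equivalent type hint via the generic TypeInformation factory instead of TupleTypeInfo;
// clusterBuilder is a placeholder for the anonymous ClusterBuilder defined above.
TypeInformation<Tuple2<Integer, String>> rowType =
    TypeInformation.of(new TypeHint<Tuple2<Integer, String>>() {});
DataSet<Tuple2<Integer, String>> inputDS = env.createInput(
    new CassandraInputFormat<Tuple2<Integer, String>>(SELECT_QUERY, clusterBuilder), rowType);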