Use of org.apache.flink.test.streaming.runtime.util.TestListResultSink in project flink by apache: the partitionerTest method of the class PartitionerITCase.
// Imports used by this snippet; SubtaskIndexAssigner and the verify* helpers
// are defined elsewhere in PartitionerITCase.
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.Partitioner;
import org.apache.flink.api.java.tuple.Tuple1;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.test.streaming.runtime.util.TestListResultSink;
import org.junit.Test;

import java.util.List;

import static org.junit.Assert.fail;

@Test
public void partitionerTest() {
    TestListResultSink<Tuple2<Integer, String>> hashPartitionResultSink = new TestListResultSink<Tuple2<Integer, String>>();
    TestListResultSink<Tuple2<Integer, String>> customPartitionResultSink = new TestListResultSink<Tuple2<Integer, String>>();
    TestListResultSink<Tuple2<Integer, String>> broadcastPartitionResultSink = new TestListResultSink<Tuple2<Integer, String>>();
    TestListResultSink<Tuple2<Integer, String>> forwardPartitionResultSink = new TestListResultSink<Tuple2<Integer, String>>();
    TestListResultSink<Tuple2<Integer, String>> rebalancePartitionResultSink = new TestListResultSink<Tuple2<Integer, String>>();
    TestListResultSink<Tuple2<Integer, String>> globalPartitionResultSink = new TestListResultSink<Tuple2<Integer, String>>();

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(3);

    DataStream<Tuple1<String>> src = env.fromElements(
            new Tuple1<String>("a"), new Tuple1<String>("b"), new Tuple1<String>("b"),
            new Tuple1<String>("a"), new Tuple1<String>("a"), new Tuple1<String>("c"),
            new Tuple1<String>("a"));

    // partition by hash
    src.keyBy(0).map(new SubtaskIndexAssigner()).addSink(hashPartitionResultSink);
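    // keyBy(0) hash-partitions on the first tuple field, so every record with
    // the same key is processed by the same downstream subtask.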

    // partition custom
    DataStream<Tuple2<Integer, String>> partitionCustom = src.partitionCustom(new Partitioner<String>() {
        @Override
        public int partition(String key, int numPartitions) {
            if (key.equals("c")) {
                return 2;
            } else {
                return 0;
            }
        }
    }, 0).map(new SubtaskIndexAssigner());
    partitionCustom.addSink(customPartitionResultSink);
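    // The custom partitioner routes "c" to subtask 2 and every other key to
    // subtask 0, so no record should ever reach subtask 1.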

    // partition broadcast
    src.broadcast().map(new SubtaskIndexAssigner()).addSink(broadcastPartitionResultSink);
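    // broadcast replicates each record to all subtasks: with parallelism 3 and
    // 7 input elements, the sink should collect 21 records in total.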

    // partition rebalance
    src.rebalance().map(new SubtaskIndexAssigner()).addSink(rebalancePartitionResultSink);
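    // rebalance distributes records round-robin across the three subtasks.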
// partition forward
src.map(new MapFunction<Tuple1<String>, Tuple1<String>>() {
private static final long serialVersionUID = 1L;
@Override
public Tuple1<String> map(Tuple1<String> value) throws Exception {
return value;
}
}).forward().map(new SubtaskIndexAssigner()).addSink(forwardPartitionResultSink);
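    // forward keeps each record on the subtask that produced it. Because the
    // parallelism changes from the non-parallel fromElements source to the
    // parallelism-3 identity map, Flink falls back to round-robin distribution
    // on that first hop, so the forward result matches a rebalance distribution
    // (hence the shared verifyRebalancePartitioning check below).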

    // partition global
    src.global().map(new SubtaskIndexAssigner()).addSink(globalPartitionResultSink);
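    // global routes every record to the first subtask (index 0) of the
    // downstream operator.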

    try {
        env.execute();
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }

    List<Tuple2<Integer, String>> hashPartitionResult = hashPartitionResultSink.getResult();
    List<Tuple2<Integer, String>> customPartitionResult = customPartitionResultSink.getResult();
    List<Tuple2<Integer, String>> broadcastPartitionResult = broadcastPartitionResultSink.getResult();
    List<Tuple2<Integer, String>> forwardPartitionResult = forwardPartitionResultSink.getResult();
    List<Tuple2<Integer, String>> rebalancePartitionResult = rebalancePartitionResultSink.getResult();
    List<Tuple2<Integer, String>> globalPartitionResult = globalPartitionResultSink.getResult();

    verifyHashPartitioning(hashPartitionResult);
    verifyCustomPartitioning(customPartitionResult);
    verifyBroadcastPartitioning(broadcastPartitionResult);
    verifyRebalancePartitioning(forwardPartitionResult);
    verifyRebalancePartitioning(rebalancePartitionResult);
    verifyGlobalPartitioning(globalPartitionResult);
}
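The mapper SubtaskIndexAssigner is defined elsewhere in PartitionerITCase and is not shown above. A minimal sketch, assuming its only job is to tag each element with the index of the parallel subtask that processed it (which is what the verify* helpers need to inspect); the exact field layout is illustrative:

// Tags each element with the index of the subtask that mapped it.
private static class SubtaskIndexAssigner extends RichMapFunction<Tuple1<String>, Tuple2<Integer, String>> {
    private static final long serialVersionUID = 1L;

    @Override
    public Tuple2<Integer, String> map(Tuple1<String> value) throws Exception {
        // RichMapFunction exposes the subtask index via the runtime context
        return new Tuple2<Integer, String>(getRuntimeContext().getIndexOfThisSubtask(), value.f0);
    }
}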
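The verify* methods are likewise helpers in the same test class and are not shown on this page. As an illustration of what they assert, a hedged sketch of verifyGlobalPartitioning, assuming it only needs to check that global() sent every record to subtask 0:

private static void verifyGlobalPartitioning(List<Tuple2<Integer, String>> result) {
    // global() must have routed every record to the first (index 0) subtask
    for (Tuple2<Integer, String> record : result) {
        assertEquals(Integer.valueOf(0), record.f0);
    }
}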