Use of org.apache.flink.optimizer.plan.SinkPlanNode in project flink by apache.
From the class GroupingPojoTranslationTest, the method testCustomPartitioningPojoReduce:
@Test
public void testCustomPartitioningPojoReduce() {
    try {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        DataSet<Pojo2> data = env.fromElements(new Pojo2())
                .rebalance().setParallelism(4);

        data.groupBy("a")
                .withPartitioner(new TestPartitionerInt())
                .reduce(new SelectOneReducer<Pojo2>())
                .output(new DiscardingOutputFormat<Pojo2>());

        Plan p = env.createProgramPlan();
        OptimizedPlan op = compileNoStats(p);

        SinkPlanNode sink = op.getDataSinks().iterator().next();
        SingleInputPlanNode reducer = (SingleInputPlanNode) sink.getInput().getSource();
        SingleInputPlanNode combiner = (SingleInputPlanNode) reducer.getInput().getSource();

        // only the connection into the reducer should use the custom partitioning
        assertEquals(ShipStrategyType.FORWARD, sink.getInput().getShipStrategy());
        assertEquals(ShipStrategyType.PARTITION_CUSTOM, reducer.getInput().getShipStrategy());
        assertEquals(ShipStrategyType.FORWARD, combiner.getInput().getShipStrategy());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
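TestPartitionerInt and SelectOneReducer are shared test helpers that are not shown on this page. A minimal sketch of what they could look like, assuming only the standard Partitioner and ReduceFunction interfaces (the bodies here are illustrative, not the actual helpers):

import org.apache.flink.api.common.functions.Partitioner;
import org.apache.flink.api.common.functions.ReduceFunction;

// Illustrative stand-in: where the records go does not matter for these
// plan tests, only that a custom Partitioner is attached to the operator.
class TestPartitionerInt implements Partitioner<Integer> {
    @Override
    public int partition(Integer key, int numPartitions) {
        return 0;
    }
}

// Illustrative stand-in: a trivial reduce that keeps the first of the two values.
class SelectOneReducer<T> implements ReduceFunction<T> {
    @Override
    public T reduce(T value1, T value2) {
        return value1;
    }
}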
Use of org.apache.flink.optimizer.plan.SinkPlanNode in project flink by apache.
From the class JoinCustomPartitioningTest, the method testJoinWithKeySelectors:
@Test
public void testJoinWithKeySelectors() {
    try {
        final Partitioner<Integer> partitioner = new TestPartitionerInt();

        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Pojo2> input1 = env.fromElements(new Pojo2());
        DataSet<Pojo3> input2 = env.fromElements(new Pojo3());

        input1.join(input2, JoinHint.REPARTITION_HASH_FIRST)
                .where(new Pojo2KeySelector())
                .equalTo(new Pojo3KeySelector())
                .withPartitioner(partitioner)
                .output(new DiscardingOutputFormat<Tuple2<Pojo2, Pojo3>>());

        Plan p = env.createProgramPlan();
        OptimizedPlan op = compileNoStats(p);

        SinkPlanNode sink = op.getDataSinks().iterator().next();
        DualInputPlanNode join = (DualInputPlanNode) sink.getInput().getSource();

        assertEquals(ShipStrategyType.PARTITION_CUSTOM, join.getInput1().getShipStrategy());
        assertEquals(ShipStrategyType.PARTITION_CUSTOM, join.getInput2().getShipStrategy());
        assertEquals(partitioner, join.getInput1().getPartitioner());
        assertEquals(partitioner, join.getInput2().getPartitioner());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
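Pojo2KeySelector and Pojo3KeySelector are likewise test helpers. A plausible sketch, assuming Pojo2 exposes the int field a that the earlier groupBy("a") refers to, and that Pojo3 carries a matching key field (named b here purely as an assumption):

import org.apache.flink.api.java.functions.KeySelector;

// Illustrative sketch: extracts an Integer join key so the
// Partitioner<Integer> above can be applied to both inputs.
class Pojo2KeySelector implements KeySelector<Pojo2, Integer> {
    @Override
    public Integer getKey(Pojo2 value) {
        return value.a;
    }
}

class Pojo3KeySelector implements KeySelector<Pojo3, Integer> {
    @Override
    public Integer getKey(Pojo3 value) {
        return value.b; // field name assumed for illustration
    }
}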
Use of org.apache.flink.optimizer.plan.SinkPlanNode in project flink by apache.
From the class JoinCustomPartitioningTest, the method testIncompatibleHashAndCustomPartitioning:
@Test
public void testIncompatibleHashAndCustomPartitioning() {
    try {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple3<Long, Long, Long>> input = env.fromElements(new Tuple3<Long, Long, Long>(0L, 0L, 0L));

        DataSet<Tuple3<Long, Long, Long>> partitioned = input
                .partitionCustom(new Partitioner<Long>() {
                    @Override
                    public int partition(Long key, int numPartitions) {
                        return 0;
                    }
                }, 0)
                .map(new IdentityMapper<Tuple3<Long, Long, Long>>())
                .withForwardedFields("0", "1", "2");

        DataSet<Tuple3<Long, Long, Long>> grouped = partitioned
                .distinct(0, 1)
                .groupBy(1)
                .sortGroup(0, Order.ASCENDING)
                .reduceGroup(new IdentityGroupReducerCombinable<Tuple3<Long, Long, Long>>())
                .withForwardedFields("0", "1");

        grouped.join(partitioned, JoinHint.REPARTITION_HASH_FIRST)
                .where(0).equalTo(0)
                .with(new DummyFlatJoinFunction<Tuple3<Long, Long, Long>>())
                .output(new DiscardingOutputFormat<Tuple3<Long, Long, Long>>());

        Plan p = env.createProgramPlan();
        OptimizedPlan op = compileNoStats(p);

        SinkPlanNode sink = op.getDataSinks().iterator().next();
        DualInputPlanNode join = (DualInputPlanNode) sink.getInput().getSource();

        // the custom partitioning is not compatible with the hash join's
        // requirement, so the first input must be re-partitioned by hash
        assertEquals(ShipStrategyType.PARTITION_HASH, join.getInput1().getShipStrategy());
        assertTrue(join.getInput2().getShipStrategy() == ShipStrategyType.PARTITION_HASH
                || join.getInput2().getShipStrategy() == ShipStrategyType.FORWARD);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
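The point of this test: data distributed by a user-defined Partitioner is opaque to the optimizer, so it cannot be treated as equivalent to the hash partitioning that a repartition join requires, and the join input is reshuffled. The .withForwardedFields(...) calls tell the optimizer which fields pass through an operator unchanged; the same declaration can be attached to the function itself with Flink's semantic annotations. A hedged sketch (the class name IdentityMapper3 is made up for this example):

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.functions.FunctionAnnotation.ForwardedFields;
import org.apache.flink.api.java.tuple.Tuple3;

// Declares the same property as .withForwardedFields("0", "1", "2"):
// all three tuple fields pass through unchanged, so existing
// partitionings and orders survive this operator.
@ForwardedFields({"0", "1", "2"})
class IdentityMapper3 implements MapFunction<Tuple3<Long, Long, Long>, Tuple3<Long, Long, Long>> {
    @Override
    public Tuple3<Long, Long, Long> map(Tuple3<Long, Long, Long> value) {
        return value;
    }
}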
Use of org.apache.flink.optimizer.plan.SinkPlanNode in project flink by apache.
From the class JoinCustomPartitioningTest, the method testJoinWithTuples:
@Test
public void testJoinWithTuples() {
    try {
        final Partitioner<Long> partitioner = new TestPartitionerLong();

        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<Long, Long>> input1 = env.fromElements(new Tuple2<Long, Long>(0L, 0L));
        DataSet<Tuple3<Long, Long, Long>> input2 = env.fromElements(new Tuple3<Long, Long, Long>(0L, 0L, 0L));

        input1.join(input2, JoinHint.REPARTITION_HASH_FIRST)
                .where(1).equalTo(0)
                .withPartitioner(partitioner)
                .output(new DiscardingOutputFormat<Tuple2<Tuple2<Long, Long>, Tuple3<Long, Long, Long>>>());

        Plan p = env.createProgramPlan();
        OptimizedPlan op = compileNoStats(p);

        SinkPlanNode sink = op.getDataSinks().iterator().next();
        DualInputPlanNode join = (DualInputPlanNode) sink.getInput().getSource();

        assertEquals(ShipStrategyType.PARTITION_CUSTOM, join.getInput1().getShipStrategy());
        assertEquals(ShipStrategyType.PARTITION_CUSTOM, join.getInput2().getShipStrategy());
        assertEquals(partitioner, join.getInput1().getPartitioner());
        assertEquals(partitioner, join.getInput2().getPartitioner());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
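Every snippet on this page funnels its plan through compileNoStats, which comes from the shared compiler test base. Conceptually it runs the optimizer with empty statistics, so plan choices depend only on the hints and properties declared in the program. A rough sketch, assuming the Optimizer constructor and compile method of the flink-optimizer module:

import org.apache.flink.api.common.Plan;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.optimizer.DataStatistics;
import org.apache.flink.optimizer.Optimizer;
import org.apache.flink.optimizer.costs.DefaultCostEstimator;
import org.apache.flink.optimizer.plan.OptimizedPlan;

// Rough sketch of a compileNoStats helper: optimize the program plan
// without feeding in any source statistics, so the optimizer falls back
// to its default cost assumptions.
OptimizedPlan compileNoStats(Plan plan) {
    Optimizer optimizer = new Optimizer(
            new DataStatistics(), new DefaultCostEstimator(), new Configuration());
    return optimizer.compile(plan);
}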
Use of org.apache.flink.optimizer.plan.SinkPlanNode in project flink by apache.
From the class DataExchangeModeForwardTest, the method verifySimpleForwardPlan:
private void verifySimpleForwardPlan(ExecutionMode execMode,
        DataExchangeMode toMap, DataExchangeMode toFilter, DataExchangeMode toKeyExtractor,
        DataExchangeMode toCombiner, DataExchangeMode toReduce, DataExchangeMode toSink) {
    try {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        env.getConfig().setExecutionMode(execMode);

        DataSet<String> dataSet = env.readTextFile("/never/accessed");
        dataSet.map(new MapFunction<String, Integer>() {
            @Override
            public Integer map(String value) {
                return 0;
            }
        }).filter(new FilterFunction<Integer>() {
            @Override
            public boolean filter(Integer value) {
                return false;
            }
        }).groupBy(new IdentityKeyExtractor<Integer>())
                .reduceGroup(new Top1GroupReducer<Integer>())
                .output(new DiscardingOutputFormat<Integer>());

        OptimizedPlan optPlan = compileNoStats(env.createProgramPlan());

        // walk the optimized plan backwards from the sink to the source
        SinkPlanNode sinkNode = optPlan.getDataSinks().iterator().next();
        SingleInputPlanNode reduceNode = (SingleInputPlanNode) sinkNode.getPredecessor();
        SingleInputPlanNode combineNode = (SingleInputPlanNode) reduceNode.getPredecessor();
        SingleInputPlanNode keyExtractorNode = (SingleInputPlanNode) combineNode.getPredecessor();
        SingleInputPlanNode filterNode = (SingleInputPlanNode) keyExtractorNode.getPredecessor();
        SingleInputPlanNode mapNode = (SingleInputPlanNode) filterNode.getPredecessor();

        // each connection must use the data exchange mode expected by the caller
        assertEquals(toMap, mapNode.getInput().getDataExchangeMode());
        assertEquals(toFilter, filterNode.getInput().getDataExchangeMode());
        assertEquals(toKeyExtractor, keyExtractorNode.getInput().getDataExchangeMode());
        assertEquals(toCombiner, combineNode.getInput().getDataExchangeMode());
        assertEquals(toReduce, reduceNode.getInput().getDataExchangeMode());
        assertEquals(toSink, sinkNode.getInput().getDataExchangeMode());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
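Callers of this helper supply the execution mode and the exchange mode they expect on each of the six connections. A hedged example of how a test could exercise the forced-pipelined case, where every forward connection is expected to stay pipelined:

@Test
public void testPipelinedForced() {
    // map -> filter -> key extractor -> combiner -> reducer -> sink:
    // under PIPELINED_FORCED all connections should use pipelined exchange
    verifySimpleForwardPlan(ExecutionMode.PIPELINED_FORCED,
            DataExchangeMode.PIPELINED, DataExchangeMode.PIPELINED,
            DataExchangeMode.PIPELINED, DataExchangeMode.PIPELINED,
            DataExchangeMode.PIPELINED, DataExchangeMode.PIPELINED);
}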