Use of org.apache.flink.optimizer.plantranslate.JobGraphGenerator in project flink by apache.
From class UnionPropertyPropagationTest, method testUnion1:
@Test
public void testUnion1() {
    // construct the plan
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSet<Long> sourceA = env.generateSequence(0, 1);
    DataSet<Long> sourceB = env.generateSequence(0, 1);

    DataSet<Long> redA = sourceA.groupBy("*").reduceGroup(new IdentityGroupReducer<Long>());
    DataSet<Long> redB = sourceB.groupBy("*").reduceGroup(new IdentityGroupReducer<Long>());

    redA.union(redB)
        .groupBy("*")
        .reduceGroup(new IdentityGroupReducer<Long>())
        .output(new DiscardingOutputFormat<Long>());

    Plan plan = env.createProgramPlan();
    OptimizedPlan oPlan = compileNoStats(plan);

    // compile the plan to verify that no error is thrown
    JobGraphGenerator jobGen = new JobGraphGenerator();
    jobGen.compileJobGraph(oPlan);

    oPlan.accept(new Visitor<PlanNode>() {

        @Override
        public boolean preVisit(PlanNode visitable) {
            if (visitable instanceof SingleInputPlanNode
                    && visitable.getProgramOperator() instanceof GroupReduceOperatorBase) {
                for (Channel inConn : visitable.getInputs()) {
                    Assert.assertTrue(
                        "Reduce should just forward the input if it is already partitioned",
                        inConn.getShipStrategy() == ShipStrategyType.FORWARD);
                }
                // just check the latest ReduceNode
                return false;
            }
            return true;
        }

        @Override
        public void postVisit(PlanNode visitable) {
            // do nothing
        }
    });
}
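A note on the helper used above: compileNoStats comes from Flink's CompilerTestBase and is not shown on this page. The snippet below is a minimal sketch of the pipeline such a helper is assumed to wrap, namely optimizing the Plan without data statistics and then translating the result with JobGraphGenerator. The class name CompilePipelineSketch and the method name compile are illustrative, and the Optimizer constructor shown (DataStatistics, cost estimator, Configuration) is an assumption about the flink-optimizer API.

import org.apache.flink.api.common.Plan;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.optimizer.DataStatistics;
import org.apache.flink.optimizer.Optimizer;
import org.apache.flink.optimizer.costs.DefaultCostEstimator;
import org.apache.flink.optimizer.plan.OptimizedPlan;
import org.apache.flink.optimizer.plantranslate.JobGraphGenerator;
import org.apache.flink.runtime.jobgraph.JobGraph;

// illustrative helper class, not part of the Flink tests
public class CompilePipelineSketch {

    // optimize a program plan without data statistics, then translate it into a runtime JobGraph
    public static JobGraph compile(Plan plan) {
        Optimizer optimizer = new Optimizer(new DataStatistics(), new DefaultCostEstimator(), new Configuration());
        OptimizedPlan optimizedPlan = optimizer.compile(plan);
        return new JobGraphGenerator().compileJobGraph(optimizedPlan);
    }
}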
Use of org.apache.flink.optimizer.plantranslate.JobGraphGenerator in project flink by apache.
From class UnionReplacementTest, method testUnionReplacement:
@Test
public void testUnionReplacement() {
    try {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<String> input1 = env.fromElements("test1");
        DataSet<String> input2 = env.fromElements("test2");

        DataSet<String> union = input1.union(input2);
        union.output(new DiscardingOutputFormat<String>());
        union.output(new DiscardingOutputFormat<String>());

        Plan plan = env.createProgramPlan();
        OptimizedPlan oPlan = compileNoStats(plan);

        JobGraphGenerator jobGen = new JobGraphGenerator();
        jobGen.compileJobGraph(oPlan);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
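In all of the tests on this page the JobGraph returned by compileJobGraph is discarded; only the absence of an exception is checked. As a small follow-up sketch, the return value could also be captured and inspected, for example to look at the generated vertices. The assertion and the printed output below are illustrative and not part of the Flink test; oPlan is assumed to be the optimized plan from the snippet above.

import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.runtime.jobgraph.JobVertex;

// capture the translated JobGraph instead of discarding it
JobGraph jobGraph = new JobGraphGenerator().compileJobGraph(oPlan);

// the translated graph should contain at least one vertex
Assert.assertTrue(jobGraph.getNumberOfVertices() > 0);

// list the vertices in topological order, e.g. for debugging
for (JobVertex vertex : jobGraph.getVerticesSortedTopologicallyFromSources()) {
    System.out.println(vertex.getName() + " (parallelism " + vertex.getParallelism() + ")");
}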
Use of org.apache.flink.optimizer.plantranslate.JobGraphGenerator in project flink by apache.
From class WorksetIterationsRecordApiCompilerTest, method testRecordApiWithDirectSoltionSetUpdate:
@Test
public void testRecordApiWithDirectSoltionSetUpdate() {
    Plan plan = getTestPlan(true, false);

    OptimizedPlan oPlan;
    try {
        oPlan = compileNoStats(plan);
    } catch (CompilerException ce) {
        ce.printStackTrace();
        fail("The pact compiler is unable to compile this plan correctly.");
        // silence the compiler
        return;
    }

    OptimizerPlanNodeResolver resolver = getOptimizerPlanNodeResolver(oPlan);
    DualInputPlanNode joinWithInvariantNode = resolver.getNode(JOIN_WITH_INVARIANT_NAME);
    DualInputPlanNode joinWithSolutionSetNode = resolver.getNode(JOIN_WITH_SOLUTION_SET);
    SingleInputPlanNode worksetReducer = resolver.getNode(NEXT_WORKSET_REDUCER_NAME);

    // The iteration preserves the partitioning in the reducer, so the first partitioning happens
    // outside the loop and the in-loop partitioning is placed before the final reducer.

    // verify joinWithInvariant
    assertEquals(ShipStrategyType.FORWARD, joinWithInvariantNode.getInput1().getShipStrategy());
    assertEquals(ShipStrategyType.PARTITION_HASH, joinWithInvariantNode.getInput2().getShipStrategy());
    assertEquals(list0, joinWithInvariantNode.getKeysForInput1());
    assertEquals(list0, joinWithInvariantNode.getKeysForInput2());

    // verify joinWithSolutionSet
    assertEquals(ShipStrategyType.FORWARD, joinWithSolutionSetNode.getInput1().getShipStrategy());
    assertEquals(ShipStrategyType.FORWARD, joinWithSolutionSetNode.getInput2().getShipStrategy());

    // verify reducer
    assertEquals(ShipStrategyType.FORWARD, worksetReducer.getInput().getShipStrategy());
    assertEquals(list0, worksetReducer.getKeys(0));

    // verify solution set delta
    assertEquals(1, joinWithSolutionSetNode.getOutgoingChannels().size());
    assertEquals(ShipStrategyType.FORWARD, joinWithSolutionSetNode.getOutgoingChannels().get(0).getShipStrategy());

    new JobGraphGenerator().compileJobGraph(oPlan);
}
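getTestPlan(true, false) and the operator-name constants (JOIN_WITH_INVARIANT_NAME and so on) are defined in the test class and are not reproduced on this page. The sketch below only illustrates the general shape of a workset (delta) iteration in the DataSet API that such a plan corresponds to; the field types, the join function, and the iteration bounds are illustrative and do not reproduce the exact plan under test.

import org.apache.flink.api.common.functions.JoinFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.DiscardingOutputFormat;
import org.apache.flink.api.java.operators.DeltaIteration;
import org.apache.flink.api.java.tuple.Tuple2;

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
DataSet<Tuple2<Long, Long>> initialSolutionSet = env.fromElements(new Tuple2<Long, Long>(1L, 1L));
DataSet<Tuple2<Long, Long>> initialWorkset = env.fromElements(new Tuple2<Long, Long>(1L, 1L));

// iterate on key field 0 for at most 100 supersteps
DeltaIteration<Tuple2<Long, Long>, Tuple2<Long, Long>> iteration =
        initialSolutionSet.iterateDelta(initialWorkset, 100, 0);

// join the workset with the solution set; this join function simply forwards the workset record
DataSet<Tuple2<Long, Long>> delta = iteration.getWorkset()
        .join(iteration.getSolutionSet())
        .where(0).equalTo(0)
        .with(new JoinFunction<Tuple2<Long, Long>, Tuple2<Long, Long>, Tuple2<Long, Long>>() {
            @Override
            public Tuple2<Long, Long> join(Tuple2<Long, Long> workset, Tuple2<Long, Long> solution) {
                return workset;
            }
        });

// the delta also serves as the next workset; the closed iteration is written to a sink
iteration.closeWith(delta, delta).output(new DiscardingOutputFormat<Tuple2<Long, Long>>());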
Use of org.apache.flink.optimizer.plantranslate.JobGraphGenerator in project flink by apache.
From class ReduceAllTest, method testReduce:
@Test
public void testReduce() {
    // construct the plan
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSet<Long> set1 = env.generateSequence(0, 1);
    set1.reduceGroup(new IdentityGroupReducer<Long>()).name("Reduce1")
        .output(new DiscardingOutputFormat<Long>()).name("Sink");

    Plan plan = env.createProgramPlan();
    try {
        OptimizedPlan oPlan = compileNoStats(plan);
        JobGraphGenerator jobGen = new JobGraphGenerator();
        jobGen.compileJobGraph(oPlan);
    } catch (CompilerException ce) {
        ce.printStackTrace();
        fail("The pact compiler is unable to compile this plan correctly");
    }
}
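When a compile test like this one fails, it can help to see which strategies the optimizer actually chose. The following is a small sketch of dumping the optimized plan as JSON, assuming the PlanJSONDumpGenerator class from the flink-optimizer plandump package and its getOptimizerPlanAsJSON method; printing to stdout is only for illustration, and oPlan is assumed to be the optimized plan from the snippet above.

import org.apache.flink.optimizer.plandump.PlanJSONDumpGenerator;

// render the optimized plan, including chosen ship and driver strategies, as JSON
PlanJSONDumpGenerator dumper = new PlanJSONDumpGenerator();
String planAsJson = dumper.getOptimizerPlanAsJSON(oPlan);
System.out.println(planAsJson);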
Use of org.apache.flink.optimizer.plantranslate.JobGraphGenerator in project flink by apache.
From class BinaryCustomPartitioningCompatibilityTest, method testCompatiblePartitioningJoin:
@Test
public void testCompatiblePartitioningJoin() {
    try {
        final Partitioner<Long> partitioner = new Partitioner<Long>() {

            @Override
            public int partition(Long key, int numPartitions) {
                return 0;
            }
        };

        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<Long, Long>> input1 = env.fromElements(new Tuple2<Long, Long>(0L, 0L));
        DataSet<Tuple3<Long, Long, Long>> input2 = env.fromElements(new Tuple3<Long, Long, Long>(0L, 0L, 0L));

        input1.partitionCustom(partitioner, 1)
            .join(input2.partitionCustom(partitioner, 0))
            .where(1).equalTo(0)
            .output(new DiscardingOutputFormat<Tuple2<Tuple2<Long, Long>, Tuple3<Long, Long, Long>>>());

        Plan p = env.createProgramPlan();
        OptimizedPlan op = compileNoStats(p);

        SinkPlanNode sink = op.getDataSinks().iterator().next();
        DualInputPlanNode join = (DualInputPlanNode) sink.getInput().getSource();
        SingleInputPlanNode partitioner1 = (SingleInputPlanNode) join.getInput1().getSource();
        SingleInputPlanNode partitioner2 = (SingleInputPlanNode) join.getInput2().getSource();

        assertEquals(ShipStrategyType.FORWARD, join.getInput1().getShipStrategy());
        assertEquals(ShipStrategyType.FORWARD, join.getInput2().getShipStrategy());
        assertEquals(ShipStrategyType.PARTITION_CUSTOM, partitioner1.getInput().getShipStrategy());
        assertEquals(ShipStrategyType.PARTITION_CUSTOM, partitioner2.getInput().getShipStrategy());
        assertEquals(partitioner, partitioner1.getInput().getPartitioner());
        assertEquals(partitioner, partitioner2.getInput().getPartitioner());

        new JobGraphGenerator().compileJobGraph(op);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}