Use of org.apache.flink.optimizer.plantranslate.JobGraphGenerator in project flink by apache.
Class BinaryCustomPartitioningCompatibilityTest, method testCompatiblePartitioningCoGroup.
@Test
public void testCompatiblePartitioningCoGroup() {
    try {
        final Partitioner<Long> partitioner = new Partitioner<Long>() {

            @Override
            public int partition(Long key, int numPartitions) {
                return 0;
            }
        };

        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        DataSet<Tuple2<Long, Long>> input1 = env.fromElements(new Tuple2<Long, Long>(0L, 0L));
        DataSet<Tuple3<Long, Long, Long>> input2 = env.fromElements(new Tuple3<Long, Long, Long>(0L, 0L, 0L));

        // co-group the two custom-partitioned inputs on the same keys they were partitioned by
        input1.partitionCustom(partitioner, 1)
            .coGroup(input2.partitionCustom(partitioner, 0))
            .where(1).equalTo(0)
            .with(new DummyCoGroupFunction<Tuple2<Long, Long>, Tuple3<Long, Long, Long>>())
            .output(new DiscardingOutputFormat<Tuple2<Tuple2<Long, Long>, Tuple3<Long, Long, Long>>>());

        Plan p = env.createProgramPlan();
        OptimizedPlan op = compileNoStats(p);

        SinkPlanNode sink = op.getDataSinks().iterator().next();
        DualInputPlanNode coGroup = (DualInputPlanNode) sink.getInput().getSource();
        SingleInputPlanNode partitioner1 = (SingleInputPlanNode) coGroup.getInput1().getSource();
        SingleInputPlanNode partitioner2 = (SingleInputPlanNode) coGroup.getInput2().getSource();

        // the compatible custom partitionings must be reused: the co-group reads both inputs forward
        assertEquals(ShipStrategyType.FORWARD, coGroup.getInput1().getShipStrategy());
        assertEquals(ShipStrategyType.FORWARD, coGroup.getInput2().getShipStrategy());

        assertEquals(ShipStrategyType.PARTITION_CUSTOM, partitioner1.getInput().getShipStrategy());
        assertEquals(ShipStrategyType.PARTITION_CUSTOM, partitioner2.getInput().getShipStrategy());
        assertEquals(partitioner, partitioner1.getInput().getPartitioner());
        assertEquals(partitioner, partitioner2.getInput().getPartitioner());

        // make sure the optimized plan translates into a job graph without errors
        new JobGraphGenerator().compileJobGraph(op);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
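The compileNoStats(...) call above is a helper inherited from Flink's optimizer test base and is not part of this snippet. For orientation, a minimal, self-contained sketch of the same Plan to OptimizedPlan to JobGraph translation could look like the following; the example program and the class name are made up for illustration, and the Optimizer constructor used here should be checked against the Flink version in use:

import org.apache.flink.api.common.Plan;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.DiscardingOutputFormat;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.optimizer.DataStatistics;
import org.apache.flink.optimizer.Optimizer;
import org.apache.flink.optimizer.costs.DefaultCostEstimator;
import org.apache.flink.optimizer.plan.OptimizedPlan;
import org.apache.flink.optimizer.plantranslate.JobGraphGenerator;
import org.apache.flink.runtime.jobgraph.JobGraph;

public class JobGraphGeneratorSketch {

    public static void main(String[] args) {
        // build a trivial program, only so that there is a Plan to translate
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        env.generateSequence(1, 10).output(new DiscardingOutputFormat<Long>());
        Plan plan = env.createProgramPlan();

        // optimize without statistics, roughly what compileNoStats() does in the tests
        Optimizer optimizer = new Optimizer(new DataStatistics(), new DefaultCostEstimator(), new Configuration());
        OptimizedPlan optimizedPlan = optimizer.compile(plan);

        // translate the optimized plan into a runtime JobGraph
        JobGraph jobGraph = new JobGraphGenerator().compileJobGraph(optimizedPlan);
        System.out.println("Job graph with " + jobGraph.getNumberOfVertices() + " vertices");
    }
}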
Use of org.apache.flink.optimizer.plantranslate.JobGraphGenerator in project flink by apache.
Class CachedMatchStrategyCompilerTest, method testRightSide.
/**
 * Tests that a HYBRIDHASH_BUILD_SECOND join is correctly transformed into a
 * HYBRIDHASH_BUILD_SECOND_CACHED join when it sits inside an iteration and its build
 * side is on the static path.
 */
@Test
public void testRightSide() {
    try {
        Plan plan = getTestPlanRightStatic(Optimizer.HINT_LOCAL_STRATEGY_HASH_BUILD_SECOND);
        OptimizedPlan oPlan = compileNoStats(plan);

        OptimizerPlanNodeResolver resolver = getOptimizerPlanNodeResolver(oPlan);
        DualInputPlanNode innerJoin = resolver.getNode("DummyJoiner");

        // verify that the build-second hash join was replaced by its cached variant
        assertEquals(DriverStrategy.HYBRIDHASH_BUILD_SECOND_CACHED, innerJoin.getDriverStrategy());
        assertEquals(TempMode.NONE, innerJoin.getInput1().getTempMode());
        assertEquals(TempMode.NONE, innerJoin.getInput2().getTempMode());

        // make sure the optimized plan translates into a job graph without errors
        new JobGraphGenerator().compileJobGraph(oPlan);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail("Test errored: " + e.getMessage());
    }
}
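getTestPlanRightStatic(...) is a private helper of CachedMatchStrategyCompilerTest that this snippet does not include; the same helper is used by the countercheck test below. A hypothetical reconstruction of its shape is sketched here: a bulk iteration joins its (dynamic) partial solution against a data set created outside the iteration (the static path), names the join "DummyJoiner" so the plan-node resolver can find it, and forces the local strategy through the hint passed in as an argument. The DummyJoiner class, the constant Optimizer.HINT_LOCAL_STRATEGY, and the withParameters mechanism are assumptions drawn from how the optimizer tests usually force local strategies, not a verbatim copy of the real helper.

// Hypothetical reconstruction of getTestPlanRightStatic(String); the real helper differs in detail.
// Assumes imports of org.apache.flink.api.java.operators.IterativeDataSet,
// org.apache.flink.api.common.functions.JoinFunction and org.apache.flink.configuration.Configuration.
private Plan getTestPlanRightStatic(String localStrategyHint) {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

    DataSet<Tuple3<Long, Long, Long>> iterating = env.fromElements(new Tuple3<Long, Long, Long>(1L, 2L, 3L));
    DataSet<Tuple3<Long, Long, Long>> staticSide = env.fromElements(new Tuple3<Long, Long, Long>(1L, 2L, 3L));

    IterativeDataSet<Tuple3<Long, Long, Long>> iteration = iterating.iterate(10);

    // force the requested local strategy on the join (assumed mechanism)
    Configuration joinParameters = new Configuration();
    joinParameters.setString(Optimizer.HINT_LOCAL_STRATEGY, localStrategyHint);

    DataSet<Tuple3<Long, Long, Long>> joined = iteration
            .join(staticSide)                    // second input comes from outside the iteration
            .where(0).equalTo(0)
            .with(new DummyJoiner())
            .name("DummyJoiner")                 // name used by resolver.getNode("DummyJoiner")
            .withParameters(joinParameters);

    iteration.closeWith(joined).output(new DiscardingOutputFormat<Tuple3<Long, Long, Long>>());
    return env.createProgramPlan();
}

// stand-in join function for the sketch
private static class DummyJoiner
        implements JoinFunction<Tuple3<Long, Long, Long>, Tuple3<Long, Long, Long>, Tuple3<Long, Long, Long>> {

    @Override
    public Tuple3<Long, Long, Long> join(Tuple3<Long, Long, Long> first, Tuple3<Long, Long, Long> second) {
        return first;
    }
}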
Use of org.apache.flink.optimizer.plantranslate.JobGraphGenerator in project flink by apache.
Class CachedMatchStrategyCompilerTest, method testRightSideCountercheck.
/**
 * Makes sure that only a hybrid hash join whose build side is on the static path is
 * transformed into the cached variant.
 */
@Test
public void testRightSideCountercheck() {
    try {
        Plan plan = getTestPlanRightStatic(Optimizer.HINT_LOCAL_STRATEGY_HASH_BUILD_FIRST);
        OptimizedPlan oPlan = compileNoStats(plan);

        OptimizerPlanNodeResolver resolver = getOptimizerPlanNodeResolver(oPlan);
        DualInputPlanNode innerJoin = resolver.getNode("DummyJoiner");

        // the build-first strategy stays as-is; the static second input is cached instead
        assertEquals(DriverStrategy.HYBRIDHASH_BUILD_FIRST, innerJoin.getDriverStrategy());
        assertEquals(TempMode.NONE, innerJoin.getInput1().getTempMode());
        assertEquals(TempMode.CACHED, innerJoin.getInput2().getTempMode());

        // make sure the optimized plan translates into a job graph without errors
        new JobGraphGenerator().compileJobGraph(oPlan);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail("Test errored: " + e.getMessage());
    }
}
Use of org.apache.flink.optimizer.plantranslate.JobGraphGenerator in project flink by apache.
Class DisjointDataFlowsTest, method testDisjointFlows.
@Test
public void testDisjointFlows() {
    try {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // generate two entirely disconnected flows in the same program
        env.generateSequence(1, 10).output(new DiscardingOutputFormat<Long>());
        env.generateSequence(1, 10).output(new DiscardingOutputFormat<Long>());

        Plan p = env.createProgramPlan();
        OptimizedPlan op = compileNoStats(p);

        // both disjoint flows must compile into a single job graph without errors
        new JobGraphGenerator().compileJobGraph(op);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
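Not part of the original test, but as a small follow-up sketch: the JobGraph returned by compileJobGraph(op) covers both disjoint flows in a single job, which could be made visible as below. This assumes the JobGraph#getVertices() / JobVertex API of recent Flink versions; vertex names depend on operator naming and chaining, so nothing is asserted about them.

// hedged follow-up: inspect the single JobGraph that contains both disjoint flows
JobGraph jobGraph = new JobGraphGenerator().compileJobGraph(op);
for (JobVertex vertex : jobGraph.getVertices()) {
    System.out.println(vertex.getName());
}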
Use of org.apache.flink.optimizer.plantranslate.JobGraphGenerator in project flink by apache.
Class IterationsCompilerTest, method testTwoIterationsWithMapperInbetween.
@Test
public void testTwoIterationsWithMapperInbetween() throws Exception {
    try {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(8);

        DataSet<Tuple2<Long, Long>> verticesWithInitialId = env.fromElements(new Tuple2<Long, Long>(1L, 2L));
        DataSet<Tuple2<Long, Long>> edges = env.fromElements(new Tuple2<Long, Long>(1L, 2L));

        // bulk iteration -> mapper -> delta iteration
        DataSet<Tuple2<Long, Long>> bulkResult = doBulkIteration(verticesWithInitialId, edges);
        DataSet<Tuple2<Long, Long>> mappedBulk = bulkResult.map(new DummyMap());
        DataSet<Tuple2<Long, Long>> depResult = doDeltaIteration(mappedBulk, edges);

        depResult.output(new DiscardingOutputFormat<Tuple2<Long, Long>>());

        Plan p = env.createProgramPlan();
        OptimizedPlan op = compileNoStats(p);

        assertEquals(1, op.getDataSinks().size());
        assertTrue(op.getDataSinks().iterator().next().getInput().getSource() instanceof WorksetIterationPlanNode);
        WorksetIterationPlanNode wipn = (WorksetIterationPlanNode) op.getDataSinks().iterator().next().getInput().getSource();

        // the workset iteration's first input is hash-partitioned, nothing is temped,
        // and both inputs use batch data exchange
        assertEquals(ShipStrategyType.PARTITION_HASH, wipn.getInput1().getShipStrategy());
        assertEquals(TempMode.NONE, wipn.getInput1().getTempMode());
        assertEquals(TempMode.NONE, wipn.getInput2().getTempMode());
        assertEquals(DataExchangeMode.BATCH, wipn.getInput1().getDataExchangeMode());
        assertEquals(DataExchangeMode.BATCH, wipn.getInput2().getDataExchangeMode());

        // make sure the optimized plan translates into a job graph without errors
        new JobGraphGenerator().compileJobGraph(op);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
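doBulkIteration(...) and doDeltaIteration(...) are helpers of IterationsCompilerTest and are not included in the snippet. A hypothetical sketch of their general shape, using the DataSet iteration APIs (iterate / iterateDelta), is given below; the join and aggregation bodies and the DummyIterationJoin class are placeholders, not the test's actual logic.

// Hypothetical shape of the iteration helpers used above; the real bodies in IterationsCompilerTest differ.
// Assumes imports of org.apache.flink.api.java.operators.IterativeDataSet,
// org.apache.flink.api.java.operators.DeltaIteration and org.apache.flink.api.common.functions.JoinFunction.
public static DataSet<Tuple2<Long, Long>> doBulkIteration(
        DataSet<Tuple2<Long, Long>> vertices, DataSet<Tuple2<Long, Long>> edges) {

    IterativeDataSet<Tuple2<Long, Long>> iteration = vertices.iterate(20);

    DataSet<Tuple2<Long, Long>> changes = iteration
            .join(edges).where(0).equalTo(0)
            .with(new DummyIterationJoin())   // placeholder join function
            .groupBy(0)
            .min(1);                          // placeholder aggregation

    return iteration.closeWith(changes);
}

public static DataSet<Tuple2<Long, Long>> doDeltaIteration(
        DataSet<Tuple2<Long, Long>> solutionSetInput, DataSet<Tuple2<Long, Long>> edges) {

    DeltaIteration<Tuple2<Long, Long>, Tuple2<Long, Long>> iteration =
            solutionSetInput.iterateDelta(solutionSetInput, 100, 0);

    DataSet<Tuple2<Long, Long>> delta = iteration.getWorkset()
            .join(edges).where(0).equalTo(0)
            .with(new DummyIterationJoin())   // placeholder join function
            .groupBy(0)
            .min(1);                          // placeholder aggregation

    return iteration.closeWith(delta, delta);
}

// stand-in join function for the sketch
public static class DummyIterationJoin
        implements JoinFunction<Tuple2<Long, Long>, Tuple2<Long, Long>, Tuple2<Long, Long>> {

    @Override
    public Tuple2<Long, Long> join(Tuple2<Long, Long> first, Tuple2<Long, Long> second) {
        return first;
    }
}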