Use of org.apache.flink.optimizer.plan.DualInputPlanNode in project flink by apache.
From class CoGroupCustomPartitioningTest, method testCoGroupWithPojos:
@Test
public void testCoGroupWithPojos() {
    try {
        final Partitioner<Integer> partitioner = new TestPartitionerInt();
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Pojo2> input1 = env.fromElements(new Pojo2());
        DataSet<Pojo3> input2 = env.fromElements(new Pojo3());
        input1.coGroup(input2)
                .where("b")
                .equalTo("a")
                .withPartitioner(partitioner)
                .with(new DummyCoGroupFunction<Pojo2, Pojo3>())
                .output(new DiscardingOutputFormat<Tuple2<Pojo2, Pojo3>>());
        Plan p = env.createProgramPlan();
        OptimizedPlan op = compileNoStats(p);
        SinkPlanNode sink = op.getDataSinks().iterator().next();
        DualInputPlanNode join = (DualInputPlanNode) sink.getInput().getSource();
        assertEquals(ShipStrategyType.PARTITION_CUSTOM, join.getInput1().getShipStrategy());
        assertEquals(ShipStrategyType.PARTITION_CUSTOM, join.getInput2().getShipStrategy());
        assertEquals(partitioner, join.getInput1().getPartitioner());
        assertEquals(partitioner, join.getInput2().getPartitioner());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
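The helper types referenced above (TestPartitionerInt, Pojo2, Pojo3, DummyCoGroupFunction) are fixtures from Flink's test sources. A minimal sketch of what the first three plausibly look like, assuming trivial field layouts; the only constraints the test imposes are the key fields "b" on the first input and "a" on the second:

public static class TestPartitionerInt implements Partitioner<Integer> {
    @Override
    public int partition(Integer key, int numPartitions) {
        // any deterministic assignment works; the test only checks that this
        // partitioner instance is wired into both inputs of the co-group
        return 0;
    }
}

public static class Pojo2 {
    public int a;
    public int b; // keyed via where("b")
}

public static class Pojo3 {
    public int a; // keyed via equalTo("a")
    public int b;
}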
Use of org.apache.flink.optimizer.plan.DualInputPlanNode in project flink by apache.
From class CachedMatchStrategyCompilerTest, method testRightSide:
/**
 * Tests whether a HYBRIDHASH_BUILD_SECOND strategy is correctly transformed to
 * HYBRIDHASH_BUILD_SECOND_CACHED when the join is inside an iteration and its
 * build side is on the static path.
 */
@Test
public void testRightSide() {
    try {
        Plan plan = getTestPlanRightStatic(Optimizer.HINT_LOCAL_STRATEGY_HASH_BUILD_SECOND);
        OptimizedPlan oPlan = compileNoStats(plan);
        OptimizerPlanNodeResolver resolver = getOptimizerPlanNodeResolver(oPlan);
        DualInputPlanNode innerJoin = resolver.getNode("DummyJoiner");
        // verify correct join strategy
        assertEquals(DriverStrategy.HYBRIDHASH_BUILD_SECOND_CACHED, innerJoin.getDriverStrategy());
        assertEquals(TempMode.NONE, innerJoin.getInput1().getTempMode());
        assertEquals(TempMode.NONE, innerJoin.getInput2().getTempMode());
        new JobGraphGenerator().compileJobGraph(oPlan);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail("Test errored: " + e.getMessage());
    }
}
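For context, a hedged sketch of the kind of plan getTestPlanRightStatic(strategy) has to build for this assertion to make sense: a bulk iteration whose feedback path is joined against a loop-invariant (static) data set, with the local-strategy hint handed to the join through its operator parameters. The record type, key positions, iteration count, and file path below are illustrative assumptions, not the real fixture:

private Plan getTestPlanRightStatic(String strategy) {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple3<Long, Long, Long>> bigInput =
            env.readCsvFile("file:///some/input").types(Long.class, Long.class, Long.class);
    DataSet<Tuple3<Long, Long, Long>> smallInput =
            env.readCsvFile("file:///some/input").types(Long.class, Long.class, Long.class);
    // the iteration's partial solution is the dynamic path ...
    IterativeDataSet<Tuple3<Long, Long, Long>> iteration = bigInput.iterate(10);
    Configuration joinStrategy = new Configuration();
    joinStrategy.setString(Optimizer.HINT_LOCAL_STRATEGY, strategy);
    // ... while smallInput never changes between iterations: the static path
    DataSet<Tuple3<Long, Long, Long>> inner = iteration
            .join(smallInput)
            .where(0)
            .equalTo(0)
            .with(new DummyJoiner())
            .name("DummyJoiner")
            .withParameters(joinStrategy);
    iteration.closeWith(inner)
            .output(new DiscardingOutputFormat<Tuple3<Long, Long, Long>>());
    return env.createProgramPlan();
}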
Use of org.apache.flink.optimizer.plan.DualInputPlanNode in project flink by apache.
From class CachedMatchStrategyCompilerTest, method testRightSideCountercheck:
/**
 * Makes sure that only a HYBRIDHASH join whose build side is on the static path
 * is transformed to the cached variant.
 */
@Test
public void testRightSideCountercheck() {
    try {
        Plan plan = getTestPlanRightStatic(Optimizer.HINT_LOCAL_STRATEGY_HASH_BUILD_FIRST);
        OptimizedPlan oPlan = compileNoStats(plan);
        OptimizerPlanNodeResolver resolver = getOptimizerPlanNodeResolver(oPlan);
        DualInputPlanNode innerJoin = resolver.getNode("DummyJoiner");
        // verify correct join strategy
        assertEquals(DriverStrategy.HYBRIDHASH_BUILD_FIRST, innerJoin.getDriverStrategy());
        assertEquals(TempMode.NONE, innerJoin.getInput1().getTempMode());
        assertEquals(TempMode.CACHED, innerJoin.getInput2().getTempMode());
        new JobGraphGenerator().compileJobGraph(oPlan);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail("Test errored: " + e.getMessage());
    }
}
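With HINT_LOCAL_STRATEGY_HASH_BUILD_FIRST the build side is the first input, i.e. the dynamic path that changes on every iteration, so its hash table cannot be cached; the strategy stays HYBRIDHASH_BUILD_FIRST and the static probe input is instead materialized once (TempMode.CACHED). The join function behind the "DummyJoiner" node name can stay trivial either way, since these tests only compile the plan and never execute it. A minimal sketch, assuming the Tuple3<Long, Long, Long> record type from the plan sketch above:

private static class DummyJoiner implements
        JoinFunction<Tuple3<Long, Long, Long>, Tuple3<Long, Long, Long>, Tuple3<Long, Long, Long>> {
    @Override
    public Tuple3<Long, Long, Long> join(Tuple3<Long, Long, Long> first, Tuple3<Long, Long, Long> second) {
        // pass the dynamic-path record through unchanged
        return first;
    }
}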
Use of org.apache.flink.optimizer.plan.DualInputPlanNode in project flink by apache.
From class PartitioningReusageTest, method reuseBothPartitioningCoGroup7:
@Test
public void reuseBothPartitioningCoGroup7() {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple3<Integer, Integer, Integer>> set1 = env.readCsvFile(IN_FILE)
            .types(Integer.class, Integer.class, Integer.class);
    DataSet<Tuple3<Integer, Integer, Integer>> set2 = env.readCsvFile(IN_FILE)
            .types(Integer.class, Integer.class, Integer.class);
    DataSet<Tuple3<Integer, Integer, Integer>> coGrouped = set1
            .partitionByHash(2)
            .map(new MockMapper()).withForwardedFields("2")
            .coGroup(set2
                    .partitionByHash(1)
                    .map(new MockMapper()).withForwardedFields("1"))
            .where(0, 2)
            .equalTo(1, 2)
            .with(new MockCoGroup());
    coGrouped.output(new DiscardingOutputFormat<Tuple3<Integer, Integer, Integer>>());
    Plan plan = env.createProgramPlan();
    OptimizedPlan oPlan = compileWithStats(plan);
    SinkPlanNode sink = oPlan.getDataSinks().iterator().next();
    DualInputPlanNode coGroup = (DualInputPlanNode) sink.getInput().getSource();
    checkValidCoGroupInputProperties(coGroup);
}
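The mock UDFs above are fixtures of PartitioningReusageTest. Plausible minimal sketches, assuming identity semantics; an identity map is what makes the withForwardedFields(...) annotations truthful, which in turn lets the optimizer trust that the explicit hash partitioning survives the map:

private static class MockMapper
        implements MapFunction<Tuple3<Integer, Integer, Integer>, Tuple3<Integer, Integer, Integer>> {
    @Override
    public Tuple3<Integer, Integer, Integer> map(Tuple3<Integer, Integer, Integer> value) {
        return value; // identity, so every declared forwarded field holds
    }
}

private static class MockCoGroup implements
        CoGroupFunction<Tuple3<Integer, Integer, Integer>, Tuple3<Integer, Integer, Integer>,
                Tuple3<Integer, Integer, Integer>> {
    @Override
    public void coGroup(Iterable<Tuple3<Integer, Integer, Integer>> first,
            Iterable<Tuple3<Integer, Integer, Integer>> second,
            Collector<Tuple3<Integer, Integer, Integer>> out) {
        // never executed: the test compiles the plan but does not run it
    }
}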
Use of org.apache.flink.optimizer.plan.DualInputPlanNode in project flink by apache.
From class PartitioningReusageTest, method reuseSinglePartitioningJoin1:
@Test
public void reuseSinglePartitioningJoin1() {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple3<Integer, Integer, Integer>> set1 = env.readCsvFile(IN_FILE)
            .types(Integer.class, Integer.class, Integer.class);
    DataSet<Tuple3<Integer, Integer, Integer>> set2 = env.readCsvFile(IN_FILE)
            .types(Integer.class, Integer.class, Integer.class);
    DataSet<Tuple3<Integer, Integer, Integer>> joined = set1
            .partitionByHash(0, 1)
            .map(new MockMapper()).withForwardedFields("0;1")
            .join(set2, JoinOperatorBase.JoinHint.REPARTITION_HASH_FIRST)
            .where(0, 1)
            .equalTo(0, 1)
            .with(new MockJoin());
    joined.output(new DiscardingOutputFormat<Tuple3<Integer, Integer, Integer>>());
    Plan plan = env.createProgramPlan();
    OptimizedPlan oPlan = compileWithStats(plan);
    SinkPlanNode sink = oPlan.getDataSinks().iterator().next();
    DualInputPlanNode join = (DualInputPlanNode) sink.getInput().getSource();
    checkValidJoinInputProperties(join);
}
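checkValidJoinInputProperties(join) is a helper of the test class; a hedged sketch of the kind of condition it plausibly verifies here: both join inputs must arrive hash-partitioned on the join keys, whether that partitioning is reused from the upstream partitionByHash(0, 1) (made visible through the forwarded-fields annotation) or freshly established per the REPARTITION_HASH_FIRST hint. The method name and assertions below are illustrative assumptions, not the real helper:

private void checkHashPartitionedInputs(DualInputPlanNode join) {
    // inspect the global data properties of each input channel
    GlobalProperties in1 = join.getInput1().getGlobalProperties();
    GlobalProperties in2 = join.getInput2().getGlobalProperties();
    // both sides must be hash-partitioned on the join keys, either by reusing
    // the upstream partitioning or via a PARTITION_HASH ship strategy
    assertEquals(PartitioningProperty.HASH_PARTITIONED, in1.getPartitioning());
    assertEquals(PartitioningProperty.HASH_PARTITIONED, in2.getPartitioning());
}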