Use of org.apache.flink.optimizer.plan.OptimizedPlan in project flink by apache.
From the class BroadcastVariablePipelinebreakerTest, the method testNoBreakerForIndependentVariable:
@Test
public void testNoBreakerForIndependentVariable() {
    try {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<String> source1 = env.fromElements("test");
        DataSet<String> source2 = env.fromElements("test");
        source1.map(new IdentityMapper<String>()).withBroadcastSet(source2, "some name").output(new DiscardingOutputFormat<String>());
        Plan p = env.createProgramPlan();
        OptimizedPlan op = compileNoStats(p);
        SinkPlanNode sink = op.getDataSinks().iterator().next();
        SingleInputPlanNode mapper = (SingleInputPlanNode) sink.getInput().getSource();
        // the broadcast variable is independent of the mapper's regular input,
        // so neither input needs to be materialized or batched
        assertEquals(TempMode.NONE, mapper.getInput().getTempMode());
        assertEquals(TempMode.NONE, mapper.getBroadcastInputs().get(0).getTempMode());
        assertEquals(DataExchangeMode.PIPELINED, mapper.getInput().getDataExchangeMode());
        assertEquals(DataExchangeMode.PIPELINED, mapper.getBroadcastInputs().get(0).getDataExchangeMode());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
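For context, the IdentityMapper used above is just a pass-through function from the optimizer test utilities. A minimal sketch, assuming the conventional trivial implementation (the actual utility class in Flink may differ in details):

import org.apache.flink.api.common.functions.MapFunction;

// Pass-through mapper: returns every element unchanged. The test only cares
// about the shape of the optimized plan, not about the mapper's logic.
public class IdentityMapper<T> implements MapFunction<T, T> {
    @Override
    public T map(T value) {
        return value;
    }
}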
Use of org.apache.flink.optimizer.plan.OptimizedPlan in project flink by apache.
From the class CachedMatchStrategyCompilerTest, the method testLeftSide:
/**
 * This tests whether a HYBRIDHASH_BUILD_FIRST is correctly transformed to a HYBRIDHASH_BUILD_FIRST_CACHED
 * when it is inside of an iteration and on the static path.
 */
@Test
public void testLeftSide() {
    try {
        Plan plan = getTestPlanLeftStatic(Optimizer.HINT_LOCAL_STRATEGY_HASH_BUILD_FIRST);
        OptimizedPlan oPlan = compileNoStats(plan);
        OptimizerPlanNodeResolver resolver = getOptimizerPlanNodeResolver(oPlan);
        DualInputPlanNode innerJoin = resolver.getNode("DummyJoiner");
        // verify correct join strategy
        assertEquals(DriverStrategy.HYBRIDHASH_BUILD_FIRST_CACHED, innerJoin.getDriverStrategy());
        assertEquals(TempMode.NONE, innerJoin.getInput1().getTempMode());
        assertEquals(TempMode.NONE, innerJoin.getInput2().getTempMode());
        new JobGraphGenerator().compileJobGraph(oPlan);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail("Test errored: " + e.getMessage());
    }
}
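The helper getTestPlanLeftStatic is defined elsewhere in the test class and is not shown on this page. As a rough, hypothetical sketch of the kind of plan it builds: a bulk iteration whose partial solution (the dynamic path) is joined with a data set created outside the iteration (the static path), with the requested local strategy passed to the optimizer as an operator hint. All names, tuple types, sources, and parameters below are illustrative assumptions (assuming the usual DataSet API imports), not the actual helper; DummyJoiner is a trivial join function, sketched after the last test of this class below.

// Hypothetical sketch only -- not the actual helper from the test class.
private Plan getTestPlanLeftStaticSketch(String localStrategyHint) {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

    DataSet<Tuple3<Long, Long, Long>> bigInput =
            env.readCsvFile("file:///path/to/bigFile").types(Long.class, Long.class, Long.class).name("bigFile");
    DataSet<Tuple3<Long, Long, Long>> smallInput =
            env.readCsvFile("file:///path/to/smallFile").types(Long.class, Long.class, Long.class).name("smallFile");

    // the iteration puts bigInput on the dynamic path; smallInput stays on the static path
    IterativeDataSet<Tuple3<Long, Long, Long>> iteration = bigInput.iterate(10);

    // pass the desired local strategy (e.g. HINT_LOCAL_STRATEGY_HASH_BUILD_FIRST) as a compiler hint
    Configuration joinParameters = new Configuration();
    joinParameters.setString(Optimizer.HINT_LOCAL_STRATEGY, localStrategyHint);

    // "left static": the static small side is the first (build-first) input of the join
    DataSet<Tuple3<Long, Long, Long>> joined = smallInput
            .join(iteration).where(0).equalTo(0)
            .with(new DummyJoiner()).name("DummyJoiner")
            .withParameters(joinParameters);

    iteration.closeWith(joined).output(new DiscardingOutputFormat<Tuple3<Long, Long, Long>>());
    return env.createProgramPlan();
}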
Use of org.apache.flink.optimizer.plan.OptimizedPlan in project flink by apache.
From the class CachedMatchStrategyCompilerTest, the method testLeftSideCountercheck:
/**
 * This test makes sure that only a HYBRIDHASH on the static path is transformed to the cached variant.
 */
@Test
public void testLeftSideCountercheck() {
    try {
        Plan plan = getTestPlanLeftStatic(Optimizer.HINT_LOCAL_STRATEGY_HASH_BUILD_SECOND);
        OptimizedPlan oPlan = compileNoStats(plan);
        OptimizerPlanNodeResolver resolver = getOptimizerPlanNodeResolver(oPlan);
        DualInputPlanNode innerJoin = resolver.getNode("DummyJoiner");
        // verify correct join strategy
        assertEquals(DriverStrategy.HYBRIDHASH_BUILD_SECOND, innerJoin.getDriverStrategy());
        assertEquals(TempMode.CACHED, innerJoin.getInput1().getTempMode());
        assertEquals(TempMode.NONE, innerJoin.getInput2().getTempMode());
        new JobGraphGenerator().compileJobGraph(oPlan);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail("Test errored: " + e.getMessage());
    }
}
Use of org.apache.flink.optimizer.plan.OptimizedPlan in project flink by apache.
From the class CachedMatchStrategyCompilerTest, the method testCorrectChoosing:
/**
 * This test simulates a join of a big left side with a small right side inside of an iteration, where the small side is on a static path.
 * Currently, the best execution plan is HYBRIDHASH_BUILD_SECOND_CACHED, where the small side is hashed and cached.
 * This test also makes sure that all relevant plans are correctly enumerated by the optimizer.
 */
@Test
public void testCorrectChoosing() {
    try {
        Plan plan = getTestPlanRightStatic("");
        SourceCollectorVisitor sourceCollector = new SourceCollectorVisitor();
        plan.accept(sourceCollector);
        // feed the optimizer statistics that make "bigFile" much larger than "smallFile"
        for (GenericDataSourceBase<?, ?> s : sourceCollector.getSources()) {
            if (s.getName().equals("bigFile")) {
                this.setSourceStatistics(s, 10000000, 1000);
            } else if (s.getName().equals("smallFile")) {
                this.setSourceStatistics(s, 100, 100);
            }
        }
        OptimizedPlan oPlan = compileNoStats(plan);
        OptimizerPlanNodeResolver resolver = getOptimizerPlanNodeResolver(oPlan);
        DualInputPlanNode innerJoin = resolver.getNode("DummyJoiner");
        // verify correct join strategy
        assertEquals(DriverStrategy.HYBRIDHASH_BUILD_SECOND_CACHED, innerJoin.getDriverStrategy());
        assertEquals(TempMode.NONE, innerJoin.getInput1().getTempMode());
        assertEquals(TempMode.NONE, innerJoin.getInput2().getTempMode());
        new JobGraphGenerator().compileJobGraph(oPlan);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail("Test errored: " + e.getMessage());
    }
}
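All three tests above resolve the join node by the name "DummyJoiner". The function behind that name is not shown on this page; a minimal sketch, assuming a trivial pass-through join (the tuple type and the body are assumptions, since the tests only inspect the optimized plan):

import org.apache.flink.api.common.functions.RichJoinFunction;
import org.apache.flink.api.java.tuple.Tuple3;

// Trivial join function behind the "DummyJoiner" node name. The tests only
// inspect the optimized plan, so the join result itself does not matter.
public static class DummyJoiner
        extends RichJoinFunction<Tuple3<Long, Long, Long>, Tuple3<Long, Long, Long>, Tuple3<Long, Long, Long>> {

    @Override
    public Tuple3<Long, Long, Long> join(Tuple3<Long, Long, Long> first, Tuple3<Long, Long, Long> second) {
        // forward the record from the first input unchanged
        return first;
    }
}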
Use of org.apache.flink.optimizer.plan.OptimizedPlan in project flink by apache.
From the class CoGroupSolutionSetFirstTest, the method testCoGroupSolutionSet:
@Test
public void testCoGroupSolutionSet() {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple1<Integer>> raw = env.readCsvFile(IN_FILE).types(Integer.class);
    DeltaIteration<Tuple1<Integer>, Tuple1<Integer>> iteration = raw.iterateDelta(raw, 1000, 0);
    DataSet<Tuple1<Integer>> test = iteration.getWorkset().map(new SimpleMap());
    DataSet<Tuple1<Integer>> delta = iteration.getSolutionSet().coGroup(test).where(0).equalTo(0).with(new SimpleCGroup());
    DataSet<Tuple1<Integer>> feedback = iteration.getWorkset().map(new SimpleMap());
    DataSet<Tuple1<Integer>> result = iteration.closeWith(delta, feedback);
    result.output(new DiscardingOutputFormat<Tuple1<Integer>>());
    Plan plan = env.createProgramPlan();
    OptimizedPlan oPlan = null;
    try {
        oPlan = compileNoStats(plan);
    } catch (CompilerException e) {
        Assert.fail(e.getMessage());
    }
    oPlan.accept(new Visitor<PlanNode>() {

        @Override
        public boolean preVisit(PlanNode visitable) {
            if (visitable instanceof WorksetIterationPlanNode) {
                PlanNode deltaNode = ((WorksetIterationPlanNode) visitable).getSolutionSetDeltaPlanNode();
                // get the CoGroup
                DualInputPlanNode dpn = (DualInputPlanNode) deltaNode.getInputs().iterator().next().getSource();
                Channel in1 = dpn.getInput1();
                Channel in2 = dpn.getInput2();
                // the solution set input must stay unsorted and be forwarded,
                // while the workset side is hash-partitioned and sorted on the key field
                Assert.assertTrue(in1.getLocalProperties().getOrdering() == null);
                Assert.assertTrue(in2.getLocalProperties().getOrdering() != null);
                Assert.assertTrue(in2.getLocalProperties().getOrdering().getInvolvedIndexes().contains(0));
                Assert.assertTrue(in1.getShipStrategy() == ShipStrategyType.FORWARD);
                Assert.assertTrue(in2.getShipStrategy() == ShipStrategyType.PARTITION_HASH);
                return false;
            }
            return true;
        }

        @Override
        public void postVisit(PlanNode visitable) {
        }
    });
}
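SimpleMap and SimpleCGroup are user functions defined in the test class and not shown on this page. Minimal sketches follow, assuming trivial implementations; only the plan shape matters for this test, so the function bodies are assumptions:

import org.apache.flink.api.common.functions.CoGroupFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.tuple.Tuple1;
import org.apache.flink.util.Collector;

// Pass-through map applied to the workset.
public static class SimpleMap implements MapFunction<Tuple1<Integer>, Tuple1<Integer>> {
    @Override
    public Tuple1<Integer> map(Tuple1<Integer> value) {
        return value;
    }
}

// CoGroup that emits nothing; the test only inspects ship strategies and orderings.
public static class SimpleCGroup implements CoGroupFunction<Tuple1<Integer>, Tuple1<Integer>, Tuple1<Integer>> {
    @Override
    public void coGroup(Iterable<Tuple1<Integer>> first, Iterable<Tuple1<Integer>> second, Collector<Tuple1<Integer>> out) {
    }
}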