Use of org.apache.flink.optimizer.plan.SinkPlanNode in project flink by apache.
From the class ConnectedComponentsCoGroupTest, method testWorksetConnectedComponents.
@Test
public void testWorksetConnectedComponents() throws Exception {
Plan plan = getConnectedComponentsCoGroupPlan();
plan.setExecutionConfig(new ExecutionConfig());
OptimizedPlan optPlan = compileNoStats(plan);
OptimizerPlanNodeResolver or = getOptimizerPlanNodeResolver(optPlan);
if (PRINT_PLAN) {
PlanJSONDumpGenerator dumper = new PlanJSONDumpGenerator();
String json = dumper.getOptimizerPlanAsJSON(optPlan);
System.out.println(json);
}
SourcePlanNode vertexSource = or.getNode(VERTEX_SOURCE);
SourcePlanNode edgesSource = or.getNode(EDGES_SOURCE);
SinkPlanNode sink = or.getNode(SINK);
WorksetIterationPlanNode iter = or.getNode(ITERATION_NAME);
DualInputPlanNode neighborsJoin = or.getNode(JOIN_NEIGHBORS_MATCH);
DualInputPlanNode cogroup = or.getNode(MIN_ID_AND_UPDATE);
// --------------------------------------------------------------------
// Plan validation:
//
// We expect the plan to go with a sort-merge join, because the CoGroup
// sorts and the join in the successive iteration can re-exploit the sorting.
// --------------------------------------------------------------------
// test all drivers
Assert.assertEquals(DriverStrategy.NONE, sink.getDriverStrategy());
Assert.assertEquals(DriverStrategy.NONE, vertexSource.getDriverStrategy());
Assert.assertEquals(DriverStrategy.NONE, edgesSource.getDriverStrategy());
Assert.assertEquals(DriverStrategy.INNER_MERGE, neighborsJoin.getDriverStrategy());
Assert.assertEquals(set0, neighborsJoin.getKeysForInput1());
Assert.assertEquals(set0, neighborsJoin.getKeysForInput2());
Assert.assertEquals(DriverStrategy.CO_GROUP, cogroup.getDriverStrategy());
Assert.assertEquals(set0, cogroup.getKeysForInput1());
Assert.assertEquals(set0, cogroup.getKeysForInput2());
// test all the shipping strategies
Assert.assertEquals(ShipStrategyType.FORWARD, sink.getInput().getShipStrategy());
Assert.assertEquals(ShipStrategyType.PARTITION_HASH, iter.getInitialSolutionSetInput().getShipStrategy());
Assert.assertEquals(set0, iter.getInitialSolutionSetInput().getShipStrategyKeys());
Assert.assertEquals(ShipStrategyType.PARTITION_HASH, iter.getInitialWorksetInput().getShipStrategy());
Assert.assertEquals(set0, iter.getInitialWorksetInput().getShipStrategyKeys());
Assert.assertEquals(ShipStrategyType.FORWARD, // workset
neighborsJoin.getInput1().getShipStrategy());
Assert.assertEquals(ShipStrategyType.PARTITION_HASH, // edges
neighborsJoin.getInput2().getShipStrategy());
Assert.assertEquals(set0, neighborsJoin.getInput2().getShipStrategyKeys());
Assert.assertTrue(neighborsJoin.getInput2().getTempMode().isCached());
Assert.assertEquals(ShipStrategyType.PARTITION_HASH, // min id
cogroup.getInput1().getShipStrategy());
Assert.assertEquals(ShipStrategyType.FORWARD, // solution set
cogroup.getInput2().getShipStrategy());
// test all the local strategies
Assert.assertEquals(LocalStrategy.NONE, sink.getInput().getLocalStrategy());
Assert.assertEquals(LocalStrategy.NONE, iter.getInitialSolutionSetInput().getLocalStrategy());
// the sort for the neighbor join in the first iteration is pushed out of the loop
Assert.assertEquals(LocalStrategy.SORT, iter.getInitialWorksetInput().getLocalStrategy());
Assert.assertEquals(LocalStrategy.NONE, // workset
neighborsJoin.getInput1().getLocalStrategy());
Assert.assertEquals(LocalStrategy.SORT, // edges
neighborsJoin.getInput2().getLocalStrategy());
Assert.assertEquals(LocalStrategy.SORT, cogroup.getInput1().getLocalStrategy());
Assert.assertEquals(LocalStrategy.NONE, // solution set
cogroup.getInput2().getLocalStrategy());
// check the caches
Assert.assertEquals(TempMode.CACHED, neighborsJoin.getInput2().getTempMode());
JobGraphGenerator jgg = new JobGraphGenerator();
jgg.compileJobGraph(optPlan);
}
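For reference, compileNoStats in these tests comes from the optimizer test base; a minimal standalone sketch of the same compilation step, assuming the flink-optimizer classes Optimizer, DataStatistics, and DefaultCostEstimator, could look like this:
// Sketch: compile a Plan into an OptimizedPlan without input statistics,
// mirroring what the test harness's compileNoStats(plan) does.
Plan plan = getConnectedComponentsCoGroupPlan();
plan.setExecutionConfig(new ExecutionConfig());
Optimizer compiler = new Optimizer(new DataStatistics(), new DefaultCostEstimator(), new Configuration());
OptimizedPlan optimizedPlan = compiler.compile(plan);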
Use of org.apache.flink.optimizer.plan.SinkPlanNode in project flink by apache.
From the class PageRankCompilerTest, method testPageRank.
@Test
public void testPageRank() {
try {
final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
// get input data
DataSet<Long> pagesInput = env.fromElements(1L);
@SuppressWarnings("unchecked") DataSet<Tuple2<Long, Long>> linksInput = env.fromElements(new Tuple2<Long, Long>(1L, 2L));
// assign initial rank to pages
DataSet<Tuple2<Long, Double>> pagesWithRanks = pagesInput.map(new RankAssigner((1.0d / 10)));
// build adjacency list from link input
DataSet<Tuple2<Long, Long[]>> adjacencyListInput = linksInput.groupBy(0).reduceGroup(new BuildOutgoingEdgeList());
// set iterative data set
IterativeDataSet<Tuple2<Long, Double>> iteration = pagesWithRanks.iterate(10);
Configuration cfg = new Configuration();
cfg.setString(Optimizer.HINT_LOCAL_STRATEGY, Optimizer.HINT_LOCAL_STRATEGY_HASH_BUILD_SECOND);
DataSet<Tuple2<Long, Double>> newRanks = iteration.join(adjacencyListInput).where(0).equalTo(0).withParameters(cfg).flatMap(new JoinVertexWithEdgesMatch()).groupBy(0).aggregate(SUM, 1).map(new Dampener(0.85, 10));
DataSet<Tuple2<Long, Double>> finalPageRanks = iteration.closeWith(newRanks, newRanks.join(iteration).where(0).equalTo(0).filter(new EpsilonFilter()));
finalPageRanks.output(new DiscardingOutputFormat<Tuple2<Long, Double>>());
// get the plan and compile it
Plan p = env.createProgramPlan();
OptimizedPlan op = compileNoStats(p);
SinkPlanNode sinkPlanNode = (SinkPlanNode) op.getDataSinks().iterator().next();
BulkIterationPlanNode iterPlanNode = (BulkIterationPlanNode) sinkPlanNode.getInput().getSource();
// check that the partitioning is pushed out of the first loop
Assert.assertEquals(ShipStrategyType.PARTITION_HASH, iterPlanNode.getInput().getShipStrategy());
Assert.assertEquals(LocalStrategy.NONE, iterPlanNode.getInput().getLocalStrategy());
BulkPartialSolutionPlanNode partSolPlanNode = iterPlanNode.getPartialSolutionPlanNode();
Assert.assertEquals(ShipStrategyType.FORWARD, partSolPlanNode.getOutgoingChannels().get(0).getShipStrategy());
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
}
}
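The traversal pattern above, starting at the sink and following getInput().getSource() to the producing node, is how these tests navigate an OptimizedPlan. A short sketch of that walk, using only accessors that already appear in this test:
// Sketch: walk from the sink toward the sources of an OptimizedPlan.
// A node's input is a Channel; Channel.getSource() returns the producing PlanNode.
SinkPlanNode sink = op.getDataSinks().iterator().next();
PlanNode producer = sink.getInput().getSource();
if (producer instanceof BulkIterationPlanNode) {
    BulkIterationPlanNode iter = (BulkIterationPlanNode) producer;
    // the strategies on the iteration's input reveal what was pushed outside the loop
    ShipStrategyType ship = iter.getInput().getShipStrategy();
    LocalStrategy local = iter.getInput().getLocalStrategy();
}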
Use of org.apache.flink.optimizer.plan.SinkPlanNode in project flink by apache.
From the class GenericFlatTypePostPass, method traverse.
@SuppressWarnings("unchecked")
protected void traverse(PlanNode node, T parentSchema, boolean createUtilities) {
// distinguish the node types
if (node instanceof SinkPlanNode) {
SinkPlanNode sn = (SinkPlanNode) node;
Channel inchannel = sn.getInput();
T schema = createEmptySchema();
sn.postPassHelper = schema;
// add the sinks information to the schema
try {
getSinkSchema(sn, schema);
} catch (ConflictingFieldTypeInfoException e) {
throw new CompilerPostPassException("Conflicting type infomation for the data sink '" + sn.getSinkNode().getOperator().getName() + "'.");
}
// descend to the input channel
try {
propagateToChannel(schema, inchannel, createUtilities);
} catch (MissingFieldTypeInfoException ex) {
throw new CompilerPostPassException("Missing type infomation for the channel that inputs to the data sink '" + sn.getSinkNode().getOperator().getName() + "'.");
}
} else if (node instanceof SourcePlanNode) {
if (createUtilities) {
((SourcePlanNode) node).setSerializer(createSerializer(parentSchema, node));
// nothing else to be done here. the source has no input and no strategy itself
}
} else if (node instanceof BulkIterationPlanNode) {
BulkIterationPlanNode iterationNode = (BulkIterationPlanNode) node;
// get the nodes current schema
T schema;
if (iterationNode.postPassHelper == null) {
schema = createEmptySchema();
iterationNode.postPassHelper = schema;
} else {
schema = (T) iterationNode.postPassHelper;
}
schema.increaseNumConnectionsThatContributed();
// add the parent schema to the schema
if (propagateParentSchemaDown) {
addSchemaToSchema(parentSchema, schema, iterationNode.getProgramOperator().getName());
}
// if not all outgoing channels have contributed their schema yet, come back later
if (schema.getNumConnectionsThatContributed() < iterationNode.getOutgoingChannels().size()) {
return;
}
if (iterationNode.getRootOfStepFunction() instanceof NAryUnionPlanNode) {
throw new CompilerException("Optimizer cannot compile an iteration step function where next partial solution is created by a Union node.");
}
// traverse the termination criterion for the first time. create schema only, no utilities. Needed in case of intermediate termination criterion
if (iterationNode.getRootOfTerminationCriterion() != null) {
SingleInputPlanNode addMapper = (SingleInputPlanNode) iterationNode.getRootOfTerminationCriterion();
traverse(addMapper.getInput().getSource(), createEmptySchema(), false);
try {
addMapper.getInput().setSerializer(createSerializer(createEmptySchema()));
} catch (MissingFieldTypeInfoException e) {
throw new RuntimeException(e);
}
}
// traverse the step function for the first time. create schema only, no utilities
traverse(iterationNode.getRootOfStepFunction(), schema, false);
T pss = (T) iterationNode.getPartialSolutionPlanNode().postPassHelper;
if (pss == null) {
throw new CompilerException("Error in Optimizer Post Pass: Partial solution schema is null after first traversal of the step function.");
}
// traverse the step function for the second time, taking the schema of the partial solution
traverse(iterationNode.getRootOfStepFunction(), pss, createUtilities);
if (iterationNode.getRootOfTerminationCriterion() != null) {
SingleInputPlanNode addMapper = (SingleInputPlanNode) iterationNode.getRootOfTerminationCriterion();
traverse(addMapper.getInput().getSource(), createEmptySchema(), createUtilities);
try {
addMapper.getInput().setSerializer(createSerializer(createEmptySchema()));
} catch (MissingFieldTypeInfoException e) {
throw new RuntimeException(e);
}
}
// take the schema from the partial solution node and add its fields to the iteration result schema.
// input and output schema need to be identical, so this is essentially a sanity check
addSchemaToSchema(pss, schema, iterationNode.getProgramOperator().getName());
// set the serializer
if (createUtilities) {
iterationNode.setSerializerForIterationChannel(createSerializer(pss, iterationNode.getPartialSolutionPlanNode()));
}
// done, we can now propagate our info down
try {
propagateToChannel(schema, iterationNode.getInput(), createUtilities);
} catch (MissingFieldTypeInfoException e) {
throw new CompilerPostPassException("Could not set up runtime strategy for input channel to node '" + iterationNode.getProgramOperator().getName() + "'. Missing type information for key field " + e.getFieldNumber());
}
} else if (node instanceof WorksetIterationPlanNode) {
WorksetIterationPlanNode iterationNode = (WorksetIterationPlanNode) node;
// get the nodes current schema
T schema;
if (iterationNode.postPassHelper == null) {
schema = createEmptySchema();
iterationNode.postPassHelper = schema;
} else {
schema = (T) iterationNode.postPassHelper;
}
schema.increaseNumConnectionsThatContributed();
// add the parent schema to the schema (which refers to the solution set schema)
if (propagateParentSchemaDown) {
addSchemaToSchema(parentSchema, schema, iterationNode.getProgramOperator().getName());
}
// if not all outgoing channels have contributed their schema yet, come back later
if (schema.getNumConnectionsThatContributed() < iterationNode.getOutgoingChannels().size()) {
return;
}
if (iterationNode.getNextWorkSetPlanNode() instanceof NAryUnionPlanNode) {
throw new CompilerException("Optimizer cannot compile a workset iteration step function where the next workset is produced by a Union node.");
}
if (iterationNode.getSolutionSetDeltaPlanNode() instanceof NAryUnionPlanNode) {
throw new CompilerException("Optimizer cannot compile a workset iteration step function where the solution set delta is produced by a Union node.");
}
// traverse the step function
// pass an empty schema to the next workset and the parent schema to the solution set delta
// these first traversals are schema only
traverse(iterationNode.getNextWorkSetPlanNode(), createEmptySchema(), false);
traverse(iterationNode.getSolutionSetDeltaPlanNode(), schema, false);
T wss = (T) iterationNode.getWorksetPlanNode().postPassHelper;
T sss = (T) iterationNode.getSolutionSetPlanNode().postPassHelper;
if (wss == null) {
throw new CompilerException("Error in Optimizer Post Pass: Workset schema is null after first traversal of the step function.");
}
if (sss == null) {
throw new CompilerException("Error in Optimizer Post Pass: Solution set schema is null after first traversal of the step function.");
}
// make the second pass and instantiate the utilities
traverse(iterationNode.getNextWorkSetPlanNode(), wss, createUtilities);
traverse(iterationNode.getSolutionSetDeltaPlanNode(), sss, createUtilities);
// the solution set input and the result must have the same schema, this acts as a sanity check.
try {
for (Map.Entry<Integer, X> entry : sss) {
Integer pos = entry.getKey();
schema.addType(pos, entry.getValue());
}
} catch (ConflictingFieldTypeInfoException e) {
throw new CompilerPostPassException("Conflicting type information for field " + e.getFieldNumber() + " in node '" + iterationNode.getProgramOperator().getName() + "'. Contradicting types between the " + "result of the iteration and the solution set schema: " + e.getPreviousType() + " and " + e.getNewType() + ". Most probable cause: Invalid constant field annotations.");
}
// set the serializers and comparators
if (createUtilities) {
WorksetIterationNode optNode = iterationNode.getIterationNode();
iterationNode.setWorksetSerializer(createSerializer(wss, iterationNode.getWorksetPlanNode()));
iterationNode.setSolutionSetSerializer(createSerializer(sss, iterationNode.getSolutionSetPlanNode()));
try {
iterationNode.setSolutionSetComparator(createComparator(optNode.getSolutionSetKeyFields(), null, sss));
} catch (MissingFieldTypeInfoException ex) {
throw new CompilerPostPassException("Could not set up the solution set for workset iteration '" + optNode.getOperator().getName() + "'. Missing type information for key field " + ex.getFieldNumber() + '.');
}
}
// done, we can now propagate our info down
try {
propagateToChannel(schema, iterationNode.getInitialSolutionSetInput(), createUtilities);
propagateToChannel(wss, iterationNode.getInitialWorksetInput(), createUtilities);
} catch (MissingFieldTypeInfoException ex) {
throw new CompilerPostPassException("Could not set up runtime strategy for input channel to node '" + iterationNode.getProgramOperator().getName() + "'. Missing type information for key field " + ex.getFieldNumber());
}
} else if (node instanceof SingleInputPlanNode) {
SingleInputPlanNode sn = (SingleInputPlanNode) node;
// get the nodes current schema
T schema;
if (sn.postPassHelper == null) {
schema = createEmptySchema();
sn.postPassHelper = schema;
} else {
schema = (T) sn.postPassHelper;
}
schema.increaseNumConnectionsThatContributed();
SingleInputNode optNode = sn.getSingleInputNode();
// add the parent schema to the schema
if (propagateParentSchemaDown) {
addSchemaToSchema(parentSchema, schema, optNode, 0);
}
// if not all outgoing channels have contributed their schema yet, come back later
if (schema.getNumConnectionsThatContributed() < sn.getOutgoingChannels().size()) {
return;
}
// add the nodes local information
try {
getSingleInputNodeSchema(sn, schema);
} catch (ConflictingFieldTypeInfoException e) {
throw new CompilerPostPassException(getConflictingTypeErrorMessage(e, optNode.getOperator().getName()));
}
if (createUtilities) {
// parameterize the node's driver strategy
for (int i = 0; i < sn.getDriverStrategy().getNumRequiredComparators(); i++) {
try {
sn.setComparator(createComparator(sn.getKeys(i), sn.getSortOrders(i), schema), i);
} catch (MissingFieldTypeInfoException e) {
throw new CompilerPostPassException("Could not set up runtime strategy for node '" + optNode.getOperator().getName() + "'. Missing type information for key field " + e.getFieldNumber());
}
}
}
// done, we can now propagate our info down
try {
propagateToChannel(schema, sn.getInput(), createUtilities);
} catch (MissingFieldTypeInfoException e) {
throw new CompilerPostPassException("Could not set up runtime strategy for input channel to node '" + optNode.getOperator().getName() + "'. Missing type information for field " + e.getFieldNumber());
}
// don't forget the broadcast inputs
for (Channel c : sn.getBroadcastInputs()) {
try {
propagateToChannel(createEmptySchema(), c, createUtilities);
} catch (MissingFieldTypeInfoException e) {
throw new CompilerPostPassException("Could not set up runtime strategy for broadcast channel in node '" + optNode.getOperator().getName() + "'. Missing type information for field " + e.getFieldNumber());
}
}
} else if (node instanceof DualInputPlanNode) {
DualInputPlanNode dn = (DualInputPlanNode) node;
// get the nodes current schema
T schema1;
T schema2;
if (dn.postPassHelper1 == null) {
schema1 = createEmptySchema();
schema2 = createEmptySchema();
dn.postPassHelper1 = schema1;
dn.postPassHelper2 = schema2;
} else {
schema1 = (T) dn.postPassHelper1;
schema2 = (T) dn.postPassHelper2;
}
schema1.increaseNumConnectionsThatContributed();
schema2.increaseNumConnectionsThatContributed();
TwoInputNode optNode = dn.getTwoInputNode();
// add the parent schema to the schema
if (propagateParentSchemaDown) {
addSchemaToSchema(parentSchema, schema1, optNode, 0);
addSchemaToSchema(parentSchema, schema2, optNode, 1);
}
// if not all outgoing channels have contributed their schema yet, come back later
if (schema1.getNumConnectionsThatContributed() < dn.getOutgoingChannels().size()) {
return;
}
// add the nodes local information
try {
getDualInputNodeSchema(dn, schema1, schema2);
} catch (ConflictingFieldTypeInfoException e) {
throw new CompilerPostPassException(getConflictingTypeErrorMessage(e, optNode.getOperator().getName()));
}
// parameterize the node's driver strategy
if (createUtilities) {
if (dn.getDriverStrategy().getNumRequiredComparators() > 0) {
// set the individual comparators
try {
dn.setComparator1(createComparator(dn.getKeysForInput1(), dn.getSortOrders(), schema1));
dn.setComparator2(createComparator(dn.getKeysForInput2(), dn.getSortOrders(), schema2));
} catch (MissingFieldTypeInfoException e) {
throw new CompilerPostPassException("Could not set up runtime strategy for node '" + optNode.getOperator().getName() + "'. Missing type information for field " + e.getFieldNumber());
}
// set the pair comparator
try {
dn.setPairComparator(createPairComparator(dn.getKeysForInput1(), dn.getKeysForInput2(), dn.getSortOrders(), schema1, schema2));
} catch (MissingFieldTypeInfoException e) {
throw new CompilerPostPassException("Could not set up runtime strategy for node '" + optNode.getOperator().getName() + "'. Missing type information for field " + e.getFieldNumber());
}
}
}
// done, we can now propagate our info down
try {
propagateToChannel(schema1, dn.getInput1(), createUtilities);
} catch (MissingFieldTypeInfoException e) {
throw new CompilerPostPassException("Could not set up runtime strategy for the first input channel to node '" + optNode.getOperator().getName() + "'. Missing type information for field " + e.getFieldNumber());
}
try {
propagateToChannel(schema2, dn.getInput2(), createUtilities);
} catch (MissingFieldTypeInfoException e) {
throw new CompilerPostPassException("Could not set up runtime strategy for the second input channel to node '" + optNode.getOperator().getName() + "'. Missing type information for field " + e.getFieldNumber());
}
// don't forget the broadcast inputs
for (Channel c : dn.getBroadcastInputs()) {
try {
propagateToChannel(createEmptySchema(), c, createUtilities);
} catch (MissingFieldTypeInfoException e) {
throw new CompilerPostPassException("Could not set up runtime strategy for broadcast channel in node '" + optNode.getOperator().getName() + "'. Missing type information for field " + e.getFieldNumber());
}
}
} else if (node instanceof NAryUnionPlanNode) {
// only propagate the info down
try {
for (Channel channel : node.getInputs()) {
propagateToChannel(parentSchema, channel, createUtilities);
}
} catch (MissingFieldTypeInfoException ex) {
throw new CompilerPostPassException("Could not set up runtime strategy for the input channel to " + " a union node. Missing type information for field " + ex.getFieldNumber());
}
} else // catch the sources of the iterative step functions
if (node instanceof BulkPartialSolutionPlanNode || node instanceof SolutionSetPlanNode || node instanceof WorksetPlanNode) {
// get the nodes current schema
T schema;
String name;
if (node instanceof BulkPartialSolutionPlanNode) {
BulkPartialSolutionPlanNode psn = (BulkPartialSolutionPlanNode) node;
if (psn.postPassHelper == null) {
schema = createEmptySchema();
psn.postPassHelper = schema;
} else {
schema = (T) psn.postPassHelper;
}
name = "partial solution of bulk iteration '" + psn.getPartialSolutionNode().getIterationNode().getOperator().getName() + "'";
} else if (node instanceof SolutionSetPlanNode) {
SolutionSetPlanNode ssn = (SolutionSetPlanNode) node;
if (ssn.postPassHelper == null) {
schema = createEmptySchema();
ssn.postPassHelper = schema;
} else {
schema = (T) ssn.postPassHelper;
}
name = "solution set of workset iteration '" + ssn.getSolutionSetNode().getIterationNode().getOperator().getName() + "'";
} else if (node instanceof WorksetPlanNode) {
WorksetPlanNode wsn = (WorksetPlanNode) node;
if (wsn.postPassHelper == null) {
schema = createEmptySchema();
wsn.postPassHelper = schema;
} else {
schema = (T) wsn.postPassHelper;
}
name = "workset of workset iteration '" + wsn.getWorksetNode().getIterationNode().getOperator().getName() + "'";
} else {
throw new CompilerException();
}
schema.increaseNumConnectionsThatContributed();
// add the parent schema to the schema
addSchemaToSchema(parentSchema, schema, name);
} else {
throw new CompilerPostPassException("Unknown node type encountered: " + node.getClass().getName());
}
}
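Every stateful branch of traverse follows the same recurring pattern: lazily create a schema in the node's postPassHelper, count how many of the node's outgoing channels have contributed, and only merge local information and descend once every consumer has reported. A condensed sketch of that pattern (assuming, for brevity, a hypothetical node type that exposes postPassHelper, getOutgoingChannels, and getInput the way the concrete plan nodes above do):
// Condensed sketch of the per-node contribution-counting pattern used throughout traverse().
T schema = (node.postPassHelper == null) ? createEmptySchema() : (T) node.postPassHelper;
node.postPassHelper = schema;
schema.increaseNumConnectionsThatContributed();
if (schema.getNumConnectionsThatContributed() < node.getOutgoingChannels().size()) {
    // some consumers have not reported their schema requirements yet; revisit later
    return;
}
// every consumer has contributed: add the node's own schema info, then propagate down
propagateToChannel(schema, node.getInput(), createUtilities);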
Use of org.apache.flink.optimizer.plan.SinkPlanNode in project flink by apache.
From the class PregelCompilerTest, method testPregelCompiler.
@SuppressWarnings("serial")
@Test
public void testPregelCompiler() {
ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(DEFAULT_PARALLELISM);
// compose test program
{
DataSet<Vertex<Long, Long>> initialVertices = env.fromElements(new Tuple2<>(1L, 1L), new Tuple2<>(2L, 2L)).map(new Tuple2ToVertexMap<>());
DataSet<Edge<Long, NullValue>> edges = env.fromElements(new Tuple2<>(1L, 2L)).map(new MapFunction<Tuple2<Long, Long>, Edge<Long, NullValue>>() {
public Edge<Long, NullValue> map(Tuple2<Long, Long> edge) {
return new Edge<>(edge.f0, edge.f1, NullValue.getInstance());
}
});
Graph<Long, Long, NullValue> graph = Graph.fromDataSet(initialVertices, edges, env);
DataSet<Vertex<Long, Long>> result = graph.runVertexCentricIteration(new CCCompute(), null, 100).getVertices();
result.output(new DiscardingOutputFormat<>());
}
Plan p = env.createProgramPlan("Pregel Connected Components");
OptimizedPlan op = compileNoStats(p);
// check the sink
SinkPlanNode sink = op.getDataSinks().iterator().next();
assertEquals(ShipStrategyType.FORWARD, sink.getInput().getShipStrategy());
assertEquals(DEFAULT_PARALLELISM, sink.getParallelism());
// check the iteration
WorksetIterationPlanNode iteration = (WorksetIterationPlanNode) sink.getInput().getSource();
assertEquals(DEFAULT_PARALLELISM, iteration.getParallelism());
// check the solution set delta
PlanNode ssDelta = iteration.getSolutionSetDeltaPlanNode();
assertTrue(ssDelta instanceof SingleInputPlanNode);
SingleInputPlanNode ssFlatMap = (SingleInputPlanNode) ((SingleInputPlanNode) ssDelta).getInput().getSource();
assertEquals(DEFAULT_PARALLELISM, ssFlatMap.getParallelism());
assertEquals(ShipStrategyType.FORWARD, ssFlatMap.getInput().getShipStrategy());
// check the computation coGroup
DualInputPlanNode computationCoGroup = (DualInputPlanNode) ssFlatMap.getInput().getSource();
assertEquals(DEFAULT_PARALLELISM, computationCoGroup.getParallelism());
assertEquals(ShipStrategyType.FORWARD, computationCoGroup.getInput1().getShipStrategy());
assertEquals(ShipStrategyType.PARTITION_HASH, computationCoGroup.getInput2().getShipStrategy());
assertTrue(computationCoGroup.getInput2().getTempMode().isCached());
assertEquals(new FieldList(0), computationCoGroup.getInput2().getShipStrategyKeys());
// check that the initial partitioning is pushed out of the loop
assertEquals(ShipStrategyType.PARTITION_HASH, iteration.getInput1().getShipStrategy());
assertEquals(new FieldList(0), iteration.getInput1().getShipStrategyKeys());
}
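CCCompute is defined elsewhere in PregelCompilerTest and is not shown here. Purely for illustration, a connected-components compute function in Gelly's vertex-centric API could look roughly like this hypothetical sketch (not the test's actual code):
// Hypothetical sketch of a connected-components compute function (not the real CCCompute).
private static final class CCComputeSketch extends ComputeFunction<Long, Long, NullValue, Long> {
    @Override
    public void compute(Vertex<Long, Long> vertex, MessageIterator<Long> messages) {
        long minId = vertex.getValue();
        while (messages.hasNext()) {
            minId = Math.min(minId, messages.next());
        }
        // propagate on the first superstep, or whenever a smaller component id arrives
        if (getSuperstepNumber() == 1 || minId < vertex.getValue()) {
            setNewVertexValue(minId);
            for (Edge<Long, NullValue> edge : getEdges()) {
                sendMessageTo(edge.getTarget(), minId);
            }
        }
    }
}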
Use of org.apache.flink.optimizer.plan.SinkPlanNode in project flink by apache.
From the class PregelCompilerTest, method testPregelWithCombiner.
@SuppressWarnings("serial")
@Test
public void testPregelWithCombiner() {
ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(DEFAULT_PARALLELISM);
// compose test program
{
DataSet<Vertex<Long, Long>> initialVertices = env.fromElements(new Tuple2<>(1L, 1L), new Tuple2<>(2L, 2L)).map(new Tuple2ToVertexMap<>());
DataSet<Edge<Long, NullValue>> edges = env.fromElements(new Tuple2<>(1L, 2L)).map(new MapFunction<Tuple2<Long, Long>, Edge<Long, NullValue>>() {
public Edge<Long, NullValue> map(Tuple2<Long, Long> edge) {
return new Edge<>(edge.f0, edge.f1, NullValue.getInstance());
}
});
Graph<Long, Long, NullValue> graph = Graph.fromDataSet(initialVertices, edges, env);
DataSet<Vertex<Long, Long>> result = graph.runVertexCentricIteration(new CCCompute(), new CCCombiner(), 100).getVertices();
result.output(new DiscardingOutputFormat<>());
}
Plan p = env.createProgramPlan("Pregel Connected Components");
OptimizedPlan op = compileNoStats(p);
// check the sink
SinkPlanNode sink = op.getDataSinks().iterator().next();
assertEquals(ShipStrategyType.FORWARD, sink.getInput().getShipStrategy());
assertEquals(DEFAULT_PARALLELISM, sink.getParallelism());
// check the iteration
WorksetIterationPlanNode iteration = (WorksetIterationPlanNode) sink.getInput().getSource();
assertEquals(DEFAULT_PARALLELISM, iteration.getParallelism());
// check the combiner
SingleInputPlanNode combiner = (SingleInputPlanNode) iteration.getInput2().getSource();
assertEquals(ShipStrategyType.FORWARD, combiner.getInput().getShipStrategy());
// check the solution set delta
PlanNode ssDelta = iteration.getSolutionSetDeltaPlanNode();
assertTrue(ssDelta instanceof SingleInputPlanNode);
SingleInputPlanNode ssFlatMap = (SingleInputPlanNode) ((SingleInputPlanNode) ssDelta).getInput().getSource();
assertEquals(DEFAULT_PARALLELISM, ssFlatMap.getParallelism());
assertEquals(ShipStrategyType.FORWARD, ssFlatMap.getInput().getShipStrategy());
// check the computation coGroup
DualInputPlanNode computationCoGroup = (DualInputPlanNode) ssFlatMap.getInput().getSource();
assertEquals(DEFAULT_PARALLELISM, computationCoGroup.getParallelism());
assertEquals(ShipStrategyType.FORWARD, computationCoGroup.getInput1().getShipStrategy());
assertEquals(ShipStrategyType.PARTITION_HASH, computationCoGroup.getInput2().getShipStrategy());
assertTrue(computationCoGroup.getInput2().getTempMode().isCached());
assertEquals(new FieldList(0), computationCoGroup.getInput2().getShipStrategyKeys());
// check that the initial partitioning is pushed out of the loop
assertEquals(ShipStrategyType.PARTITION_HASH, iteration.getInput1().getShipStrategy());
assertEquals(new FieldList(0), iteration.getInput1().getShipStrategyKeys());
}
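This variant differs from testPregelCompiler only in the combiner node feeding the workset input, which the test reaches via iteration.getInput2() above. For illustration, a min-message combiner in Gelly's API could look roughly like this hypothetical sketch (not the test's actual CCCombiner):
// Hypothetical sketch of a min-combiner (not the real CCCombiner).
private static final class CCCombinerSketch extends MessageCombiner<Long, Long> {
    @Override
    public void combineMessages(MessageIterator<Long> messages) {
        // forward only the smallest candidate component id per target vertex
        long min = Long.MAX_VALUE;
        while (messages.hasNext()) {
            min = Math.min(min, messages.next());
        }
        sendCombinedMessage(min);
    }
}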