Use of org.apache.flink.optimizer.CompilerException in project flink by apache.
The class TwoInputNode, method getAlternativePlans.
@Override
public List<PlanNode> getAlternativePlans(CostEstimator estimator) {
// check if we have a cached version
if (this.cachedPlans != null) {
return this.cachedPlans;
}
boolean childrenSkippedDueToReplicatedInput = false;
// step down to all producer nodes and calculate alternative plans
final List<? extends PlanNode> subPlans1 = getFirstPredecessorNode().getAlternativePlans(estimator);
final List<? extends PlanNode> subPlans2 = getSecondPredecessorNode().getAlternativePlans(estimator);
// gather the interesting global properties requested on each input
final Set<RequestedGlobalProperties> intGlobal1 = this.input1.getInterestingProperties().getGlobalProperties();
final Set<RequestedGlobalProperties> intGlobal2 = this.input2.getInterestingProperties().getGlobalProperties();
// calculate alternative sub-plans for broadcast inputs
final List<Set<? extends NamedChannel>> broadcastPlanChannels = new ArrayList<Set<? extends NamedChannel>>();
List<DagConnection> broadcastConnections = getBroadcastConnections();
List<String> broadcastConnectionNames = getBroadcastConnectionNames();
for (int i = 0; i < broadcastConnections.size(); i++) {
DagConnection broadcastConnection = broadcastConnections.get(i);
String broadcastConnectionName = broadcastConnectionNames.get(i);
List<PlanNode> broadcastPlanCandidates = broadcastConnection.getSource().getAlternativePlans(estimator);
// wrap the plan candidates in named channels
HashSet<NamedChannel> broadcastChannels = new HashSet<NamedChannel>(broadcastPlanCandidates.size());
for (PlanNode plan : broadcastPlanCandidates) {
final NamedChannel c = new NamedChannel(broadcastConnectionName, plan);
DataExchangeMode exMode = DataExchangeMode.select(broadcastConnection.getDataExchangeMode(), ShipStrategyType.BROADCAST, broadcastConnection.isBreakingPipeline());
c.setShipStrategy(ShipStrategyType.BROADCAST, exMode);
broadcastChannels.add(c);
}
broadcastPlanChannels.add(broadcastChannels);
}
final GlobalPropertiesPair[] allGlobalPairs;
final LocalPropertiesPair[] allLocalPairs;
{
Set<GlobalPropertiesPair> pairsGlob = new HashSet<GlobalPropertiesPair>();
Set<LocalPropertiesPair> pairsLoc = new HashSet<LocalPropertiesPair>();
for (OperatorDescriptorDual ods : getProperties()) {
pairsGlob.addAll(ods.getPossibleGlobalProperties());
pairsLoc.addAll(ods.getPossibleLocalProperties());
}
allGlobalPairs = pairsGlob.toArray(new GlobalPropertiesPair[pairsGlob.size()]);
allLocalPairs = pairsLoc.toArray(new LocalPropertiesPair[pairsLoc.size()]);
}
final ArrayList<PlanNode> outputPlans = new ArrayList<PlanNode>();
final ExecutionMode input1Mode = this.input1.getDataExchangeMode();
final ExecutionMode input2Mode = this.input2.getDataExchangeMode();
final int parallelism = getParallelism();
final int inParallelism1 = getFirstPredecessorNode().getParallelism();
final int inParallelism2 = getSecondPredecessorNode().getParallelism();
final boolean dopChange1 = parallelism != inParallelism1;
final boolean dopChange2 = parallelism != inParallelism2;
final boolean input1breaksPipeline = this.input1.isBreakingPipeline();
final boolean input2breaksPipeline = this.input2.isBreakingPipeline();
// create all candidates
for (PlanNode child1 : subPlans1) {
if (child1.getGlobalProperties().isFullyReplicated()) {
// fully replicated input is always locally forwarded if parallelism is not changed
if (dopChange1) {
// can not continue with this child
childrenSkippedDueToReplicatedInput = true;
continue;
} else {
this.input1.setShipStrategy(ShipStrategyType.FORWARD);
}
}
for (PlanNode child2 : subPlans2) {
if (child2.getGlobalProperties().isFullyReplicated()) {
// fully replicated input is always locally forwarded if parallelism is not changed
if (dopChange2) {
// can not continue with this child
childrenSkippedDueToReplicatedInput = true;
continue;
} else {
this.input2.setShipStrategy(ShipStrategyType.FORWARD);
}
}
// check that the two candidate sub-plans are branch-compatible before combining them
if (!areBranchCompatible(child1, child2)) {
continue;
}
for (RequestedGlobalProperties igps1 : intGlobal1) {
// create a candidate channel for the first input. mark it cached, if the connection says so
final Channel c1 = new Channel(child1, this.input1.getMaterializationMode());
if (this.input1.getShipStrategy() == null) {
// free to choose the ship strategy
igps1.parameterizeChannel(c1, dopChange1, input1Mode, input1breaksPipeline);
// if the parallelism changed, cancel out the properties, unless the ship strategy preserves/establishes them even under changing parallelism
if (dopChange1 && !c1.getShipStrategy().isNetworkStrategy()) {
c1.getGlobalProperties().reset();
}
} else {
// ship strategy fixed by compiler hint
ShipStrategyType shipType = this.input1.getShipStrategy();
DataExchangeMode exMode = DataExchangeMode.select(input1Mode, shipType, input1breaksPipeline);
if (this.keys1 != null) {
c1.setShipStrategy(shipType, this.keys1.toFieldList(), exMode);
} else {
c1.setShipStrategy(shipType, exMode);
}
if (dopChange1) {
c1.adjustGlobalPropertiesForFullParallelismChange();
}
}
for (RequestedGlobalProperties igps2 : intGlobal2) {
// create a candidate channel for the second input. mark it cached, if the connection says so
final Channel c2 = new Channel(child2, this.input2.getMaterializationMode());
if (this.input2.getShipStrategy() == null) {
// free to choose the ship strategy
igps2.parameterizeChannel(c2, dopChange2, input2Mode, input2breaksPipeline);
// if the parallelism changed, cancel out the properties, unless the ship strategy preserves/establishes them even under changing parallelism
if (dopChange2 && !c2.getShipStrategy().isNetworkStrategy()) {
c2.getGlobalProperties().reset();
}
} else {
// ship strategy fixed by compiler hint
ShipStrategyType shipType = this.input2.getShipStrategy();
DataExchangeMode exMode = DataExchangeMode.select(input2Mode, shipType, input2breaksPipeline);
if (this.keys2 != null) {
c2.setShipStrategy(shipType, this.keys2.toFieldList(), exMode);
} else {
c2.setShipStrategy(shipType, exMode);
}
if (dopChange2) {
c2.adjustGlobalPropertiesForFullParallelismChange();
}
}
outer: for (GlobalPropertiesPair gpp : allGlobalPairs) {
if (gpp.getProperties1().isMetBy(c1.getGlobalProperties()) && gpp.getProperties2().isMetBy(c2.getGlobalProperties())) {
for (OperatorDescriptorDual desc : getProperties()) {
if (desc.areCompatible(gpp.getProperties1(), gpp.getProperties2(), c1.getGlobalProperties(), c2.getGlobalProperties())) {
Channel c1Clone = c1.clone();
c1Clone.setRequiredGlobalProps(gpp.getProperties1());
c2.setRequiredGlobalProps(gpp.getProperties2());
// we found a valid combination, so create the local candidates for it
addLocalCandidates(c1Clone, c2, broadcastPlanChannels, igps1, igps2, outputPlans, allLocalPairs, estimator);
break outer;
}
}
}
}
// the ship strategy of input 2 is fixed by a hint, so every requested global property yields the same channel; we can stop after the first
if (this.input2.getShipStrategy() != null) {
break;
}
}
// the ship strategy of input 1 is fixed by a hint, so every requested global property yields the same channel; we can stop after the first
if (this.input1.getShipStrategy() != null) {
break;
}
}
}
}
if (outputPlans.isEmpty()) {
if (childrenSkippedDueToReplicatedInput) {
throw new CompilerException("No plan meeting the requirements could be created @ " + this + ". Most likely reason: Invalid use of replicated input.");
} else {
throw new CompilerException("No plan meeting the requirements could be created @ " + this + ". Most likely reason: Too restrictive plan hints.");
}
}
// cost and prune the plans
for (PlanNode node : outputPlans) {
estimator.costOperator(node);
}
prunePlanAlternatives(outputPlans);
outputPlans.trimToSize();
this.cachedPlans = outputPlans;
return outputPlans;
}
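The CompilerException thrown above when no candidate plan survives is unchecked and propagates out of plan enumeration. The following is a minimal, hedged sketch of where it typically surfaces for a caller that compiles a batch Plan; the Optimizer, DataStatistics and DefaultCostEstimator constructors and the compile(Plan) signature are assumptions about the flink-optimizer module, not quoted from this page.
import org.apache.flink.api.common.Plan;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.optimizer.CompilerException;
import org.apache.flink.optimizer.DataStatistics;
import org.apache.flink.optimizer.Optimizer;
import org.apache.flink.optimizer.costs.DefaultCostEstimator;
import org.apache.flink.optimizer.plan.OptimizedPlan;

public class PlanCompilationSketch {

    // Compiles a batch Plan into an OptimizedPlan. Methods such as
    // getAlternativePlans() run inside compile(); if no candidate plan can be
    // created, the CompilerException shown above reaches the caller.
    public static OptimizedPlan compileOrFail(Plan plan) {
        // assumed constructor: statistics + cost estimator + configuration
        Optimizer optimizer = new Optimizer(
                new DataStatistics(), new DefaultCostEstimator(), new Configuration());
        try {
            return optimizer.compile(plan);
        } catch (CompilerException e) {
            // e.g. "No plan meeting the requirements could be created @ ... Too restrictive plan hints."
            throw new IllegalStateException("Could not compile the program plan: " + e.getMessage(), e);
        }
    }
}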
Use of org.apache.flink.optimizer.CompilerException in project flink by apache.
The class PlanNode, method setCosts.
/**
* Sets the basic cost for this node to the given value, and sets the cumulative costs
* to those costs plus the cost shares of all inputs (regular and broadcast).
*
* @param nodeCosts The already known costs for this node
* (these costs are produced by a concrete {@code OptimizerNode} subclass).
*/
public void setCosts(Costs nodeCosts) {
// set the node costs
this.nodeCosts = nodeCosts;
// the cumulative costs are the node costs plus the costs of all inputs
this.cumulativeCosts = nodeCosts.clone();
// add all the normal inputs
for (PlanNode pred : getPredecessors()) {
Costs parentCosts = pred.getCumulativeCostsShare();
if (parentCosts != null) {
this.cumulativeCosts.addCosts(parentCosts);
} else {
throw new CompilerException("Trying to set the costs of an operator before the predecessor costs are computed.");
}
}
// add all broadcast variable inputs
if (this.broadcastInputs != null) {
for (NamedChannel nc : this.broadcastInputs) {
Costs bcInputCost = nc.getSource().getCumulativeCostsShare();
if (bcInputCost != null) {
this.cumulativeCosts.addCosts(bcInputCost);
} else {
throw new CompilerException("Trying to set the costs of an operator before the broadcast input costs are computed.");
}
}
}
}
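As a quick illustration of the accumulation rule described in the Javadoc (cumulative costs are the node's own costs plus the cumulative cost share of every input), here is a small sketch using the Costs class; the specific setter, getter, clone and addCosts signatures are assumed from org.apache.flink.optimizer.costs.Costs.
import org.apache.flink.optimizer.costs.Costs;

public class CostAccumulationSketch {
    public static void main(String[] args) {
        // costs attributed to the operator itself
        Costs nodeCosts = new Costs();
        nodeCosts.setCpuCost(100);

        // cumulative cost share reported by one predecessor
        Costs predecessorShare = new Costs();
        predecessorShare.setNetworkCost(500);
        predecessorShare.setCpuCost(40);

        // mirrors PlanNode.setCosts(): start from a clone of the node costs, then add each input share
        Costs cumulative = nodeCosts.clone();
        cumulative.addCosts(predecessorShare);

        // expected (assuming zero-initialized defaults): cpu=140.0, network=500.0
        System.out.println("cpu=" + cumulative.getCpuCost() + ", network=" + cumulative.getNetworkCost());
    }
}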
Use of org.apache.flink.optimizer.CompilerException in project flink by apache.
The class SinkJoinerPlanNode, method getDataSinks.
// --------------------------------------------------------------------------------------------
public void getDataSinks(List<SinkPlanNode> sinks) {
final PlanNode in1 = this.input1.getSource();
final PlanNode in2 = this.input2.getSource();
if (in1 instanceof SinkPlanNode) {
sinks.add((SinkPlanNode) in1);
} else if (in1 instanceof SinkJoinerPlanNode) {
((SinkJoinerPlanNode) in1).getDataSinks(sinks);
} else {
throw new CompilerException("Illegal child node for a sink joiner utility node: Neither Sink nor Sink Joiner");
}
if (in2 instanceof SinkPlanNode) {
sinks.add((SinkPlanNode) in2);
} else if (in2 instanceof SinkJoinerPlanNode) {
((SinkJoinerPlanNode) in2).getDataSinks(sinks);
} else {
throw new CompilerException("Illegal child node for a sink joiner utility node: Neither Sink nor Sink Joiner");
}
}
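getDataSinks flattens the binary joiner tree into a plain list; a caller only passes in a mutable list to collect into, and a CompilerException signals a malformed tree. A minimal usage sketch based only on the signature shown above (the helper name collectSinks is made up for illustration):
import java.util.ArrayList;
import java.util.List;
import org.apache.flink.optimizer.plan.SinkJoinerPlanNode;
import org.apache.flink.optimizer.plan.SinkPlanNode;

public class SinkCollectionSketch {
    // collects all real sinks reachable from a sink-joiner utility node
    static List<SinkPlanNode> collectSinks(SinkJoinerPlanNode root) {
        List<SinkPlanNode> sinks = new ArrayList<>();
        // recurses through nested SinkJoinerPlanNodes and throws CompilerException
        // if a child is neither a sink nor a sink joiner
        root.getDataSinks(sinks);
        return sinks;
    }
}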
Use of org.apache.flink.optimizer.CompilerException in project flink by apache.
The class PlanJSONDumpGenerator, method visit.
private boolean visit(DumpableNode<?> node, PrintWriter writer, boolean first) {
// check for duplicate traversal
if (this.nodeIds.containsKey(node)) {
return false;
}
// assign an id first
this.nodeIds.put(node, this.nodeCnt++);
// then recurse
for (DumpableNode<?> child : node.getPredecessors()) {
// once any child has been emitted, 'first' becomes false for all subsequent nodes
if (visit(child, writer, first)) {
first = false;
}
}
// resolve the optimizer node that corresponds to this dumpable node
final OptimizerNode n = node.getOptimizerNode();
// start a new node and output node id
if (!first) {
writer.print(",\n");
}
// open the node
writer.print("\t{\n");
// recurse if it is an iteration node
if (node instanceof BulkIterationNode || node instanceof BulkIterationPlanNode) {
DumpableNode<?> innerChild = node instanceof BulkIterationNode ? ((BulkIterationNode) node).getNextPartialSolution() : ((BulkIterationPlanNode) node).getRootOfStepFunction();
DumpableNode<?> begin = node instanceof BulkIterationNode ? ((BulkIterationNode) node).getPartialSolution() : ((BulkIterationPlanNode) node).getPartialSolutionPlanNode();
writer.print("\t\t\"step_function\": [\n");
visit(innerChild, writer, true);
writer.print("\n\t\t],\n");
writer.print("\t\t\"partial_solution\": " + this.nodeIds.get(begin) + ",\n");
writer.print("\t\t\"next_partial_solution\": " + this.nodeIds.get(innerChild) + ",\n");
} else if (node instanceof WorksetIterationNode || node instanceof WorksetIterationPlanNode) {
DumpableNode<?> worksetRoot = node instanceof WorksetIterationNode ? ((WorksetIterationNode) node).getNextWorkset() : ((WorksetIterationPlanNode) node).getNextWorkSetPlanNode();
DumpableNode<?> solutionDelta = node instanceof WorksetIterationNode ? ((WorksetIterationNode) node).getSolutionSetDelta() : ((WorksetIterationPlanNode) node).getSolutionSetDeltaPlanNode();
DumpableNode<?> workset = node instanceof WorksetIterationNode ? ((WorksetIterationNode) node).getWorksetNode() : ((WorksetIterationPlanNode) node).getWorksetPlanNode();
DumpableNode<?> solutionSet = node instanceof WorksetIterationNode ? ((WorksetIterationNode) node).getSolutionSetNode() : ((WorksetIterationPlanNode) node).getSolutionSetPlanNode();
writer.print("\t\t\"step_function\": [\n");
visit(worksetRoot, writer, true);
visit(solutionDelta, writer, false);
writer.print("\n\t\t],\n");
writer.print("\t\t\"workset\": " + this.nodeIds.get(workset) + ",\n");
writer.print("\t\t\"solution_set\": " + this.nodeIds.get(solutionSet) + ",\n");
writer.print("\t\t\"next_workset\": " + this.nodeIds.get(worksetRoot) + ",\n");
writer.print("\t\t\"solution_delta\": " + this.nodeIds.get(solutionDelta) + ",\n");
}
// print the id
writer.print("\t\t\"id\": " + this.nodeIds.get(node));
final String type;
String contents;
if (n instanceof DataSinkNode) {
type = "sink";
contents = n.getOperator().toString();
} else if (n instanceof DataSourceNode) {
type = "source";
contents = n.getOperator().toString();
} else if (n instanceof BulkIterationNode) {
type = "bulk_iteration";
contents = n.getOperator().getName();
} else if (n instanceof WorksetIterationNode) {
type = "workset_iteration";
contents = n.getOperator().getName();
} else if (n instanceof BinaryUnionNode) {
type = "pact";
contents = "";
} else {
type = "pact";
contents = n.getOperator().getName();
}
contents = StringUtils.showControlCharacters(contents);
if (encodeForHTML) {
contents = StringEscapeUtils.escapeHtml4(contents);
contents = contents.replace("\\", "&#92;");
}
String name = n.getOperatorName();
if (name.equals("Reduce") && (node instanceof SingleInputPlanNode) && ((SingleInputPlanNode) node).getDriverStrategy() == DriverStrategy.SORTED_GROUP_COMBINE) {
name = "Combine";
}
// output the type identifier
writer.print(",\n\t\t\"type\": \"" + type + "\"");
// output node name
writer.print(",\n\t\t\"pact\": \"" + name + "\"");
// output node contents
writer.print(",\n\t\t\"contents\": \"" + contents + "\"");
// parallelism
writer.print(",\n\t\t\"parallelism\": \"" + (n.getParallelism() >= 1 ? n.getParallelism() : "default") + "\"");
// output node predecessors
Iterator<? extends DumpableConnection<?>> inConns = node.getDumpableInputs().iterator();
String child1name = "", child2name = "";
if (inConns != null && inConns.hasNext()) {
// start predecessor list
writer.print(",\n\t\t\"predecessors\": [");
int inputNum = 0;
while (inConns.hasNext()) {
final DumpableConnection<?> inConn = inConns.next();
final DumpableNode<?> source = inConn.getSource();
writer.print(inputNum == 0 ? "\n" : ",\n");
if (inputNum == 0) {
child1name += child1name.length() > 0 ? ", " : "";
child1name += source.getOptimizerNode().getOperator().getName() + " (id: " + this.nodeIds.get(source) + ")";
} else if (inputNum == 1) {
child2name += child2name.length() > 0 ? ", " : "";
child2name += source.getOptimizerNode().getOperator().getName() + " (id: " + this.nodeIds.get(source) + ")";
}
// output predecessor id
writer.print("\t\t\t{\"id\": " + this.nodeIds.get(source));
// output connection side
if (inConns.hasNext() || inputNum > 0) {
writer.print(", \"side\": \"" + (inputNum == 0 ? "first" : "second") + "\"");
}
// output shipping strategy and channel type
final Channel channel = (inConn instanceof Channel) ? (Channel) inConn : null;
final ShipStrategyType shipType = channel != null ? channel.getShipStrategy() : inConn.getShipStrategy();
String shipStrategy = null;
if (shipType != null) {
switch(shipType) {
case NONE:
// nothing
break;
case FORWARD:
shipStrategy = "Forward";
break;
case BROADCAST:
shipStrategy = "Broadcast";
break;
case PARTITION_HASH:
shipStrategy = "Hash Partition";
break;
case PARTITION_RANGE:
shipStrategy = "Range Partition";
break;
case PARTITION_RANDOM:
shipStrategy = "Redistribute";
break;
case PARTITION_FORCED_REBALANCE:
shipStrategy = "Rebalance";
break;
case PARTITION_CUSTOM:
shipStrategy = "Custom Partition";
break;
default:
throw new CompilerException("Unknown ship strategy '" + inConn.getShipStrategy().name() + "' in JSON generator.");
}
}
if (channel != null && channel.getShipStrategyKeys() != null && channel.getShipStrategyKeys().size() > 0) {
shipStrategy += " on " + (channel.getShipStrategySortOrder() == null ? channel.getShipStrategyKeys().toString() : Utils.createOrdering(channel.getShipStrategyKeys(), channel.getShipStrategySortOrder()).toString());
}
if (shipStrategy != null) {
writer.print(", \"ship_strategy\": \"" + shipStrategy + "\"");
}
if (channel != null) {
String localStrategy = null;
switch(channel.getLocalStrategy()) {
case NONE:
break;
case SORT:
localStrategy = "Sort";
break;
case COMBININGSORT:
localStrategy = "Sort (combining)";
break;
default:
throw new CompilerException("Unknown local strategy " + channel.getLocalStrategy().name());
}
if (channel != null && channel.getLocalStrategyKeys() != null && channel.getLocalStrategyKeys().size() > 0) {
localStrategy += " on " + (channel.getLocalStrategySortOrder() == null ? channel.getLocalStrategyKeys().toString() : Utils.createOrdering(channel.getLocalStrategyKeys(), channel.getLocalStrategySortOrder()).toString());
}
if (localStrategy != null) {
writer.print(", \"local_strategy\": \"" + localStrategy + "\"");
}
if (channel != null && channel.getTempMode() != TempMode.NONE) {
String tempMode = channel.getTempMode().toString();
writer.print(", \"temp_mode\": \"" + tempMode + "\"");
}
if (channel != null) {
String exchangeMode = channel.getDataExchangeMode().toString();
writer.print(", \"exchange_mode\": \"" + exchangeMode + "\"");
}
}
writer.print('}');
inputNum++;
}
// finish predecessors
writer.print("\n\t\t]");
}
//---------------------------------------------------------------------------------------
// the part below here is relevant only to plan nodes with concrete strategies, etc
//---------------------------------------------------------------------------------------
final PlanNode p = node.getPlanNode();
if (p == null) {
// finish node
writer.print("\n\t}");
return true;
}
// local strategy
String locString = null;
if (p.getDriverStrategy() != null) {
switch(p.getDriverStrategy()) {
case NONE:
case BINARY_NO_OP:
break;
case UNARY_NO_OP:
locString = "No-Op";
break;
case MAP:
locString = "Map";
break;
case FLAT_MAP:
locString = "FlatMap";
break;
case MAP_PARTITION:
locString = "Map Partition";
break;
case ALL_REDUCE:
locString = "Reduce All";
break;
case ALL_GROUP_REDUCE:
case ALL_GROUP_REDUCE_COMBINE:
locString = "Group Reduce All";
break;
case SORTED_REDUCE:
locString = "Sorted Reduce";
break;
case SORTED_PARTIAL_REDUCE:
locString = "Sorted Combine/Reduce";
break;
case SORTED_GROUP_REDUCE:
locString = "Sorted Group Reduce";
break;
case SORTED_GROUP_COMBINE:
locString = "Sorted Combine";
break;
case HYBRIDHASH_BUILD_FIRST:
locString = "Hybrid Hash (build: " + child1name + ")";
break;
case HYBRIDHASH_BUILD_SECOND:
locString = "Hybrid Hash (build: " + child2name + ")";
break;
case HYBRIDHASH_BUILD_FIRST_CACHED:
locString = "Hybrid Hash (CACHED) (build: " + child1name + ")";
break;
case HYBRIDHASH_BUILD_SECOND_CACHED:
locString = "Hybrid Hash (CACHED) (build: " + child2name + ")";
break;
case NESTEDLOOP_BLOCKED_OUTER_FIRST:
locString = "Nested Loops (Blocked Outer: " + child1name + ")";
break;
case NESTEDLOOP_BLOCKED_OUTER_SECOND:
locString = "Nested Loops (Blocked Outer: " + child2name + ")";
break;
case NESTEDLOOP_STREAMED_OUTER_FIRST:
locString = "Nested Loops (Streamed Outer: " + child1name + ")";
break;
case NESTEDLOOP_STREAMED_OUTER_SECOND:
locString = "Nested Loops (Streamed Outer: " + child2name + ")";
break;
case INNER_MERGE:
locString = "Merge";
break;
case CO_GROUP:
locString = "Co-Group";
break;
default:
locString = p.getDriverStrategy().name();
break;
}
if (locString != null) {
writer.print(",\n\t\t\"driver_strategy\": \"");
writer.print(locString);
writer.print("\"");
}
}
{
// output node global properties
final GlobalProperties gp = p.getGlobalProperties();
writer.print(",\n\t\t\"global_properties\": [\n");
addProperty(writer, "Partitioning", gp.getPartitioning().name(), true);
if (gp.getPartitioningFields() != null) {
addProperty(writer, "Partitioned on", gp.getPartitioningFields().toString(), false);
}
if (gp.getPartitioningOrdering() != null) {
addProperty(writer, "Partitioning Order", gp.getPartitioningOrdering().toString(), false);
} else {
addProperty(writer, "Partitioning Order", "(none)", false);
}
if (n.getUniqueFields() == null || n.getUniqueFields().size() == 0) {
addProperty(writer, "Uniqueness", "not unique", false);
} else {
addProperty(writer, "Uniqueness", n.getUniqueFields().toString(), false);
}
writer.print("\n\t\t]");
}
{
// output node local properties
LocalProperties lp = p.getLocalProperties();
writer.print(",\n\t\t\"local_properties\": [\n");
if (lp.getOrdering() != null) {
addProperty(writer, "Order", lp.getOrdering().toString(), true);
} else {
addProperty(writer, "Order", "(none)", true);
}
if (lp.getGroupedFields() != null && lp.getGroupedFields().size() > 0) {
addProperty(writer, "Grouped on", lp.getGroupedFields().toString(), false);
} else {
addProperty(writer, "Grouping", "not grouped", false);
}
if (n.getUniqueFields() == null || n.getUniqueFields().size() == 0) {
addProperty(writer, "Uniqueness", "not unique", false);
} else {
addProperty(writer, "Uniqueness", n.getUniqueFields().toString(), false);
}
writer.print("\n\t\t]");
}
// output node size estimates
writer.print(",\n\t\t\"estimates\": [\n");
addProperty(writer, "Est. Output Size", n.getEstimatedOutputSize() == -1 ? "(unknown)" : formatNumber(n.getEstimatedOutputSize(), "B"), true);
addProperty(writer, "Est. Cardinality", n.getEstimatedNumRecords() == -1 ? "(unknown)" : formatNumber(n.getEstimatedNumRecords()), false);
writer.print("\t\t]");
// output node cost
if (p.getNodeCosts() != null) {
writer.print(",\n\t\t\"costs\": [\n");
addProperty(writer, "Network", p.getNodeCosts().getNetworkCost() == -1 ? "(unknown)" : formatNumber(p.getNodeCosts().getNetworkCost(), "B"), true);
addProperty(writer, "Disk I/O", p.getNodeCosts().getDiskCost() == -1 ? "(unknown)" : formatNumber(p.getNodeCosts().getDiskCost(), "B"), false);
addProperty(writer, "CPU", p.getNodeCosts().getCpuCost() == -1 ? "(unknown)" : formatNumber(p.getNodeCosts().getCpuCost(), ""), false);
addProperty(writer, "Cumulative Network", p.getCumulativeCosts().getNetworkCost() == -1 ? "(unknown)" : formatNumber(p.getCumulativeCosts().getNetworkCost(), "B"), false);
addProperty(writer, "Cumulative Disk I/O", p.getCumulativeCosts().getDiskCost() == -1 ? "(unknown)" : formatNumber(p.getCumulativeCosts().getDiskCost(), "B"), false);
addProperty(writer, "Cumulative CPU", p.getCumulativeCosts().getCpuCost() == -1 ? "(unknown)" : formatNumber(p.getCumulativeCosts().getCpuCost(), ""), false);
writer.print("\n\t\t]");
}
// output the node compiler hints
if (n.getOperator().getCompilerHints() != null) {
CompilerHints hints = n.getOperator().getCompilerHints();
CompilerHints defaults = new CompilerHints();
String size = hints.getOutputSize() == defaults.getOutputSize() ? "(none)" : String.valueOf(hints.getOutputSize());
String card = hints.getOutputCardinality() == defaults.getOutputCardinality() ? "(none)" : String.valueOf(hints.getOutputCardinality());
String width = hints.getAvgOutputRecordSize() == defaults.getAvgOutputRecordSize() ? "(none)" : String.valueOf(hints.getAvgOutputRecordSize());
String filter = hints.getFilterFactor() == defaults.getFilterFactor() ? "(none)" : String.valueOf(hints.getFilterFactor());
writer.print(",\n\t\t\"compiler_hints\": [\n");
addProperty(writer, "Output Size (bytes)", size, true);
addProperty(writer, "Output Cardinality", card, false);
addProperty(writer, "Avg. Output Record Size (bytes)", width, false);
addProperty(writer, "Filter Factor", filter, false);
writer.print("\t\t]");
}
// finish node
writer.print("\n\t}");
return true;
}
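The visit method above is the recursive core of the JSON dump; callers normally go through the generator's public entry points. A hedged usage sketch follows, assuming that getOptimizerPlanAsJSON(OptimizedPlan) and setEncodeForHTML(boolean) exist on PlanJSONDumpGenerator, as the use of the encodeForHTML flag in visit() suggests.
import org.apache.flink.optimizer.plan.OptimizedPlan;
import org.apache.flink.optimizer.plandump.PlanJSONDumpGenerator;

public class PlanDumpSketch {
    public static String dumpAsJson(OptimizedPlan optimizedPlan, boolean forHtml) {
        PlanJSONDumpGenerator dumper = new PlanJSONDumpGenerator();
        // when true, node contents are HTML-escaped (see the escapeHtml4 call in visit())
        dumper.setEncodeForHTML(forHtml);
        return dumper.getOptimizerPlanAsJSON(optimizedPlan);
    }
}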
Use of org.apache.flink.optimizer.CompilerException in project flink by apache.
The class JobGraphGenerator, method postVisit.
/**
* This method implements the post-visit during the depth-first traversal. When the post visit happens,
* all of the descendants have been processed, so this method connects all of the current node's
* predecessors to the current node.
*
* @param node
* The node currently processed during the post-visit.
* @see org.apache.flink.util.Visitor#postVisit(org.apache.flink.util.Visitable)
*/
@Override
public void postVisit(PlanNode node) {
try {
// solution sets have no input. the initial solution set input is connected when the iteration node is in its postVisit
if (node instanceof SourcePlanNode || node instanceof NAryUnionPlanNode || node instanceof SolutionSetPlanNode) {
return;
}
// check if we have an iteration. in that case, translate the step function now
if (node instanceof IterationPlanNode) {
// prevent nested iterations
if (node.isOnDynamicPath()) {
throw new CompilerException("Nested Iterations are not possible at the moment!");
}
// if we recursively go into an iteration (because the constant path of one iteration contains another one), we push the current one onto the stack
if (this.currentIteration != null) {
this.iterationStack.add(this.currentIteration);
}
this.currentIteration = (IterationPlanNode) node;
this.currentIteration.acceptForStepFunction(this);
// pop the current iteration from the stack
if (this.iterationStack.isEmpty()) {
this.currentIteration = null;
} else {
this.currentIteration = this.iterationStack.remove(this.iterationStack.size() - 1);
}
// connect the initial solution set now.
if (node instanceof WorksetIterationPlanNode) {
// connect the initial solution set
WorksetIterationPlanNode wsNode = (WorksetIterationPlanNode) node;
JobVertex headVertex = this.iterations.get(wsNode).getHeadTask();
TaskConfig headConfig = new TaskConfig(headVertex.getConfiguration());
int inputIndex = headConfig.getDriverStrategy().getNumInputs();
headConfig.setIterationHeadSolutionSetInputIndex(inputIndex);
translateChannel(wsNode.getInitialSolutionSetInput(), inputIndex, headVertex, headConfig, false);
}
return;
}
final JobVertex targetVertex = this.vertices.get(node);
// check whether this node has its own task, or is merged with another one
if (targetVertex == null) {
// node's task is merged with another task. it is either chained, or a merged head vertex
// from an iteration
final TaskInChain chainedTask;
if ((chainedTask = this.chainedTasks.get(node)) != null) {
// Chained Task. Sanity check first...
final Iterator<Channel> inConns = node.getInputs().iterator();
if (!inConns.hasNext()) {
throw new CompilerException("Bug: Found chained task with no input.");
}
final Channel inConn = inConns.next();
if (inConns.hasNext()) {
throw new CompilerException("Bug: Found a chained task with more than one input!");
}
if (inConn.getLocalStrategy() != null && inConn.getLocalStrategy() != LocalStrategy.NONE) {
throw new CompilerException("Bug: Found a chained task with an input local strategy.");
}
if (inConn.getShipStrategy() != null && inConn.getShipStrategy() != ShipStrategyType.FORWARD) {
throw new CompilerException("Bug: Found a chained task with an input ship strategy other than FORWARD.");
}
JobVertex container = chainedTask.getContainingVertex();
if (container == null) {
final PlanNode sourceNode = inConn.getSource();
container = this.vertices.get(sourceNode);
if (container == null) {
// predecessor is itself chained
container = this.chainedTasks.get(sourceNode).getContainingVertex();
if (container == null) {
throw new IllegalStateException("Bug: Chained task predecessor has not been assigned its containing vertex.");
}
} else {
// predecessor is a proper task job vertex and this is the first chained task. add a forward connection entry.
new TaskConfig(container.getConfiguration()).addOutputShipStrategy(ShipStrategyType.FORWARD);
}
chainedTask.setContainingVertex(container);
}
// add info about the input serializer type
chainedTask.getTaskConfig().setInputSerializer(inConn.getSerializer(), 0);
// update name of container task
String containerTaskName = container.getName();
if (containerTaskName.startsWith("CHAIN ")) {
container.setName(containerTaskName + " -> " + chainedTask.getTaskName());
} else {
container.setName("CHAIN " + containerTaskName + " -> " + chainedTask.getTaskName());
}
//update resource of container task
container.setResources(container.getMinResources().merge(node.getMinResources()), container.getPreferredResources().merge(node.getPreferredResources()));
this.chainedTasksInSequence.add(chainedTask);
return;
} else if (node instanceof BulkPartialSolutionPlanNode || node instanceof WorksetPlanNode) {
// merged iteration head task. the task that the head is merged with will take care of it
return;
} else {
throw new CompilerException("Bug: Unrecognized merged task vertex.");
}
}
if (this.currentIteration != null) {
JobVertex head = this.iterations.get(this.currentIteration).getHeadTask();
// exclude static code paths from the co-location constraint, because otherwise their execution determines the deployment slots of the co-location group
if (node.isOnDynamicPath()) {
targetVertex.setStrictlyCoLocatedWith(head);
}
}
// create the config that will contain all the description of the inputs
final TaskConfig targetVertexConfig = new TaskConfig(targetVertex.getConfiguration());
// get the inputs. if this node is the head of an iteration, we obtain the inputs from the
// enclosing iteration node, because the inputs are the initial inputs to the iteration.
final Iterator<Channel> inConns;
if (node instanceof BulkPartialSolutionPlanNode) {
inConns = ((BulkPartialSolutionPlanNode) node).getContainingIterationNode().getInputs().iterator();
// because the partial solution has its own vertex, it has only one (logical) input.
// note this in the task configuration
targetVertexConfig.setIterationHeadPartialSolutionOrWorksetInputIndex(0);
} else if (node instanceof WorksetPlanNode) {
WorksetPlanNode wspn = (WorksetPlanNode) node;
// input that is the initial workset
inConns = Collections.singleton(wspn.getContainingIterationNode().getInput2()).iterator();
// because we have a stand-alone (non-merged) workset iteration head, the initial workset will
// be input 0 and the solution set will be input 1
targetVertexConfig.setIterationHeadPartialSolutionOrWorksetInputIndex(0);
targetVertexConfig.setIterationHeadSolutionSetInputIndex(1);
} else {
inConns = node.getInputs().iterator();
}
if (!inConns.hasNext()) {
throw new CompilerException("Bug: Found a non-source task with no input.");
}
int inputIndex = 0;
while (inConns.hasNext()) {
Channel input = inConns.next();
inputIndex += translateChannel(input, inputIndex, targetVertex, targetVertexConfig, false);
}
// broadcast variables
int broadcastInputIndex = 0;
for (NamedChannel broadcastInput : node.getBroadcastInputs()) {
int broadcastInputIndexDelta = translateChannel(broadcastInput, broadcastInputIndex, targetVertex, targetVertexConfig, true);
targetVertexConfig.setBroadcastInputName(broadcastInput.getName(), broadcastInputIndex);
targetVertexConfig.setBroadcastInputSerializer(broadcastInput.getSerializer(), broadcastInputIndex);
broadcastInputIndex += broadcastInputIndexDelta;
}
} catch (Exception e) {
throw new CompilerException("An error occurred while translating the optimized plan to a JobGraph: " + e.getMessage(), e);
}
}
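postVisit is invoked while JobGraphGenerator walks an OptimizedPlan, and any failure inside it is rethrown as the CompilerException at the end of the method. The following is a short, hedged sketch of the surrounding translation step; it assumes the compileJobGraph(OptimizedPlan) entry point of org.apache.flink.optimizer.plantranslate.JobGraphGenerator.
import org.apache.flink.optimizer.CompilerException;
import org.apache.flink.optimizer.plan.OptimizedPlan;
import org.apache.flink.optimizer.plantranslate.JobGraphGenerator;
import org.apache.flink.runtime.jobgraph.JobGraph;

public class JobGraphTranslationSketch {
    public static JobGraph translate(OptimizedPlan optimizedPlan) {
        try {
            // drives preVisit/postVisit over all plan nodes and emits the JobGraph
            return new JobGraphGenerator().compileJobGraph(optimizedPlan);
        } catch (CompilerException e) {
            // "An error occurred while translating the optimized plan to a JobGraph: ..."
            throw new IllegalStateException("Plan translation failed", e);
        }
    }
}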