Use of org.apache.hadoop.hive.ql.plan.OperatorDesc in project hive by apache.
The class GenSparkWork, method process.
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procContext,
    Object... nodeOutputs) throws SemanticException {
  GenSparkProcContext context = (GenSparkProcContext) procContext;
  Preconditions.checkArgument(context != null,
      "AssertionError: expected context to be not null");
  Preconditions.checkArgument(context.currentTask != null,
      "AssertionError: expected context.currentTask to be not null");
  Preconditions.checkArgument(context.currentRootOperator != null,
      "AssertionError: expected context.currentRootOperator to be not null");
  // Operator is a file sink or reduce sink. Something that forces a new vertex.
  @SuppressWarnings("unchecked")
  Operator<? extends OperatorDesc> operator = (Operator<? extends OperatorDesc>) nd;
  // root is the start of the operator pipeline we're currently
  // packing into a vertex, typically a table scan, union or join
  Operator<?> root = context.currentRootOperator;
  LOG.debug("Root operator: " + root);
  LOG.debug("Leaf operator: " + operator);
  SparkWork sparkWork = context.currentTask.getWork();
  SMBMapJoinOperator smbOp = GenSparkUtils.getChildOperator(root, SMBMapJoinOperator.class);
  // Right now the work graph is pretty simple. If there is no
  // preceding work we have a root and will generate a map
  // vertex. If there is preceding work we will generate
  // a reduce vertex.
  BaseWork work;
  if (context.rootToWorkMap.containsKey(root)) {
    // Having seen the root operator before means there was a branch in the
    // operator graph. There are typically two reasons for that: a) mux/demux,
    // b) multi insert. Mux/Demux will hit the same leaf again, multi insert
    // will result in a vertex with multiple FS or RS operators.
    // At this point we don't have to do anything special in this case. Just
    // run through the regular paces w/o creating a new task.
    work = context.rootToWorkMap.get(root);
  } else {
    // create a new vertex
    if (context.preceedingWork == null) {
      if (smbOp == null) {
        work = utils.createMapWork(context, root, sparkWork, null);
      } else {
        // save work to be initialized later with SMB information.
        work = utils.createMapWork(context, root, sparkWork, null, true);
        context.smbMapJoinCtxMap.get(smbOp).mapWork = (MapWork) work;
      }
    } else {
      work = utils.createReduceWork(context, root, sparkWork);
    }
    context.rootToWorkMap.put(root, work);
  }
  if (!context.childToWorkMap.containsKey(operator)) {
    List<BaseWork> workItems = new LinkedList<BaseWork>();
    workItems.add(work);
    context.childToWorkMap.put(operator, workItems);
  } else {
    context.childToWorkMap.get(operator).add(work);
  }
  // remember which mapjoin operator links with which work
  if (!context.currentMapJoinOperators.isEmpty()) {
    for (MapJoinOperator mj : context.currentMapJoinOperators) {
      LOG.debug("Processing map join: " + mj);
      // remember the mapping in case we scan another branch of the mapjoin later
      if (!context.mapJoinWorkMap.containsKey(mj)) {
        List<BaseWork> workItems = new LinkedList<BaseWork>();
        workItems.add(work);
        context.mapJoinWorkMap.put(mj, workItems);
      } else {
        context.mapJoinWorkMap.get(mj).add(work);
      }
      /*
       * This happens in the case of map join operations.
       * The tree looks like this:
       *
       *        RS <--- we are here perhaps
       *        |
       *     MapJoin
       *     /     \
       *   RS       TS
       *  /
       * TS
       *
       * If we are at the RS pointed to above, we may have already visited the
       * RS following the TS, in which case work has already been generated for
       * the TS-RS branch. We need to hook the current work to this generated work.
       */
      if (context.linkOpWithWorkMap.containsKey(mj)) {
        Map<BaseWork, SparkEdgeProperty> linkWorkMap = context.linkOpWithWorkMap.get(mj);
        if (linkWorkMap != null) {
          if (context.linkChildOpWithDummyOp.containsKey(mj)) {
            for (Operator<?> dummy : context.linkChildOpWithDummyOp.get(mj)) {
              work.addDummyOp((HashTableDummyOperator) dummy);
            }
          }
          for (Entry<BaseWork, SparkEdgeProperty> parentWorkMap : linkWorkMap.entrySet()) {
            BaseWork parentWork = parentWorkMap.getKey();
            LOG.debug("connecting " + parentWork.getName() + " with " + work.getName());
            SparkEdgeProperty edgeProp = parentWorkMap.getValue();
            sparkWork.connect(parentWork, work, edgeProp);
            // need to set up the output name for the reduce sink now that we know the name
            // of the downstream work
            for (ReduceSinkOperator r : context.linkWorkWithReduceSinkMap.get(parentWork)) {
              if (r.getConf().getOutputName() != null) {
                LOG.debug("Cloning reduce sink for multi-child broadcast edge");
                // we've already set this one up. Need to clone for the next work.
                r = (ReduceSinkOperator) OperatorFactory.getAndMakeChild(
                    r.getCompilationOpContext(), (ReduceSinkDesc) r.getConf().clone(),
                    r.getParentOperators());
              }
              r.getConf().setOutputName(work.getName());
            }
          }
        }
      }
    }
    // clear out the set. we don't need it anymore.
    context.currentMapJoinOperators.clear();
  }
  // Here we disconnect root from its parents (reduce sinks). We remember each parent
  // reduce sink together with the edge and reduce work it should feed, since we may
  // reach these parents again via a different path and then need to connect their
  // work with the work associated with this root operator.
  if (root.getNumParent() > 0) {
    Preconditions.checkArgument(work instanceof ReduceWork,
        "AssertionError: expected work to be a ReduceWork, but was "
            + work.getClass().getName());
    ReduceWork reduceWork = (ReduceWork) work;
    for (Operator<?> parent : new ArrayList<Operator<?>>(root.getParentOperators())) {
      Preconditions.checkArgument(parent instanceof ReduceSinkOperator,
          "AssertionError: expected operator to be a ReduceSinkOperator, but was "
              + parent.getClass().getName());
      ReduceSinkOperator rsOp = (ReduceSinkOperator) parent;
      SparkEdgeProperty edgeProp = GenSparkUtils.getEdgeProperty(context.conf, rsOp, reduceWork);
      rsOp.getConf().setOutputName(reduceWork.getName());
      GenMapRedUtils.setKeyAndValueDesc(reduceWork, rsOp);
      context.leafOpToFollowingWorkInfo.put(rsOp, ObjectPair.create(edgeProp, reduceWork));
      LOG.debug("Removing " + parent + " as parent from " + root);
      root.removeParent(parent);
    }
  }
  // If this work contains union operators, remember it so that we can remove
  // the union operators from the operator tree later.
  if (!context.currentUnionOperators.isEmpty()) {
    context.currentUnionOperators.clear();
    context.workWithUnionOperators.add(work);
  }
  // If downstream work was already recorded for this leaf (a reduce sink we saw when
  // cutting the tree above), connect the current work to it here. Also note: the
  // concept of leaf and root is reversed in hive for historical
  // reasons. Roots are data sources, leaves are data sinks. I know.
  if (context.leafOpToFollowingWorkInfo.containsKey(operator)) {
    ObjectPair<SparkEdgeProperty, ReduceWork> childWorkInfo =
        context.leafOpToFollowingWorkInfo.get(operator);
    SparkEdgeProperty edgeProp = childWorkInfo.getFirst();
    ReduceWork childWork = childWorkInfo.getSecond();
    LOG.debug("Second pass. Leaf operator: " + operator + " has common downstream work: " + childWork);
    // we don't want to connect them with the work associated with TS more than once.
    if (sparkWork.getEdgeProperty(work, childWork) == null) {
      sparkWork.connect(work, childWork, edgeProp);
    } else {
      LOG.debug("work " + work.getName() + " is already connected to " + childWork.getName() + " before");
    }
  } else {
    LOG.debug("First pass. Leaf operator: " + operator);
  }
  // No children means we're at the bottom. If there are more operators to scan,
  // the next item will be a new root.
  if (!operator.getChildOperators().isEmpty()) {
    Preconditions.checkArgument(operator.getChildOperators().size() == 1,
        "AssertionError: expected operator.getChildOperators().size() to be 1, but was "
            + operator.getChildOperators().size());
    context.parentOfRoot = operator;
    context.currentRootOperator = operator.getChildOperators().get(0);
    context.preceedingWork = work;
  }
  return null;
}
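For context, GenSparkWork is a NodeProcessor, so it only runs when Hive's rule dispatcher fires it for matching operators during a graph walk. The sketch below shows roughly how such a processor is registered (classes come from org.apache.hadoop.hive.ql.lib and org.apache.hadoop.hive.ql.parse.spark; imports are omitted like the rest of the listing). This is a minimal illustration of the registration pattern, not the exact SparkCompiler code: the rule names are made up, and DefaultGraphWalker stands in for the specialized walker the real compiler uses.

// Sketch only: dispatch GenSparkWork over the operator tree rooted at topNodes.
// Rule names here are illustrative; the real compiler registers more rules and
// uses its own walker, but the wiring pattern is the same.
public static void walkWithGenSparkWork(GenSparkProcContext procCtx,
    ArrayList<Node> topNodes) throws SemanticException {
  Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
  NodeProcessor genSparkWork = new GenSparkWork(GenSparkUtils.getUtils());

  // fire the processor whenever the walk reaches a reduce sink or file sink,
  // i.e. an operator that forces a new vertex
  opRules.put(new RuleRegExp("Split Work - ReduceSink",
      ReduceSinkOperator.getOperatorName() + "%"), genSparkWork);
  opRules.put(new RuleRegExp("Split Work - FileSink",
      FileSinkOperator.getOperatorName() + "%"), genSparkWork);

  Dispatcher disp = new DefaultRuleDispatcher(null, opRules, procCtx);
  GraphWalker walker = new DefaultGraphWalker(disp);
  walker.startWalking(topNodes, null);
}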
Use of org.apache.hadoop.hive.ql.plan.OperatorDesc in project hive by apache.
The class OpProcFactory, method createFilter.
protected static Object createFilter(Operator op,
    Map<String, List<ExprNodeDesc>> predicates, OpWalkerInfo owi) throws SemanticException {
  RowSchema inputRS = op.getSchema();
  // combine all predicates into a single expression
  List<ExprNodeDesc> preds = new ArrayList<ExprNodeDesc>();
  Iterator<List<ExprNodeDesc>> iterator = predicates.values().iterator();
  while (iterator.hasNext()) {
    for (ExprNodeDesc pred : iterator.next()) {
      preds = ExprNodeDescUtils.split(pred, preds);
    }
  }
  if (preds.isEmpty()) {
    return null;
  }
  ExprNodeDesc condn = ExprNodeDescUtils.mergePredicates(preds);
  if (op instanceof TableScanOperator && condn instanceof ExprNodeGenericFuncDesc) {
    boolean pushFilterToStorage;
    HiveConf hiveConf = owi.getParseContext().getConf();
    pushFilterToStorage = hiveConf.getBoolVar(HiveConf.ConfVars.HIVEOPTPPD_STORAGE);
    if (pushFilterToStorage) {
      condn = pushFilterToStorageHandler((TableScanOperator) op,
          (ExprNodeGenericFuncDesc) condn, owi, hiveConf);
      if (condn == null) {
        // we pushed the whole thing down
        return null;
      }
    }
  }
  // add new filter op
  List<Operator<? extends OperatorDesc>> originalChilren = op.getChildOperators();
  op.setChildOperators(null);
  Operator<FilterDesc> output = OperatorFactory.getAndMakeChild(
      new FilterDesc(condn, false), new RowSchema(inputRS.getSignature()), op);
  output.setChildOperators(originalChilren);
  for (Operator<? extends OperatorDesc> ch : originalChilren) {
    List<Operator<? extends OperatorDesc>> parentOperators = ch.getParentOperators();
    int pos = parentOperators.indexOf(op);
    assert pos != -1;
    parentOperators.remove(pos);
    // add the new filter op at the old op's position in the child's parent list
    parentOperators.add(pos, output);
  }
  if (HiveConf.getBoolVar(owi.getParseContext().getConf(),
      HiveConf.ConfVars.HIVEPPDREMOVEDUPLICATEFILTERS)) {
    // remove the candidate filter ops
    removeCandidates(op, owi);
  }
  // push down current ppd context to newly added filter
  ExprWalkerInfo walkerInfo = owi.getPrunedPreds(op);
  if (walkerInfo != null) {
    walkerInfo.getNonFinalCandidates().clear();
    owi.putPrunedPreds(output, walkerInfo);
  }
  return output;
}
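The split/mergePredicates calls above are what collapse the per-alias predicate lists into the single condition handed to FilterDesc. A minimal hedged sketch of that behavior follows; the column and table names are invented for the example, and the snippet simply declares the checked exceptions that getFunctionInfo and newInstance can throw.

// Sketch only: how split() and mergePredicates() combine predicates into one
// AND expression. Names ("t", "a", "b") are made up for illustration.
public static ExprNodeDesc buildCombinedPredicate()
    throws SemanticException, UDFArgumentException {
  ExprNodeDesc colA = new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "a", "t", false);
  ExprNodeDesc colB = new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "b", "t", false);

  // t.a = 1
  ExprNodeDesc pred1 = ExprNodeGenericFuncDesc.newInstance(
      FunctionRegistry.getFunctionInfo("=").getGenericUDF(),
      Arrays.asList(colA, new ExprNodeConstantDesc(1)));
  // t.b > 0
  ExprNodeDesc pred2 = ExprNodeGenericFuncDesc.newInstance(
      FunctionRegistry.getFunctionInfo(">").getGenericUDF(),
      Arrays.asList(colB, new ExprNodeConstantDesc(0)));

  // split() flattens any nested AND chains into the list; mergePredicates()
  // joins the list back into a single AND expression, which in createFilter
  // becomes the FilterDesc condition.
  List<ExprNodeDesc> preds = new ArrayList<ExprNodeDesc>();
  preds = ExprNodeDescUtils.split(pred1, preds);
  preds = ExprNodeDescUtils.split(pred2, preds);
  return ExprNodeDescUtils.mergePredicates(preds); // (t.a = 1) and (t.b > 0)
}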