Use of org.apache.hadoop.hive.ql.exec.TableScanOperator in project hive by apache.
The class GenMapRedUtils, method setUnionPlan.
private static void setUnionPlan(GenMRProcContext opProcCtx, boolean local,
    Task<? extends Serializable> currTask, GenMRUnionCtx uCtx, boolean mergeTask)
    throws SemanticException {
  TableScanOperator currTopOp = opProcCtx.getCurrTopOp();
  if (currTopOp != null) {
    String currAliasId = opProcCtx.getCurrAliasId();
    if (mergeTask || !opProcCtx.isSeenOp(currTask, currTopOp)) {
      setTaskPlan(currAliasId, currTopOp, currTask, local, opProcCtx);
    }
    currTopOp = null;
    opProcCtx.setCurrTopOp(currTopOp);
  } else {
    List<String> taskTmpDirLst = uCtx.getTaskTmpDir();
    if ((taskTmpDirLst != null) && !(taskTmpDirLst.isEmpty())) {
      List<TableDesc> tt_descLst = uCtx.getTTDesc();
      assert !taskTmpDirLst.isEmpty() && !tt_descLst.isEmpty();
      assert taskTmpDirLst.size() == tt_descLst.size();
      int size = taskTmpDirLst.size();
      assert local == false;
      List<TableScanOperator> topOperators = uCtx.getListTopOperators();
      MapredWork plan = (MapredWork) currTask.getWork();
      for (int pos = 0; pos < size; pos++) {
        String taskTmpDir = taskTmpDirLst.get(pos);
        Path taskTmpDirPath = new Path(taskTmpDir);
        MapWork mWork = plan.getMapWork();
        if (!mWork.getPathToAliases().containsKey(taskTmpDirPath)) {
          taskTmpDir = taskTmpDir.intern();
          StringInternUtils.internUriStringsInPath(taskTmpDirPath);
          TableDesc tt_desc = tt_descLst.get(pos);
          mWork.addPathToAlias(taskTmpDirPath, taskTmpDir);
          mWork.addPathToPartitionInfo(taskTmpDirPath, new PartitionDesc(tt_desc, null));
          mWork.getAliasToWork().put(taskTmpDir, topOperators.get(pos));
        }
      }
    }
  }
}
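For orientation, the registration pattern in the else-branch above is the heart of how union branches get attached to a map task: each temporary directory is mapped to an alias, a PartitionDesc, and an operator tree. A minimal sketch of that wiring, using only the MapWork calls shown above (registerUnionBranch is an illustrative name, not a Hive method):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.plan.MapWork;
import org.apache.hadoop.hive.ql.plan.PartitionDesc;
import org.apache.hadoop.hive.ql.plan.TableDesc;

// Illustrative helper (not in Hive): register one union branch's tmp dir in a
// MapWork, mirroring the loop body of setUnionPlan above.
static void registerUnionBranch(MapWork mWork, String tmpDir, TableDesc ttDesc,
    TableScanOperator topOp) {
  Path tmpPath = new Path(tmpDir);
  if (!mWork.getPathToAliases().containsKey(tmpPath)) {
    mWork.addPathToAlias(tmpPath, tmpDir);                       // path -> alias
    mWork.addPathToPartitionInfo(tmpPath, new PartitionDesc(ttDesc, null));
    mWork.getAliasToWork().put(tmpDir, topOp);                   // alias -> operator tree
  }
}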
Use of org.apache.hadoop.hive.ql.exec.TableScanOperator in project hive by apache.
The class GenMapRedUtils, method setTaskPlan.
/**
 * Set the current task in the MapWork.
 *
 * @param path
 *          current path being processed
 * @param alias
 *          current alias
 * @param topOp
 *          the top operator of the stack
 * @param plan
 *          current plan
 * @param local
 *          whether you need to add to map-reduce or local work
 * @param tt_desc
 *          table descriptor
 * @throws SemanticException
 */
public static void setTaskPlan(Path path, String alias,
    Operator<? extends OperatorDesc> topOp, MapWork plan, boolean local,
    TableDesc tt_desc) throws SemanticException {
  if (path == null || alias == null) {
    return;
  }
  if (topOp instanceof TableScanOperator) {
    try {
      Utilities.addSchemaEvolutionToTableScanOperator(
          (StructObjectInspector) tt_desc.getDeserializer().getObjectInspector(),
          (TableScanOperator) topOp);
    } catch (Exception e) {
      throw new SemanticException(e);
    }
  }
  if (!local) {
    plan.addPathToAlias(path, alias);
    plan.addPathToPartitionInfo(path, new PartitionDesc(tt_desc, null));
    plan.getAliasToWork().put(alias, topOp);
  } else {
    // populate local work if needed
    MapredLocalWork localPlan = plan.getMapRedLocalWork();
    if (localPlan == null) {
      localPlan = new MapredLocalWork(
          new LinkedHashMap<String, Operator<? extends OperatorDesc>>(),
          new LinkedHashMap<String, FetchWork>());
    }
    assert localPlan.getAliasToWork().get(alias) == null;
    assert localPlan.getAliasToFetchWork().get(alias) == null;
    localPlan.getAliasToWork().put(alias, topOp);
    localPlan.getAliasToFetchWork().put(alias, new FetchWork(new Path(alias), tt_desc));
    plan.setMapRedLocalWork(localPlan);
  }
}
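A hedged sketch of how this overload might be driven from plan-generation code, using only the calls shown above (planForTmpDir, scanOp, and ttDesc are illustrative names; MapWork's no-arg constructor is assumed):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.MapWork;
import org.apache.hadoop.hive.ql.plan.TableDesc;

// Illustrative wiring of a scratch directory into a fresh map-only plan;
// scanOp and ttDesc would come from the surrounding plan-generation context.
static MapWork planForTmpDir(String tmpDir, TableScanOperator scanOp, TableDesc ttDesc)
    throws SemanticException {
  MapWork mapWork = new MapWork();            // assumes the no-arg constructor
  Path tmpPath = new Path(tmpDir);
  // Non-local: registers path -> alias, the PartitionDesc built from ttDesc,
  // and alias -> scanOp on the plan.
  GenMapRedUtils.setTaskPlan(tmpPath, tmpPath.toString(), scanOp, mapWork, false, ttDesc);
  return mapWork;
}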
Use of org.apache.hadoop.hive.ql.exec.TableScanOperator in project hive by apache.
The class GlobalLimitOptimizer, method checkQbpForGlobalLimit.
/**
 * Check the limit number in all sub queries.
 *
 * @return the LimitOperator if there is one and only one limit for all
 *         subqueries; null otherwise (including when there is no limit)
 */
private static LimitOperator checkQbpForGlobalLimit(TableScanOperator ts) {
  Set<Class<? extends Operator<?>>> searchedClasses =
      new ImmutableSet.Builder<Class<? extends Operator<?>>>()
          .add(ReduceSinkOperator.class)
          .add(GroupByOperator.class)
          .add(FilterOperator.class)
          .add(LimitOperator.class)
          .build();
  Multimap<Class<? extends Operator<?>>, Operator<?>> ops =
      OperatorUtils.classifyOperators(ts, searchedClasses);
  // existsOrdering AND existsPartitioning should be false.
  for (Operator<?> op : ops.get(ReduceSinkOperator.class)) {
    ReduceSinkDesc reduceSinkConf = ((ReduceSinkOperator) op).getConf();
    if (reduceSinkConf.isOrdering() || reduceSinkConf.isPartitioning()) {
      return null;
    }
  }
  // - There cannot exist any (distinct) aggregate.
  for (Operator<?> op : ops.get(GroupByOperator.class)) {
    GroupByDesc groupByConf = ((GroupByOperator) op).getConf();
    if (groupByConf.isAggregate() || groupByConf.isDistinct()) {
      return null;
    }
  }
  // - There cannot exist any sampling predicate.
  for (Operator<?> op : ops.get(FilterOperator.class)) {
    FilterDesc filterConf = ((FilterOperator) op).getConf();
    if (filterConf.getIsSamplingPred()) {
      return null;
    }
  }
  // If there is one and only one limit starting at ts, return it;
  // otherwise (zero or several limits) return null.
  Collection<Operator<?>> limitOps = ops.get(LimitOperator.class);
  if (limitOps.size() == 1) {
    return (LimitOperator) limitOps.iterator().next();
  }
  return null;
}
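The checks above mean that only "pass-through" plans qualify: a plain SELECT * FROM src LIMIT 10 would return its LimitOperator, while a query with ORDER BY, a (distinct) aggregation, or a sampling filter returns null. A hedged sketch of how a caller inside GlobalLimitOptimizer might consume the result (tsOp is an illustrative TableScanOperator; LimitDesc#getLimit() is assumed to expose the limit value):

// Illustrative caller: enable the global-limit shortcut only when a single
// qualifying LimitOperator is found under the scan.
LimitOperator limit = checkQbpForGlobalLimit(tsOp);
if (limit != null) {
  int fetchSize = limit.getConf().getLimit();   // assumes LimitDesc#getLimit()
  // ... safe to read only about fetchSize rows from tsOp's input paths ...
}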
Use of org.apache.hadoop.hive.ql.exec.TableScanOperator in project hive by apache.
The class SharedWorkOptimizer, method splitTableScanOpsByTable.
private static Multimap<String, TableScanOperator> splitTableScanOpsByTable(ParseContext pctx) {
  Multimap<String, TableScanOperator> tableNameToOps = ArrayListMultimap.create();
  // Sort by operator ID so we get deterministic results
  Map<String, TableScanOperator> sortedTopOps = new TreeMap<>(pctx.getTopOps());
  for (Entry<String, TableScanOperator> e : sortedTopOps.entrySet()) {
    TableScanOperator tsOp = e.getValue();
    tableNameToOps.put(tsOp.getConf().getTableMetadata().getDbName() + "."
        + tsOp.getConf().getTableMetadata().getTableName(), tsOp);
  }
  return tableNameToOps;
}
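A brief illustration of the grouping this produces: scans are keyed by their fully qualified "db.table" name, so two scans of the same table land under one key and become merge candidates. The loop below is an illustrative consumer, not Hive code:

// Illustrative consumer: only tables scanned more than once are candidates
// for shared-work merging.
Multimap<String, TableScanOperator> byTable = splitTableScanOpsByTable(pctx);
for (String qualifiedName : byTable.keySet()) {      // e.g., "default.store_sales"
  Collection<TableScanOperator> scans = byTable.get(qualifiedName);
  if (scans.size() > 1) {
    // two or more scans of the same table: try to merge them
  }
}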
Use of org.apache.hadoop.hive.ql.exec.TableScanOperator in project hive by apache.
The class SharedWorkOptimizer, method transform.
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
  final Map<String, TableScanOperator> topOps = pctx.getTopOps();
  if (topOps.size() < 2) {
    // Nothing to do, bail out
    return pctx;
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Before SharedWorkOptimizer:\n" + Operator.toString(pctx.getTopOps().values()));
  }
  // Cache to use during optimization
  SharedWorkOptimizerCache optimizerCache = new SharedWorkOptimizerCache();
  // Gather information about the DPP table scans and store it in the cache
  gatherDPPTableScanOps(pctx, optimizerCache);
  // Map of dbName.TblName -> TSOperator
  Multimap<String, TableScanOperator> tableNameToOps = splitTableScanOpsByTable(pctx);
  // We enforce a certain order when we do the reutilization.
  // In particular, we use size of table x number of reads to rank the tables.
  List<Entry<String, Long>> sortedTables = rankTablesByAccumulatedSize(pctx);
  LOG.debug("Sorted tables by size: {}", sortedTables);
  // Execute optimization
  Multimap<String, TableScanOperator> existingOps = ArrayListMultimap.create();
  Set<Operator<?>> removedOps = new HashSet<>();
  for (Entry<String, Long> tablePair : sortedTables) {
    String tableName = tablePair.getKey();
    for (TableScanOperator discardableTsOp : tableNameToOps.get(tableName)) {
      if (removedOps.contains(discardableTsOp)) {
        LOG.debug("Skip {} as it has already been removed", discardableTsOp);
        continue;
      }
      Collection<TableScanOperator> prevTsOps = existingOps.get(tableName);
      for (TableScanOperator retainableTsOp : prevTsOps) {
        if (removedOps.contains(retainableTsOp)) {
          LOG.debug("Skip {} as it has already been removed", retainableTsOp);
          continue;
        }
        // First we quickly check if the two table scan operators can actually be merged
        boolean mergeable = areMergeable(pctx, optimizerCache, retainableTsOp, discardableTsOp);
        if (!mergeable) {
          // Skip
          LOG.debug("{} and {} cannot be merged", retainableTsOp, discardableTsOp);
          continue;
        }
        // Secondly, we extract information about the part of the tree that can be merged
        // as well as some structural information (memory consumption) that needs to be
        // used to determine whether the merge can happen
        SharedResult sr = extractSharedOptimizationInfoForRoot(pctx, optimizerCache,
            retainableTsOp, discardableTsOp);
        // Check that the merged plan meets some preconditions, e.g., that we do not
        // exceed memory limits when map joins broadcast tables.
        if (!validPreConditions(pctx, optimizerCache, sr)) {
          // Skip
          LOG.debug("{} and {} do not meet preconditions", retainableTsOp, discardableTsOp);
          continue;
        }
        // We can merge
        if (sr.retainableOps.size() > 1) {
          // More than TS operator
          Operator<?> lastRetainableOp = sr.retainableOps.get(sr.retainableOps.size() - 1);
          Operator<?> lastDiscardableOp = sr.discardableOps.get(sr.discardableOps.size() - 1);
          if (lastDiscardableOp.getNumChild() != 0) {
            List<Operator<? extends OperatorDesc>> allChildren =
                Lists.newArrayList(lastDiscardableOp.getChildOperators());
            for (Operator<? extends OperatorDesc> op : allChildren) {
              lastDiscardableOp.getChildOperators().remove(op);
              op.replaceParent(lastDiscardableOp, lastRetainableOp);
              lastRetainableOp.getChildOperators().add(op);
            }
          }
          LOG.debug("Merging subtree starting at {} into subtree starting at {}",
              discardableTsOp, retainableTsOp);
        } else {
          // Only TS operator
          ExprNodeGenericFuncDesc exprNode = null;
          if (retainableTsOp.getConf().getFilterExpr() != null) {
            // Push filter on top of children
            pushFilterToTopOfTableScan(optimizerCache, retainableTsOp);
            // Clone to push to table scan
            exprNode = (ExprNodeGenericFuncDesc) retainableTsOp.getConf().getFilterExpr();
          }
          if (discardableTsOp.getConf().getFilterExpr() != null) {
            // Push filter on top
            pushFilterToTopOfTableScan(optimizerCache, discardableTsOp);
            ExprNodeGenericFuncDesc tsExprNode = discardableTsOp.getConf().getFilterExpr();
            if (exprNode != null && !exprNode.isSame(tsExprNode)) {
              // We merge filters from previous scan by ORing with filters from current scan
              if (exprNode.getGenericUDF() instanceof GenericUDFOPOr) {
                List<ExprNodeDesc> newChildren = new ArrayList<>(exprNode.getChildren().size() + 1);
                for (ExprNodeDesc childExprNode : exprNode.getChildren()) {
                  if (childExprNode.isSame(tsExprNode)) {
                    // We do not need to do anything, it is in the OR expression
                    break;
                  }
                  newChildren.add(childExprNode);
                }
                if (exprNode.getChildren().size() == newChildren.size()) {
                  newChildren.add(tsExprNode);
                  exprNode = ExprNodeGenericFuncDesc.newInstance(new GenericUDFOPOr(), newChildren);
                }
              } else {
                exprNode = ExprNodeGenericFuncDesc.newInstance(new GenericUDFOPOr(),
                    Arrays.<ExprNodeDesc>asList(exprNode, tsExprNode));
              }
            }
          }
          // Replace filter
          retainableTsOp.getConf().setFilterExpr(exprNode);
          // Replace table scan operator
          List<Operator<? extends OperatorDesc>> allChildren =
              Lists.newArrayList(discardableTsOp.getChildOperators());
          for (Operator<? extends OperatorDesc> op : allChildren) {
            discardableTsOp.getChildOperators().remove(op);
            op.replaceParent(discardableTsOp, retainableTsOp);
            retainableTsOp.getChildOperators().add(op);
          }
          LOG.debug("Merging {} into {}", discardableTsOp, retainableTsOp);
        }
        // First we remove the input operators of the expression that
        // we are going to eliminate
        for (Operator<?> op : sr.discardableInputOps) {
          OperatorUtils.removeOperator(op);
          optimizerCache.removeOp(op);
          removedOps.add(op);
          // Remove DPP predicates
          if (op instanceof ReduceSinkOperator) {
            SemiJoinBranchInfo sjbi = pctx.getRsToSemiJoinBranchInfo().get(op);
            if (sjbi != null && !sr.discardableOps.contains(sjbi.getTsOp())
                && !sr.discardableInputOps.contains(sjbi.getTsOp())) {
              GenTezUtils.removeSemiJoinOperator(pctx, (ReduceSinkOperator) op, sjbi.getTsOp());
              optimizerCache.tableScanToDPPSource.remove(sjbi.getTsOp(), op);
            }
          } else if (op instanceof AppMasterEventOperator) {
            DynamicPruningEventDesc dped = (DynamicPruningEventDesc) op.getConf();
            if (!sr.discardableOps.contains(dped.getTableScan())
                && !sr.discardableInputOps.contains(dped.getTableScan())) {
              GenTezUtils.removeSemiJoinOperator(pctx, (AppMasterEventOperator) op,
                  dped.getTableScan());
              optimizerCache.tableScanToDPPSource.remove(dped.getTableScan(), op);
            }
          }
          LOG.debug("Input operator removed: {}", op);
        }
        // Then we merge the operators of the works we are going to merge
        optimizerCache.removeOpAndCombineWork(discardableTsOp, retainableTsOp);
        removedOps.add(discardableTsOp);
        // Finally we remove the expression from the tree
        for (Operator<?> op : sr.discardableOps) {
          OperatorUtils.removeOperator(op);
          optimizerCache.removeOp(op);
          removedOps.add(op);
          if (sr.discardableOps.size() == 1) {
            // If there is a single discardable operator, it is a TableScanOperator
            // and it means that we have merged filter expressions for it. Thus, we
            // might need to remove DPP predicates from the retainable TableScanOperator
            Collection<Operator<?>> c =
                optimizerCache.tableScanToDPPSource.get((TableScanOperator) op);
            for (Operator<?> dppSource : c) {
              if (dppSource instanceof ReduceSinkOperator) {
                GenTezUtils.removeSemiJoinOperator(pctx, (ReduceSinkOperator) dppSource,
                    (TableScanOperator) sr.retainableOps.get(0));
                optimizerCache.tableScanToDPPSource.remove(sr.retainableOps.get(0), op);
              } else if (dppSource instanceof AppMasterEventOperator) {
                GenTezUtils.removeSemiJoinOperator(pctx, (AppMasterEventOperator) dppSource,
                    (TableScanOperator) sr.retainableOps.get(0));
                optimizerCache.tableScanToDPPSource.remove(sr.retainableOps.get(0), op);
              }
            }
          }
          LOG.debug("Operator removed: {}", op);
        }
        break;
      }
      if (removedOps.contains(discardableTsOp)) {
        // This operator has been removed, remove it from the list of existing operators
        existingOps.remove(tableName, discardableTsOp);
      } else {
        // This operator has not been removed, include it in the list of existing operators
        existingOps.put(tableName, discardableTsOp);
      }
    }
  }
  // Remove unused table scan operators
  Iterator<Entry<String, TableScanOperator>> it = topOps.entrySet().iterator();
  while (it.hasNext()) {
    Entry<String, TableScanOperator> e = it.next();
    if (e.getValue().getNumChild() == 0) {
      it.remove();
    }
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("After SharedWorkOptimizer:\n" + Operator.toString(pctx.getTopOps().values()));
  }
  if (pctx.getConf().getBoolVar(ConfVars.HIVE_SHARED_WORK_EXTENDED_OPTIMIZATION)) {
    // Gather RS operators that 1) belong to root works, i.e., works containing TS operators,
    // and 2) share the same input operator.
    // These will be the first target for extended shared work optimization
    Multimap<Operator<?>, ReduceSinkOperator> parentToRsOps = ArrayListMultimap.create();
    Set<Operator<?>> visited = new HashSet<>();
    for (Entry<String, TableScanOperator> e : topOps.entrySet()) {
      gatherReduceSinkOpsByInput(parentToRsOps, visited,
          findWorkOperators(optimizerCache, e.getValue()));
    }
    while (!parentToRsOps.isEmpty()) {
      // As above, we enforce a certain order when we do the reutilization.
      // In particular, we use size of data in RS x number of uses.
      List<Entry<Operator<?>, Long>> sortedRSGroups = rankOpsByAccumulatedSize(parentToRsOps.keySet());
      LOG.debug("Sorted operators by size: {}", sortedRSGroups);
      // Execute extended optimization
      // For each RS, check whether other RS in the same work could be merged into this one.
      // If they are merged, RS operators in the resulting work will be considered
      // mergeable in the next loop iteration.
      Multimap<Operator<?>, ReduceSinkOperator> existingRsOps = ArrayListMultimap.create();
      for (Entry<Operator<?>, Long> rsGroupInfo : sortedRSGroups) {
        Operator<?> rsParent = rsGroupInfo.getKey();
        for (ReduceSinkOperator discardableRsOp : parentToRsOps.get(rsParent)) {
          if (removedOps.contains(discardableRsOp)) {
            LOG.debug("Skip {} as it has already been removed", discardableRsOp);
            continue;
          }
          Collection<ReduceSinkOperator> otherRsOps = existingRsOps.get(rsParent);
          for (ReduceSinkOperator retainableRsOp : otherRsOps) {
            if (removedOps.contains(retainableRsOp)) {
              LOG.debug("Skip {} as it has already been removed", retainableRsOp);
              continue;
            }
            // First we quickly check if the two RS operators can actually be merged.
            // We already know that these two RS operators have the same parent, but
            // we need to check whether both RS are actually equal. Further, we check
            // whether their child is also equal. If any of these conditions are not
            // met, we are not going to try to merge.
            boolean mergeable = compareOperator(pctx, retainableRsOp, discardableRsOp)
                && compareOperator(pctx, retainableRsOp.getChildOperators().get(0),
                    discardableRsOp.getChildOperators().get(0));
            if (!mergeable) {
              // Skip
              LOG.debug("{} and {} cannot be merged", retainableRsOp, discardableRsOp);
              continue;
            }
LOG.debug("Checking additional conditions for merging subtree starting at {}" + " into subtree starting at {}", discardableRsOp, retainableRsOp);
// Secondly, we extract information about the part of the tree that can be merged
// as well as some structural information (memory consumption) that needs to be
// used to determined whether the merge can happen
Operator<?> retainableRsOpChild = retainableRsOp.getChildOperators().get(0);
Operator<?> discardableRsOpChild = discardableRsOp.getChildOperators().get(0);
SharedResult sr = extractSharedOptimizationInfo(pctx, optimizerCache, retainableRsOp, discardableRsOp, retainableRsOpChild, discardableRsOpChild);
// tables.
if (sr.retainableOps.isEmpty() || !validPreConditions(pctx, optimizerCache, sr)) {
// Skip
LOG.debug("{} and {} do not meet preconditions", retainableRsOp, discardableRsOp);
continue;
}
            // We can merge
            Operator<?> lastRetainableOp = sr.retainableOps.get(sr.retainableOps.size() - 1);
            Operator<?> lastDiscardableOp = sr.discardableOps.get(sr.discardableOps.size() - 1);
            if (lastDiscardableOp.getNumChild() != 0) {
              List<Operator<? extends OperatorDesc>> allChildren =
                  Lists.newArrayList(lastDiscardableOp.getChildOperators());
              for (Operator<? extends OperatorDesc> op : allChildren) {
                lastDiscardableOp.getChildOperators().remove(op);
                op.replaceParent(lastDiscardableOp, lastRetainableOp);
                lastRetainableOp.getChildOperators().add(op);
              }
            }
            LOG.debug("Merging subtree starting at {} into subtree starting at {}",
                discardableRsOp, retainableRsOp);
            // First we remove the input operators of the expression that
            // we are going to eliminate
            for (Operator<?> op : sr.discardableInputOps) {
              OperatorUtils.removeOperator(op);
              optimizerCache.removeOp(op);
              removedOps.add(op);
              // Remove DPP predicates
              if (op instanceof ReduceSinkOperator) {
                SemiJoinBranchInfo sjbi = pctx.getRsToSemiJoinBranchInfo().get(op);
                if (sjbi != null && !sr.discardableOps.contains(sjbi.getTsOp())
                    && !sr.discardableInputOps.contains(sjbi.getTsOp())) {
                  GenTezUtils.removeSemiJoinOperator(pctx, (ReduceSinkOperator) op, sjbi.getTsOp());
                  optimizerCache.tableScanToDPPSource.remove(sjbi.getTsOp(), op);
                }
              } else if (op instanceof AppMasterEventOperator) {
                DynamicPruningEventDesc dped = (DynamicPruningEventDesc) op.getConf();
                if (!sr.discardableOps.contains(dped.getTableScan())
                    && !sr.discardableInputOps.contains(dped.getTableScan())) {
                  GenTezUtils.removeSemiJoinOperator(pctx, (AppMasterEventOperator) op,
                      dped.getTableScan());
                  optimizerCache.tableScanToDPPSource.remove(dped.getTableScan(), op);
                }
              }
              LOG.debug("Input operator removed: {}", op);
            }
            // We remove the discardable RS operator
            OperatorUtils.removeOperator(discardableRsOp);
            optimizerCache.removeOp(discardableRsOp);
            removedOps.add(discardableRsOp);
            LOG.debug("Operator removed: {}", discardableRsOp);
            // Then we merge the operators of the works we are going to merge
            optimizerCache.removeOpAndCombineWork(discardableRsOpChild, retainableRsOpChild);
            // Finally we remove the rest of the expression from the tree
            for (Operator<?> op : sr.discardableOps) {
              OperatorUtils.removeOperator(op);
              optimizerCache.removeOp(op);
              removedOps.add(op);
              LOG.debug("Operator removed: {}", op);
            }
            break;
          }
          if (removedOps.contains(discardableRsOp)) {
            // This operator has been removed, remove it from the list of existing operators
            existingRsOps.remove(rsParent, discardableRsOp);
          } else {
            // This operator has not been removed, include it in the list of existing operators
            existingRsOps.put(rsParent, discardableRsOp);
          }
        }
      }
      // We gather the operators that will be used for the next iteration of extended
      // optimization (if any)
      parentToRsOps = ArrayListMultimap.create();
      visited = new HashSet<>();
      for (Entry<Operator<?>, ReduceSinkOperator> e : existingRsOps.entries()) {
        if (removedOps.contains(e.getValue()) || e.getValue().getNumChild() < 1) {
          // If the RS has already been removed or does not have a child (e.g., it is a
          // semijoin RS), we can quickly skip this one
          continue;
        }
        gatherReduceSinkOpsByInput(parentToRsOps, visited,
            findWorkOperators(optimizerCache, e.getValue().getChildOperators().get(0)));
      }
    }
    // Remove unused table scan operators
    it = topOps.entrySet().iterator();
    while (it.hasNext()) {
      Entry<String, TableScanOperator> e = it.next();
      if (e.getValue().getNumChild() == 0) {
        it.remove();
      }
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("After SharedWorkExtendedOptimizer:\n"
          + Operator.toString(pctx.getTopOps().values()));
    }
  }
  // In test mode, check that the contents of the operator cache are consistent
  // with the actual plan, independently of whether we used the basic or the
  // extended version of the optimizer.
  if (pctx.getConf().getBoolVar(ConfVars.HIVE_IN_TEST)) {
    Set<Operator<?>> visited = new HashSet<>();
    it = topOps.entrySet().iterator();
    while (it.hasNext()) {
      Entry<String, TableScanOperator> e = it.next();
      for (Operator<?> op : OperatorUtils.findOperators(e.getValue(), Operator.class)) {
        if (!visited.contains(op)) {
          if (!findWorkOperators(optimizerCache, op).equals(
              findWorkOperators(op, new HashSet<Operator<?>>()))) {
            throw new SemanticException("Error in shared work optimizer: operator cache contents "
                + "and actual plan differ");
          }
          visited.add(op);
        }
      }
    }
  }
  return pctx;
}
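One idiom worth isolating is the child-rewiring loop that transform uses twice, once when merging table scans and once when merging reduce sinks. A minimal sketch under the same Operator API calls used above (moveChildren is an illustrative name, not a Hive method):

import java.util.List;
import com.google.common.collect.Lists;
import org.apache.hadoop.hive.ql.exec.Operator;
import org.apache.hadoop.hive.ql.plan.OperatorDesc;

// Illustrative helper capturing the rewiring loop used in transform():
// detach every child of `from` and re-attach it under `to`.
static void moveChildren(Operator<? extends OperatorDesc> from,
    Operator<? extends OperatorDesc> to) {
  // Copy first: we mutate from's child list while iterating.
  List<Operator<? extends OperatorDesc>> allChildren =
      Lists.newArrayList(from.getChildOperators());
  for (Operator<? extends OperatorDesc> op : allChildren) {
    from.getChildOperators().remove(op);
    op.replaceParent(from, to);        // child now points at the retained operator
    to.getChildOperators().add(op);
  }
}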