use of org.apache.hadoop.hive.ql.plan.OperatorDesc in project hive by apache.
the class GenMRTableScan1 method process.
/**
* Table scan encountered.
* @param nd
* the table scan operator encountered
* @param opProcCtx
* context
*/
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx opProcCtx, Object... nodeOutputs) throws SemanticException {
TableScanOperator op = (TableScanOperator) nd;
GenMRProcContext ctx = (GenMRProcContext) opProcCtx;
ParseContext parseCtx = ctx.getParseCtx();
Table table = op.getConf().getTableMetadata();
Class<? extends InputFormat> inputFormat = table.getInputFormatClass();
Map<Operator<? extends OperatorDesc>, GenMapRedCtx> mapCurrCtx = ctx.getMapCurrCtx();
// create a dummy MapReduce task
MapredWork currWork = GenMapRedUtils.getMapRedWork(parseCtx);
MapRedTask currTask = (MapRedTask) TaskFactory.get(currWork);
ctx.setCurrTask(currTask);
ctx.setCurrTopOp(op);
for (String alias : parseCtx.getTopOps().keySet()) {
Operator<? extends OperatorDesc> currOp = parseCtx.getTopOps().get(alias);
if (currOp == op) {
String currAliasId = alias;
ctx.setCurrAliasId(currAliasId);
mapCurrCtx.put(op, new GenMapRedCtx(currTask, currAliasId));
if (parseCtx.getQueryProperties().isAnalyzeCommand()) {
boolean noScan = parseCtx.getQueryProperties().isNoScanAnalyzeCommand();
if (OrcInputFormat.class.isAssignableFrom(inputFormat) || MapredParquetInputFormat.class.isAssignableFrom(inputFormat)) {
// For ORC and Parquet, all the following statements are the same
// ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS
// ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS noscan;
// There will not be any MR or Tez job above this task
StatsWork statWork = new StatsWork(table, parseCtx.getConf());
statWork.setFooterScan();
// If partition is specified, get pruned partition list
Set<Partition> confirmedParts = GenMapRedUtils.getConfirmedPartitionsForScan(op);
if (confirmedParts.size() > 0) {
List<String> partCols = GenMapRedUtils.getPartitionColumns(op);
PrunedPartitionList partList = new PrunedPartitionList(table, confirmedParts, partCols, false);
statWork.addInputPartitions(partList.getPartitions());
}
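// the footer-scan StatsTask becomes the sole root task; the dummy MapRedTask created above is dropped from the plan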
Task<StatsWork> snjTask = TaskFactory.get(statWork);
ctx.setCurrTask(snjTask);
ctx.setCurrTopOp(null);
ctx.getRootTasks().clear();
ctx.getRootTasks().add(snjTask);
} else {
// ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS;
// The plan consists of a simple MapRedTask followed by a StatsTask.
// The MR task is just a simple TableScanOperator
BasicStatsWork statsWork = new BasicStatsWork(table.getTableSpec());
statsWork.setNoScanAnalyzeCommand(noScan);
StatsWork columnStatsWork = new StatsWork(table, statsWork, parseCtx.getConf());
columnStatsWork.collectStatsFromAggregator(op.getConf());
columnStatsWork.setSourceTask(currTask);
Task<StatsWork> columnStatsTask = TaskFactory.get(columnStatsWork);
currTask.addDependentTask(columnStatsTask);
if (!ctx.getRootTasks().contains(currTask)) {
ctx.getRootTasks().add(currTask);
}
// The plan consists of a StatsTask only.
if (noScan) {
columnStatsTask.setParentTasks(null);
ctx.getRootTasks().remove(currTask);
ctx.getRootTasks().add(columnStatsTask);
}
currWork.getMapWork().setGatheringStats(true);
if (currWork.getReduceWork() != null) {
currWork.getReduceWork().setGatheringStats(true);
}
// NOTE: here we should use the new partition predicate pushdown API to get the pruned
// partition list and pass it to setTaskPlan as the last parameter
Set<Partition> confirmedPartns = GenMapRedUtils.getConfirmedPartitionsForScan(op);
if (confirmedPartns.size() > 0) {
List<String> partCols = GenMapRedUtils.getPartitionColumns(op);
PrunedPartitionList partList = new PrunedPartitionList(table, confirmedPartns, partCols, false);
GenMapRedUtils.setTaskPlan(currAliasId, op, currTask, false, ctx, partList);
} else {
// non-partitioned table
GenMapRedUtils.setTaskPlan(currAliasId, op, currTask, false, ctx);
}
}
}
return true;
}
}
assert false;
return null;
}
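The branching above is easier to see in isolation. The following is a minimal, self-contained sketch, not the Hive API: AnalyzePlanSketch and its nested Task types are hypothetical stand-ins. It shows how the ANALYZE TABLE plan differs between footer-capable formats (ORC/Parquet), NOSCAN, and a regular scan followed by a stats task.

import java.util.ArrayList;
import java.util.List;

public class AnalyzePlanSketch {

    // Hypothetical stand-ins for Hive's Task/StatsWork plumbing.
    interface Task {
        String describe();
    }

    static final class FooterStatsTask implements Task {
        public String describe() { return "read stats from ORC/Parquet file footers"; }
    }

    static final class BasicStatsTask implements Task {
        public String describe() { return "aggregate and persist basic stats"; }
    }

    static final class ScanTask implements Task {
        private final List<Task> dependents = new ArrayList<>();
        void addDependentTask(Task t) { dependents.add(t); }
        public String describe() { return "full table scan, then " + dependents.get(0).describe(); }
    }

    // Root tasks of a hypothetical plan for ANALYZE TABLE ... COMPUTE STATISTICS.
    static List<Task> planAnalyze(String inputFormat, boolean noScan) {
        List<Task> rootTasks = new ArrayList<>();
        boolean footerCapable = inputFormat.equals("ORC") || inputFormat.equals("PARQUET");
        if (footerCapable) {
            // ORC/Parquet: no MR/Tez job at all, a single stats task reads the footers
            // (mirrors statWork.setFooterScan() and the rootTasks.clear()/add(snjTask) above).
            rootTasks.add(new FooterStatsTask());
        } else if (noScan) {
            // NOSCAN for other formats: the stats task runs without a scan task in front of it
            // (mirrors columnStatsTask.setParentTasks(null) above).
            rootTasks.add(new BasicStatsTask());
        } else {
            // Default: a scan task feeds a dependent stats task
            // (mirrors currTask.addDependentTask(columnStatsTask) above).
            ScanTask scan = new ScanTask();
            scan.addDependentTask(new BasicStatsTask());
            rootTasks.add(scan);
        }
        return rootTasks;
    }

    public static void main(String[] args) {
        System.out.println(planAnalyze("ORC", false).get(0).describe());
        System.out.println(planAnalyze("TEXTFILE", false).get(0).describe());
        System.out.println(planAnalyze("TEXTFILE", true).get(0).describe());
    }
}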
use of org.apache.hadoop.hive.ql.plan.OperatorDesc in project hive by apache.
the class GenMRUnion1 method process.
/**
* Union Operator encountered. Currently, the algorithm is pretty simple: If
* all the sub-queries are map-only, don't do anything. Otherwise, insert a
* FileSink on top of all the sub-queries.
*
* This can be optimized later on.
*
* @param nd
* the union operator encountered
* @param opProcCtx
* context
*/
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx opProcCtx, Object... nodeOutputs) throws SemanticException {
UnionOperator union = (UnionOperator) nd;
GenMRProcContext ctx = (GenMRProcContext) opProcCtx;
ParseContext parseCtx = ctx.getParseCtx();
UnionProcContext uCtx = parseCtx.getUCtx();
// Map-only sub-queries can be optimized in the future to not write to a file
Map<Operator<? extends OperatorDesc>, GenMapRedCtx> mapCurrCtx = ctx.getMapCurrCtx();
if (union.getConf().isAllInputsInSameReducer()) {
// All inputs of this UnionOperator are in the same Reducer.
// We do not need to break the operator tree.
mapCurrCtx.put((Operator<? extends OperatorDesc>) nd, new GenMapRedCtx(ctx.getCurrTask(), ctx.getCurrAliasId()));
return null;
}
UnionParseContext uPrsCtx = uCtx.getUnionParseContext(union);
assert uPrsCtx != null;
ctx.setCurrUnionOp(union);
// The plan needs to be broken only if one of the sub-queries involves a
// map-reduce job
if (uPrsCtx.allMapOnlySubQ()) {
return processMapOnlyUnion(union, stack, ctx, uCtx);
}
Task<? extends Serializable> currTask = ctx.getCurrTask();
int pos = UnionProcFactory.getPositionParent(union, stack);
Task<? extends Serializable> uTask = null;
MapredWork uPlan = null;
// union is encountered for the first time
GenMRUnionCtx uCtxTask = ctx.getUnionTask(union);
if (uCtxTask == null) {
uPlan = GenMapRedUtils.getMapRedWork(parseCtx);
uTask = TaskFactory.get(uPlan);
uCtxTask = new GenMRUnionCtx(uTask);
ctx.setUnionTask(union, uCtxTask);
} else {
uTask = uCtxTask.getUTask();
}
// Copy into the current union task plan if this sub-query is map-only and is a root task
if (uPrsCtx.getMapOnlySubq(pos) && uPrsCtx.getRootTask(pos)) {
processSubQueryUnionMerge(ctx, uCtxTask, union, stack);
if (ctx.getRootTasks().contains(currTask)) {
ctx.getRootTasks().remove(currTask);
}
} else // If it is a map-reduce job, create a temporary file
{
// is the current task a root task
if (shouldBeRootTask(currTask) && !ctx.getRootTasks().contains(currTask) && (currTask.getParentTasks() == null || currTask.getParentTasks().isEmpty())) {
ctx.getRootTasks().add(currTask);
}
processSubQueryUnionCreateIntermediate(union.getParentOperators().get(pos), union, uTask, ctx, uCtxTask);
// the currAliasId and currTopOp are no longer valid
ctx.setCurrAliasId(null);
ctx.setCurrTopOp(null);
ctx.getOpTaskMap().put(null, uTask);
}
ctx.setCurrTask(uTask);
mapCurrCtx.put((Operator<? extends OperatorDesc>) nd, new GenMapRedCtx(ctx.getCurrTask(), null));
return true;
}
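The core bookkeeping here, create the union's MapRed task on the first encounter and reuse it for every other parent branch, can be shown in a few lines of plain Java. This is a sketch with hypothetical UnionOp and Task classes, not Hive's GenMRProcContext or GenMRUnionCtx.

import java.util.HashMap;
import java.util.Map;

public class UnionTaskSketch {

    static final class UnionOp { final String name; UnionOp(String n) { name = n; } }
    static final class Task { final String label; Task(String l) { label = l; } }

    // Analogous to GenMRProcContext.getUnionTask / setUnionTask above.
    private final Map<UnionOp, Task> unionTasks = new HashMap<>();

    // Called once per parent branch of the union; only the first call creates the task.
    Task taskForUnion(UnionOp union) {
        return unionTasks.computeIfAbsent(union, u -> new Task("union-task:" + u.name));
    }

    public static void main(String[] args) {
        UnionTaskSketch ctx = new UnionTaskSketch();
        UnionOp u = new UnionOp("u1");
        Task first = ctx.taskForUnion(u);    // created on the first encounter
        Task second = ctx.taskForUnion(u);   // reused for the second parent branch
        System.out.println(first == second); // true: both branches feed one task
    }
}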
use of org.apache.hadoop.hive.ql.plan.OperatorDesc in project hive by apache.
the class GenMapRedUtils method initUnionPlan.
/**
* Initialize the current union plan.
*
* @param op
* the reduce sink operator encountered
* @param opProcCtx
* processing context
*/
public static void initUnionPlan(ReduceSinkOperator op, UnionOperator currUnionOp, GenMRProcContext opProcCtx, Task<? extends Serializable> unionTask) throws SemanticException {
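// the single child of the reduce sink is the reducer that the union task's reduce stage will run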
Operator<? extends OperatorDesc> reducer = op.getChildOperators().get(0);
MapredWork plan = (MapredWork) unionTask.getWork();
HashMap<Operator<? extends OperatorDesc>, Task<? extends Serializable>> opTaskMap = opProcCtx.getOpTaskMap();
opTaskMap.put(reducer, unionTask);
plan.setReduceWork(new ReduceWork());
plan.getReduceWork().setReducer(reducer);
ReduceSinkDesc desc = op.getConf();
plan.getReduceWork().setNumReduceTasks(desc.getNumReducers());
if (needsTagging(plan.getReduceWork())) {
plan.getReduceWork().setNeedsTagging(true);
}
initUnionPlan(opProcCtx, currUnionOp, unionTask, false);
}
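A compact illustration of what initUnionPlan records, using hypothetical types rather than Hive's MapredWork/ReduceWork: the reducer operator is attached to the union task's reduce stage, and the operator-to-task map is updated so later rules can look the task up by its reducer.

import java.util.HashMap;
import java.util.Map;

public class InitUnionPlanSketch {

    static final class Operator { final String name; Operator(String n) { name = n; } }

    static final class ReduceStage {
        Operator reducer;
        int numReduceTasks;
    }

    static final class Task {
        final ReduceStage reduceStage = new ReduceStage();
    }

    public static void main(String[] args) {
        Map<Operator, Task> opTaskMap = new HashMap<>();
        Operator reducer = new Operator("GBY-reducer");
        Task unionTask = new Task();

        // Mirrors opTaskMap.put(reducer, unionTask) and the ReduceWork setup above.
        opTaskMap.put(reducer, unionTask);
        unionTask.reduceStage.reducer = reducer;
        unionTask.reduceStage.numReduceTasks = 4; // would come from ReduceSinkDesc.getNumReducers()

        System.out.println(opTaskMap.get(reducer) == unionTask); // true
    }
}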
use of org.apache.hadoop.hive.ql.plan.OperatorDesc in project hive by apache.
the class MapJoinProcessor method genMapJoinLocalWork.
/**
* Generate the MapRed Local Work for the given map-join operator
*
* @param newWork
* @param mapJoinOp
* map-join operator for which local work needs to be generated.
* @param bigTablePos
* @throws SemanticException
*/
private static void genMapJoinLocalWork(MapredWork newWork, MapJoinOperator mapJoinOp, int bigTablePos) throws SemanticException {
// keep the small table alias to avoid concurrent modification exception
ArrayList<String> smallTableAliasList = new ArrayList<String>();
// create a new MapredLocalWork
MapredLocalWork newLocalWork = new MapredLocalWork(new LinkedHashMap<String, Operator<? extends OperatorDesc>>(), new LinkedHashMap<String, FetchWork>());
for (Map.Entry<String, Operator<? extends OperatorDesc>> entry : newWork.getMapWork().getAliasToWork().entrySet()) {
String alias = entry.getKey();
Operator<? extends OperatorDesc> op = entry.getValue();
// if the table scan is for the big table, skip it;
// trace down the operator tree from the table scan operator to find the map-join
Operator<? extends OperatorDesc> parentOp = op;
Operator<? extends OperatorDesc> childOp = op.getChildOperators().get(0);
while ((childOp != null) && (!childOp.equals(mapJoinOp))) {
parentOp = childOp;
assert parentOp.getChildOperators().size() == 1;
childOp = parentOp.getChildOperators().get(0);
}
if (childOp == null) {
throw new SemanticException("Cannot find join op by tracing down the table scan operator tree");
}
// skip the big table pos
int i = childOp.getParentOperators().indexOf(parentOp);
if (i == bigTablePos) {
continue;
}
// set alias to work and put into smallTableAliasList
newLocalWork.getAliasToWork().put(alias, op);
smallTableAliasList.add(alias);
// get input path and remove this alias from pathToAlias
// because this file will be fetched by fetch operator
LinkedHashMap<Path, ArrayList<String>> pathToAliases = newWork.getMapWork().getPathToAliases();
// keep a record of all the input paths for this alias
HashSet<Path> pathSet = new HashSet<>();
HashSet<Path> emptyPath = new HashSet<>();
for (Map.Entry<Path, ArrayList<String>> entry2 : pathToAliases.entrySet()) {
Path path = entry2.getKey();
ArrayList<String> list = entry2.getValue();
if (list.contains(alias)) {
// add to path set
pathSet.add(path);
// remove this alias from the alias list
list.remove(alias);
if (list.size() == 0) {
emptyPath.add(path);
}
}
}
// remove the paths that no longer have any alias associated with them
for (Path path : emptyPath) {
newWork.getMapWork().removePathToAlias(path);
}
// create fetch work
FetchWork fetchWork = null;
List<Path> partDir = new ArrayList<Path>();
List<PartitionDesc> partDesc = new ArrayList<PartitionDesc>();
for (Path tablePath : pathSet) {
PartitionDesc partitionDesc = newWork.getMapWork().getPathToPartitionInfo().get(tablePath);
// create fetch work for a non-partitioned table
if (partitionDesc.getPartSpec() == null || partitionDesc.getPartSpec().size() == 0) {
fetchWork = new FetchWork(tablePath, partitionDesc.getTableDesc());
break;
}
// if the table is partitioned, add partDir and partitionDesc
partDir.add(tablePath);
partDesc.add(partitionDesc);
}
// create fetch work for a partitioned table
if (fetchWork == null) {
TableDesc table = newWork.getMapWork().getAliasToPartnInfo().get(alias).getTableDesc();
fetchWork = new FetchWork(partDir, partDesc, table);
}
// set alias to fetch work
newLocalWork.getAliasToFetchWork().put(alias, fetchWork);
}
// remove the small table aliases from aliasToWork to avoid concurrent modification
for (String alias : smallTableAliasList) {
newWork.getMapWork().getAliasToWork().remove(alias);
}
// set up local work
newWork.getMapWork().setMapRedLocalWork(newLocalWork);
// remove reducer
newWork.setReduceWork(null);
}
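The bookkeeping above (move small-table aliases out of the map-side work, drop input paths that lose their last alias) reduces to a short sketch. The classes and values below are hypothetical, not Hive's MapWork/MapredLocalWork.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

public class MapJoinLocalWorkSketch {
    public static void main(String[] args) {
        // input path -> aliases reading it (stand-in for MapWork.getPathToAliases)
        Map<String, List<String>> pathToAliases = new HashMap<>();
        pathToAliases.put("/warehouse/big", new ArrayList<>(List.of("big")));
        pathToAliases.put("/warehouse/small", new ArrayList<>(List.of("small")));

        String bigTableAlias = "big";
        List<String> localWorkAliases = new ArrayList<>(); // analogous to the MapredLocalWork aliases

        // Collect the small-table aliases first, then mutate, to avoid
        // ConcurrentModificationException (same reason the original keeps smallTableAliasList).
        List<String> smallAliases = new ArrayList<>();
        for (String alias : List.of("big", "small")) {
            if (!alias.equals(bigTableAlias)) {
                smallAliases.add(alias);
            }
        }
        for (String alias : smallAliases) {
            localWorkAliases.add(alias);
            // Remove the alias from every path; drop paths left with no alias.
            Iterator<Map.Entry<String, List<String>>> it = pathToAliases.entrySet().iterator();
            while (it.hasNext()) {
                Map.Entry<String, List<String>> e = it.next();
                e.getValue().remove(alias);
                if (e.getValue().isEmpty()) {
                    it.remove();
                }
            }
        }
        System.out.println("streamed paths: " + pathToAliases.keySet()); // only /warehouse/big
        System.out.println("fetched locally: " + localWorkAliases);      // [small]
    }
}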
use of org.apache.hadoop.hive.ql.plan.OperatorDesc in project hive by apache.
the class MapJoinProcessor method convertMapJoin.
/**
* Convert a regular join to a map-side join.
*
* @param conf
* @param op
* join operator
* @param leftInputJoin
* @param baseSrc
* @param mapAliases
* @param mapJoinPos
* position of the source to be read as part of map-reduce framework. All other sources
* are cached in memory
* @param noCheckOuterJoin
* @param validateMapJoinTree
*/
public MapJoinOperator convertMapJoin(HiveConf conf, JoinOperator op, boolean leftInputJoin, String[] baseSrc, List<String> mapAliases, int mapJoinPos, boolean noCheckOuterJoin, boolean validateMapJoinTree) throws SemanticException {
// outer join cannot be performed on a table which is being cached
JoinDesc desc = op.getConf();
JoinCondDesc[] condns = desc.getConds();
if (!noCheckOuterJoin) {
if (checkMapJoin(mapJoinPos, condns) < 0) {
throw new SemanticException(ErrorMsg.NO_OUTER_MAPJOIN.getMsg());
}
}
// Walk over all the sources (which are guaranteed to be reduce sink
// operators).
// The join outputs a concatenation of all the inputs.
List<Operator<? extends OperatorDesc>> parentOps = op.getParentOperators();
List<Operator<? extends OperatorDesc>> newParentOps = new ArrayList<Operator<? extends OperatorDesc>>();
List<Operator<? extends OperatorDesc>> oldReduceSinkParentOps = new ArrayList<Operator<? extends OperatorDesc>>();
// found a source which is not to be stored in memory
if (leftInputJoin) {
// assert mapJoinPos == 0;
Operator<? extends OperatorDesc> parentOp = parentOps.get(0);
assert parentOp.getParentOperators().size() == 1;
Operator<? extends OperatorDesc> grandParentOp = parentOp.getParentOperators().get(0);
oldReduceSinkParentOps.add(parentOp);
newParentOps.add(grandParentOp);
}
byte pos = 0;
// Remove parent reduce-sink operators
for (String src : baseSrc) {
if (src != null) {
Operator<? extends OperatorDesc> parentOp = parentOps.get(pos);
assert parentOp.getParentOperators().size() == 1;
Operator<? extends OperatorDesc> grandParentOp = parentOp.getParentOperators().get(0);
oldReduceSinkParentOps.add(parentOp);
newParentOps.add(grandParentOp);
}
pos++;
}
// create the map-join operator
MapJoinOperator mapJoinOp = convertJoinOpMapJoinOp(conf, op, leftInputJoin, baseSrc, mapAliases, mapJoinPos, noCheckOuterJoin);
// remove old parents
for (pos = 0; pos < newParentOps.size(); pos++) {
newParentOps.get(pos).replaceChild(oldReduceSinkParentOps.get(pos), mapJoinOp);
}
mapJoinOp.getParentOperators().removeAll(oldReduceSinkParentOps);
mapJoinOp.setParentOperators(newParentOps);
// make sure only map-joins can be performed.
if (validateMapJoinTree) {
validateMapJoinTypes(mapJoinOp);
}
return mapJoinOp;
}
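The parent rewiring at the end of this method is the essential step: each ReduceSink parent is spliced out and its own parent becomes a direct parent of the new map-join operator. Below is a sketch with a hypothetical Op class, not Hive's Operator hierarchy, showing that splice.

import java.util.ArrayList;
import java.util.List;

public class MapJoinRewireSketch {

    static final class Op {
        final String name;
        final List<Op> parents = new ArrayList<>();
        final List<Op> children = new ArrayList<>();
        Op(String name) { this.name = name; }
        void addChild(Op c) { children.add(c); c.parents.add(this); }
        // Analogous to Operator.replaceChild in the snippet above.
        void replaceChild(Op oldChild, Op newChild) {
            children.set(children.indexOf(oldChild), newChild);
            newChild.parents.add(this);
        }
    }

    public static void main(String[] args) {
        Op scanA = new Op("TS_a");
        Op scanB = new Op("TS_b");
        Op rsA = new Op("RS_a");
        Op rsB = new Op("RS_b");
        scanA.addChild(rsA);
        scanB.addChild(rsB);

        Op mapJoin = new Op("MAPJOIN");
        // Splice out the reduce sinks: the grandparents point straight at the map-join.
        scanA.replaceChild(rsA, mapJoin);
        scanB.replaceChild(rsB, mapJoin);
        // Drop any stale reduce-sink parents, as in getParentOperators().removeAll(oldReduceSinkParentOps).
        mapJoin.parents.removeAll(List.of(rsA, rsB));

        System.out.println(scanA.children.get(0).name); // MAPJOIN
        System.out.println(mapJoin.parents.stream().map(o -> o.name).toList()); // [TS_a, TS_b]
    }
}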