use of org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx in project hive by apache.
the class GenMRRedSink3 method process.
/**
 * Reduce Sink encountered.
 *
 * @param nd
 *          the reduce sink operator encountered
 * @param opProcCtx
 *          context
 */
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx opProcCtx,
    Object... nodeOutputs) throws SemanticException {
  ReduceSinkOperator op = (ReduceSinkOperator) nd;
  GenMRProcContext ctx = (GenMRProcContext) opProcCtx;
  // The union consists of a set of map-reduce jobs, and the plan has been
  // split at the union.
  Operator<? extends OperatorDesc> reducer = op.getChildOperators().get(0);
  UnionOperator union = Utils.findNode(stack, UnionOperator.class);
  assert union != null;
  Map<Operator<? extends OperatorDesc>, GenMapRedCtx> mapCurrCtx = ctx.getMapCurrCtx();
  GenMapRedCtx mapredCtx = mapCurrCtx.get(union);
  Task<? extends Serializable> unionTask = null;
  if (mapredCtx != null) {
    unionTask = mapredCtx.getCurrTask();
  } else {
    unionTask = ctx.getCurrTask();
  }
  MapredWork plan = (MapredWork) unionTask.getWork();
  HashMap<Operator<? extends OperatorDesc>, Task<? extends Serializable>> opTaskMap = ctx.getOpTaskMap();
  Task<? extends Serializable> reducerTask = opTaskMap.get(reducer);
  ctx.setCurrTask(unionTask);
  // If the plan for this reducer does not exist, initialize the plan
  if (reducerTask == null) {
    if (plan.getReduceWork() == null) {
      // The reducer is encountered for the first time
      GenMapRedUtils.initUnionPlan(op, union, ctx, unionTask);
    } else {
      // The union is followed by a multi-table insert
      GenMapRedUtils.splitPlan(op, ctx);
    }
  } else if (plan.getReduceWork() != null && plan.getReduceWork().getReducer() == reducer) {
    // The union is already initialized, but it is being walked from another
    // input; initUnionPlan is idempotent.
    GenMapRedUtils.initUnionPlan(op, union, ctx, unionTask);
  } else {
    GenMapRedUtils.joinUnionPlan(ctx, union, unionTask, reducerTask, false);
    ctx.setCurrTask(reducerTask);
  }
  mapCurrCtx.put(op, new GenMapRedCtx(ctx.getCurrTask(), ctx.getCurrAliasId()));
  // the union operator has been processed
  ctx.setCurrUnionOp(null);
  return true;
}
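A recurring pattern in this processor is looking up the GenMapRedCtx recorded for a related operator (here the enclosing union) and falling back to the processing context's current task when none was recorded yet. Below is a minimal sketch of that lookup-or-fallback step as a standalone helper; the helper name is hypothetical and not part of Hive, but it uses only accessors that appear in the snippet above.

import java.io.Serializable;

import org.apache.hadoop.hive.ql.exec.Operator;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext;
import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx;
import org.apache.hadoop.hive.ql.plan.OperatorDesc;

// Hypothetical helper (not in Hive): resolve the task to work against by preferring the
// GenMapRedCtx already recorded for keyOp, otherwise falling back to the current task.
private static Task<? extends Serializable> resolveCurrentTask(
    GenMRProcContext ctx, Operator<? extends OperatorDesc> keyOp) {
  GenMapRedCtx mapredCtx = ctx.getMapCurrCtx().get(keyOp);
  return (mapredCtx != null) ? mapredCtx.getCurrTask() : ctx.getCurrTask();
}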
use of org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx in project hive by apache.
the class GenMRUnion1 method processMapOnlyUnion.
/**
 * Process the union if all sub-queries are map-only.
 *
 * @return true, since the union has been handled
 * @throws SemanticException
 */
private Object processMapOnlyUnion(UnionOperator union, Stack<Node> stack,
    GenMRProcContext ctx, UnionProcContext uCtx) throws SemanticException {
  // merge currTask from multiple topOps
  GenMRUnionCtx uCtxTask = ctx.getUnionTask(union);
  if (uCtxTask != null) {
    // get the task associated with this union
    Task<? extends Serializable> uTask = ctx.getUnionTask(union).getUTask();
    if (uTask != null) {
      if (ctx.getCurrTask() != null && ctx.getCurrTask() != uTask) {
        // if ctx.getCurrTask() is in rootTasks, it should be removed
        ctx.getRootTasks().remove(ctx.getCurrTask());
      }
      ctx.setCurrTask(uTask);
    }
  }
  UnionParseContext uPrsCtx = uCtx.getUnionParseContext(union);
  ctx.getMapCurrCtx().put(union, new GenMapRedCtx(ctx.getCurrTask(), ctx.getCurrAliasId()));
  // if the union is seen for the first time, record the current task in a GenMRUnionCtx
  uCtxTask = ctx.getUnionTask(union);
  if (uCtxTask == null) {
    uCtxTask = new GenMRUnionCtx(ctx.getCurrTask());
    ctx.setUnionTask(union, uCtxTask);
  }
  Task<? extends Serializable> uTask = ctx.getCurrTask();
  if (uTask.getParentTasks() == null || uTask.getParentTasks().isEmpty()) {
    if (!ctx.getRootTasks().contains(uTask)) {
      ctx.getRootTasks().add(uTask);
    }
  }
  return true;
}
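The last block above applies a bookkeeping rule that repeats across these processors: a task belongs in the root-task list only if it has no parent tasks and is not already registered there. A compact sketch of that check follows; the helper is hypothetical (not Hive code) and relies only on the Task and GenMRProcContext methods used in the snippet.

import java.io.Serializable;

import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext;

// Hypothetical helper: register a task as a root task only when it is parentless and not
// yet tracked, mirroring the checks at the end of processMapOnlyUnion above.
private static void ensureRootTask(GenMRProcContext ctx, Task<? extends Serializable> task) {
  boolean parentless = task.getParentTasks() == null || task.getParentTasks().isEmpty();
  if (parentless && !ctx.getRootTasks().contains(task)) {
    ctx.getRootTasks().add(task);
  }
}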
use of org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx in project hive by apache.
the class GenMRUnion1 method process.
/**
 * Union Operator encountered. Currently, the algorithm is pretty simple: if
 * all the sub-queries are map-only, don't do anything. Otherwise, insert a
 * FileSink on top of all the sub-queries.
 *
 * This can be optimized later on.
 *
 * @param nd
 *          the union operator encountered
 * @param opProcCtx
 *          context
 */
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx opProcCtx,
    Object... nodeOutputs) throws SemanticException {
  UnionOperator union = (UnionOperator) nd;
  GenMRProcContext ctx = (GenMRProcContext) opProcCtx;
  ParseContext parseCtx = ctx.getParseCtx();
  UnionProcContext uCtx = parseCtx.getUCtx();
  // Map-only subqueries could be optimized in the future to avoid writing to a file
  Map<Operator<? extends OperatorDesc>, GenMapRedCtx> mapCurrCtx = ctx.getMapCurrCtx();
  if (union.getConf().isAllInputsInSameReducer()) {
    // All inputs of this UnionOperator are in the same reducer.
    // We do not need to break the operator tree.
    mapCurrCtx.put((Operator<? extends OperatorDesc>) nd,
        new GenMapRedCtx(ctx.getCurrTask(), ctx.getCurrAliasId()));
    return null;
  }
  UnionParseContext uPrsCtx = uCtx.getUnionParseContext(union);
  assert uPrsCtx != null;
  ctx.setCurrUnionOp(union);
  // The plan needs to be broken up only if one of the sub-queries involves a
  // map-reduce job
  if (uPrsCtx.allMapOnlySubQ()) {
    return processMapOnlyUnion(union, stack, ctx, uCtx);
  }
  Task<? extends Serializable> currTask = ctx.getCurrTask();
  int pos = UnionProcFactory.getPositionParent(union, stack);
  Task<? extends Serializable> uTask = null;
  MapredWork uPlan = null;
  GenMRUnionCtx uCtxTask = ctx.getUnionTask(union);
  if (uCtxTask == null) {
    // the union is encountered for the first time
    uPlan = GenMapRedUtils.getMapRedWork(parseCtx);
    uTask = TaskFactory.get(uPlan, parseCtx.getConf());
    uCtxTask = new GenMRUnionCtx(uTask);
    ctx.setUnionTask(union, uCtxTask);
  } else {
    uTask = uCtxTask.getUTask();
  }
  if (uPrsCtx.getMapOnlySubq(pos) && uPrsCtx.getRootTask(pos)) {
    // Copy into the current union task plan if the sub-query is map-only and
    // rooted at a top-level task
    processSubQueryUnionMerge(ctx, uCtxTask, union, stack);
    if (ctx.getRootTasks().contains(currTask)) {
      ctx.getRootTasks().remove(currTask);
    }
  } else {
    // If it is a map-reduce job, create a temporary file between the sub-query
    // and the union
    // is the current task a root task?
    if (shouldBeRootTask(currTask) && !ctx.getRootTasks().contains(currTask)
        && (currTask.getParentTasks() == null || currTask.getParentTasks().isEmpty())) {
      ctx.getRootTasks().add(currTask);
    }
    processSubQueryUnionCreateIntermediate(union.getParentOperators().get(pos), union, uTask,
        ctx, uCtxTask);
    // the current alias id and top operator are no longer valid
    ctx.setCurrAliasId(null);
    ctx.setCurrTopOp(null);
    ctx.getOpTaskMap().put(null, uTask);
  }
  ctx.setCurrTask(uTask);
  mapCurrCtx.put((Operator<? extends OperatorDesc>) nd, new GenMapRedCtx(ctx.getCurrTask(), null));
  return true;
}
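The branch decision in the middle of the method can be summarized as: a sub-query that is map-only and rooted at a top-level task is merged directly into the union task; any other sub-query is cut off with an intermediate file. A hypothetical one-line condensation of that predicate, using only UnionParseContext methods that appear in the snippet (the helper name is illustrative, not Hive code):

import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext.UnionParseContext;

// Hypothetical helper: true when the sub-query feeding the union at position pos can be
// merged into the union task without writing an intermediate file.
private static boolean canMergeIntoUnionTask(UnionParseContext uPrsCtx, int pos) {
  return uPrsCtx.getMapOnlySubq(pos) && uPrsCtx.getRootTask(pos);
}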
use of org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx in project hive by apache.
the class GenMROperator method process.
/**
 * Generic operator encountered (no specific rule fired): propagate the
 * parent's GenMapRedCtx to the current operator.
 *
 * @param nd
 *          the operator encountered
 * @param procCtx
 *          context
 */
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
    Object... nodeOutputs) throws SemanticException {
  GenMRProcContext ctx = (GenMRProcContext) procCtx;
  Map<Operator<? extends OperatorDesc>, GenMapRedCtx> mapCurrCtx = ctx.getMapCurrCtx();
  // look up the context recorded for the parent operator (one below the top of the stack)
  GenMapRedCtx mapredCtx = mapCurrCtx.get(stack.get(stack.size() - 2));
  mapCurrCtx.put((Operator<? extends OperatorDesc>) nd,
      new GenMapRedCtx(mapredCtx.getCurrTask(), mapredCtx.getCurrAliasId()));
  return true;
}
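For context, the snippets on this page are NodeProcessors that a rule-based dispatcher applies while walking the operator DAG, with GenMROperator acting as the default processor when no rule matches. The following is a rough sketch of that wiring, not the verbatim MapReduceCompiler code: the method name, rule names, and regex patterns are illustrative; the lib class names follow older Hive releases (they were renamed in Hive 4); and a DefaultGraphWalker is used here for simplicity where MapReduceCompiler has its own walker.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.exec.UnionOperator;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
import org.apache.hadoop.hive.ql.lib.Dispatcher;
import org.apache.hadoop.hive.ql.lib.GraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lib.NodeProcessor;
import org.apache.hadoop.hive.ql.lib.Rule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.optimizer.GenMROperator;
import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext;
import org.apache.hadoop.hive.ql.optimizer.GenMRRedSink3;
import org.apache.hadoop.hive.ql.optimizer.GenMRTableScan1;
import org.apache.hadoop.hive.ql.optimizer.GenMRUnion1;
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;

// Illustrative sketch: map operator patterns to the processors shown on this page,
// fall back to GenMROperator for everything else, and walk the plan from the top operators.
private void walkGenMapRedOperatorTree(ParseContext parseCtx, GenMRProcContext procCtx)
    throws SemanticException {
  Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
  opRules.put(new RuleRegExp("R1", TableScanOperator.getOperatorName() + "%"),
      new GenMRTableScan1());
  opRules.put(new RuleRegExp("R2", UnionOperator.getOperatorName() + "%"),
      new GenMRUnion1());
  opRules.put(new RuleRegExp("R3",
      UnionOperator.getOperatorName() + "%.*" + ReduceSinkOperator.getOperatorName() + "%"),
      new GenMRRedSink3());

  // GenMROperator handles any operator that matches no rule: it simply copies the
  // parent's GenMapRedCtx forward (see its process method above).
  Dispatcher disp = new DefaultRuleDispatcher(new GenMROperator(), opRules, procCtx);
  GraphWalker walker = new DefaultGraphWalker(disp);

  ArrayList<Node> topNodes = new ArrayList<Node>(parseCtx.getTopOps().values());
  walker.startWalking(topNodes, null);
}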
use of org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx in project hive by apache.
the class GenMRTableScan1 method process.
/**
 * Table Scan encountered.
 * @param nd
 *          the table scan operator encountered
 * @param opProcCtx
 *          context
 */
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx opProcCtx,
    Object... nodeOutputs) throws SemanticException {
  TableScanOperator op = (TableScanOperator) nd;
  GenMRProcContext ctx = (GenMRProcContext) opProcCtx;
  ParseContext parseCtx = ctx.getParseCtx();
  Class<? extends InputFormat> inputFormat = op.getConf().getTableMetadata().getInputFormatClass();
  Map<Operator<? extends OperatorDesc>, GenMapRedCtx> mapCurrCtx = ctx.getMapCurrCtx();
  // create a dummy MapReduce task
  MapredWork currWork = GenMapRedUtils.getMapRedWork(parseCtx);
  MapRedTask currTask = (MapRedTask) TaskFactory.get(currWork, parseCtx.getConf());
  ctx.setCurrTask(currTask);
  ctx.setCurrTopOp(op);
  for (String alias : parseCtx.getTopOps().keySet()) {
    Operator<? extends OperatorDesc> currOp = parseCtx.getTopOps().get(alias);
    if (currOp == op) {
      String currAliasId = alias;
      ctx.setCurrAliasId(currAliasId);
      mapCurrCtx.put(op, new GenMapRedCtx(currTask, currAliasId));
      if (parseCtx.getQueryProperties().isAnalyzeCommand()) {
        boolean partialScan = parseCtx.getQueryProperties().isPartialScanAnalyzeCommand();
        boolean noScan = parseCtx.getQueryProperties().isNoScanAnalyzeCommand();
        if (OrcInputFormat.class.isAssignableFrom(inputFormat)
            || MapredParquetInputFormat.class.isAssignableFrom(inputFormat)) {
          // For ORC and Parquet, all of the following statements are equivalent:
          // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS;
          // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS partialscan;
          // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS noscan;
          // There will not be any MR or Tez job above this task
          StatsNoJobWork snjWork = new StatsNoJobWork(op.getConf().getTableMetadata().getTableSpec());
          snjWork.setStatsReliable(parseCtx.getConf().getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE));
          // If partitions are specified, get the pruned partition list
          Set<Partition> confirmedParts = GenMapRedUtils.getConfirmedPartitionsForScan(op);
          if (confirmedParts.size() > 0) {
            Table source = op.getConf().getTableMetadata();
            List<String> partCols = GenMapRedUtils.getPartitionColumns(op);
            PrunedPartitionList partList = new PrunedPartitionList(source, confirmedParts, partCols, false);
            snjWork.setPrunedPartitionList(partList);
          }
          Task<StatsNoJobWork> snjTask = TaskFactory.get(snjWork, parseCtx.getConf());
          ctx.setCurrTask(snjTask);
          ctx.setCurrTopOp(null);
          ctx.getRootTasks().clear();
          ctx.getRootTasks().add(snjTask);
        } else {
          // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS;
          // The plan consists of a simple MapRedTask followed by a StatsTask.
          // The MR task is just a simple TableScanOperator
          StatsWork statsWork = new StatsWork(op.getConf().getTableMetadata().getTableSpec());
          statsWork.setAggKey(op.getConf().getStatsAggPrefix());
          statsWork.setStatsTmpDir(op.getConf().getTmpStatsDir());
          statsWork.setSourceTask(currTask);
          statsWork.setStatsReliable(parseCtx.getConf().getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE));
          Task<StatsWork> statsTask = TaskFactory.get(statsWork, parseCtx.getConf());
          currTask.addDependentTask(statsTask);
          if (!ctx.getRootTasks().contains(currTask)) {
            ctx.getRootTasks().add(currTask);
          }
          // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS noscan;
          // The plan consists of a StatsTask only.
          if (noScan) {
            statsTask.setParentTasks(null);
            statsWork.setNoScanAnalyzeCommand(true);
            ctx.getRootTasks().remove(currTask);
            ctx.getRootTasks().add(statsTask);
          }
          // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS partialscan;
          if (partialScan) {
            handlePartialScanCommand(op, ctx, parseCtx, currTask, statsWork, statsTask);
          }
          currWork.getMapWork().setGatheringStats(true);
          if (currWork.getReduceWork() != null) {
            currWork.getReduceWork().setGatheringStats(true);
          }
          // NOTE: here we should use the new partition predicate pushdown API to get
          // the pruned partition list, and pass it to setTaskPlan as the last parameter
          Set<Partition> confirmedPartns = GenMapRedUtils.getConfirmedPartitionsForScan(op);
          if (confirmedPartns.size() > 0) {
            Table source = op.getConf().getTableMetadata();
            List<String> partCols = GenMapRedUtils.getPartitionColumns(op);
            PrunedPartitionList partList = new PrunedPartitionList(source, confirmedPartns, partCols, false);
            GenMapRedUtils.setTaskPlan(currAliasId, op, currTask, false, ctx, partList);
          } else {
            // non-partitioned table
            GenMapRedUtils.setTaskPlan(currAliasId, op, currTask, false, ctx);
          }
        }
      }
      return true;
    }
  }
  assert false;
  return null;
}
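For the generic ANALYZE path above, the snippet builds a two-task chain: the MapRedTask that scans the table, followed by a dependent StatsTask, with only the scan task registered as a root. Below is a compressed sketch of that chaining as a hypothetical helper; it is not Hive code, assumes an older Hive release where StatsWork and Task<? extends Serializable> are used as shown, and relies only on calls that appear in the snippet.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.exec.mr.MapRedTask;
import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext;
import org.apache.hadoop.hive.ql.plan.StatsWork;

// Hypothetical helper: create the StatsTask, hang it off the scan task, and make sure the
// scan task (not the stats task) is a root of the plan.
private static Task<StatsWork> chainStatsTask(GenMRProcContext ctx, MapRedTask scanTask,
    StatsWork statsWork, HiveConf conf) {
  Task<StatsWork> statsTask = TaskFactory.get(statsWork, conf);
  scanTask.addDependentTask(statsTask);
  if (!ctx.getRootTasks().contains(scanTask)) {
    ctx.getRootTasks().add(scanTask);
  }
  return statsTask;
}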