Use of org.apache.hadoop.hive.ql.exec.MapJoinOperator in project hive by apache.
The class ConvertJoinMapJoin, method convertJoinBucketMapJoin.
private boolean convertJoinBucketMapJoin(JoinOperator joinOp, OptimizeTezProcContext context, int bigTablePosition, TezBucketJoinProcCtx tezBucketJoinProcCtx) throws SemanticException {
  if (!checkConvertJoinBucketMapJoin(joinOp, bigTablePosition, tezBucketJoinProcCtx)) {
    LOG.info("Check conversion to bucket map join failed.");
    return false;
  }
  // In case the join has extra keys other than bucketed columns, partition keys need to be
  // updated on the small table(s).
  ReduceSinkOperator bigTableRS = (ReduceSinkOperator) joinOp.getParentOperators().get(bigTablePosition);
  OpTraits opTraits = bigTableRS.getOpTraits();
  List<List<String>> listBucketCols = opTraits.getBucketColNames();
  ArrayList<ExprNodeDesc> bigTablePartitionCols = bigTableRS.getConf().getPartitionCols();
  boolean updatePartitionCols = false;
  List<Integer> positions = new ArrayList<>();
  if (listBucketCols.get(0).size() != bigTablePartitionCols.size()) {
    updatePartitionCols = true;
    // Prepare updated partition columns for the small table(s):
    // get the positions of the bucketed columns.
    int i = 0;
    Map<String, ExprNodeDesc> colExprMap = bigTableRS.getColumnExprMap();
    for (ExprNodeDesc bigTableExpr : bigTablePartitionCols) {
      // It is guaranteed there is only 1 list within listBucketCols.
      for (String colName : listBucketCols.get(0)) {
        if (colExprMap.get(colName).isSame(bigTableExpr)) {
          positions.add(i++);
        }
      }
    }
  }
  MapJoinOperator mapJoinOp = convertJoinMapJoin(joinOp, context, bigTablePosition, true);
  if (mapJoinOp == null) {
    LOG.debug("Conversion to bucket map join failed.");
    return false;
  }
  MapJoinDesc joinDesc = mapJoinOp.getConf();
  joinDesc.setBucketMapJoin(true);
  // We can now set the traits for this join operator.
  opTraits = new OpTraits(joinOp.getOpTraits().getBucketColNames(), tezBucketJoinProcCtx.getNumBuckets(), null, joinOp.getOpTraits().getNumReduceSinks());
  mapJoinOp.setOpTraits(opTraits);
  mapJoinOp.setStatistics(joinOp.getStatistics());
  setNumberOfBucketsOnChildren(mapJoinOp);
  // Once the conversion is done, we can set the partitioner to bucket cols on the small table.
  Map<String, Integer> bigTableBucketNumMapping = new HashMap<String, Integer>();
  bigTableBucketNumMapping.put(joinDesc.getBigTableAlias(), tezBucketJoinProcCtx.getNumBuckets());
  joinDesc.setBigTableBucketNumMapping(bigTableBucketNumMapping);
  // Update the partition columns on the small table side to ensure correct routing of the
  // hash tables.
  if (updatePartitionCols) {
    for (Operator<?> op : mapJoinOp.getParentOperators()) {
      if (!(op instanceof ReduceSinkOperator)) {
        continue;
      }
      ReduceSinkOperator rsOp = (ReduceSinkOperator) op;
      ArrayList<ExprNodeDesc> newPartitionCols = new ArrayList<>();
      ArrayList<ExprNodeDesc> partitionCols = rsOp.getConf().getPartitionCols();
      for (Integer position : positions) {
        newPartitionCols.add(partitionCols.get(position));
      }
      rsOp.getConf().setPartitionCols(newPartitionCols);
    }
  }
  // Update the memory monitor info for LLAP.
  MemoryMonitorInfo memoryMonitorInfo = joinDesc.getMemoryMonitorInfo();
  if (memoryMonitorInfo.isLlap()) {
    memoryMonitorInfo.setHashTableInflationFactor(1);
    memoryMonitorInfo.setMemoryOverSubscriptionFactor(0);
  }
  return true;
}
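A note on the partition-column rewrite above: the positions recorded while scanning the big table's partition columns are later used to trim each small-table ReduceSink's partition columns down to the bucketed ones. Below is a minimal, self-contained sketch of just that projection step, using plain strings in place of ExprNodeDesc; the class and names are hypothetical, not Hive API.

import java.util.ArrayList;
import java.util.List;

// Hypothetical illustration: project a partition-column list down to the
// recorded bucket-column positions, as the loop over `positions` does above.
public class PartitionColProjectionSketch {

  static List<String> project(List<String> partitionCols, List<Integer> positions) {
    List<String> projected = new ArrayList<>();
    for (int pos : positions) {
      projected.add(partitionCols.get(pos));
    }
    return projected;
  }

  public static void main(String[] args) {
    // Suppose the join keys carry an extra, non-bucketed column "ds".
    List<String> partitionCols = List.of("userid", "ds", "itemid");
    // Positions 0 and 2 were recorded as bucketed columns.
    List<Integer> positions = List.of(0, 2);
    // Prints [userid, itemid]: the small table now partitions on bucket
    // columns only, so its hash tables route to the big table's buckets.
    System.out.println(project(partitionCols, positions));
  }
}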
Use of org.apache.hadoop.hive.ql.exec.MapJoinOperator in project hive by apache.
The class ConvertJoinMapJoin, method convertJoinDynamicPartitionedHashJoin.
private boolean convertJoinDynamicPartitionedHashJoin(JoinOperator joinOp, OptimizeTezProcContext context, final long maxSize) throws SemanticException {
  // Attempt dynamic partitioned hash join
  // Since we don't have big table index yet, must start with estimate of numReducers
  int numReducers = estimateNumBuckets(joinOp, false);
  LOG.info("Try dynamic partitioned hash join with estimated " + numReducers + " reducers");
  int bigTablePos = getMapJoinConversionPos(joinOp, context, numReducers, false, maxSize, false);
  if (bigTablePos >= 0) {
    // Now that we have the big table index, get real numReducers value based on big table RS
    ReduceSinkOperator bigTableParentRS = (ReduceSinkOperator) (joinOp.getParentOperators().get(bigTablePos));
    numReducers = bigTableParentRS.getConf().getNumReducers();
    LOG.debug("Real big table reducers = " + numReducers);
    MapJoinOperator mapJoinOp = convertJoinMapJoin(joinOp, context, bigTablePos, false);
    if (mapJoinOp != null) {
      LOG.info("Selected dynamic partitioned hash join");
      mapJoinOp.getConf().setDynamicPartitionHashJoin(true);
      // Set OpTraits for dynamically partitioned hash join:
      // bucketColNames: Re-use previous joinOp's bucketColNames. Parent operators should be
      // reduce sink, which should have bucket columns based on the join keys.
      // numBuckets: set to number of reducers
      // sortCols: This is an unsorted join - no sort cols
      OpTraits opTraits = new OpTraits(joinOp.getOpTraits().getBucketColNames(), numReducers, null, joinOp.getOpTraits().getNumReduceSinks());
      mapJoinOp.setOpTraits(opTraits);
      mapJoinOp.setStatistics(joinOp.getStatistics());
      // propagate this change till the next RS
      for (Operator<? extends OperatorDesc> childOp : mapJoinOp.getChildOperators()) {
        setAllChildrenTraits(childOp, mapJoinOp.getOpTraits());
      }
      return true;
    }
  }
  return false;
}
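The final loop ("propagate this change till the next RS") pushes the new traits down through the child operators. A rough, self-contained sketch of that propagation pattern follows; the Op class and names are hypothetical stand-ins, not the Hive Operator API, and it assumes, as the comment suggests, that propagation stops at the next reduce sink because the following stage derives its own traits.

import java.util.ArrayList;
import java.util.List;

// Hypothetical illustration of trait propagation down an operator chain.
public class TraitPropagationSketch {

  static class Op {
    final String name;
    final List<Op> children = new ArrayList<>();
    int numBuckets = -1; // -1 means "traits not set"
    Op(String name) { this.name = name; }
  }

  // Push the bucket count down until a reduce sink boundary is reached.
  static void propagate(Op op, int numBuckets) {
    if (op.name.startsWith("RS")) {
      return; // a reduce sink starts a new stage; do not cross it
    }
    op.numBuckets = numBuckets;
    for (Op child : op.children) {
      propagate(child, numBuckets);
    }
  }

  public static void main(String[] args) {
    Op sel = new Op("SEL");
    Op fil = new Op("FIL");
    Op rs = new Op("RS");
    sel.children.add(fil);
    fil.children.add(rs);
    propagate(sel, 32);
    // Prints "32 32 -1": SEL and FIL carry the reducer count, RS is untouched.
    System.out.println(sel.numBuckets + " " + fil.numBuckets + " " + rs.numBuckets);
  }
}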
Use of org.apache.hadoop.hive.ql.exec.MapJoinOperator in project hive by apache.
The class MapJoinProcessor, method genMapJoinLocalWork.
/**
 * Generate the MapRed Local Work for the given map-join operator.
 *
 * @param newWork
 * @param mapJoinOp
 *          map-join operator for which local work needs to be generated.
 * @param bigTablePos
 * @throws SemanticException
 */
private static void genMapJoinLocalWork(MapredWork newWork, MapJoinOperator mapJoinOp, int bigTablePos) throws SemanticException {
  // Keep the small table aliases to avoid a concurrent modification exception.
  ArrayList<String> smallTableAliasList = new ArrayList<String>();
  // Create a new MapredLocalWork.
  MapredLocalWork newLocalWork = new MapredLocalWork(new LinkedHashMap<String, Operator<? extends OperatorDesc>>(), new LinkedHashMap<String, FetchWork>());
  for (Map.Entry<String, Operator<? extends OperatorDesc>> entry : newWork.getMapWork().getAliasToWork().entrySet()) {
    String alias = entry.getKey();
    Operator<? extends OperatorDesc> op = entry.getValue();
    // If the table scan is for the big table, skip it:
    // trace down the operator tree from the table scan operator.
    Operator<? extends OperatorDesc> parentOp = op;
    Operator<? extends OperatorDesc> childOp = op.getChildOperators().get(0);
    while ((childOp != null) && (!childOp.equals(mapJoinOp))) {
      parentOp = childOp;
      assert parentOp.getChildOperators().size() == 1;
      childOp = parentOp.getChildOperators().get(0);
    }
    if (childOp == null) {
      throw new SemanticException("Cannot find join op by tracing down the table scan operator tree");
    }
    // Skip the big table position.
    int i = childOp.getParentOperators().indexOf(parentOp);
    if (i == bigTablePos) {
      continue;
    }
    // Set alias to work and record it in smallTableAliasList.
    newLocalWork.getAliasToWork().put(alias, op);
    smallTableAliasList.add(alias);
    // Get the input paths and remove this alias from pathToAliases,
    // because these files will be fetched by the fetch operator.
    LinkedHashMap<Path, ArrayList<String>> pathToAliases = newWork.getMapWork().getPathToAliases();
    // Keep a record of all the input paths for this alias.
    HashSet<Path> pathSet = new HashSet<>();
    HashSet<Path> emptyPath = new HashSet<>();
    for (Map.Entry<Path, ArrayList<String>> entry2 : pathToAliases.entrySet()) {
      Path path = entry2.getKey();
      ArrayList<String> list = entry2.getValue();
      if (list.contains(alias)) {
        // Add to the path set.
        pathSet.add(path);
        // Remove this alias from the alias list.
        list.remove(alias);
        if (list.size() == 0) {
          emptyPath.add(path);
        }
      }
    }
    // Remove the paths with which no alias is associated.
    for (Path path : emptyPath) {
      newWork.getMapWork().removePathToAlias(path);
    }
    // Create the fetch work.
    FetchWork fetchWork = null;
    List<Path> partDir = new ArrayList<Path>();
    List<PartitionDesc> partDesc = new ArrayList<PartitionDesc>();
    for (Path tablePath : pathSet) {
      PartitionDesc partitionDesc = newWork.getMapWork().getPathToPartitionInfo().get(tablePath);
      // Create the FetchWork for a non-partitioned table.
      if (partitionDesc.getPartSpec() == null || partitionDesc.getPartSpec().size() == 0) {
        fetchWork = new FetchWork(tablePath, partitionDesc.getTableDesc());
        break;
      }
      // If the table is partitioned, add partDir and partitionDesc.
      partDir.add(tablePath);
      partDesc.add(partitionDesc);
    }
    // Create the FetchWork for a partitioned table.
    if (fetchWork == null) {
      TableDesc table = newWork.getMapWork().getAliasToPartnInfo().get(alias).getTableDesc();
      fetchWork = new FetchWork(partDir, partDesc, table);
    }
    // Set alias to fetch work.
    newLocalWork.getAliasToFetchWork().put(alias, fetchWork);
  }
  // Remove the small table aliases from aliasToWork; avoid concurrent modification.
  for (String alias : smallTableAliasList) {
    newWork.getMapWork().getAliasToWork().remove(alias);
  }
  // Set up the local work.
  newWork.getMapWork().setMapRedLocalWork(newLocalWork);
  // Remove the reducer.
  newWork.setReduceWork(null);
}
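The pathToAliases bookkeeping in the middle of this method is the easiest part to get wrong: the small-table alias is removed from every path entry, and any path left with no aliases is collected first and only removed after iteration, to avoid a ConcurrentModificationException. Here is a minimal sketch of that pruning with plain maps; the names are hypothetical, not the MapWork API.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical illustration of the alias/path pruning pattern above.
public class PathPruningSketch {

  static void pruneAlias(Map<String, List<String>> pathToAliases, String alias) {
    List<String> emptyPaths = new ArrayList<>();
    for (Map.Entry<String, List<String>> e : pathToAliases.entrySet()) {
      List<String> aliases = e.getValue();
      // Mutating the value list is safe; removing map keys mid-iteration is not.
      if (aliases.remove(alias) && aliases.isEmpty()) {
        emptyPaths.add(e.getKey());
      }
    }
    for (String path : emptyPaths) {
      pathToAliases.remove(path); // now safe: iteration is finished
    }
  }

  public static void main(String[] args) {
    Map<String, List<String>> pathToAliases = new HashMap<>();
    pathToAliases.put("/warehouse/small", new ArrayList<>(List.of("s")));
    pathToAliases.put("/warehouse/shared", new ArrayList<>(List.of("s", "b")));
    pruneAlias(pathToAliases, "s");
    // Prints {/warehouse/shared=[b]}: the small table's dedicated path is gone,
    // the shared path survives because the big table alias still needs it.
    System.out.println(pathToAliases);
  }
}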
Use of org.apache.hadoop.hive.ql.exec.MapJoinOperator in project hive by apache.
The class MapJoinProcessor, method transform.
/**
 * Transform the query tree. For each join, check if it is a map-side join (user specified). If
 * yes, convert it to a map-side join.
 *
 * @param pactx
 *          current parse context
 */
@Override
public ParseContext transform(ParseContext pactx) throws SemanticException {
  List<MapJoinOperator> listMapJoinOps = new ArrayList<MapJoinOperator>();
  // traverse all the joins and convert them if necessary
  if (pactx.getJoinOps() != null) {
    Set<JoinOperator> joinMap = new HashSet<JoinOperator>();
    Set<MapJoinOperator> mapJoinMap = pactx.getMapJoinOps();
    if (mapJoinMap == null) {
      mapJoinMap = new HashSet<MapJoinOperator>();
      pactx.setMapJoinOps(mapJoinMap);
    }
    Iterator<JoinOperator> joinCtxIter = pactx.getJoinOps().iterator();
    while (joinCtxIter.hasNext()) {
      JoinOperator joinOp = joinCtxIter.next();
      int mapJoinPos = mapSideJoin(joinOp);
      if (mapJoinPos >= 0) {
        MapJoinOperator mapJoinOp = generateMapJoinOperator(pactx, joinOp, mapJoinPos);
        listMapJoinOps.add(mapJoinOp);
        mapJoinOp.getConf().setQBJoinTreeProps(joinOp.getConf());
        mapJoinMap.add(mapJoinOp);
      } else {
        joinOp.getConf().setQBJoinTreeProps(joinOp.getConf());
        joinMap.add(joinOp);
      }
    }
    // store the new joinContext
    pactx.setJoinOps(joinMap);
  }
  // Go over the list and find if a reducer is not needed
  List<AbstractMapJoinOperator<? extends MapJoinDesc>> listMapJoinOpsNoRed = new ArrayList<AbstractMapJoinOperator<? extends MapJoinDesc>>();
  // create a walker which walks the tree in a DFS manner while maintaining
  // the operator stack.
  // The dispatcher generates the plan from the operator tree
  Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
  opRules.put(new RuleRegExp("R0", MapJoinOperator.getOperatorName() + "%"), getCurrentMapJoin());
  opRules.put(new RuleRegExp("R1", MapJoinOperator.getOperatorName() + "%.*" + FileSinkOperator.getOperatorName() + "%"), getMapJoinFS());
  opRules.put(new RuleRegExp("R2", MapJoinOperator.getOperatorName() + "%.*" + ReduceSinkOperator.getOperatorName() + "%"), getMapJoinDefault());
  opRules.put(new RuleRegExp("R4", MapJoinOperator.getOperatorName() + "%.*" + UnionOperator.getOperatorName() + "%"), getMapJoinDefault());
  // The dispatcher fires the processor corresponding to the closest matching
  // rule and passes the context along
  Dispatcher disp = new DefaultRuleDispatcher(getDefault(), opRules, new MapJoinWalkerCtx(listMapJoinOpsNoRed, pactx));
  GraphWalker ogw = new GenMapRedWalker(disp);
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.addAll(listMapJoinOps);
  ogw.startWalking(topNodes, null);
  pactx.setListMapJoinOpsNoReducer(listMapJoinOpsNoRed);
  return pactx;
}
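The rule table above keys processors by regular expressions over operator-name paths, and the dispatcher fires the processor of the closest matching rule as the walker descends. The simplified, self-contained sketch below shows the shape of that mechanism; it takes the first match rather than the closest, and all names are illustrative, not Hive's Rule/Dispatcher API.

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.Consumer;
import java.util.regex.Pattern;

// Hypothetical illustration of regex-keyed rule dispatch over an operator stack.
public class RuleDispatchSketch {

  public static void main(String[] args) {
    // Each rule maps an operator-path pattern to a processor.
    Map<Pattern, Consumer<String>> rules = new LinkedHashMap<>();
    rules.put(Pattern.compile("MAPJOIN%.*FS%"), s -> System.out.println("R1: map join above a file sink"));
    rules.put(Pattern.compile("MAPJOIN%.*RS%"), s -> System.out.println("R2: map join above a reduce sink"));
    rules.put(Pattern.compile("MAPJOIN%"), s -> System.out.println("R0: bare map join"));

    // Walking MAPJOIN -> SEL -> FS renders the stack as a name string.
    String stack = "MAPJOIN%SEL%FS%";
    for (Map.Entry<Pattern, Consumer<String>> rule : rules.entrySet()) {
      if (rule.getKey().matcher(stack).find()) {
        rule.getValue().accept(stack); // prints "R1: map join above a file sink"
        break; // simplification: Hive's dispatcher selects the closest match
      }
    }
  }
}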
Use of org.apache.hadoop.hive.ql.exec.MapJoinOperator in project hive by apache.
The class MapJoinProcessor, method convertMapJoin.
/**
 * Convert a regular join to a map-side join.
 *
 * @param conf
 * @param op
 *          join operator
 * @param leftInputJoin
 * @param baseSrc
 * @param mapAliases
 * @param mapJoinPos
 *          position of the source to be read as part of the map-reduce framework; all other
 *          sources are cached in memory.
 * @param noCheckOuterJoin
 * @param validateMapJoinTree
 */
public MapJoinOperator convertMapJoin(HiveConf conf, JoinOperator op, boolean leftInputJoin, String[] baseSrc, List<String> mapAliases, int mapJoinPos, boolean noCheckOuterJoin, boolean validateMapJoinTree) throws SemanticException {
  // An outer join cannot be performed on a table which is being cached.
  JoinDesc desc = op.getConf();
  JoinCondDesc[] condns = desc.getConds();
  if (!noCheckOuterJoin) {
    if (checkMapJoin(mapJoinPos, condns) < 0) {
      throw new SemanticException(ErrorMsg.NO_OUTER_MAPJOIN.getMsg());
    }
  }
  // Walk over all the sources (which are guaranteed to be reduce sink
  // operators). The join outputs a concatenation of all the inputs.
  List<Operator<? extends OperatorDesc>> parentOps = op.getParentOperators();
  List<Operator<? extends OperatorDesc>> newParentOps = new ArrayList<Operator<? extends OperatorDesc>>();
  List<Operator<? extends OperatorDesc>> oldReduceSinkParentOps = new ArrayList<Operator<? extends OperatorDesc>>();
  // Found a source which is not to be stored in memory.
  if (leftInputJoin) {
    // assert mapJoinPos == 0;
    Operator<? extends OperatorDesc> parentOp = parentOps.get(0);
    assert parentOp.getParentOperators().size() == 1;
    Operator<? extends OperatorDesc> grandParentOp = parentOp.getParentOperators().get(0);
    oldReduceSinkParentOps.add(parentOp);
    newParentOps.add(grandParentOp);
  }
  byte pos = 0;
  // Remove the parent reduce-sink operators.
  for (String src : baseSrc) {
    if (src != null) {
      Operator<? extends OperatorDesc> parentOp = parentOps.get(pos);
      assert parentOp.getParentOperators().size() == 1;
      Operator<? extends OperatorDesc> grandParentOp = parentOp.getParentOperators().get(0);
      oldReduceSinkParentOps.add(parentOp);
      newParentOps.add(grandParentOp);
    }
    pos++;
  }
  // Create the map-join operator.
  MapJoinOperator mapJoinOp = convertJoinOpMapJoinOp(conf, op, leftInputJoin, baseSrc, mapAliases, mapJoinPos, noCheckOuterJoin);
  // Remove the old parents.
  for (pos = 0; pos < newParentOps.size(); pos++) {
    newParentOps.get(pos).replaceChild(oldReduceSinkParentOps.get(pos), mapJoinOp);
  }
  mapJoinOp.getParentOperators().removeAll(oldReduceSinkParentOps);
  mapJoinOp.setParentOperators(newParentOps);
  // Make sure only map-joins can be performed.
  if (validateMapJoinTree) {
    validateMapJoinTypes(mapJoinOp);
  }
  return mapJoinOp;
}
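The rewiring after convertJoinOpMapJoinOp is the heart of the conversion: each grandparent operator swaps its reduce-sink child for the new map-join operator, so the reduce sinks fall out of the plan entirely. A tiny sketch of that replaceChild pattern with a hypothetical Node class (not the Hive Operator API):

import java.util.ArrayList;
import java.util.List;

// Hypothetical illustration of splicing a new operator in place of old children.
public class RewireSketch {

  static class Node {
    final String name;
    final List<Node> children = new ArrayList<>();
    final List<Node> parents = new ArrayList<>();
    Node(String name) { this.name = name; }

    // Swap one child for another, preserving its position in the child list.
    void replaceChild(Node oldChild, Node newChild) {
      children.set(children.indexOf(oldChild), newChild);
    }
  }

  public static void main(String[] args) {
    Node bigScan = new Node("TS_big");
    Node smallScan = new Node("TS_small");
    Node rsBig = new Node("RS_0");
    Node rsSmall = new Node("RS_1");
    bigScan.children.add(rsBig);
    smallScan.children.add(rsSmall);

    Node mapJoin = new Node("MAPJOIN");
    // Grandparents adopt the map join in place of their reduce sinks.
    bigScan.replaceChild(rsBig, mapJoin);
    smallScan.replaceChild(rsSmall, mapJoin);
    mapJoin.parents.add(bigScan);
    mapJoin.parents.add(smallScan);

    // Prints "MAPJOIN, MAPJOIN": both scans now feed the join directly.
    System.out.println(bigScan.children.get(0).name + ", " + smallScan.children.get(0).name);
  }
}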