Usage example of org.apache.hadoop.hive.ql.optimizer.SortBucketJoinProcCtx in the Apache Hive project.
From the class SparkSMBJoinHintOptimizer, method process:
/**
 * Attempts to upgrade a bucket map join into a sort-merge bucket (SMB) map join.
 *
 * @param nd          the {@link MapJoinOperator} node being visited
 * @param stack       the operator stack leading to {@code nd}
 * @param procCtx     expected to be a {@link SortBucketJoinProcCtx}
 * @param nodeOutputs outputs of previously processed nodes
 * @return always {@code null}; conversion happens as a side effect on the plan
 * @throws SemanticException if conversion is impossible but the user has
 *         enforced SMB map joins via {@code HIVEENFORCESORTMERGEBUCKETMAPJOIN}
 */
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
  MapJoinOperator mapJoin = (MapJoinOperator) nd;
  SortBucketJoinProcCtx smbCtx = (SortBucketJoinProcCtx) procCtx;

  // Check every SMB prerequisite (bucketing, sort order, join keys, ...).
  boolean canConvert = canConvertBucketMapJoinToSMBJoin(mapJoin, stack, smbCtx, nodeOutputs);

  if (canConvert) {
    // Drop the small-table reduce sink first, then rewrite the operator tree.
    removeSmallTableReduceSink(mapJoin);
    convertBucketMapJoinToSMBJoin(mapJoin, smbCtx);
  } else if (pGraphContext.getConf().getBoolVar(HiveConf.ConfVars.HIVEENFORCESORTMERGEBUCKETMAPJOIN)) {
    // The user demanded an SMB map join but it cannot be performed: fail fast.
    throw new SemanticException(ErrorMsg.SORTMERGE_MAPJOIN_FAILED.getMsg());
  }
  return null;
}
Usage example of org.apache.hadoop.hive.ql.optimizer.SortBucketJoinProcCtx in the Apache Hive project.
From the class SparkJoinHintOptimizer, method process:
/**
 * Runs the map-join upgrade pipeline for Spark: first map join to bucket map
 * join, then bucket map join to sort-merge bucket (SMB) map join, each pass
 * gated by its own configuration flag.
 *
 * @param nd          the operator node being visited
 * @param stack       the operator stack leading to {@code nd}
 * @param procCtx     expected to be an {@link OptimizeSparkProcContext}
 * @param nodeOutputs outputs of previously processed nodes
 * @return always {@code null}; any conversion mutates the plan in place
 * @throws SemanticException propagated from the delegated optimizers
 */
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
  OptimizeSparkProcContext sparkCtx = (OptimizeSparkProcContext) procCtx;
  HiveConf conf = sparkCtx.getParseContext().getConf();

  boolean bucketEnabled = conf.getBoolVar(HiveConf.ConfVars.HIVEOPTBUCKETMAPJOIN);
  boolean smbEnabled = conf.getBoolVar(HiveConf.ConfVars.HIVEOPTSORTMERGEBUCKETMAPJOIN);

  // Pass 1: map join -> bucket map join. Also required as a stepping stone
  // when only the SMB flag is on.
  if (bucketEnabled || smbEnabled) {
    bucketMapJoinOptimizer.process(nd, stack, new BucketJoinProcCtx(conf), nodeOutputs);
  }
  // Pass 2: bucket map join -> sort-merge bucket map join.
  if (smbEnabled) {
    smbMapJoinOptimizer.process(nd, stack, new SortBucketJoinProcCtx(conf), nodeOutputs);
  }
  return null;
}
Usage example of org.apache.hadoop.hive.ql.optimizer.SortBucketJoinProcCtx in the Apache Hive project.
From the class SparkSortMergeJoinOptimizer, method process:
/**
 * Converts a common join into a sort-merge bucket (SMB) join when automatic
 * SMB conversion is enabled and all structural prerequisites hold.
 *
 * @param nd          the {@link JoinOperator} node being visited
 * @param stack       the operator stack leading to {@code nd}
 * @param procCtx     expected to be an {@link OptimizeSparkProcContext}
 * @param nodeOutputs outputs of previously processed nodes
 * @return the converted SMB join operator, or {@code null} when conversion
 *         is disabled or not possible
 * @throws SemanticException propagated from the conversion helpers
 */
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
  // Cast up front so a non-join node fails loudly, mirroring the walker contract.
  JoinOperator join = (JoinOperator) nd;
  HiveConf conf = ((OptimizeSparkProcContext) procCtx).getParseContext().getConf();

  // Respect the user's opt-out of automatic sort-merge join conversion.
  if (!conf.getBoolVar(HiveConf.ConfVars.HIVE_AUTO_SORTMERGE_JOIN)) {
    return null;
  }

  SortBucketJoinProcCtx smbCtx = new SortBucketJoinProcCtx(conf);
  return canConvertJoinToSMBJoin(join, smbCtx, pGraphContext, stack)
      ? convertJoinToSMBJoinAndReturn(join, smbCtx)
      : null;
}
Aggregations