Search in sources :

Example 1 with LlapClusterStateForCompile

use of org.apache.hadoop.hive.ql.optimizer.physical.LlapClusterStateForCompile in project hive by apache.

the class ConvertJoinMapJoin method process.

@Override
/*
 * (non-Javadoc) we should ideally not modify the tree we traverse. However,
 * since we need to walk the tree at any time when we modify the operator, we
 * might as well do it here.
 */
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
    OptimizeTezProcContext context = (OptimizeTezProcContext) procCtx;
    hashTableLoadFactor = context.conf.getFloatVar(ConfVars.HIVEHASHTABLELOADFACTOR);
    fastHashTableAvailable = context.conf.getBoolVar(ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_FAST_HASHTABLE_ENABLED);
    JoinOperator joinOp = (JoinOperator) nd;
    // adjust noconditional task size threshold for LLAP
    LlapClusterStateForCompile llapInfo = null;
    if ("llap".equalsIgnoreCase(context.conf.getVar(ConfVars.HIVE_EXECUTION_MODE))) {
        llapInfo = LlapClusterStateForCompile.getClusterInfo(context.conf);
        llapInfo.initClusterInfo();
    }
    MemoryMonitorInfo memoryMonitorInfo = getMemoryMonitorInfo(context.conf, llapInfo);
    joinOp.getConf().setMemoryMonitorInfo(memoryMonitorInfo);
    maxJoinMemory = memoryMonitorInfo.getAdjustedNoConditionalTaskSize();
    LOG.info("maxJoinMemory: {}", maxJoinMemory);
    hashMapDataStructure = HashMapDataStructureType.of(joinOp.getConf());
    TezBucketJoinProcCtx tezBucketJoinProcCtx = new TezBucketJoinProcCtx(context.conf);
    boolean hiveConvertJoin = context.conf.getBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN) & !context.parseContext.getDisableMapJoin();
    if (!hiveConvertJoin) {
        // we are just converting to a common merge join operator. The shuffle
        // join in map-reduce case.
        Object retval = checkAndConvertSMBJoin(context, joinOp, tezBucketJoinProcCtx);
        if (retval == null) {
            return retval;
        } else {
            fallbackToReduceSideJoin(joinOp, context);
            return null;
        }
    }
    // if we have traits, and table info is present in the traits, we know the
    // exact number of buckets. Else choose the largest number of estimated
    // reducers from the parent operators.
    int numBuckets = -1;
    if (context.conf.getBoolVar(HiveConf.ConfVars.HIVE_CONVERT_JOIN_BUCKET_MAPJOIN_TEZ)) {
        numBuckets = estimateNumBuckets(joinOp, true);
    } else {
        numBuckets = 1;
    }
    LOG.info("Estimated number of buckets " + numBuckets);
    MapJoinConversion mapJoinConversion = getMapJoinConversion(joinOp, context, numBuckets, false, maxJoinMemory, true);
    if (mapJoinConversion == null) {
        Object retval = checkAndConvertSMBJoin(context, joinOp, tezBucketJoinProcCtx);
        if (retval == null) {
            return retval;
        } else {
            // only case is full outer join with SMB enabled which is not possible. Convert to regular
            // join.
            fallbackToReduceSideJoin(joinOp, context);
            return null;
        }
    }
    if (numBuckets > 1) {
        if (context.conf.getBoolVar(HiveConf.ConfVars.HIVE_CONVERT_JOIN_BUCKET_MAPJOIN_TEZ)) {
            // Check if we are in LLAP, if so it needs to be determined if we should use BMJ or DPHJ
            if (llapInfo != null) {
                if (selectJoinForLlap(context, joinOp, tezBucketJoinProcCtx, llapInfo, mapJoinConversion, numBuckets)) {
                    return null;
                }
            } else if (convertJoinBucketMapJoin(joinOp, context, mapJoinConversion, tezBucketJoinProcCtx)) {
                return null;
            }
        }
    }
    // check if we can convert to map join no bucket scaling.
    LOG.info("Convert to non-bucketed map join");
    if (numBuckets != 1) {
        mapJoinConversion = getMapJoinConversion(joinOp, context, 1, false, maxJoinMemory, true);
    }
    if (mapJoinConversion == null) {
        // we are just converting to a common merge join operator. The shuffle
        // join in map-reduce case.
        fallbackToReduceSideJoin(joinOp, context);
        return null;
    }
    // Currently, this is a MJ path and we don't support FULL OUTER MapJoin yet.
    if (mapJoinConversion.getIsFullOuterJoin() && !mapJoinConversion.getIsFullOuterEnabledForMapJoin()) {
        fallbackToReduceSideJoin(joinOp, context);
        return null;
    }
    MapJoinOperator mapJoinOp = convertJoinMapJoin(joinOp, context, mapJoinConversion, true);
    if (mapJoinOp == null) {
        fallbackToReduceSideJoin(joinOp, context);
        return null;
    }
    // map join operator by default has no bucket cols and num of reduce sinks
    // reduced by 1
    mapJoinOp.setOpTraits(new OpTraits(null, -1, null, joinOp.getOpTraits().getNumReduceSinks()));
    preserveOperatorInfos(mapJoinOp, joinOp, context);
    // propagate this change till the next RS
    for (Operator<? extends OperatorDesc> childOp : mapJoinOp.getChildOperators()) {
        setAllChildrenTraits(childOp, mapJoinOp.getOpTraits());
    }
    return null;
}
Also used : CommonMergeJoinOperator(org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator) MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) CommonJoinOperator(org.apache.hadoop.hive.ql.exec.CommonJoinOperator) JoinOperator(org.apache.hadoop.hive.ql.exec.JoinOperator) MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) MemoryMonitorInfo(org.apache.hadoop.hive.ql.exec.MemoryMonitorInfo) LlapClusterStateForCompile(org.apache.hadoop.hive.ql.optimizer.physical.LlapClusterStateForCompile) OpTraits(org.apache.hadoop.hive.ql.plan.OpTraits) OptimizeTezProcContext(org.apache.hadoop.hive.ql.parse.OptimizeTezProcContext)
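
The LLAP-specific part of this example is the sizing step at the top of process: when hive.execution.mode is llap, the cached cluster state is fetched, initialized, and passed to getMemoryMonitorInfo, whose adjusted noconditional task size becomes the map-join memory budget. Below is a minimal sketch of just that step, assuming ConvertJoinMapJoin.getMemoryMonitorInfo is accessible (as the tests further down use it); the wrapper name resolveMaxJoinMemory is illustrative and not part of Hive.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.ql.exec.MemoryMonitorInfo;
import org.apache.hadoop.hive.ql.optimizer.ConvertJoinMapJoin;
import org.apache.hadoop.hive.ql.optimizer.physical.LlapClusterStateForCompile;

class LlapJoinMemorySketch {

    // Hypothetical helper restating the sizing pattern from Example 1.
    static long resolveMaxJoinMemory(HiveConf conf, ConvertJoinMapJoin converter) {
        LlapClusterStateForCompile llapInfo = null;
        if ("llap".equalsIgnoreCase(conf.getVar(ConfVars.HIVE_EXECUTION_MODE))) {
            // Cluster state comes from a per-configuration cache and its executor
            // information is (re)loaded before it is consulted.
            llapInfo = LlapClusterStateForCompile.getClusterInfo(conf);
            llapInfo.initClusterInfo();
        }
        // With llapInfo == null the plain noconditional task size is kept; with
        // cluster state present it is scaled by the LLAP oversubscription settings.
        MemoryMonitorInfo info = converter.getMemoryMonitorInfo(conf, llapInfo);
        return info.getAdjustedNoConditionalTaskSize();
    }
}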

Example 2 with LlapClusterStateForCompile

use of org.apache.hadoop.hive.ql.optimizer.physical.LlapClusterStateForCompile in project hive by apache.

the class TezCompiler method optimizeTaskPlan.

@Override
protected void optimizeTaskPlan(List<Task<? extends Serializable>> rootTasks, ParseContext pCtx, Context ctx) throws SemanticException {
    PerfLogger perfLogger = SessionState.getPerfLogger();
    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
    PhysicalContext physicalCtx = new PhysicalContext(conf, pCtx, pCtx.getContext(), rootTasks, pCtx.getFetchTask());
    if (conf.getBoolVar(HiveConf.ConfVars.HIVENULLSCANOPTIMIZE)) {
        physicalCtx = new NullScanOptimizer().resolve(physicalCtx);
    } else {
        LOG.debug("Skipping null scan query optimization");
    }
    if (conf.getBoolVar(HiveConf.ConfVars.HIVEMETADATAONLYQUERIES)) {
        physicalCtx = new MetadataOnlyOptimizer().resolve(physicalCtx);
    } else {
        LOG.debug("Skipping metadata only query optimization");
    }
    if (conf.getBoolVar(HiveConf.ConfVars.HIVE_CHECK_CROSS_PRODUCT)) {
        physicalCtx = new CrossProductHandler().resolve(physicalCtx);
    } else {
        LOG.debug("Skipping cross product analysis");
    }
    if ("llap".equalsIgnoreCase(conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_MODE))) {
        physicalCtx = new LlapPreVectorizationPass().resolve(physicalCtx);
    } else {
        LOG.debug("Skipping llap pre-vectorization pass");
    }
    if (conf.getBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED)) {
        physicalCtx = new Vectorizer().resolve(physicalCtx);
    } else {
        LOG.debug("Skipping vectorization");
    }
    if (!"none".equalsIgnoreCase(conf.getVar(HiveConf.ConfVars.HIVESTAGEIDREARRANGE))) {
        physicalCtx = new StageIDsRearranger().resolve(physicalCtx);
    } else {
        LOG.debug("Skipping stage id rearranger");
    }
    if ((conf.getBoolVar(HiveConf.ConfVars.HIVE_TEZ_ENABLE_MEMORY_MANAGER)) && (conf.getBoolVar(HiveConf.ConfVars.HIVEUSEHYBRIDGRACEHASHJOIN))) {
        physicalCtx = new MemoryDecider().resolve(physicalCtx);
    }
    if ("llap".equalsIgnoreCase(conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_MODE))) {
        LlapClusterStateForCompile llapInfo = LlapClusterStateForCompile.getClusterInfo(conf);
        physicalCtx = new LlapDecider(llapInfo).resolve(physicalCtx);
    } else {
        LOG.debug("Skipping llap decider");
    }
    // This optimizer will serialize all filters that made it to the
    // table scan operator to avoid having to do it multiple times on
    // the backend. If you have a physical optimization that changes
    // table scans or filters, you have to invoke it before this one.
    physicalCtx = new SerializeFilter().resolve(physicalCtx);
    if (physicalCtx.getContext().getExplainAnalyze() != null) {
        new AnnotateRunTimeStatsOptimizer().resolve(physicalCtx);
    }
    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "optimizeTaskPlan");
    return;
}
Also used : LlapDecider(org.apache.hadoop.hive.ql.optimizer.physical.LlapDecider) LlapClusterStateForCompile(org.apache.hadoop.hive.ql.optimizer.physical.LlapClusterStateForCompile) PerfLogger(org.apache.hadoop.hive.ql.log.PerfLogger) AnnotateRunTimeStatsOptimizer(org.apache.hadoop.hive.ql.optimizer.physical.AnnotateRunTimeStatsOptimizer) MemoryDecider(org.apache.hadoop.hive.ql.optimizer.physical.MemoryDecider) MetadataOnlyOptimizer(org.apache.hadoop.hive.ql.optimizer.physical.MetadataOnlyOptimizer) PhysicalContext(org.apache.hadoop.hive.ql.optimizer.physical.PhysicalContext) LlapPreVectorizationPass(org.apache.hadoop.hive.ql.optimizer.physical.LlapPreVectorizationPass) NullScanOptimizer(org.apache.hadoop.hive.ql.optimizer.physical.NullScanOptimizer) Vectorizer(org.apache.hadoop.hive.ql.optimizer.physical.Vectorizer) SerializeFilter(org.apache.hadoop.hive.ql.optimizer.physical.SerializeFilter) CrossProductHandler(org.apache.hadoop.hive.ql.optimizer.physical.CrossProductHandler) StageIDsRearranger(org.apache.hadoop.hive.ql.optimizer.physical.StageIDsRearranger)
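
Both TezCompiler examples (this one and Example 5 below) build the task plan through the same guarded-resolver shape: test a ConfVars flag or the execution mode, run the matching pass over the PhysicalContext, otherwise log that the pass is skipped. The sketch below only restates that shape; resolveIf is an illustrative helper, not part of Hive's API, and it assumes the passes implement the PhysicalPlanResolver interface as they do here.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hive.ql.optimizer.physical.PhysicalContext;
import org.apache.hadoop.hive.ql.optimizer.physical.PhysicalPlanResolver;
import org.apache.hadoop.hive.ql.parse.SemanticException;

class ResolverChainSketch {

    private static final Logger LOG = LoggerFactory.getLogger(ResolverChainSketch.class);

    // One guarded step of the chain, e.g.:
    // physicalCtx = resolveIf(conf.getBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED),
    //         new Vectorizer(), physicalCtx, "Skipping vectorization");
    static PhysicalContext resolveIf(boolean enabled, PhysicalPlanResolver resolver,
            PhysicalContext physicalCtx, String skipMessage) throws SemanticException {
        if (!enabled) {
            LOG.debug(skipMessage);
            return physicalCtx;
        }
        return resolver.resolve(physicalCtx);
    }
}

The ordering comment before SerializeFilter still applies to any such chain: passes that rewrite table scans or filters must run before the filters are serialized.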

Example 3 with LlapClusterStateForCompile

use of org.apache.hadoop.hive.ql.optimizer.physical.LlapClusterStateForCompile in project hive by apache.

the class TestOperators method testNoConditionalTaskSizeForLlap.

@Test
public void testNoConditionalTaskSizeForLlap() {
    ConvertJoinMapJoin convertJoinMapJoin = new ConvertJoinMapJoin();
    long defaultNoConditionalTaskSize = 1024L * 1024L * 1024L;
    HiveConf hiveConf = new HiveConf();
    hiveConf.setLongVar(HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD, defaultNoConditionalTaskSize);
    LlapClusterStateForCompile llapInfo = null;
    if ("llap".equalsIgnoreCase(hiveConf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_MODE))) {
        llapInfo = LlapClusterStateForCompile.getClusterInfo(hiveConf);
        llapInfo.initClusterInfo();
    }
    // execution mode not set, null is returned
    assertEquals(defaultNoConditionalTaskSize, convertJoinMapJoin.getMemoryMonitorInfo(hiveConf, llapInfo).getAdjustedNoConditionalTaskSize());
    hiveConf.set(HiveConf.ConfVars.HIVE_EXECUTION_MODE.varname, "llap");
    if ("llap".equalsIgnoreCase(hiveConf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_MODE))) {
        llapInfo = LlapClusterStateForCompile.getClusterInfo(hiveConf);
        llapInfo.initClusterInfo();
    }
    // default executors is 4, max slots is 3. so 3 * 20% of noconditional task size will be oversubscribed
    hiveConf.set(HiveConf.ConfVars.LLAP_MAPJOIN_MEMORY_OVERSUBSCRIBE_FACTOR.varname, "0.2");
    hiveConf.set(HiveConf.ConfVars.LLAP_MEMORY_OVERSUBSCRIPTION_MAX_EXECUTORS_PER_QUERY.varname, "3");
    double fraction = hiveConf.getFloatVar(HiveConf.ConfVars.LLAP_MAPJOIN_MEMORY_OVERSUBSCRIBE_FACTOR);
    int maxSlots = 3;
    long expectedSize = (long) (defaultNoConditionalTaskSize + (defaultNoConditionalTaskSize * fraction * maxSlots));
    assertEquals(expectedSize, convertJoinMapJoin.getMemoryMonitorInfo(hiveConf, llapInfo).getAdjustedNoConditionalTaskSize());
    // num executors is less than max executors per query (which is not expected case), default executors will be
    // chosen. 4 * 20% of noconditional task size will be oversubscribed
    int chosenSlots = hiveConf.getIntVar(HiveConf.ConfVars.LLAP_DAEMON_NUM_EXECUTORS);
    hiveConf.set(HiveConf.ConfVars.LLAP_MEMORY_OVERSUBSCRIPTION_MAX_EXECUTORS_PER_QUERY.varname, "5");
    expectedSize = (long) (defaultNoConditionalTaskSize + (defaultNoConditionalTaskSize * fraction * chosenSlots));
    assertEquals(expectedSize, convertJoinMapJoin.getMemoryMonitorInfo(hiveConf, llapInfo).getAdjustedNoConditionalTaskSize());
    // disable memory checking
    hiveConf.set(HiveConf.ConfVars.LLAP_MAPJOIN_MEMORY_MONITOR_CHECK_INTERVAL.varname, "0");
    assertFalse(convertJoinMapJoin.getMemoryMonitorInfo(hiveConf, llapInfo).doMemoryMonitoring());
    // invalid inflation factor
    hiveConf.set(HiveConf.ConfVars.LLAP_MAPJOIN_MEMORY_MONITOR_CHECK_INTERVAL.varname, "10000");
    hiveConf.set(HiveConf.ConfVars.HIVE_HASH_TABLE_INFLATION_FACTOR.varname, "0.0f");
    assertFalse(convertJoinMapJoin.getMemoryMonitorInfo(hiveConf, llapInfo).doMemoryMonitoring());
}
Also used : ConvertJoinMapJoin(org.apache.hadoop.hive.ql.optimizer.ConvertJoinMapJoin) LlapClusterStateForCompile(org.apache.hadoop.hive.ql.optimizer.physical.LlapClusterStateForCompile) HiveConf(org.apache.hadoop.hive.conf.HiveConf) Test(org.junit.Test)
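
Working out the arithmetic the assertions encode (a restatement of the test's own numbers, not Hive API): with a 1 GiB noconditional task size, an oversubscription factor of 0.2 and 3 usable slots (the per-query cap of 3 is below the default of 4 executors), the adjusted size is 1 GiB + 1 GiB * 0.2 * 3 ≈ 1.6 GiB; when the cap is raised to 5, the 4 default executors become the limiting factor and the result is 1 GiB + 1 GiB * 0.2 * 4 ≈ 1.8 GiB. The final assertions show that memory monitoring is switched off entirely when the check interval is 0 or the hash table inflation factor is invalid (0.0).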

Example 4 with LlapClusterStateForCompile

use of org.apache.hadoop.hive.ql.optimizer.physical.LlapClusterStateForCompile in project hive by apache.

the class TestOperators method testLlapMemoryOversubscriptionMaxExecutorsPerQueryCalculation.

@Test
public void testLlapMemoryOversubscriptionMaxExecutorsPerQueryCalculation() {
    ConvertJoinMapJoin convertJoinMapJoin = new ConvertJoinMapJoin();
    HiveConf hiveConf = new HiveConf();
    LlapClusterStateForCompile llapInfo = Mockito.mock(LlapClusterStateForCompile.class);
    when(llapInfo.getNumExecutorsPerNode()).thenReturn(1);
    assertEquals(1, convertJoinMapJoin.getMemoryMonitorInfo(hiveConf, llapInfo).getMaxExecutorsOverSubscribeMemory());
    assertEquals(3, convertJoinMapJoin.getMemoryMonitorInfo(hiveConf, null).getMaxExecutorsOverSubscribeMemory());
    when(llapInfo.getNumExecutorsPerNode()).thenReturn(6);
    assertEquals(2, convertJoinMapJoin.getMemoryMonitorInfo(hiveConf, llapInfo).getMaxExecutorsOverSubscribeMemory());
    when(llapInfo.getNumExecutorsPerNode()).thenReturn(30);
    assertEquals(8, convertJoinMapJoin.getMemoryMonitorInfo(hiveConf, llapInfo).getMaxExecutorsOverSubscribeMemory());
    hiveConf.set(HiveConf.ConfVars.LLAP_MEMORY_OVERSUBSCRIPTION_MAX_EXECUTORS_PER_QUERY.varname, "5");
    assertEquals(5, convertJoinMapJoin.getMemoryMonitorInfo(hiveConf, llapInfo).getMaxExecutorsOverSubscribeMemory());
    assertEquals(5, convertJoinMapJoin.getMemoryMonitorInfo(hiveConf, null).getMaxExecutorsOverSubscribeMemory());
}
Also used : ConvertJoinMapJoin(org.apache.hadoop.hive.ql.optimizer.ConvertJoinMapJoin) LlapClusterStateForCompile(org.apache.hadoop.hive.ql.optimizer.physical.LlapClusterStateForCompile) HiveConf(org.apache.hadoop.hive.conf.HiveConf) Test(org.junit.Test)
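
Taken together, these assertions are consistent with getMemoryMonitorInfo deriving the oversubscription slot count as roughly one third of the executors per node, clamped to at least 1 and at most 8, falling back to 3 when no cluster state is available, and always honouring an explicitly configured LLAP_MEMORY_OVERSUBSCRIPTION_MAX_EXECUTORS_PER_QUERY (here 5) over the derived value. The exact formula is not shown in this example, so treat this as an inference from the expected values rather than a statement of the implementation.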

Example 5 with LlapClusterStateForCompile

use of org.apache.hadoop.hive.ql.optimizer.physical.LlapClusterStateForCompile in project hive by apache.

the class TezCompiler method optimizeTaskPlan.

@Override
protected void optimizeTaskPlan(List<Task<?>> rootTasks, ParseContext pCtx, Context ctx) throws SemanticException {
    PerfLogger perfLogger = SessionState.getPerfLogger();
    perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
    PhysicalContext physicalCtx = new PhysicalContext(conf, pCtx, pCtx.getContext(), rootTasks, pCtx.getFetchTask());
    if (conf.getBoolVar(HiveConf.ConfVars.HIVENULLSCANOPTIMIZE)) {
        physicalCtx = new NullScanOptimizer().resolve(physicalCtx);
    } else {
        LOG.debug("Skipping null scan query optimization");
    }
    if (conf.getBoolVar(HiveConf.ConfVars.HIVEMETADATAONLYQUERIES)) {
        physicalCtx = new MetadataOnlyOptimizer().resolve(physicalCtx);
    } else {
        LOG.debug("Skipping metadata only query optimization");
    }
    if (conf.getBoolVar(HiveConf.ConfVars.HIVE_CHECK_CROSS_PRODUCT)) {
        physicalCtx = new CrossProductHandler().resolve(physicalCtx);
    } else {
        LOG.debug("Skipping cross product analysis");
    }
    if ("llap".equalsIgnoreCase(conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_MODE))) {
        physicalCtx = new LlapPreVectorizationPass().resolve(physicalCtx);
    } else {
        LOG.debug("Skipping llap pre-vectorization pass");
    }
    if (conf.getBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED)) {
        physicalCtx = new Vectorizer().resolve(physicalCtx);
    } else {
        LOG.debug("Skipping vectorization");
    }
    if (!"none".equalsIgnoreCase(conf.getVar(HiveConf.ConfVars.HIVESTAGEIDREARRANGE))) {
        physicalCtx = new StageIDsRearranger().resolve(physicalCtx);
    } else {
        LOG.debug("Skipping stage id rearranger");
    }
    if ((conf.getBoolVar(HiveConf.ConfVars.HIVE_TEZ_ENABLE_MEMORY_MANAGER)) && (conf.getBoolVar(HiveConf.ConfVars.HIVEUSEHYBRIDGRACEHASHJOIN))) {
        physicalCtx = new MemoryDecider().resolve(physicalCtx);
    }
    if ("llap".equalsIgnoreCase(conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_MODE))) {
        LlapClusterStateForCompile llapInfo = LlapClusterStateForCompile.getClusterInfo(conf);
        physicalCtx = new LlapDecider(llapInfo).resolve(physicalCtx);
    } else {
        LOG.debug("Skipping llap decider");
    }
    // This optimizer will serialize all filters that made it to the
    // table scan operator to avoid having to do it multiple times on
    // the backend. If you have a physical optimization that changes
    // table scans or filters, you have to invoke it before this one.
    physicalCtx = new SerializeFilter().resolve(physicalCtx);
    if (physicalCtx.getContext().getExplainAnalyze() != null) {
        new AnnotateRunTimeStatsOptimizer().resolve(physicalCtx);
    }
    perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "optimizeTaskPlan");
    return;
}
Also used : LlapDecider(org.apache.hadoop.hive.ql.optimizer.physical.LlapDecider) LlapClusterStateForCompile(org.apache.hadoop.hive.ql.optimizer.physical.LlapClusterStateForCompile) PerfLogger(org.apache.hadoop.hive.ql.log.PerfLogger) AnnotateRunTimeStatsOptimizer(org.apache.hadoop.hive.ql.optimizer.physical.AnnotateRunTimeStatsOptimizer) MemoryDecider(org.apache.hadoop.hive.ql.optimizer.physical.MemoryDecider) MetadataOnlyOptimizer(org.apache.hadoop.hive.ql.optimizer.physical.MetadataOnlyOptimizer) PhysicalContext(org.apache.hadoop.hive.ql.optimizer.physical.PhysicalContext) LlapPreVectorizationPass(org.apache.hadoop.hive.ql.optimizer.physical.LlapPreVectorizationPass) NullScanOptimizer(org.apache.hadoop.hive.ql.optimizer.physical.NullScanOptimizer) Vectorizer(org.apache.hadoop.hive.ql.optimizer.physical.Vectorizer) SerializeFilter(org.apache.hadoop.hive.ql.optimizer.physical.SerializeFilter) CrossProductHandler(org.apache.hadoop.hive.ql.optimizer.physical.CrossProductHandler) StageIDsRearranger(org.apache.hadoop.hive.ql.optimizer.physical.StageIDsRearranger)

Aggregations

LlapClusterStateForCompile (org.apache.hadoop.hive.ql.optimizer.physical.LlapClusterStateForCompile): 5
HiveConf (org.apache.hadoop.hive.conf.HiveConf): 2
PerfLogger (org.apache.hadoop.hive.ql.log.PerfLogger): 2
ConvertJoinMapJoin (org.apache.hadoop.hive.ql.optimizer.ConvertJoinMapJoin): 2
AnnotateRunTimeStatsOptimizer (org.apache.hadoop.hive.ql.optimizer.physical.AnnotateRunTimeStatsOptimizer): 2
CrossProductHandler (org.apache.hadoop.hive.ql.optimizer.physical.CrossProductHandler): 2
LlapDecider (org.apache.hadoop.hive.ql.optimizer.physical.LlapDecider): 2
LlapPreVectorizationPass (org.apache.hadoop.hive.ql.optimizer.physical.LlapPreVectorizationPass): 2
MemoryDecider (org.apache.hadoop.hive.ql.optimizer.physical.MemoryDecider): 2
MetadataOnlyOptimizer (org.apache.hadoop.hive.ql.optimizer.physical.MetadataOnlyOptimizer): 2
NullScanOptimizer (org.apache.hadoop.hive.ql.optimizer.physical.NullScanOptimizer): 2
PhysicalContext (org.apache.hadoop.hive.ql.optimizer.physical.PhysicalContext): 2
SerializeFilter (org.apache.hadoop.hive.ql.optimizer.physical.SerializeFilter): 2
StageIDsRearranger (org.apache.hadoop.hive.ql.optimizer.physical.StageIDsRearranger): 2
Vectorizer (org.apache.hadoop.hive.ql.optimizer.physical.Vectorizer): 2
Test (org.junit.Test): 2
CommonJoinOperator (org.apache.hadoop.hive.ql.exec.CommonJoinOperator): 1
CommonMergeJoinOperator (org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator): 1
JoinOperator (org.apache.hadoop.hive.ql.exec.JoinOperator): 1
MapJoinOperator (org.apache.hadoop.hive.ql.exec.MapJoinOperator): 1