Example 11 with SparkWork

Use of org.apache.hadoop.hive.ql.plan.SparkWork in the Apache Hive project.

From the class GenSparkSkewJoinProcessor, method processSkewJoin:

@SuppressWarnings("unchecked")
public static void processSkewJoin(JoinOperator joinOp, Task<? extends Serializable> currTask, ReduceWork reduceWork, ParseContext parseCtx) throws SemanticException {
    SparkWork currentWork = ((SparkTask) currTask).getWork();
    if (currentWork.getChildren(reduceWork).size() > 0) {
        LOG.warn("Skip runtime skew join as the ReduceWork has child work and hasn't been split.");
        return;
    }
    List<Task<? extends Serializable>> children = currTask.getChildTasks();
    Path baseTmpDir = parseCtx.getContext().getMRTmpPath();
    JoinDesc joinDescriptor = joinOp.getConf();
    Map<Byte, List<ExprNodeDesc>> joinValues = joinDescriptor.getExprs();
    int numAliases = joinValues.size();
    Map<Byte, Path> bigKeysDirMap = new HashMap<Byte, Path>();
    Map<Byte, Map<Byte, Path>> smallKeysDirMap = new HashMap<Byte, Map<Byte, Path>>();
    Map<Byte, Path> skewJoinJobResultsDir = new HashMap<Byte, Path>();
    Byte[] tags = joinDescriptor.getTagOrder();
    // for each joining table, set up the directories for its big keys and small keys
    for (int i = 0; i < numAliases; i++) {
        Byte alias = tags[i];
        bigKeysDirMap.put(alias, GenMRSkewJoinProcessor.getBigKeysDir(baseTmpDir, alias));
        Map<Byte, Path> smallKeysMap = new HashMap<Byte, Path>();
        smallKeysDirMap.put(alias, smallKeysMap);
        for (Byte src2 : tags) {
            if (!src2.equals(alias)) {
                smallKeysMap.put(src2, GenMRSkewJoinProcessor.getSmallKeysDir(baseTmpDir, alias, src2));
            }
        }
        skewJoinJobResultsDir.put(alias, GenMRSkewJoinProcessor.getBigKeysSkewJoinResultDir(baseTmpDir, alias));
    }
    joinDescriptor.setHandleSkewJoin(true);
    joinDescriptor.setBigKeysDirMap(bigKeysDirMap);
    joinDescriptor.setSmallKeysDirMap(smallKeysDirMap);
    joinDescriptor.setSkewKeyDefinition(HiveConf.getIntVar(parseCtx.getConf(), HiveConf.ConfVars.HIVESKEWJOINKEY));
    // create proper table/column desc for spilled tables
    TableDesc keyTblDesc = (TableDesc) reduceWork.getKeyDesc().clone();
    List<String> joinKeys = Utilities.getColumnNames(keyTblDesc.getProperties());
    List<String> joinKeyTypes = Utilities.getColumnTypes(keyTblDesc.getProperties());
    Map<Byte, TableDesc> tableDescList = new HashMap<Byte, TableDesc>();
    Map<Byte, RowSchema> rowSchemaList = new HashMap<Byte, RowSchema>();
    Map<Byte, List<ExprNodeDesc>> newJoinValues = new HashMap<Byte, List<ExprNodeDesc>>();
    Map<Byte, List<ExprNodeDesc>> newJoinKeys = new HashMap<Byte, List<ExprNodeDesc>>();
    // used to create the MapJoinDesc; entries must be kept in tag order
    List<TableDesc> newJoinValueTblDesc = new ArrayList<TableDesc>();
    for (int i = 0; i < tags.length; i++) {
        newJoinValueTblDesc.add(null);
    }
    for (int i = 0; i < numAliases; i++) {
        Byte alias = tags[i];
        List<ExprNodeDesc> valueCols = joinValues.get(alias);
        String colNames = "";
        String colTypes = "";
        int columnSize = valueCols.size();
        List<ExprNodeDesc> newValueExpr = new ArrayList<ExprNodeDesc>();
        List<ExprNodeDesc> newKeyExpr = new ArrayList<ExprNodeDesc>();
        ArrayList<ColumnInfo> columnInfos = new ArrayList<ColumnInfo>();
        boolean first = true;
        for (int k = 0; k < columnSize; k++) {
            TypeInfo type = valueCols.get(k).getTypeInfo();
            // the name is arbitrary; it does not matter.
            String newColName = i + "_VALUE_" + k;
            ColumnInfo columnInfo = new ColumnInfo(newColName, type, alias.toString(), false);
            columnInfos.add(columnInfo);
            newValueExpr.add(new ExprNodeColumnDesc(columnInfo.getType(), columnInfo.getInternalName(), columnInfo.getTabAlias(), false));
            if (!first) {
                colNames = colNames + ",";
                colTypes = colTypes + ",";
            }
            first = false;
            colNames = colNames + newColName;
            colTypes = colTypes + valueCols.get(k).getTypeString();
        }
        // the join keys are placed in the last part of the spilled table
        for (int k = 0; k < joinKeys.size(); k++) {
            if (!first) {
                colNames = colNames + ",";
                colTypes = colTypes + ",";
            }
            first = false;
            colNames = colNames + joinKeys.get(k);
            colTypes = colTypes + joinKeyTypes.get(k);
            ColumnInfo columnInfo = new ColumnInfo(joinKeys.get(k), TypeInfoFactory.getPrimitiveTypeInfo(joinKeyTypes.get(k)), alias.toString(), false);
            columnInfos.add(columnInfo);
            newKeyExpr.add(new ExprNodeColumnDesc(columnInfo.getType(), columnInfo.getInternalName(), columnInfo.getTabAlias(), false));
        }
        newJoinValues.put(alias, newValueExpr);
        newJoinKeys.put(alias, newKeyExpr);
        tableDescList.put(alias, Utilities.getTableDesc(colNames, colTypes));
        rowSchemaList.put(alias, new RowSchema(columnInfos));
        // construct value table Desc
        String valueColNames = "";
        String valueColTypes = "";
        first = true;
        for (int k = 0; k < columnSize; k++) {
            // the name is arbitrary; it does not matter.
            String newColName = i + "_VALUE_" + k;
            if (!first) {
                valueColNames = valueColNames + ",";
                valueColTypes = valueColTypes + ",";
            }
            valueColNames = valueColNames + newColName;
            valueColTypes = valueColTypes + valueCols.get(k).getTypeString();
            first = false;
        }
        newJoinValueTblDesc.set((byte) i, Utilities.getTableDesc(valueColNames, valueColTypes));
    }
    joinDescriptor.setSkewKeysValuesTables(tableDescList);
    joinDescriptor.setKeyTableDesc(keyTblDesc);
    // create N-1 map join tasks
    HashMap<Path, Task<? extends Serializable>> bigKeysDirToTaskMap = new HashMap<Path, Task<? extends Serializable>>();
    List<Serializable> listWorks = new ArrayList<Serializable>();
    List<Task<? extends Serializable>> listTasks = new ArrayList<Task<? extends Serializable>>();
    for (int i = 0; i < numAliases - 1; i++) {
        Byte src = tags[i];
        HiveConf hiveConf = new HiveConf(parseCtx.getConf(), GenSparkSkewJoinProcessor.class);
        SparkWork sparkWork = new SparkWork(parseCtx.getConf().getVar(HiveConf.ConfVars.HIVEQUERYID));
        Task<? extends Serializable> skewJoinMapJoinTask = TaskFactory.get(sparkWork, hiveConf);
        skewJoinMapJoinTask.setFetchSource(currTask.isFetchSource());
        // create N TableScans
        Operator<? extends OperatorDesc>[] parentOps = new TableScanOperator[tags.length];
        for (int k = 0; k < tags.length; k++) {
            Operator<? extends OperatorDesc> ts = GenMapRedUtils.createTemporaryTableScanOperator(joinOp.getCompilationOpContext(), rowSchemaList.get((byte) k));
            ((TableScanOperator) ts).setTableDesc(tableDescList.get((byte) k));
            parentOps[k] = ts;
        }
        // create the MapJoinOperator
        String dumpFilePrefix = "mapfile" + PlanUtils.getCountForMapJoinDumpFilePrefix();
        MapJoinDesc mapJoinDescriptor = new MapJoinDesc(newJoinKeys, keyTblDesc, newJoinValues, newJoinValueTblDesc, newJoinValueTblDesc, joinDescriptor.getOutputColumnNames(), i, joinDescriptor.getConds(), joinDescriptor.getFilters(), joinDescriptor.getNoOuterJoin(), dumpFilePrefix);
        mapJoinDescriptor.setTagOrder(tags);
        mapJoinDescriptor.setHandleSkewJoin(false);
        mapJoinDescriptor.setNullSafes(joinDescriptor.getNullSafes());
        // temporarily, mark it as child of all the TS
        MapJoinOperator mapJoinOp = (MapJoinOperator) OperatorFactory.getAndMakeChild(joinOp.getCompilationOpContext(), mapJoinDescriptor, null, parentOps);
        // clone the original join operator, and replace it with the MJ
        // this makes sure MJ has the same downstream operator plan as the original join
        List<Operator<?>> reducerList = new ArrayList<Operator<?>>();
        reducerList.add(reduceWork.getReducer());
        Operator<? extends OperatorDesc> reducer = SerializationUtilities.cloneOperatorTree(reducerList).get(0);
        Preconditions.checkArgument(reducer instanceof JoinOperator, "Reducer should be join operator, but actually is " + reducer.getName());
        JoinOperator cloneJoinOp = (JoinOperator) reducer;
        List<Operator<? extends OperatorDesc>> childOps = cloneJoinOp.getChildOperators();
        for (Operator<? extends OperatorDesc> childOp : childOps) {
            childOp.replaceParent(cloneJoinOp, mapJoinOp);
        }
        mapJoinOp.setChildOperators(childOps);
        // set memory usage for the MJ operator
        setMemUsage(mapJoinOp, skewJoinMapJoinTask, parseCtx);
        // create N MapWorks and add them to the SparkWork
        MapWork bigMapWork = null;
        Map<Byte, Path> smallTblDirs = smallKeysDirMap.get(src);
        for (int j = 0; j < tags.length; j++) {
            MapWork mapWork = PlanUtils.getMapRedWork().getMapWork();
            sparkWork.add(mapWork);
            // this code has only been added for testing
            boolean mapperCannotSpanPartns = parseCtx.getConf().getBoolVar(HiveConf.ConfVars.HIVE_MAPPER_CANNOT_SPAN_MULTIPLE_PARTITIONS);
            mapWork.setMapperCannotSpanPartns(mapperCannotSpanPartns);
            Operator<? extends OperatorDesc> tableScan = parentOps[j];
            String alias = tags[j].toString();
            ArrayList<String> aliases = new ArrayList<String>();
            aliases.add(alias);
            Path path;
            if (j == i) {
                path = bigKeysDirMap.get(tags[j]);
                bigKeysDirToTaskMap.put(path, skewJoinMapJoinTask);
                bigMapWork = mapWork;
            } else {
                path = smallTblDirs.get(tags[j]);
            }
            mapWork.addPathToAlias(path, aliases);
            mapWork.getAliasToWork().put(alias, tableScan);
            PartitionDesc partitionDesc = new PartitionDesc(tableDescList.get(tags[j]), null);
            mapWork.addPathToPartitionInfo(path, partitionDesc);
            mapWork.getAliasToPartnInfo().put(alias, partitionDesc);
            mapWork.setName("Map " + GenSparkUtils.getUtils().getNextSeqNumber());
        }
        // connect all small dir map work to the big dir map work
        Preconditions.checkArgument(bigMapWork != null, "Haven't identified big dir MapWork");
        // these 2 flags are intended only for the big-key map work
        bigMapWork.setNumMapTasks(HiveConf.getIntVar(hiveConf, HiveConf.ConfVars.HIVESKEWJOINMAPJOINNUMMAPTASK));
        bigMapWork.setMinSplitSize(HiveConf.getLongVar(hiveConf, HiveConf.ConfVars.HIVESKEWJOINMAPJOINMINSPLIT));
        // use HiveInputFormat so that we can control the number of map tasks
        bigMapWork.setInputformat(HiveInputFormat.class.getName());
        for (BaseWork work : sparkWork.getRoots()) {
            Preconditions.checkArgument(work instanceof MapWork, "All root work should be MapWork, but got " + work.getClass().getSimpleName());
            if (work != bigMapWork) {
                sparkWork.connect(work, bigMapWork, new SparkEdgeProperty(SparkEdgeProperty.SHUFFLE_NONE));
            }
        }
        // insert SparkHashTableSink and Dummy operators
        for (int j = 0; j < tags.length; j++) {
            if (j != i) {
                insertSHTS(tags[j], (TableScanOperator) parentOps[j], bigMapWork);
            }
        }
        listWorks.add(skewJoinMapJoinTask.getWork());
        listTasks.add(skewJoinMapJoinTask);
    }
    if (children != null) {
        for (Task<? extends Serializable> tsk : listTasks) {
            for (Task<? extends Serializable> oldChild : children) {
                tsk.addDependentTask(oldChild);
            }
        }
        currTask.setChildTasks(new ArrayList<Task<? extends Serializable>>());
        for (Task<? extends Serializable> oldChild : children) {
            oldChild.getParentTasks().remove(currTask);
        }
        listTasks.addAll(children);
        for (Task<? extends Serializable> oldChild : children) {
            listWorks.add(oldChild.getWork());
        }
    }
    ConditionalResolverSkewJoin.ConditionalResolverSkewJoinCtx context = new ConditionalResolverSkewJoin.ConditionalResolverSkewJoinCtx(bigKeysDirToTaskMap, children);
    ConditionalWork cndWork = new ConditionalWork(listWorks);
    ConditionalTask cndTsk = (ConditionalTask) TaskFactory.get(cndWork, parseCtx.getConf());
    cndTsk.setListTasks(listTasks);
    cndTsk.setResolver(new ConditionalResolverSkewJoin());
    cndTsk.setResolverCtx(context);
    currTask.setChildTasks(new ArrayList<Task<? extends Serializable>>());
    currTask.addDependentTask(cndTsk);
}
Also used : MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) JoinOperator(org.apache.hadoop.hive.ql.exec.JoinOperator) SparkTask(org.apache.hadoop.hive.ql.exec.spark.SparkTask) ConditionalTask(org.apache.hadoop.hive.ql.exec.ConditionalTask) Task(org.apache.hadoop.hive.ql.exec.Task) Serializable(java.io.Serializable) TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ConditionalWork(org.apache.hadoop.hive.ql.plan.ConditionalWork) ColumnInfo(org.apache.hadoop.hive.ql.exec.ColumnInfo) ConditionalTask(org.apache.hadoop.hive.ql.exec.ConditionalTask) List(java.util.List) ArrayList(java.util.ArrayList) HiveConf(org.apache.hadoop.hive.conf.HiveConf) ExprNodeDesc(org.apache.hadoop.hive.ql.plan.ExprNodeDesc) BaseWork(org.apache.hadoop.hive.ql.plan.BaseWork) ConditionalResolverSkewJoin(org.apache.hadoop.hive.ql.plan.ConditionalResolverSkewJoin) MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) RowSchema(org.apache.hadoop.hive.ql.exec.RowSchema) TypeInfo(org.apache.hadoop.hive.serde2.typeinfo.TypeInfo) TableDesc(org.apache.hadoop.hive.ql.plan.TableDesc) MapJoinDesc(org.apache.hadoop.hive.ql.plan.MapJoinDesc) JoinDesc(org.apache.hadoop.hive.ql.plan.JoinDesc) Map(java.util.Map) HashMap(java.util.HashMap) OperatorDesc(org.apache.hadoop.hive.ql.plan.OperatorDesc) MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) HashTableDummyOperator(org.apache.hadoop.hive.ql.exec.HashTableDummyOperator) SparkHashTableSinkOperator(org.apache.hadoop.hive.ql.exec.SparkHashTableSinkOperator) JoinOperator(org.apache.hadoop.hive.ql.exec.JoinOperator) TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) Operator(org.apache.hadoop.hive.ql.exec.Operator) HiveInputFormat(org.apache.hadoop.hive.ql.io.HiveInputFormat) ExprNodeColumnDesc(org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc) Path(org.apache.hadoop.fs.Path) MapJoinDesc(org.apache.hadoop.hive.ql.plan.MapJoinDesc) SparkTask(org.apache.hadoop.hive.ql.exec.spark.SparkTask) SparkWork(org.apache.hadoop.hive.ql.plan.SparkWork) MapWork(org.apache.hadoop.hive.ql.plan.MapWork) SparkEdgeProperty(org.apache.hadoop.hive.ql.plan.SparkEdgeProperty) PartitionDesc(org.apache.hadoop.hive.ql.plan.PartitionDesc)
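The SparkWork graph in this example is assembled with just a handful of calls: the SparkWork constructor that takes the query id, add() for each MapWork, connect() with a SHUFFLE_NONE SparkEdgeProperty, and getRoots() to find the works that still need wiring. The helper below is a minimal sketch of that wiring, not Hive code; the class and method names are made up for illustration, and it assumes the Hive exec classes are on the classpath.

import org.apache.hadoop.hive.ql.plan.BaseWork;
import org.apache.hadoop.hive.ql.plan.MapWork;
import org.apache.hadoop.hive.ql.plan.SparkEdgeProperty;
import org.apache.hadoop.hive.ql.plan.SparkWork;

public class SkewJoinWorkSketch {

    // Wires a small-table MapWork into the big-table MapWork over a
    // SHUFFLE_NONE edge, mirroring the getRoots() loop in the example above.
    static SparkWork buildSkewJoinWork(String queryId) {
        SparkWork sparkWork = new SparkWork(queryId);

        MapWork bigMapWork = new MapWork("Map 1");    // would scan the big-key spill dir
        MapWork smallMapWork = new MapWork("Map 2");  // would scan a small-key spill dir
        sparkWork.add(bigMapWork);
        sparkWork.add(smallMapWork);

        // every root work that is not the big-table MapWork becomes its parent
        for (BaseWork work : sparkWork.getRoots()) {
            if (work != bigMapWork) {
                sparkWork.connect(work, bigMapWork,
                        new SparkEdgeProperty(SparkEdgeProperty.SHUFFLE_NONE));
            }
        }
        return sparkWork;
    }
}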

Example 12 with SparkWork

Use of org.apache.hadoop.hive.ql.plan.SparkWork in the Apache Hive project.

From the class SparkTask, method getOperatorCounters:

private Map<String, List<String>> getOperatorCounters() {
    String groupName = HiveConf.getVar(conf, HiveConf.ConfVars.HIVECOUNTERGROUP);
    Map<String, List<String>> counters = new HashMap<String, List<String>>();
    List<String> hiveCounters = new LinkedList<String>();
    counters.put(groupName, hiveCounters);
    hiveCounters.add(Operator.HIVECOUNTERCREATEDFILES);
    // MapOperator sits outside the SparkWork; SparkMapRecordHandler uses it to bridge
    // the Spark transformation and the Hive operators in the SparkWork.
    for (MapOperator.Counter counter : MapOperator.Counter.values()) {
        hiveCounters.add(counter.toString());
    }
    SparkWork sparkWork = this.getWork();
    for (BaseWork work : sparkWork.getAllWork()) {
        for (Operator<? extends OperatorDesc> operator : work.getAllOperators()) {
            if (operator instanceof FileSinkOperator) {
                for (FileSinkOperator.Counter counter : FileSinkOperator.Counter.values()) {
                    hiveCounters.add(((FileSinkOperator) operator).getCounterName(counter));
                }
            } else if (operator instanceof ReduceSinkOperator) {
                for (ReduceSinkOperator.Counter counter : ReduceSinkOperator.Counter.values()) {
                    hiveCounters.add(((ReduceSinkOperator) operator).getCounterName(counter, conf));
                }
            } else if (operator instanceof ScriptOperator) {
                for (ScriptOperator.Counter counter : ScriptOperator.Counter.values()) {
                    hiveCounters.add(counter.toString());
                }
            } else if (operator instanceof JoinOperator) {
                for (JoinOperator.SkewkeyTableCounter counter : JoinOperator.SkewkeyTableCounter.values()) {
                    hiveCounters.add(counter.toString());
                }
            }
        }
    }
    return counters;
}
Also used : JoinOperator(org.apache.hadoop.hive.ql.exec.JoinOperator) FileSinkOperator(org.apache.hadoop.hive.ql.exec.FileSinkOperator) HashMap(java.util.HashMap) ScriptOperator(org.apache.hadoop.hive.ql.exec.ScriptOperator) SparkWork(org.apache.hadoop.hive.ql.plan.SparkWork) LinkedList(java.util.LinkedList) MapOperator(org.apache.hadoop.hive.ql.exec.MapOperator) ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) ArrayList(java.util.ArrayList) LinkedList(java.util.LinkedList) List(java.util.List) BaseWork(org.apache.hadoop.hive.ql.plan.BaseWork)
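The getAllWork()/getAllOperators() traversal shown here is the generic way to visit every operator a SparkWork will run, so the same pattern works for other per-operator bookkeeping. The sketch below is illustrative only (the class and method names are not from Hive): it collects the operator ids of every FileSinkOperator in a SparkWork.

import java.util.LinkedHashSet;
import java.util.Set;

import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
import org.apache.hadoop.hive.ql.exec.Operator;
import org.apache.hadoop.hive.ql.plan.BaseWork;
import org.apache.hadoop.hive.ql.plan.OperatorDesc;
import org.apache.hadoop.hive.ql.plan.SparkWork;

public class SparkWorkOperatorWalk {

    // Walks every BaseWork in the SparkWork and every operator inside it,
    // keeping only the FileSinkOperators, the same filtering step that
    // getOperatorCounters() uses when it registers counter names.
    static Set<String> fileSinkOperatorIds(SparkWork sparkWork) {
        Set<String> ids = new LinkedHashSet<>();
        for (BaseWork work : sparkWork.getAllWork()) {
            for (Operator<? extends OperatorDesc> operator : work.getAllOperators()) {
                if (operator instanceof FileSinkOperator) {
                    ids.add(operator.getOperatorId());
                }
            }
        }
        return ids;
    }
}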

Example 13 with SparkWork

Use of org.apache.hadoop.hive.ql.plan.SparkWork in the Apache Hive project.

From the class SparkTask, method execute:

@Override
public int execute(DriverContext driverContext) {
    int rc = 0;
    perfLogger = SessionState.getPerfLogger();
    SparkSession sparkSession = null;
    SparkSessionManager sparkSessionManager = null;
    try {
        printConfigInfo();
        sparkSessionManager = SparkSessionManagerImpl.getInstance();
        sparkSession = SparkUtilities.getSparkSession(conf, sparkSessionManager);
        SparkWork sparkWork = getWork();
        sparkWork.setRequiredCounterPrefix(getOperatorCounters());
        perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_SUBMIT_JOB);
        submitTime = perfLogger.getStartTime(PerfLogger.SPARK_SUBMIT_JOB);
        SparkJobRef jobRef = sparkSession.submit(driverContext, sparkWork);
        perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_SUBMIT_JOB);
        addToHistory(jobRef);
        sparkJobID = jobRef.getJobId();
        this.jobID = jobRef.getSparkJobStatus().getAppID();
        rc = jobRef.monitorJob();
        SparkJobStatus sparkJobStatus = jobRef.getSparkJobStatus();
        getSparkJobInfo(sparkJobStatus, rc);
        if (rc == 0) {
            sparkStatistics = sparkJobStatus.getSparkStatistics();
            if (LOG.isInfoEnabled() && sparkStatistics != null) {
                LOG.info(String.format("=====Spark Job[%s] statistics=====", jobRef.getJobId()));
                logSparkStatistic(sparkStatistics);
            }
            LOG.info("Execution completed successfully");
        } else if (rc == 2) {
            // Cancel the job if the monitor detected a job submission timeout.
            // TODO: If the timeout is because of lack of resources in the cluster, we should
            // ideally also cancel the app request here. But w/o facilities from Spark or YARN,
            // it's difficult to do it on hive side alone. See HIVE-12650.
            jobRef.cancelJob();
        }
        if (this.jobID == null) {
            this.jobID = sparkJobStatus.getAppID();
        }
        sparkJobStatus.cleanup();
    } catch (Exception e) {
        String msg = "Failed to execute spark task, with exception '" + Utilities.getNameMessage(e) + "'";
        // Has to use full name to make sure it does not conflict with
        // org.apache.commons.lang.StringUtils
        console.printError(msg, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        LOG.error(msg, e);
        setException(e);
        rc = 1;
    } finally {
        startTime = perfLogger.getEndTime(PerfLogger.SPARK_SUBMIT_TO_RUNNING);
        // startTime may not have been recorded if the SPARK_SUBMIT_TO_RUNNING event
        // was never logged; in that case, fall back to the submit time.
        if (startTime < submitTime) {
            startTime = submitTime;
        }
        finishTime = perfLogger.getEndTime(PerfLogger.SPARK_RUN_JOB);
        Utilities.clearWork(conf);
        if (sparkSession != null && sparkSessionManager != null) {
            rc = close(rc);
            try {
                sparkSessionManager.returnSession(sparkSession);
            } catch (HiveException ex) {
                LOG.error("Failed to return the session to SessionManager", ex);
            }
        }
    }
    return rc;
}
Also used : SparkSession(org.apache.hadoop.hive.ql.exec.spark.session.SparkSession) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) SparkJobRef(org.apache.hadoop.hive.ql.exec.spark.status.SparkJobRef) SparkJobStatus(org.apache.hadoop.hive.ql.exec.spark.status.SparkJobStatus) SparkWork(org.apache.hadoop.hive.ql.plan.SparkWork) SparkSessionManager(org.apache.hadoop.hive.ql.exec.spark.session.SparkSessionManager) IOException(java.io.IOException) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException)
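Stripped of the perf logging, statistics collection, and error reporting, the session handling in execute() reduces to acquire, submit, monitor, and return. The helper below is only a sketch of that flow under the assumption of a working Hive-on-Spark classpath and configuration; its name and shape are not from Hive.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.hive.ql.exec.spark.SparkUtilities;
import org.apache.hadoop.hive.ql.exec.spark.session.SparkSession;
import org.apache.hadoop.hive.ql.exec.spark.session.SparkSessionManager;
import org.apache.hadoop.hive.ql.exec.spark.session.SparkSessionManagerImpl;
import org.apache.hadoop.hive.ql.exec.spark.status.SparkJobRef;
import org.apache.hadoop.hive.ql.plan.SparkWork;

public class SparkSubmitSketch {

    // Distilled submit/monitor/return-session flow from SparkTask.execute above.
    static int submitAndWait(HiveConf conf, DriverContext driverContext, SparkWork sparkWork)
            throws Exception {
        SparkSessionManager manager = SparkSessionManagerImpl.getInstance();
        SparkSession session = SparkUtilities.getSparkSession(conf, manager);
        try {
            SparkJobRef jobRef = session.submit(driverContext, sparkWork);
            int rc = jobRef.monitorJob();   // 0 on success, 2 on submission timeout
            if (rc == 2) {
                jobRef.cancelJob();         // give up on a job that never got submitted
            }
            jobRef.getSparkJobStatus().cleanup();
            return rc;
        } finally {
            manager.returnSession(session); // hand the session back to the manager
        }
    }
}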

Aggregations

SparkWork (org.apache.hadoop.hive.ql.plan.SparkWork): 13 uses
BaseWork (org.apache.hadoop.hive.ql.plan.BaseWork): 7 uses
Serializable (java.io.Serializable): 6 uses
ArrayList (java.util.ArrayList): 6 uses
TableScanOperator (org.apache.hadoop.hive.ql.exec.TableScanOperator): 6 uses
List (java.util.List): 5 uses
MapJoinOperator (org.apache.hadoop.hive.ql.exec.MapJoinOperator): 5 uses
Operator (org.apache.hadoop.hive.ql.exec.Operator): 5 uses
ReduceSinkOperator (org.apache.hadoop.hive.ql.exec.ReduceSinkOperator): 5 uses
Task (org.apache.hadoop.hive.ql.exec.Task): 5 uses
SparkTask (org.apache.hadoop.hive.ql.exec.spark.SparkTask): 5 uses
MapWork (org.apache.hadoop.hive.ql.plan.MapWork): 5 uses
ConditionalTask (org.apache.hadoop.hive.ql.exec.ConditionalTask): 4 uses
JoinOperator (org.apache.hadoop.hive.ql.exec.JoinOperator): 4 uses
SparkEdgeProperty (org.apache.hadoop.hive.ql.plan.SparkEdgeProperty): 4 uses
FileSinkOperator (org.apache.hadoop.hive.ql.exec.FileSinkOperator): 3 uses
HashTableDummyOperator (org.apache.hadoop.hive.ql.exec.HashTableDummyOperator): 3 uses
RowSchema (org.apache.hadoop.hive.ql.exec.RowSchema): 3 uses
SMBMapJoinOperator (org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator): 3 uses
MapredWork (org.apache.hadoop.hive.ql.plan.MapredWork): 3 uses