Example 6 with LoadTableDesc

use of org.apache.hadoop.hive.ql.plan.LoadTableDesc in project hive by apache.

From the class TaskCompiler, method compile:

@SuppressWarnings({ "nls", "unchecked" })
public void compile(final ParseContext pCtx, final List<Task<? extends Serializable>> rootTasks, final HashSet<ReadEntity> inputs, final HashSet<WriteEntity> outputs) throws SemanticException {
    Context ctx = pCtx.getContext();
    GlobalLimitCtx globalLimitCtx = pCtx.getGlobalLimitCtx();
    List<Task<MoveWork>> mvTask = new ArrayList<>();
    List<LoadTableDesc> loadTableWork = pCtx.getLoadTableWork();
    List<LoadFileDesc> loadFileWork = pCtx.getLoadFileWork();
    boolean isCStats = pCtx.getQueryProperties().isAnalyzeRewrite();
    int outerQueryLimit = pCtx.getQueryProperties().getOuterQueryLimit();
    if (pCtx.getFetchTask() != null) {
        if (pCtx.getFetchTask().getTblDesc() == null) {
            return;
        }
        pCtx.getFetchTask().getWork().setHiveServerQuery(SessionState.get().isHiveServerQuery());
        TableDesc resultTab = pCtx.getFetchTask().getTblDesc();
        // If the result SerDe is not ThriftJDBCBinarySerDe, then either the ThriftFormatter or the DefaultFetchFormatter should be used.
        if (!resultTab.getSerdeClassName().equalsIgnoreCase(ThriftJDBCBinarySerDe.class.getName())) {
            if (SessionState.get().isHiveServerQuery()) {
                conf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, ThriftFormatter.class.getName());
            } else {
                String formatterName = conf.get(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER);
                if (formatterName == null || formatterName.isEmpty()) {
                    conf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, DefaultFetchFormatter.class.getName());
                }
            }
        }
        return;
    }
    optimizeOperatorPlan(pCtx, inputs, outputs);
    /*
     * In case of a select, use a fetch task instead of a move task.
     * If the select is from analyze table column rewrite, don't create a fetch task. Instead create
     * a column stats task later.
     */
    if (pCtx.getQueryProperties().isQuery() && !isCStats) {
        if ((!loadTableWork.isEmpty()) || (loadFileWork.size() != 1)) {
            throw new SemanticException(ErrorMsg.INVALID_LOAD_TABLE_FILE_WORK.getMsg());
        }
        LoadFileDesc loadFileDesc = loadFileWork.get(0);
        String cols = loadFileDesc.getColumns();
        String colTypes = loadFileDesc.getColumnTypes();
        String resFileFormat;
        TableDesc resultTab = pCtx.getFetchTableDesc();
        if (resultTab == null) {
            resFileFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYRESULTFILEFORMAT);
            if (SessionState.get().getIsUsingThriftJDBCBinarySerDe() && (resFileFormat.equalsIgnoreCase("SequenceFile"))) {
                resultTab = PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, resFileFormat, ThriftJDBCBinarySerDe.class);
                // Set the fetch formatter to be a no-op for the ListSinkOperator, since we'll
                // read formatted thrift objects from the output SequenceFile written by Tasks.
                conf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, NoOpFetchFormatter.class.getName());
            } else {
                resultTab = PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, resFileFormat, LazySimpleSerDe.class);
            }
        } else {
            if (resultTab.getProperties().getProperty(serdeConstants.SERIALIZATION_LIB).equalsIgnoreCase(ThriftJDBCBinarySerDe.class.getName())) {
                // Set the fetch formatter to be a no-op for the ListSinkOperator, since we'll
                // read formatted thrift objects from the output SequenceFile written by Tasks.
                conf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, NoOpFetchFormatter.class.getName());
            }
        }
        FetchWork fetch = new FetchWork(loadFileDesc.getSourcePath(), resultTab, outerQueryLimit);
        boolean isHiveServerQuery = SessionState.get().isHiveServerQuery();
        fetch.setHiveServerQuery(isHiveServerQuery);
        fetch.setSource(pCtx.getFetchSource());
        fetch.setSink(pCtx.getFetchSink());
        if (isHiveServerQuery && null != resultTab && resultTab.getSerdeClassName().equalsIgnoreCase(ThriftJDBCBinarySerDe.class.getName()) && HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_SERVER2_THRIFT_RESULTSET_SERIALIZE_IN_TASKS)) {
            fetch.setIsUsingThriftJDBCBinarySerDe(true);
        } else {
            fetch.setIsUsingThriftJDBCBinarySerDe(false);
        }
        pCtx.setFetchTask((FetchTask) TaskFactory.get(fetch));
        // For the FetchTask, the limit optimization requires we fetch all the rows
        // in memory and count how many rows we get. It's not practical if the
        // limit factor is too big
        int fetchLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVELIMITOPTMAXFETCH);
        if (globalLimitCtx.isEnable() && globalLimitCtx.getGlobalLimit() > fetchLimit) {
            LOG.info("For FetchTask, LIMIT " + globalLimitCtx.getGlobalLimit() + " > " + fetchLimit + ". Doesn't qualify limit optimization.");
            globalLimitCtx.disableOpt();
        }
        if (outerQueryLimit == 0) {
            // Believe it or not, some tools do generate queries with limit 0 and then expect
            // the query to run quickly. Let's meet their requirement.
            LOG.info("Limit 0. No query execution needed.");
            return;
        }
    } else if (!isCStats) {
        for (LoadTableDesc ltd : loadTableWork) {
            Task<MoveWork> tsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false));
            mvTask.add(tsk);
        }
        boolean oneLoadFileForCtas = true;
        for (LoadFileDesc lfd : loadFileWork) {
            if (pCtx.getQueryProperties().isCTAS() || pCtx.getQueryProperties().isMaterializedView()) {
                if (!oneLoadFileForCtas) {
                    // should not have more than 1 load file for CTAS.
                    throw new SemanticException("One query is not expected to contain multiple CTAS loads statements");
                }
                setLoadFileLocation(pCtx, lfd);
                oneLoadFileForCtas = false;
            }
            mvTask.add(TaskFactory.get(new MoveWork(null, null, null, lfd, false)));
        }
    }
    generateTaskTree(rootTasks, pCtx, mvTask, inputs, outputs);
    // For each task, set the key descriptor for the reducer
    for (Task<? extends Serializable> rootTask : rootTasks) {
        GenMapRedUtils.setKeyAndValueDescForTaskTree(rootTask);
    }
    // If a task contains an operator which instructs bucketizedhiveinputformat to be used, please do so
    for (Task<? extends Serializable> rootTask : rootTasks) {
        setInputFormat(rootTask);
    }
    optimizeTaskPlan(rootTasks, pCtx, ctx);
    /*
     * If the query was the result of analyze table column compute statistics rewrite, create
     * a column stats task instead of a fetch task to persist stats to the metastore.
     * As per HIVE-15903, we will also collect table stats when user computes column stats.
     * That means, if isCStats || !pCtx.getColumnStatsAutoGatherContexts().isEmpty(), we need to
     * collect table stats: if isCStats, we need to include a basic stats task; otherwise it is
     * ColumnStatsAutoGather, which should already have a move task with a stats task.
     */
    if (isCStats || !pCtx.getColumnStatsAutoGatherContexts().isEmpty()) {
        // map from tablename to task (ColumnStatsTask which includes a BasicStatsTask)
        Map<String, StatsTask> map = new LinkedHashMap<>();
        if (isCStats) {
            if (rootTasks == null || rootTasks.size() != 1 || pCtx.getTopOps() == null || pCtx.getTopOps().size() != 1) {
                throw new SemanticException("Can not find correct root task!");
            }
            try {
                Task<? extends Serializable> root = rootTasks.iterator().next();
                StatsTask tsk = (StatsTask) genTableStats(pCtx, pCtx.getTopOps().values().iterator().next(), root, outputs);
                root.addDependentTask(tsk);
                map.put(extractTableFullName(tsk), tsk);
            } catch (HiveException e) {
                throw new SemanticException(e);
            }
            genColumnStatsTask(pCtx.getAnalyzeRewrite(), loadFileWork, map, outerQueryLimit, 0);
        } else {
            Set<Task<? extends Serializable>> leafTasks = new LinkedHashSet<Task<? extends Serializable>>();
            getLeafTasks(rootTasks, leafTasks);
            List<Task<? extends Serializable>> nonStatsLeafTasks = new ArrayList<>();
            for (Task<? extends Serializable> tsk : leafTasks) {
                // map table name to the correct ColumnStatsTask
                if (tsk instanceof StatsTask) {
                    map.put(extractTableFullName((StatsTask) tsk), (StatsTask) tsk);
                } else {
                    nonStatsLeafTasks.add(tsk);
                }
            }
            // add cStatsTask as a dependent of all the nonStatsLeafTasks
            for (Task<? extends Serializable> tsk : nonStatsLeafTasks) {
                for (Task<? extends Serializable> cStatsTask : map.values()) {
                    tsk.addDependentTask(cStatsTask);
                }
            }
            for (ColumnStatsAutoGatherContext columnStatsAutoGatherContext : pCtx.getColumnStatsAutoGatherContexts()) {
                if (!columnStatsAutoGatherContext.isInsertInto()) {
                    genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(), columnStatsAutoGatherContext.getLoadFileWork(), map, outerQueryLimit, 0);
                } else {
                    int numBitVector;
                    try {
                        numBitVector = HiveStatsUtils.getNumBitVectorsForNDVEstimation(conf);
                    } catch (Exception e) {
                        throw new SemanticException(e.getMessage());
                    }
                    genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(), columnStatsAutoGatherContext.getLoadFileWork(), map, outerQueryLimit, numBitVector);
                }
            }
        }
    }
    decideExecMode(rootTasks, ctx, globalLimitCtx);
    if (pCtx.getQueryProperties().isCTAS() && !pCtx.getCreateTable().isMaterialization()) {
        // generate a DDL task and make it a dependent task of the leaf
        CreateTableDesc crtTblDesc = pCtx.getCreateTable();
        crtTblDesc.validate(conf);
        Task<? extends Serializable> crtTblTask = TaskFactory.get(new DDLWork(inputs, outputs, crtTblDesc));
        patchUpAfterCTASorMaterializedView(rootTasks, outputs, crtTblTask);
    } else if (pCtx.getQueryProperties().isMaterializedView()) {
        // generate a DDL task and make it a dependent task of the leaf
        CreateViewDesc viewDesc = pCtx.getCreateViewDesc();
        Task<? extends Serializable> crtViewTask = TaskFactory.get(new DDLWork(inputs, outputs, viewDesc));
        patchUpAfterCTASorMaterializedView(rootTasks, outputs, crtViewTask);
    } else if (pCtx.getMaterializedViewUpdateDesc() != null) {
        // If there is a materialized view update desc, we introduce it at the end
        // of the tree.
        MaterializedViewDesc materializedViewDesc = pCtx.getMaterializedViewUpdateDesc();
        Set<Task<? extends Serializable>> leafTasks = new LinkedHashSet<Task<? extends Serializable>>();
        getLeafTasks(rootTasks, leafTasks);
        Task<? extends Serializable> materializedViewTask = TaskFactory.get(materializedViewDesc, conf);
        for (Task<? extends Serializable> task : leafTasks) {
            task.addDependentTask(materializedViewTask);
        }
    }
    if (globalLimitCtx.isEnable() && pCtx.getFetchTask() != null) {
        LOG.info("set least row check for FetchTask: " + globalLimitCtx.getGlobalLimit());
        pCtx.getFetchTask().getWork().setLeastNumRows(globalLimitCtx.getGlobalLimit());
    }
    if (globalLimitCtx.isEnable() && globalLimitCtx.getLastReduceLimitDesc() != null) {
        LOG.info("set least row check for LimitDesc: " + globalLimitCtx.getGlobalLimit());
        globalLimitCtx.getLastReduceLimitDesc().setLeastRows(globalLimitCtx.getGlobalLimit());
    }
    Interner<TableDesc> interner = Interners.newStrongInterner();
    for (Task<? extends Serializable> rootTask : rootTasks) {
        GenMapRedUtils.internTableDesc(rootTask, interner);
        GenMapRedUtils.deriveFinalExplainAttributes(rootTask, pCtx.getConf());
    }
}
Also used : MoveWork(org.apache.hadoop.hive.ql.plan.MoveWork) LinkedHashSet(java.util.LinkedHashSet) FetchTask(org.apache.hadoop.hive.ql.exec.FetchTask) DDLTask(org.apache.hadoop.hive.ql.exec.DDLTask) Task(org.apache.hadoop.hive.ql.exec.Task) StatsTask(org.apache.hadoop.hive.ql.exec.StatsTask) Serializable(java.io.Serializable) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) MaterializedViewDesc(org.apache.hadoop.hive.ql.exec.MaterializedViewDesc) LazySimpleSerDe(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe) ArrayList(java.util.ArrayList) LinkedHashMap(java.util.LinkedHashMap) ThriftFormatter(org.apache.hadoop.hive.serde2.thrift.ThriftFormatter) CreateViewDesc(org.apache.hadoop.hive.ql.plan.CreateViewDesc) ThriftJDBCBinarySerDe(org.apache.hadoop.hive.serde2.thrift.ThriftJDBCBinarySerDe) NoOpFetchFormatter(org.apache.hadoop.hive.serde2.NoOpFetchFormatter) Context(org.apache.hadoop.hive.ql.Context) AnalyzeRewriteContext(org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.AnalyzeRewriteContext) LoadFileDesc(org.apache.hadoop.hive.ql.plan.LoadFileDesc) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) CreateTableDesc(org.apache.hadoop.hive.ql.plan.CreateTableDesc) DDLWork(org.apache.hadoop.hive.ql.plan.DDLWork) DefaultFetchFormatter(org.apache.hadoop.hive.serde2.DefaultFetchFormatter) FetchWork(org.apache.hadoop.hive.ql.plan.FetchWork) TableDesc(org.apache.hadoop.hive.ql.plan.TableDesc)
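As a companion to the non-query branch above, here is a minimal sketch of the LoadTableDesc-to-MoveWork pattern that compile() applies per load: each LoadTableDesc is wrapped in its own MoveWork (no inputs/outputs, no file work, no format check) and handed to TaskFactory. The class MoveTaskSketch and method buildMoveTasks are illustrative names, not part of Hive; only constructors and calls shown in the example above are used.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
import org.apache.hadoop.hive.ql.plan.MoveWork;

public class MoveTaskSketch {

    // Wrap each LoadTableDesc into its own MoveWork and create a move task for it,
    // mirroring the loop in TaskCompiler.compile() for the non-query branch.
    static List<Task<MoveWork>> buildMoveTasks(List<LoadTableDesc> loadTableWork) {
        List<Task<MoveWork>> mvTasks = new ArrayList<>();
        for (LoadTableDesc ltd : loadTableWork) {
            mvTasks.add(TaskFactory.get(new MoveWork(null, null, ltd, null, false)));
        }
        return mvTasks;
    }
}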

Example 7 with LoadTableDesc

use of org.apache.hadoop.hive.ql.plan.LoadTableDesc in project hive by apache.

From the class ImportSemanticAnalyzer, method loadTable:

private static Task<?> loadTable(URI fromURI, Table table, boolean replace, Path tgtPath, ReplicationSpec replicationSpec, EximUtil.SemanticAnalyzerWrapperContext x, Long writeId, int stmtId, boolean isSourceMm) {
    Path dataPath = new Path(fromURI.toString(), EximUtil.DATA_PATH_NAME);
    Path destPath = null, loadPath = null;
    LoadFileType lft;
    if (AcidUtils.isInsertOnlyTable(table)) {
        String mmSubdir = replace ? AcidUtils.baseDir(writeId) : AcidUtils.deltaSubdir(writeId, writeId, stmtId);
        destPath = new Path(tgtPath, mmSubdir);
        loadPath = tgtPath;
        lft = LoadFileType.KEEP_EXISTING;
    } else {
        destPath = loadPath = x.getCtx().getExternalTmpPath(tgtPath);
        lft = replace ? LoadFileType.REPLACE_ALL : LoadFileType.OVERWRITE_EXISTING;
    }
    if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
        Utilities.FILE_OP_LOGGER.trace("adding import work for table with source location: " + dataPath + "; table: " + tgtPath + "; copy destination " + destPath + "; mm " + writeId + " (src " + isSourceMm + ") for " + (table == null ? "a new table" : table.getTableName()));
    }
    Task<?> copyTask = null;
    if (replicationSpec.isInReplicationScope()) {
        if (isSourceMm || isAcid(writeId)) {
            // Note: this is replication gap, not MM gap... Repl V2 is not ready yet.
            throw new RuntimeException("Replicating MM and ACID tables is not supported");
        }
        copyTask = ReplCopyTask.getLoadCopyTask(replicationSpec, dataPath, destPath, x.getConf());
    } else {
        CopyWork cw = new CopyWork(dataPath, destPath, false);
        cw.setSkipSourceMmDirs(isSourceMm);
        copyTask = TaskFactory.get(cw);
    }
    LoadTableDesc loadTableWork = new LoadTableDesc(loadPath, Utilities.getTableDesc(table), new TreeMap<>(), lft, writeId);
    loadTableWork.setStmtId(stmtId);
    MoveWork mv = new MoveWork(x.getInputs(), x.getOutputs(), loadTableWork, null, false);
    Task<?> loadTableTask = TaskFactory.get(mv);
    copyTask.addDependentTask(loadTableTask);
    x.getTasks().add(copyTask);
    return loadTableTask;
}
Also used : Path(org.apache.hadoop.fs.Path) MoveWork(org.apache.hadoop.hive.ql.plan.MoveWork) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) LoadFileType(org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType) CopyWork(org.apache.hadoop.hive.ql.plan.CopyWork)
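The same copy-then-load chaining, reduced to its essentials as a hedged sketch. It reuses only the constructors and calls shown in the example above (CopyWork, LoadTableDesc, MoveWork, TaskFactory.get, addDependentTask); the class name ImportChainSketch and the helper chainCopyAndLoad are hypothetical, and the import location of Utilities is assumed from the Hive codebase.

import java.util.TreeMap;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.plan.CopyWork;
import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
import org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType;
import org.apache.hadoop.hive.ql.plan.MoveWork;

public class ImportChainSketch {

    // Copy the exported data into destPath, then load the table from loadPath via a
    // LoadTableDesc-backed MoveWork; the load task only runs once the copy has finished.
    static Task<?> chainCopyAndLoad(Path dataPath, Path destPath, Path loadPath, Table table,
            LoadFileType lft, Long writeId, int stmtId) {
        Task<?> copyTask = TaskFactory.get(new CopyWork(dataPath, destPath, false));
        LoadTableDesc loadTableWork =
            new LoadTableDesc(loadPath, Utilities.getTableDesc(table), new TreeMap<>(), lft, writeId);
        loadTableWork.setStmtId(stmtId);
        Task<?> loadTableTask = TaskFactory.get(new MoveWork(null, null, loadTableWork, null, false));
        copyTask.addDependentTask(loadTableTask);
        return copyTask;
    }
}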

Example 8 with LoadTableDesc

use of org.apache.hadoop.hive.ql.plan.LoadTableDesc in project hive by apache.

From the class AlterTableAddPartitionAnalyzer, method postProcess:

/**
 * Adding a partition to a transactional table needs to add (copy/rename) the data so that it
 * lands in a delta_x_x/ folder in the partition dir.
 */
@Override
protected void postProcess(TableName tableName, Table table, AlterTableAddPartitionDesc desc, Task<DDLWork> ddlTask) throws SemanticException {
    if (!AcidUtils.isTransactionalTable(table)) {
        return;
    }
    setAcidDdlDesc(desc);
    Long writeId = null;
    int stmtId = 0;
    for (AlterTableAddPartitionDesc.PartitionDesc partitonDesc : desc.getPartitions()) {
        if (partitonDesc.getLocation() != null) {
            AcidUtils.validateAcidPartitionLocation(partitonDesc.getLocation(), conf);
            if (desc.isIfNotExists()) {
                // Don't add partition data if it already exists
                Partition oldPart = PartitionUtils.getPartition(db, table, partitonDesc.getPartSpec(), false);
                if (oldPart != null) {
                    continue;
                }
            }
            if (writeId == null) {
                // so that we only allocate a writeId if actually adding data (vs. adding a partition w/o data)
                try {
                    writeId = getTxnMgr().getTableWriteId(table.getDbName(), table.getTableName());
                } catch (LockException ex) {
                    throw new SemanticException("Failed to allocate the write id", ex);
                }
                stmtId = getTxnMgr().getStmtIdAndIncrement();
            }
            LoadTableDesc loadTableWork = new LoadTableDesc(new Path(partitonDesc.getLocation()), Utilities.getTableDesc(table), partitonDesc.getPartSpec(),
                LoadTableDesc.LoadFileType.KEEP_EXISTING, // not relevant - creating new partition
                writeId);
            loadTableWork.setStmtId(stmtId);
            loadTableWork.setInheritTableSpecs(true);
            try {
                partitonDesc.setLocation(new Path(table.getDataLocation(), Warehouse.makePartPath(partitonDesc.getPartSpec())).toString());
            } catch (MetaException ex) {
                throw new SemanticException("Could not determine partition path due to: " + ex.getMessage(), ex);
            }
            Task<MoveWork> moveTask = TaskFactory.get(new MoveWork(getInputs(), getOutputs(), loadTableWork, null,
                true, // make sure to check format
                false)); // is this right?
            ddlTask.addDependentTask(moveTask);
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MoveWork(org.apache.hadoop.hive.ql.plan.MoveWork) Partition(org.apache.hadoop.hive.ql.metadata.Partition) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException)
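A hedged sketch isolating the move-task wiring above: a LoadTableDesc for the new partition's location (KEEP_EXISTING, with the allocated writeId/stmtId) is wrapped in a MoveWork and attached as a dependent of the ADD PARTITION DDL task. The class AddPartitionLoadSketch and method attachMoveTask are illustrative; the entity-set types follow the compile() signature in Example 6, and the ReadEntity package is assumed to mirror WriteEntity's.

import java.util.HashSet;
import java.util.Map;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
import org.apache.hadoop.hive.ql.plan.MoveWork;

public class AddPartitionLoadSketch {

    // Build the LoadTableDesc for one newly added partition and chain the resulting
    // move task after the DDL task, as postProcess() does per partition with a location.
    static void attachMoveTask(Task<?> ddlTask, Table table, Path partLocation,
            Map<String, String> partSpec, Long writeId, int stmtId,
            HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs) {
        LoadTableDesc loadTableWork = new LoadTableDesc(partLocation, Utilities.getTableDesc(table),
            partSpec, LoadTableDesc.LoadFileType.KEEP_EXISTING, writeId);
        loadTableWork.setStmtId(stmtId);
        loadTableWork.setInheritTableSpecs(true);
        Task<MoveWork> moveTask = TaskFactory.get(
            new MoveWork(inputs, outputs, loadTableWork, null, true, false));
        ddlTask.addDependentTask(moveTask);
    }
}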

Example 9 with LoadTableDesc

use of org.apache.hadoop.hive.ql.plan.LoadTableDesc in project hive by apache.

From the class LoadTable, method loadTableTask:

private Task<?> loadTableTask(Table table, ReplicationSpec replicationSpec, Path tgtPath, Path fromURI) {
    Path dataPath = fromURI;
    Path tmpPath = tgtPath;
    // For ACID tables, copy the files directly to the target path; there is no need to create the staging dir.
    LoadFileType loadFileType;
    if (replicationSpec.isInReplicationScope() && AcidUtils.isTransactionalTable(table)) {
        loadFileType = LoadFileType.IGNORE;
    } else {
        loadFileType = (replicationSpec.isReplace()) ? LoadFileType.REPLACE_ALL : LoadFileType.OVERWRITE_EXISTING;
        tmpPath = PathUtils.getExternalTmpPath(tgtPath, context.pathInfo);
    }
    LOG.debug("adding dependent CopyWork/AddPart/MoveWork for table " + table.getCompleteName() + " with source location: " + dataPath.toString() + " and target location " + tgtPath.toString());
    boolean copyAtLoad = context.hiveConf.getBoolVar(HiveConf.ConfVars.REPL_RUN_DATA_COPY_TASKS_ON_TARGET);
    Task<?> copyTask = ReplCopyTask.getLoadCopyTask(replicationSpec, dataPath, tmpPath, context.hiveConf, copyAtLoad, false, (new Path(context.dumpDirectory)).getParent().toString(), metricCollector);
    MoveWork moveWork = new MoveWork(new HashSet<>(), new HashSet<>(), null, null, false, (new Path(context.dumpDirectory)).getParent().toString(), metricCollector, true);
    if (AcidUtils.isTransactionalTable(table)) {
        LoadMultiFilesDesc loadFilesWork = new LoadMultiFilesDesc(Collections.singletonList(tmpPath), Collections.singletonList(tgtPath), true, null, null);
        moveWork.setMultiFilesDesc(loadFilesWork);
    } else {
        LoadTableDesc loadTableWork = new LoadTableDesc(tmpPath, Utilities.getTableDesc(table), new TreeMap<>(), loadFileType, 0L);
        moveWork.setLoadTableWork(loadTableWork);
    }
    moveWork.setIsInReplicationScope(replicationSpec.isInReplicationScope());
    Task<?> loadTableTask = TaskFactory.get(moveWork, context.hiveConf);
    copyTask.addDependentTask(loadTableTask);
    return copyTask;
}
Also used : Path(org.apache.hadoop.fs.Path) MoveWork(org.apache.hadoop.hive.ql.plan.MoveWork) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) LoadFileType(org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType) LoadMultiFilesDesc(org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc)
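The key decision above is which LoadFileType to use; a small hedged sketch of just that choice follows. The class and method names are illustrative, and the import locations of ReplicationSpec and AcidUtils are assumed from the Hive codebase (they do not appear in this example's import list).

import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
import org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType;

public class ReplLoadFileTypeSketch {

    // Transactional tables loaded under replication keep whatever is already at the
    // target (IGNORE); otherwise the choice between REPLACE_ALL and OVERWRITE_EXISTING
    // depends on whether the replication spec asks for a replace.
    static LoadFileType chooseLoadFileType(ReplicationSpec replicationSpec, Table table) {
        if (replicationSpec.isInReplicationScope() && AcidUtils.isTransactionalTable(table)) {
            return LoadFileType.IGNORE;
        }
        return replicationSpec.isReplace() ? LoadFileType.REPLACE_ALL : LoadFileType.OVERWRITE_EXISTING;
    }
}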

Example 10 with LoadTableDesc

use of org.apache.hadoop.hive.ql.plan.LoadTableDesc in project hive by apache.

From the class MoveTask, method isLocal:

/*
   * Does the move task involve moving to a local file system
   */
public boolean isLocal() {
    LoadTableDesc tbd = work.getLoadTableWork();
    if (tbd != null) {
        return false;
    }
    LoadFileDesc lfd = work.getLoadFileWork();
    if (lfd != null) {
        if (lfd.getIsDfsDir()) {
            return false;
        } else {
            return true;
        }
    }
    return false;
}
Also used : LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) LoadFileDesc(org.apache.hadoop.hive.ql.plan.LoadFileDesc)
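For readability, the same check can be collapsed into a single expression. A hedged, behaviorally equivalent sketch (MoveLocalitySketch is a hypothetical name; it takes the MoveWork that the getters above are called on):

import org.apache.hadoop.hive.ql.plan.LoadFileDesc;
import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
import org.apache.hadoop.hive.ql.plan.MoveWork;

public class MoveLocalitySketch {

    // A table load never targets the local file system; a file load is local
    // exactly when its destination is not a DFS directory.
    static boolean isLocal(MoveWork work) {
        LoadTableDesc tbd = work.getLoadTableWork();
        if (tbd != null) {
            return false;
        }
        LoadFileDesc lfd = work.getLoadFileWork();
        return lfd != null && !lfd.getIsDfsDir();
    }
}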

Aggregations

LoadTableDesc (org.apache.hadoop.hive.ql.plan.LoadTableDesc): 29
MoveWork (org.apache.hadoop.hive.ql.plan.MoveWork): 20
Path (org.apache.hadoop.fs.Path): 17
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 9
LoadFileDesc (org.apache.hadoop.hive.ql.plan.LoadFileDesc): 9
ArrayList (java.util.ArrayList): 8
LockException (org.apache.hadoop.hive.ql.lockmgr.LockException): 8
Partition (org.apache.hadoop.hive.ql.metadata.Partition): 8
TableDesc (org.apache.hadoop.hive.ql.plan.TableDesc): 8
WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity): 7
Table (org.apache.hadoop.hive.ql.metadata.Table): 7
IOException (java.io.IOException): 6
HashMap (java.util.HashMap): 6
FileStatus (org.apache.hadoop.fs.FileStatus): 6
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 5
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 5
DDLWork (org.apache.hadoop.hive.ql.plan.DDLWork): 5
DynamicPartitionCtx (org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx): 5
LoadFileType (org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType): 5
LinkedHashMap (java.util.LinkedHashMap): 4