Search in sources :

Example 21 with LoadTableDesc

use of org.apache.hadoop.hive.ql.plan.LoadTableDesc in project hive by apache.

the class ImportSemanticAnalyzer method addSinglePartition.

private static Task<?> addSinglePartition(URI fromURI, FileSystem fs, ImportTableDesc tblDesc, Table table, Warehouse wh, AddPartitionDesc addPartitionDesc, ReplicationSpec replicationSpec, EximUtil.SemanticAnalyzerWrapperContext x, Long writeId, int stmtId, boolean isSourceMm, Task<?> commitTask) throws MetaException, IOException, HiveException {
    AddPartitionDesc.OnePartitionDesc partSpec = addPartitionDesc.getPartition(0);
    if (tblDesc.isExternal() && tblDesc.getLocation() == null) {
        x.getLOG().debug("Importing in-place: adding AddPart for partition " + partSpecToString(partSpec.getPartSpec()));
        // addPartitionDesc already has the right partition location
        @SuppressWarnings("unchecked") Task<?> addPartTask = TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc));
        return addPartTask;
    } else {
        String srcLocation = partSpec.getLocation();
        fixLocationInPartSpec(fs, tblDesc, table, wh, replicationSpec, partSpec, x);
        x.getLOG().debug("adding dependent CopyWork/AddPart/MoveWork for partition " + partSpecToString(partSpec.getPartSpec()) + " with source location: " + srcLocation);
        Path tgtLocation = new Path(partSpec.getLocation());
        Path destPath = !AcidUtils.isInsertOnlyTable(table.getParameters()) ? x.getCtx().getExternalTmpPath(tgtLocation) : new Path(tgtLocation, AcidUtils.deltaSubdir(writeId, writeId, stmtId));
        Path moveTaskSrc = !AcidUtils.isInsertOnlyTable(table.getParameters()) ? destPath : tgtLocation;
        if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
            Utilities.FILE_OP_LOGGER.trace("adding import work for partition with source location: " + srcLocation + "; target: " + tgtLocation + "; copy dest " + destPath + "; mm " + writeId + " (src " + isSourceMm + ") for " + partSpecToString(partSpec.getPartSpec()));
        }
        Task<?> copyTask = null;
        if (replicationSpec.isInReplicationScope()) {
            if (isSourceMm || isAcid(writeId)) {
                // Note: this is replication gap, not MM gap... Repl V2 is not ready yet.
                throw new RuntimeException("Replicating MM and ACID tables is not supported");
            }
            copyTask = ReplCopyTask.getLoadCopyTask(replicationSpec, new Path(srcLocation), destPath, x.getConf());
        } else {
            CopyWork cw = new CopyWork(new Path(srcLocation), destPath, false);
            cw.setSkipSourceMmDirs(isSourceMm);
            copyTask = TaskFactory.get(cw);
        }
        Task<?> addPartTask = TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc));
        // Note: this sets LoadFileType incorrectly for ACID; is that relevant for import?
        // See setLoadFileType and setIsAcidIow calls elsewhere for an example.
        LoadTableDesc loadTableWork = new LoadTableDesc(moveTaskSrc, Utilities.getTableDesc(table), partSpec.getPartSpec(), replicationSpec.isReplace() ? LoadFileType.REPLACE_ALL : LoadFileType.OVERWRITE_EXISTING, writeId);
        loadTableWork.setStmtId(stmtId);
        loadTableWork.setInheritTableSpecs(false);
        Task<?> loadPartTask = TaskFactory.get(new MoveWork(x.getInputs(), x.getOutputs(), loadTableWork, null, false));
        copyTask.addDependentTask(loadPartTask);
        addPartTask.addDependentTask(loadPartTask);
        x.getTasks().add(copyTask);
        if (commitTask != null) {
            loadPartTask.addDependentTask(commitTask);
        }
        return addPartTask;
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MoveWork(org.apache.hadoop.hive.ql.plan.MoveWork) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) DDLWork(org.apache.hadoop.hive.ql.plan.DDLWork) CopyWork(org.apache.hadoop.hive.ql.plan.CopyWork) AddPartitionDesc(org.apache.hadoop.hive.ql.plan.AddPartitionDesc)
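
For insert-only (MM) transactional tables, the method above copies data straight into a delta directory under the target partition instead of an external temporary path, and the move task's source is then the partition location itself. A minimal sketch of that destination-path construction, reusing the AcidUtils helper called above (the class name and the partition location, write id, and statement id values are placeholders):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.AcidUtils;

public class MmImportDestSketch {
    public static void main(String[] args) {
        // Placeholder partition location; in the example it comes from partSpec.getLocation().
        Path tgtLocation = new Path("/warehouse/db.db/t/p=1");
        long writeId = 5L;   // placeholder write id allocated by the transaction manager
        int stmtId = 1;      // placeholder statement id
        // deltaSubdir(min, max, stmtId) names the delta directory, e.g. delta_0000005_0000005_0001.
        Path destPath = new Path(tgtLocation, AcidUtils.deltaSubdir(writeId, writeId, stmtId));
        System.out.println(destPath);
    }
}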

Example 22 with LoadTableDesc

use of org.apache.hadoop.hive.ql.plan.LoadTableDesc in project hive by apache.

the class LoadSemanticAnalyzer method analyzeInternal.

@Override
public void analyzeInternal(ASTNode ast) throws SemanticException {
    boolean isLocal = false;
    boolean isOverWrite = false;
    Tree fromTree = ast.getChild(0);
    Tree tableTree = ast.getChild(1);
    if (ast.getChildCount() == 4) {
        isLocal = true;
        isOverWrite = true;
    }
    if (ast.getChildCount() == 3) {
        if (ast.getChild(2).getText().toLowerCase().equals("local")) {
            isLocal = true;
        } else {
            isOverWrite = true;
        }
    }
    // initialize load path
    URI fromURI;
    try {
        String fromPath = stripQuotes(fromTree.getText());
        fromURI = initializeFromURI(fromPath, isLocal);
    } catch (IOException e) {
        throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(fromTree, e.getMessage()), e);
    } catch (URISyntaxException e) {
        throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(fromTree, e.getMessage()), e);
    }
    // initialize destination table/partition
    TableSpec ts = new TableSpec(db, conf, (ASTNode) tableTree);
    if (ts.tableHandle.isView() || ts.tableHandle.isMaterializedView()) {
        throw new SemanticException(ErrorMsg.DML_AGAINST_VIEW.getMsg());
    }
    if (ts.tableHandle.isNonNative()) {
        throw new SemanticException(ErrorMsg.LOAD_INTO_NON_NATIVE.getMsg());
    }
    if (ts.tableHandle.isStoredAsSubDirectories()) {
        throw new SemanticException(ErrorMsg.LOAD_INTO_STORED_AS_DIR.getMsg());
    }
    List<FieldSchema> parts = ts.tableHandle.getPartitionKeys();
    if ((parts != null && parts.size() > 0) && (ts.partSpec == null || ts.partSpec.size() == 0)) {
        throw new SemanticException(ErrorMsg.NEED_PARTITION_ERROR.getMsg());
    }
    List<String> bucketCols = ts.tableHandle.getBucketCols();
    if (bucketCols != null && !bucketCols.isEmpty()) {
        String error = StrictChecks.checkBucketing(conf);
        if (error != null) {
            throw new SemanticException("Please load into an intermediate table" + " and use 'insert... select' to allow Hive to enforce bucketing. " + error);
        }
    }
    // make sure the arguments make sense
    List<FileStatus> files = applyConstraintsAndGetFiles(fromURI, fromTree, isLocal, ts.tableHandle);
    // for managed tables, make sure the file formats match
    if (TableType.MANAGED_TABLE.equals(ts.tableHandle.getTableType()) && conf.getBoolVar(HiveConf.ConfVars.HIVECHECKFILEFORMAT)) {
        ensureFileFormatsMatch(ts, files, fromURI);
    }
    inputs.add(toReadEntity(new Path(fromURI)));
    Task<? extends Serializable> rTask = null;
    // create final load/move work
    boolean preservePartitionSpecs = false;
    Map<String, String> partSpec = ts.getPartSpec();
    if (partSpec == null) {
        partSpec = new LinkedHashMap<String, String>();
        outputs.add(new WriteEntity(ts.tableHandle, (isOverWrite ? WriteEntity.WriteType.INSERT_OVERWRITE : WriteEntity.WriteType.INSERT)));
    } else {
        try {
            Partition part = Hive.get().getPartition(ts.tableHandle, partSpec, false);
            if (part != null) {
                if (isOverWrite) {
                    outputs.add(new WriteEntity(part, WriteEntity.WriteType.INSERT_OVERWRITE));
                } else {
                    outputs.add(new WriteEntity(part, WriteEntity.WriteType.INSERT));
                    // If partition already exists and we aren't overwriting it, then respect
                    // its current location info rather than picking it from the parent TableDesc
                    preservePartitionSpecs = true;
                }
            } else {
                outputs.add(new WriteEntity(ts.tableHandle, (isOverWrite ? WriteEntity.WriteType.INSERT_OVERWRITE : WriteEntity.WriteType.INSERT)));
            }
        } catch (HiveException e) {
            throw new SemanticException(e);
        }
    }
    Long writeId = null;
    int stmtId = -1;
    if (AcidUtils.isTransactionalTable(ts.tableHandle)) {
        try {
            writeId = SessionState.get().getTxnMgr().getTableWriteId(ts.tableHandle.getDbName(), ts.tableHandle.getTableName());
        } catch (LockException ex) {
            throw new SemanticException("Failed to allocate the write id", ex);
        }
        stmtId = SessionState.get().getTxnMgr().getStmtIdAndIncrement();
    }
    // Note: this sets LoadFileType incorrectly for ACID; is that relevant for load?
    // See setLoadFileType and setIsAcidIow calls elsewhere for an example.
    LoadTableDesc loadTableWork = new LoadTableDesc(new Path(fromURI), Utilities.getTableDesc(ts.tableHandle), partSpec, isOverWrite ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING, writeId);
    loadTableWork.setStmtId(stmtId);
    if (preservePartitionSpecs) {
        // Note: preservePartitionSpecs=true implies inheritTableSpecs=false, but
        // preservePartitionSpecs=false (the default) is not enough information on its own
        // to set inheritTableSpecs=true.
        loadTableWork.setInheritTableSpecs(false);
    }
    Task<? extends Serializable> childTask = TaskFactory.get(new MoveWork(getInputs(), getOutputs(), loadTableWork, null, true, isLocal));
    if (rTask != null) {
        rTask.addDependentTask(childTask);
    } else {
        rTask = childTask;
    }
    rootTasks.add(rTask);
    // The user asked for stats to be collected.
    // Some stats like number of rows require a scan of the data
    // However, some other stats, like number of files, do not require a complete scan
    // Update the stats which do not require a complete scan.
    Task<? extends Serializable> statTask = null;
    if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
        BasicStatsWork basicStatsWork = new BasicStatsWork(loadTableWork);
        basicStatsWork.setNoStatsAggregator(true);
        basicStatsWork.setClearAggregatorStats(true);
        StatsWork columnStatsWork = new StatsWork(ts.tableHandle, basicStatsWork, conf);
        statTask = TaskFactory.get(columnStatsWork);
    }
    if (statTask != null) {
        childTask.addDependentTask(statTask);
    }
}
Also used : MoveWork(org.apache.hadoop.hive.ql.plan.MoveWork) FileStatus(org.apache.hadoop.fs.FileStatus) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) URISyntaxException(java.net.URISyntaxException) URI(java.net.URI) StatsWork(org.apache.hadoop.hive.ql.plan.StatsWork) BasicStatsWork(org.apache.hadoop.hive.ql.plan.BasicStatsWork) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) Tree(org.antlr.runtime.tree.Tree) BasicStatsWork(org.apache.hadoop.hive.ql.plan.BasicStatsWork) WriteEntity(org.apache.hadoop.hive.ql.hooks.WriteEntity) Path(org.apache.hadoop.fs.Path) Partition(org.apache.hadoop.hive.ql.metadata.Partition) IOException(java.io.IOException) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc)
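
The two analyzers pick the LoadFileType differently: the import path (Example 21) replaces everything when the replication spec asks for it and otherwise overwrites existing files, while LOAD DATA replaces only on OVERWRITE and otherwise keeps existing files. A small sketch making that contrast explicit; the class and helper names are hypothetical:

import org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType;

public class LoadFileTypeChoiceSketch {
    // Import path (Example 21): REPLACE_ALL for a replication replace, else OVERWRITE_EXISTING.
    static LoadFileType forImport(boolean replicationIsReplace) {
        return replicationIsReplace ? LoadFileType.REPLACE_ALL : LoadFileType.OVERWRITE_EXISTING;
    }

    // LOAD DATA path (Example 22): REPLACE_ALL only for OVERWRITE, else KEEP_EXISTING.
    static LoadFileType forLoadData(boolean isOverWrite) {
        return isOverWrite ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING;
    }
}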

Example 23 with LoadTableDesc

use of org.apache.hadoop.hive.ql.plan.LoadTableDesc in project hive by apache.

the class MoveTask method acquireLockForFileMove.

private LocalTableLock acquireLockForFileMove(LoadTableDesc loadTableWork) throws HiveException {
    LockFileMoveMode mode = LockFileMoveMode.fromConf(conf);
    if (mode == LockFileMoveMode.NONE) {
        return new LocalTableLock();
    }
    if (mode == LockFileMoveMode.DP && loadTableWork.getDPCtx() == null) {
        return new LocalTableLock();
    }
    WriteEntity output = context.getLoadTableOutputMap().get(loadTableWork);
    List<HiveLockObj> lockObjects = context.getOutputLockObjects().get(output);
    if (lockObjects == null) {
        return new LocalTableLock();
    }
    TableDesc table = loadTableWork.getTable();
    if (table == null) {
        return new LocalTableLock();
    }
    Hive db = getHive();
    Table baseTable = db.getTable(loadTableWork.getTable().getTableName());
    HiveLockObject.HiveLockObjectData lockData = new HiveLockObject.HiveLockObjectData(queryPlan.getQueryId(), String.valueOf(System.currentTimeMillis()), "IMPLICIT", queryPlan.getQueryStr(), conf);
    HiveLockObject lock = new HiveLockObject(baseTable, lockData);
    for (HiveLockObj hiveLockObj : lockObjects) {
        if (Arrays.equals(hiveLockObj.getObj().getPaths(), lock.getPaths())) {
            HiveLockMode l = hiveLockObj.getMode();
            if (l == HiveLockMode.EXCLUSIVE || l == HiveLockMode.SEMI_SHARED) {
                // no need to lock ; already owns a more powerful one
                return new LocalTableLock();
            }
        }
    }
    return new LocalTableLock(lock);
}
Also used : Hive(org.apache.hadoop.hive.ql.metadata.Hive) Table(org.apache.hadoop.hive.ql.metadata.Table) HiveLockObject(org.apache.hadoop.hive.ql.lockmgr.HiveLockObject) HiveLockObj(org.apache.hadoop.hive.ql.lockmgr.HiveLockObj) TableDesc(org.apache.hadoop.hive.ql.plan.TableDesc) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) CreateTableDesc(org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc) WriteEntity(org.apache.hadoop.hive.ql.hooks.WriteEntity) HiveLockMode(org.apache.hadoop.hive.ql.lockmgr.HiveLockMode)

Example 24 with LoadTableDesc

use of org.apache.hadoop.hive.ql.plan.LoadTableDesc in project hive by apache.

the class BasicStatsTask method getPartitionsList.

/**
 * Get the list of partitions that need to update statistics.
 * TODO: we should reuse the Partitions generated at compile time
 * since getting the list of partitions is quite expensive.
 *
 * @return a list of partitions that need to update statistics.
 * @throws HiveException
 */
private List<Partition> getPartitionsList(Hive db) throws HiveException {
    if (work.getLoadFileDesc() != null) {
        // we are in CTAS, so we know there are no partitions
        return null;
    }
    if (work.getTableSpecs() != null) {
        // ANALYZE command
        TableSpec tblSpec = work.getTableSpecs();
        table = tblSpec.tableHandle;
        if (!table.isPartitioned()) {
            return null;
        }
        // get all partitions that matches with the partition spec
        return tblSpec.partitions != null ? unmodifiableList(tblSpec.partitions) : emptyList();
    } else if (work.getLoadTableDesc() != null) {
        // INSERT OVERWRITE command
        LoadTableDesc tbd = work.getLoadTableDesc();
        table = db.getTable(tbd.getTable().getTableName());
        if (!table.isPartitioned()) {
            return null;
        }
        DynamicPartitionCtx dpCtx = tbd.getDPCtx();
        if (dpCtx != null && dpCtx.getNumDPCols() > 0) {
            // If no dynamic partitions are generated, dpPartSpecs may not be initialized
            if (dpPartSpecs != null) {
                // Reload partition metadata because another BasicStatsTask instance may have updated the stats.
                List<String> partNames = dpPartSpecs.stream().map(Partition::getName).collect(Collectors.toList());
                return db.getPartitionsByNames(table, partNames);
            }
        } else {
            // static partition
            return singletonList(db.getPartition(table, tbd.getPartitionSpec(), false));
        }
    }
    return emptyList();
}
Also used : LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) Partition(org.apache.hadoop.hive.ql.metadata.Partition) TableSpec(org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec) DynamicPartitionCtx(org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx) Collections.unmodifiableList(java.util.Collections.unmodifiableList) ArrayList(java.util.ArrayList) Collections.singletonList(java.util.Collections.singletonList) Collections.emptyList(java.util.Collections.emptyList) List(java.util.List)

Example 25 with LoadTableDesc

use of org.apache.hadoop.hive.ql.plan.LoadTableDesc in project hive by apache.

the class Hive method loadDynamicPartitions.

/**
 * Given a source directory name of the load path, load all dynamically generated partitions
 * into the specified table and return a list of strings that represent the dynamic partition
 * paths.
 * @param tbd table descriptor
 * @param numLB number of buckets
 * @param isAcid true if this is an ACID operation
 * @param writeId writeId, can be 0 unless isAcid == true
 * @param stmtId statementId
 * @param resetStatistics if true, reset statistics. Do not reset statistics otherwise.
 * @param operation ACID operation type
 * @param partitionDetailsMap full dynamic partition specification
 * @return partition map details (PartitionSpec and Partition)
 * @throws HiveException
 */
public Map<Map<String, String>, Partition> loadDynamicPartitions(final LoadTableDesc tbd, final int numLB, final boolean isAcid, final long writeId, final int stmtId, final boolean resetStatistics, final AcidUtils.Operation operation, Map<Path, PartitionDetails> partitionDetailsMap) throws HiveException {
    PerfLogger perfLogger = SessionState.getPerfLogger();
    perfLogger.perfLogBegin("MoveTask", PerfLogger.LOAD_DYNAMIC_PARTITIONS);
    final Path loadPath = tbd.getSourcePath();
    final Table tbl = getTable(tbd.getTable().getTableName());
    final Map<String, String> partSpec = tbd.getPartitionSpec();
    final AtomicInteger partitionsLoaded = new AtomicInteger(0);
    final boolean inPlaceEligible = conf.getLong("fs.trash.interval", 0) <= 0 && InPlaceUpdate.canRenderInPlace(conf) && !SessionState.getConsole().getIsSilent();
    final PrintStream ps = (inPlaceEligible) ? SessionState.getConsole().getInfoStream() : null;
    final SessionState parentSession = SessionState.get();
    List<Callable<Partition>> tasks = Lists.newLinkedList();
    boolean fetchPartitionInfo = true;
    final boolean scanPartitionsByName = HiveConf.getBoolVar(conf, HIVE_LOAD_DYNAMIC_PARTITIONS_SCAN_SPECIFIC_PARTITIONS);
    // for every dynamic partition
    if (scanPartitionsByName && !tbd.isDirectInsert() && !AcidUtils.isTransactionalTable(tbl)) {
        // Fetch only relevant partitions from HMS for checking old partitions
        List<String> partitionNames = new LinkedList<>();
        for (PartitionDetails details : partitionDetailsMap.values()) {
            if (details.fullSpec != null && !details.fullSpec.isEmpty()) {
                partitionNames.add(Warehouse.makeDynamicPartNameNoTrailingSeperator(details.fullSpec));
            }
        }
        List<Partition> partitions = Hive.get().getPartitionsByNames(tbl, partitionNames);
        for (Partition partition : partitions) {
            LOG.debug("HMS partition spec: {}", partition.getSpec());
            partitionDetailsMap.entrySet().parallelStream().filter(entry -> entry.getValue().fullSpec.equals(partition.getSpec())).findAny().ifPresent(entry -> {
                entry.getValue().partition = partition;
                entry.getValue().hasOldPartition = true;
            });
        }
        // no need to fetch partition again in tasks since we have already fetched partitions
        // info in getPartitionsByNames()
        fetchPartitionInfo = false;
    }
    boolean isTxnTable = AcidUtils.isTransactionalTable(tbl);
    AcidUtils.TableSnapshot tableSnapshot = isTxnTable ? getTableSnapshot(tbl, writeId) : null;
    for (Entry<Path, PartitionDetails> entry : partitionDetailsMap.entrySet()) {
        boolean getPartitionFromHms = fetchPartitionInfo;
        tasks.add(() -> {
            PartitionDetails partitionDetails = entry.getValue();
            Map<String, String> fullPartSpec = partitionDetails.fullSpec;
            try {
                SessionState.setCurrentSessionState(parentSession);
                if (getPartitionFromHms) {
                    // didn't fetch partition info from HMS. Getting from HMS now.
                    Partition existing = getPartition(tbl, fullPartSpec, false);
                    if (existing != null) {
                        partitionDetails.partition = existing;
                        partitionDetails.hasOldPartition = true;
                    }
                }
                LOG.info("New loading path = " + entry.getKey() + " withPartSpec " + fullPartSpec);
                Partition oldPartition = partitionDetails.partition;
                List<FileStatus> newFiles = null;
                if (partitionDetails.newFiles != null) {
                    // If we already know the files from the direct insert manifest, use them
                    newFiles = partitionDetails.newFiles;
                } else if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary() && oldPartition == null) {
                    // Otherwise only collect them, if we are going to fire write notifications
                    newFiles = Collections.synchronizedList(new ArrayList<>());
                }
                // load the partition
                Partition partition = loadPartitionInternal(entry.getKey(), tbl, fullPartSpec, oldPartition, tbd.getLoadFileType(), true, false, numLB > 0, false, isAcid, resetStatistics, writeId, stmtId, tbd.isInsertOverwrite(), isTxnTable, newFiles, tbd.isDirectInsert());
                // propagate the snapshot's write id so it is recorded with the partition in the metastore
                if (tableSnapshot != null) {
                    partition.getTPartition().setWriteId(tableSnapshot.getWriteId());
                }
                partitionDetails.tableSnapshot = tableSnapshot;
                if (oldPartition == null) {
                    partitionDetails.newFiles = newFiles;
                    partitionDetails.partition = partition;
                }
                if (inPlaceEligible) {
                    synchronized (ps) {
                        InPlaceUpdate.rePositionCursor(ps);
                        partitionsLoaded.incrementAndGet();
                        InPlaceUpdate.reprintLine(ps, "Loaded : " + partitionsLoaded.get() + "/" + partitionDetailsMap.size() + " partitions.");
                    }
                }
                return partition;
            } catch (Exception e) {
                LOG.error("Exception when loading partition with parameters " + " partPath=" + entry.getKey() + ", " + " table=" + tbl.getTableName() + ", " + " partSpec=" + fullPartSpec + ", " + " loadFileType=" + tbd.getLoadFileType().toString() + ", " + " listBucketingLevel=" + numLB + ", " + " isAcid=" + isAcid + ", " + " resetStatistics=" + resetStatistics, e);
                throw e;
            } finally {
                // get(conf).getMSC can be called in this task, Close the HMS connection right after use, do not wait for finalizer to close it.
                closeCurrent();
            }
        });
    }
    int poolSize = conf.getInt(ConfVars.HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT.varname, 1);
    ExecutorService executor = Executors.newFixedThreadPool(poolSize, new ThreadFactoryBuilder().setDaemon(true).setNameFormat("load-dynamic-partitionsToAdd-%d").build());
    List<Future<Partition>> futures = Lists.newLinkedList();
    Map<Map<String, String>, Partition> result = Maps.newLinkedHashMap();
    try {
        futures = executor.invokeAll(tasks);
        LOG.info("Number of partitionsToAdd to be added is " + futures.size());
        for (Future<Partition> future : futures) {
            Partition partition = future.get();
            result.put(partition.getSpec(), partition);
        }
        // add new partitions in batch
        addPartitionsToMetastore(partitionDetailsMap.entrySet().stream().filter(entry -> !entry.getValue().hasOldPartition).map(entry -> entry.getValue().partition).collect(Collectors.toList()), resetStatistics, tbl, partitionDetailsMap.entrySet().stream().filter(entry -> !entry.getValue().hasOldPartition).map(entry -> entry.getValue().tableSnapshot).collect(Collectors.toList()));
        // For acid table, add the acid_write event with file list at the time of load itself. But
        // it should be done after partition is created.
        List<WriteNotificationLogRequest> requestList = new ArrayList<>();
        int maxBatchSize = conf.getIntVar(HIVE_WRITE_NOTIFICATION_MAX_BATCH_SIZE);
        for (Entry<Path, PartitionDetails> entry : partitionDetailsMap.entrySet()) {
            PartitionDetails partitionDetails = entry.getValue();
            if (isTxnTable && partitionDetails.newFiles != null) {
                addWriteNotificationLog(tbl, partitionDetails.fullSpec, partitionDetails.newFiles, writeId, requestList);
                if (requestList != null && requestList.size() >= maxBatchSize) {
                    // If the first call returns that the HMS does not supports batching, avoid batching
                    // for later requests.
                    boolean batchSupported = addWriteNotificationLogInBatch(tbl, requestList);
                    if (batchSupported) {
                        requestList.clear();
                    } else {
                        requestList = null;
                    }
                }
            }
        }
        if (requestList != null && requestList.size() > 0) {
            addWriteNotificationLogInBatch(tbl, requestList);
        }
        setStatsPropAndAlterPartitions(resetStatistics, tbl, partitionDetailsMap.entrySet().stream().filter(entry -> entry.getValue().hasOldPartition).map(entry -> entry.getValue().partition).collect(Collectors.toList()), tableSnapshot);
    } catch (InterruptedException | ExecutionException e) {
        throw new HiveException("Exception when loading " + partitionDetailsMap.size() + " partitions" + " in table " + tbl.getTableName() + " with loadPath=" + loadPath, e);
    } catch (TException e) {
        LOG.error("Failed loadDynamicPartitions", e);
        throw new HiveException(e);
    } catch (Exception e) {
        StringBuffer logMsg = new StringBuffer();
        logMsg.append("Exception when loading partitionsToAdd with parameters ");
        logMsg.append("partPaths=");
        partitionDetailsMap.keySet().forEach(path -> logMsg.append(path + ", "));
        logMsg.append("table=" + tbl.getTableName() + ", ").append("partSpec=" + partSpec + ", ").append("loadFileType=" + tbd.getLoadFileType().toString() + ", ").append("listBucketingLevel=" + numLB + ", ").append("isAcid=" + isAcid + ", ").append("resetStatistics=" + resetStatistics);
        LOG.error(logMsg.toString(), e);
        throw e;
    } finally {
        LOG.debug("Cancelling " + futures.size() + " dynamic loading tasks");
        executor.shutdownNow();
    }
    if (HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) && HiveConf.getBoolVar(conf, ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION)) {
        throw new HiveException(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION.name() + "=true");
    }
    try {
        if (isTxnTable) {
            List<String> partNames = result.values().stream().map(Partition::getName).collect(Collectors.toList());
            getMSC().addDynamicPartitions(parentSession.getTxnMgr().getCurrentTxnId(), writeId, tbl.getDbName(), tbl.getTableName(), partNames, AcidUtils.toDataOperationType(operation));
        }
        LOG.info("Loaded " + result.size() + " partitionsToAdd");
        perfLogger.perfLogEnd("MoveTask", PerfLogger.LOAD_DYNAMIC_PARTITIONS);
        return result;
    } catch (TException te) {
        LOG.error("Failed loadDynamicPartitions", te);
        throw new HiveException("Exception updating metastore for acid table " + tbd.getTable().getTableName() + " with partitions " + result.values(), te);
    }
}
Also used : MetaStoreUtils.getDefaultCatalog(org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog) CALCITE(org.apache.hadoop.hive.ql.metadata.HiveRelOptMaterialization.RewriteAlgorithm.CALCITE) FileSystem(org.apache.hadoop.fs.FileSystem) FileStatus(org.apache.hadoop.fs.FileStatus) FunctionRegistry(org.apache.hadoop.hive.ql.exec.FunctionRegistry) AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException) Future(java.util.concurrent.Future) StatsSetupConst(org.apache.hadoop.hive.common.StatsSetupConst) Pair(org.apache.commons.lang3.tuple.Pair) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) GetPartitionRequest(org.apache.hadoop.hive.metastore.api.GetPartitionRequest) DefaultConstraintsRequest(org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest) EnumSet(java.util.EnumSet) SQLAllTableConstraints(org.apache.hadoop.hive.metastore.api.SQLAllTableConstraints) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) HadoopShims(org.apache.hadoop.hive.shims.HadoopShims) FileChecksum(org.apache.hadoop.fs.FileChecksum) CheckConstraintsRequest(org.apache.hadoop.hive.metastore.api.CheckConstraintsRequest) WMMapping(org.apache.hadoop.hive.metastore.api.WMMapping) HiveMaterializedViewUtils(org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewUtils) AcidUtils.getFullTableName(org.apache.hadoop.hive.ql.io.AcidUtils.getFullTableName) SetPartitionsStatsRequest(org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest) MetastoreConf(org.apache.hadoop.hive.metastore.conf.MetastoreConf) ForeignKeysRequest(org.apache.hadoop.hive.metastore.api.ForeignKeysRequest) FileUtils(org.apache.hadoop.hive.common.FileUtils) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) ErrorMsg(org.apache.hadoop.hive.ql.ErrorMsg) WMPool(org.apache.hadoop.hive.metastore.api.WMPool) HdfsUtils(org.apache.hadoop.hive.ql.io.HdfsUtils) InsertEventRequestData(org.apache.hadoop.hive.metastore.api.InsertEventRequestData) Role(org.apache.hadoop.hive.metastore.api.Role) CollectionUtils(org.apache.commons.collections4.CollectionUtils) CmRecycleRequest(org.apache.hadoop.hive.metastore.api.CmRecycleRequest) Lists(com.google.common.collect.Lists) MATERIALIZED_VIEW_REWRITING_TIME_WINDOW(org.apache.hadoop.hive.conf.Constants.MATERIALIZED_VIEW_REWRITING_TIME_WINDOW) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Unstable(org.apache.hadoop.hive.common.classification.InterfaceStability.Unstable) SQLUniqueConstraint(org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint) Constants(org.apache.hadoop.hive.conf.Constants) ReplChangeManager(org.apache.hadoop.hive.metastore.ReplChangeManager) GetOpenTxnsInfoResponse(org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse) LazySimpleSerDe(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe) AllTableConstraintsRequest(org.apache.hadoop.hive.metastore.api.AllTableConstraintsRequest) EnvironmentContext(org.apache.hadoop.hive.metastore.api.EnvironmentContext) IOException(java.io.IOException) PrincipalType(org.apache.hadoop.hive.metastore.api.PrincipalType) LoadFileType(org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType) UnknownHostException(java.net.UnknownHostException) ExecutionException(java.util.concurrent.ExecutionException) PartitionWithoutSD(org.apache.hadoop.hive.metastore.api.PartitionWithoutSD) MetaStoreServerUtils(org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils) 
Deserializer(org.apache.hadoop.hive.serde2.Deserializer) Preconditions(com.google.common.base.Preconditions) org.apache.hadoop.hive.metastore.api.hive_metastoreConstants(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) ValidWriteIdList(org.apache.hadoop.hive.common.ValidWriteIdList) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) PrimaryKeysRequest(org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest) ConfVars(org.apache.hadoop.hive.conf.HiveConf.ConfVars) WMFullResourcePlan(org.apache.hadoop.hive.metastore.api.WMFullResourcePlan) HiveMetaException(org.apache.hadoop.hive.metastore.HiveMetaException) SerializationUtilities(org.apache.hadoop.hive.ql.exec.SerializationUtilities) HiveObjectType(org.apache.hadoop.hive.metastore.api.HiveObjectType) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) CompactionType(org.apache.hadoop.hive.metastore.api.CompactionType) GetPartitionNamesPsResponse(org.apache.hadoop.hive.metastore.api.GetPartitionNamesPsResponse) ColumnStatisticsDesc(org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc) SynchronizedMetaStoreClient(org.apache.hadoop.hive.metastore.SynchronizedMetaStoreClient) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) InPlaceUpdate(org.apache.hadoop.hive.common.log.InPlaceUpdate) ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse) PerfLogger(org.apache.hadoop.hive.ql.log.PerfLogger) GetPartitionsByNamesRequest(org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesRequest) WMNullableResourcePlan(org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) FireEventRequest(org.apache.hadoop.hive.metastore.api.FireEventRequest) SQLPrimaryKey(org.apache.hadoop.hive.metastore.api.SQLPrimaryKey) Collectors(java.util.stream.Collectors) SessionState(org.apache.hadoop.hive.ql.session.SessionState) CompactionResponse(org.apache.hadoop.hive.metastore.api.CompactionResponse) IMetaStoreClient(org.apache.hadoop.hive.metastore.IMetaStoreClient) DataConnector(org.apache.hadoop.hive.metastore.api.DataConnector) Entry(java.util.Map.Entry) RetryingMetaStoreClient(org.apache.hadoop.hive.metastore.RetryingMetaStoreClient) AcidUtils(org.apache.hadoop.hive.ql.io.AcidUtils) FilenameUtils(org.apache.commons.io.FilenameUtils) MoreExecutors(com.google.common.util.concurrent.MoreExecutors) SQLCheckConstraint(org.apache.hadoop.hive.metastore.api.SQLCheckConstraint) WMNullablePool(org.apache.hadoop.hive.metastore.api.WMNullablePool) PathFilter(org.apache.hadoop.fs.PathFilter) WriteNotificationLogBatchRequest(org.apache.hadoop.hive.metastore.api.WriteNotificationLogBatchRequest) HashSet(java.util.HashSet) ListBucketingPrunerUtils(org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils) PartitionsByExprRequest(org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest) SERIALIZATION_FORMAT(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT) ImmutableList(com.google.common.collect.ImmutableList) UpdateTransactionalStatsRequest(org.apache.hadoop.hive.metastore.api.UpdateTransactionalStatsRequest) StringUtils(org.apache.hadoop.util.StringUtils) LinkedList(java.util.LinkedList) TApplicationException(org.apache.thrift.TApplicationException) ExecutorService(java.util.concurrent.ExecutorService) AbstractFileMergeOperator(org.apache.hadoop.hive.ql.exec.AbstractFileMergeOperator) PrintStream(java.io.PrintStream) 
DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Logger(org.slf4j.Logger) HiveMetaHookLoader(org.apache.hadoop.hive.metastore.HiveMetaHookLoader) HiveConf(org.apache.hadoop.hive.conf.HiveConf) HiveVersionInfo(org.apache.hive.common.util.HiveVersionInfo) HIVE_LOAD_DYNAMIC_PARTITIONS_SCAN_SPECIFIC_PARTITIONS(org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_LOAD_DYNAMIC_PARTITIONS_SCAN_SPECIFIC_PARTITIONS) SQLDefaultConstraint(org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint) ShimLoader(org.apache.hadoop.hive.shims.ShimLoader) BitSet(java.util.BitSet) PartitionDropOptions(org.apache.hadoop.hive.metastore.PartitionDropOptions) NotNullConstraintsRequest(org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest) Arrays(java.util.Arrays) WMResourcePlan(org.apache.hadoop.hive.metastore.api.WMResourcePlan) GetPartitionResponse(org.apache.hadoop.hive.metastore.api.GetPartitionResponse) CreateTableAutomaticGrant(org.apache.hadoop.hive.ql.session.CreateTableAutomaticGrant) HIVE_WRITE_NOTIFICATION_MAX_BATCH_SIZE(org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_WRITE_NOTIFICATION_MAX_BATCH_SIZE) InetAddress(java.net.InetAddress) UniqueConstraintsRequest(org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) HiveMaterializedViewUtils.extractTable(org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewUtils.extractTable) GetPartitionsPsWithAuthResponse(org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthResponse) Set(java.util.Set) STRING_TYPE_NAME(org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME) Executors(java.util.concurrent.Executors) HiveTxnManager(org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager) HiveMetaStoreClient(org.apache.hadoop.hive.metastore.HiveMetaStoreClient) Callable(java.util.concurrent.Callable) SQLForeignKey(org.apache.hadoop.hive.metastore.api.SQLForeignKey) FsAction(org.apache.hadoop.fs.permission.FsAction) ArrayList(java.util.ArrayList) LinkedHashSet(java.util.LinkedHashSet) Nullable(javax.annotation.Nullable) PrincipalPrivilegeSet(org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet) Materialization(org.apache.hadoop.hive.metastore.api.Materialization) GetPartitionsPsWithAuthRequest(org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthRequest) SOFT_DELETE_TABLE(org.apache.hadoop.hive.common.AcidConstants.SOFT_DELETE_TABLE) FunctionUtils(org.apache.hadoop.hive.ql.exec.FunctionUtils) TException(org.apache.thrift.TException) ValidTxnWriteIdList(org.apache.hadoop.hive.common.ValidTxnWriteIdList) ValidTxnList(org.apache.hadoop.hive.common.ValidTxnList) TableType(org.apache.hadoop.hive.metastore.TableType) HiveObjectRef(org.apache.hadoop.hive.metastore.api.HiveObjectRef) SerDeException(org.apache.hadoop.hive.serde2.SerDeException) ExprNodeGenericFuncDesc(org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc) GetRoleGrantsForPrincipalResponse(org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse) LoggerFactory(org.slf4j.LoggerFactory) RolePrincipalGrant(org.apache.hadoop.hive.metastore.api.RolePrincipalGrant) ByteBuffer(java.nio.ByteBuffer) Warehouse(org.apache.hadoop.hive.metastore.Warehouse) ALL(org.apache.hadoop.hive.ql.metadata.HiveRelOptMaterialization.RewriteAlgorithm.ALL) HiveStatsUtils(org.apache.hadoop.hive.common.HiveStatsUtils) InputFormat(org.apache.hadoop.mapred.InputFormat) Path(org.apache.hadoop.fs.Path) ValidReaderWriteIdList(org.apache.hadoop.hive.common.ValidReaderWriteIdList) 
FireEventRequestData(org.apache.hadoop.hive.metastore.api.FireEventRequestData) Splitter(com.google.common.base.Splitter) PrivilegeBag(org.apache.hadoop.hive.metastore.api.PrivilegeBag) MetaStoreUtils(org.apache.hadoop.hive.metastore.utils.MetaStoreUtils) ImmutableMap(com.google.common.collect.ImmutableMap) ColumnStatisticsObj(org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj) HiveMetaHook(org.apache.hadoop.hive.metastore.HiveMetaHook) AggrStats(org.apache.hadoop.hive.metastore.api.AggrStats) Sets(com.google.common.collect.Sets) FileNotFoundException(java.io.FileNotFoundException) ColumnStatistics(org.apache.hadoop.hive.metastore.api.ColumnStatistics) List(java.util.List) DbTxnManager(org.apache.hadoop.hive.ql.lockmgr.DbTxnManager) DFSUtilClient(org.apache.hadoop.hdfs.DFSUtilClient) MetadataPpdResult(org.apache.hadoop.hive.metastore.api.MetadataPpdResult) Optional(java.util.Optional) HiveMetaStoreUtils(org.apache.hadoop.hive.metastore.HiveMetaStoreUtils) PartitionSpec(org.apache.hadoop.hive.metastore.api.PartitionSpec) HiveObjectPrivilege(org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) GetRoleGrantsForPrincipalRequest(org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest) HashMap(java.util.HashMap) GetTableRequest(org.apache.hadoop.hive.metastore.api.GetTableRequest) LimitedPrivate(org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate) Utilities(org.apache.hadoop.hive.ql.exec.Utilities) TableSnapshot(org.apache.hadoop.hive.ql.io.AcidUtils.TableSnapshot) JDODataStoreException(javax.jdo.JDODataStoreException) ObjectUtils(org.apache.commons.lang3.ObjectUtils) TableName(org.apache.hadoop.hive.common.TableName) SQLNotNullConstraint(org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) WMTrigger(org.apache.hadoop.hive.metastore.api.WMTrigger) Iterator(java.util.Iterator) WMValidateResourcePlanResponse(org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse) PartitionDetails(org.apache.hadoop.hive.ql.exec.Utilities.PartitionDetails) SkewedInfo(org.apache.hadoop.hive.metastore.api.SkewedInfo) Maps(com.google.common.collect.Maps) TimeUnit(java.util.concurrent.TimeUnit) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) META_TABLE_STORAGE(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE) GetPartitionNamesPsRequest(org.apache.hadoop.hive.metastore.api.GetPartitionNamesPsRequest) WriteNotificationLogRequest(org.apache.hadoop.hive.metastore.api.WriteNotificationLogRequest) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Collections(java.util.Collections) Database(org.apache.hadoop.hive.metastore.api.Database) Function(org.apache.hadoop.hive.metastore.api.Function) TException(org.apache.thrift.TException) SessionState(org.apache.hadoop.hive.ql.session.SessionState) FileStatus(org.apache.hadoop.fs.FileStatus) TableSnapshot(org.apache.hadoop.hive.ql.io.AcidUtils.TableSnapshot) PerfLogger(org.apache.hadoop.hive.ql.log.PerfLogger) ArrayList(java.util.ArrayList) Callable(java.util.concurrent.Callable) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) ExecutionException(java.util.concurrent.ExecutionException) Path(org.apache.hadoop.fs.Path) PrintStream(java.io.PrintStream) HiveMaterializedViewUtils.extractTable(org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewUtils.extractTable) LinkedList(java.util.LinkedList) 

Aggregations

LoadTableDesc (org.apache.hadoop.hive.ql.plan.LoadTableDesc): 29
MoveWork (org.apache.hadoop.hive.ql.plan.MoveWork): 20
Path (org.apache.hadoop.fs.Path): 17
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 9
LoadFileDesc (org.apache.hadoop.hive.ql.plan.LoadFileDesc): 9
ArrayList (java.util.ArrayList): 8
LockException (org.apache.hadoop.hive.ql.lockmgr.LockException): 8
Partition (org.apache.hadoop.hive.ql.metadata.Partition): 8
TableDesc (org.apache.hadoop.hive.ql.plan.TableDesc): 8
WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity): 7
Table (org.apache.hadoop.hive.ql.metadata.Table): 7
IOException (java.io.IOException): 6
HashMap (java.util.HashMap): 6
FileStatus (org.apache.hadoop.fs.FileStatus): 6
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 5
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 5
DDLWork (org.apache.hadoop.hive.ql.plan.DDLWork): 5
DynamicPartitionCtx (org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx): 5
LoadFileType (org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType): 5
LinkedHashMap (java.util.LinkedHashMap): 4
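
The aggregation counts reflect the pattern that recurs through these examples: a LoadTableDesc built from a source path, the target table's TableDesc, and a partition spec, then wrapped in a MoveWork and scheduled through TaskFactory. A minimal sketch of that wiring, using only constructors and setters that appear in the examples above; the method name and its parameters are placeholders:

import java.util.HashSet;
import java.util.Map;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
import org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType;
import org.apache.hadoop.hive.ql.plan.MoveWork;

public class MoveWorkWiringSketch {
    // Hypothetical helper: build the move task that loads files from sourcePath into table/partSpec.
    static Task<?> buildMoveTask(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs, Table table,
            Path sourcePath, Map<String, String> partSpec, Long writeId, int stmtId) {
        LoadTableDesc loadTableWork = new LoadTableDesc(sourcePath, Utilities.getTableDesc(table),
                partSpec, LoadFileType.KEEP_EXISTING, writeId);  // or REPLACE_ALL / OVERWRITE_EXISTING
        loadTableWork.setStmtId(stmtId);
        // Same five-argument MoveWork constructor as in Example 21; no LoadFileDesc here.
        return TaskFactory.get(new MoveWork(inputs, outputs, loadTableWork, null, false));
    }
}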