
Example 1 with LoadFileType

Use of org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType in project hive by apache.

From the class ImportSemanticAnalyzer, method loadTable.

private static Task<?> loadTable(URI fromURI, Table table, boolean replace, Path tgtPath, ReplicationSpec replicationSpec, EximUtil.SemanticAnalyzerWrapperContext x, Long writeId, int stmtId, boolean isSourceMm) {
    Path dataPath = new Path(fromURI.toString(), EximUtil.DATA_PATH_NAME);
    Path destPath = null, loadPath = null;
    LoadFileType lft;
    if (AcidUtils.isInsertOnlyTable(table)) {
        String mmSubdir = replace ? AcidUtils.baseDir(writeId) : AcidUtils.deltaSubdir(writeId, writeId, stmtId);
        destPath = new Path(tgtPath, mmSubdir);
        loadPath = tgtPath;
        lft = LoadFileType.KEEP_EXISTING;
    } else {
        destPath = loadPath = x.getCtx().getExternalTmpPath(tgtPath);
        lft = replace ? LoadFileType.REPLACE_ALL : LoadFileType.OVERWRITE_EXISTING;
    }
    if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
        Utilities.FILE_OP_LOGGER.trace("adding import work for table with source location: " + dataPath + "; table: " + tgtPath + "; copy destination " + destPath + "; mm " + writeId + " (src " + isSourceMm + ") for " + (table == null ? "a new table" : table.getTableName()));
    }
    Task<?> copyTask = null;
    if (replicationSpec.isInReplicationScope()) {
        if (isSourceMm || isAcid(writeId)) {
            // Note: this is replication gap, not MM gap... Repl V2 is not ready yet.
            throw new RuntimeException("Replicating MM and ACID tables is not supported");
        }
        copyTask = ReplCopyTask.getLoadCopyTask(replicationSpec, dataPath, destPath, x.getConf());
    } else {
        CopyWork cw = new CopyWork(dataPath, destPath, false);
        cw.setSkipSourceMmDirs(isSourceMm);
        copyTask = TaskFactory.get(cw);
    }
    LoadTableDesc loadTableWork = new LoadTableDesc(loadPath, Utilities.getTableDesc(table), new TreeMap<>(), lft, writeId);
    loadTableWork.setStmtId(stmtId);
    MoveWork mv = new MoveWork(x.getInputs(), x.getOutputs(), loadTableWork, null, false);
    Task<?> loadTableTask = TaskFactory.get(mv);
    copyTask.addDependentTask(loadTableTask);
    x.getTasks().add(copyTask);
    return loadTableTask;
}
Also used : Path(org.apache.hadoop.fs.Path) MoveWork(org.apache.hadoop.hive.ql.plan.MoveWork) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) LoadFileType(org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType) CopyWork(org.apache.hadoop.hive.ql.plan.CopyWork)
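
The examples on this page exercise four values of the LoadFileType enum. As a reading aid, the sketch below restates the enum with the semantics implied by the call sites in these examples; the per-constant comments are inferred from this usage rather than quoted from Hive's own Javadoc, and the real enum is nested inside LoadTableDesc rather than declared standalone.

// Reading aid: org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType as exercised on this page.
// The comments describe behavior implied by the surrounding examples, not authoritative documentation.
public enum LoadFileType {
    REPLACE_ALL,        // replace the destination contents (e.g. non-transactional INSERT OVERWRITE, replace == true)
    KEEP_EXISTING,      // add files alongside existing ones (e.g. INSERT INTO, MM/insert-only delta or base dirs)
    OVERWRITE_EXISTING, // overwrite files that collide by name and keep the rest
    IGNORE              // the move task performs no file manipulation (direct insert, replication of ACID tables)
}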

Example 2 with LoadFileType

Use of org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType in project hive by apache.

From the class LoadTable, method loadTableTask.

private Task<?> loadTableTask(Table table, ReplicationSpec replicationSpec, Path tgtPath, Path fromURI) {
    Path dataPath = fromURI;
    Path tmpPath = tgtPath;
    // For ACID tables, copy the files directly to the target path; no need to create the staging dir.
    LoadFileType loadFileType;
    if (replicationSpec.isInReplicationScope() && AcidUtils.isTransactionalTable(table)) {
        loadFileType = LoadFileType.IGNORE;
    } else {
        loadFileType = (replicationSpec.isReplace()) ? LoadFileType.REPLACE_ALL : LoadFileType.OVERWRITE_EXISTING;
        tmpPath = PathUtils.getExternalTmpPath(tgtPath, context.pathInfo);
    }
    LOG.debug("adding dependent CopyWork/AddPart/MoveWork for table " + table.getCompleteName() + " with source location: " + dataPath.toString() + " and target location " + tgtPath.toString());
    boolean copyAtLoad = context.hiveConf.getBoolVar(HiveConf.ConfVars.REPL_RUN_DATA_COPY_TASKS_ON_TARGET);
    Task<?> copyTask = ReplCopyTask.getLoadCopyTask(replicationSpec, dataPath, tmpPath, context.hiveConf, copyAtLoad, false, (new Path(context.dumpDirectory)).getParent().toString(), metricCollector);
    MoveWork moveWork = new MoveWork(new HashSet<>(), new HashSet<>(), null, null, false, (new Path(context.dumpDirectory)).getParent().toString(), metricCollector, true);
    if (AcidUtils.isTransactionalTable(table)) {
        LoadMultiFilesDesc loadFilesWork = new LoadMultiFilesDesc(Collections.singletonList(tmpPath), Collections.singletonList(tgtPath), true, null, null);
        moveWork.setMultiFilesDesc(loadFilesWork);
    } else {
        LoadTableDesc loadTableWork = new LoadTableDesc(tmpPath, Utilities.getTableDesc(table), new TreeMap<>(), loadFileType, 0L);
        moveWork.setLoadTableWork(loadTableWork);
    }
    moveWork.setIsInReplicationScope(replicationSpec.isInReplicationScope());
    Task<?> loadTableTask = TaskFactory.get(moveWork, context.hiveConf);
    copyTask.addDependentTask(loadTableTask);
    return copyTask;
}
Also used : Path(org.apache.hadoop.fs.Path) MoveWork(org.apache.hadoop.hive.ql.plan.MoveWork) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) LoadFileType(org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType) LoadMultiFilesDesc(org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc)

Example 3 with LoadFileType

Use of org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType in project hive by apache.

From the class SemanticAnalyzer, method genFileSinkPlan.

@SuppressWarnings("nls")
protected Operator genFileSinkPlan(String dest, QB qb, Operator input) throws SemanticException {
    RowResolver inputRR = opParseCtx.get(input).getRowResolver();
    QBMetaData qbm = qb.getMetaData();
    Integer destType = qbm.getDestTypeForAlias(dest);
    // destination table if any
    Table destinationTable = null;
    // true for full ACID table and MM table
    boolean destTableIsTransactional;
    // should the destination table be written to using ACID
    boolean destTableIsFullAcid;
    // should we add files directly to the final path
    boolean isDirectInsert = false;
    AcidUtils.Operation acidOperation = null;
    boolean destTableIsTemporary = false;
    boolean destTableIsMaterialization = false;
    // destination partition if any
    Partition destinationPartition = null;
    // the intermediate destination directory
    Path queryTmpdir = null;
    String moveTaskId = null;
    // the final destination directory
    Path destinationPath = null;
    TableDesc tableDescriptor = null;
    StructObjectInspector specificRowObjectInspector = null;
    int currentTableId = 0;
    boolean isLocal = false;
    SortBucketRSCtx rsCtx = new SortBucketRSCtx();
    DynamicPartitionCtx dpCtx = null;
    LoadTableDesc ltd = null;
    ListBucketingCtx lbCtx = null;
    Map<String, String> partSpec = null;
    boolean isMmTable = false, isMmCreate = false, isNonNativeTable = false;
    Long writeId = null;
    HiveTxnManager txnMgr = getTxnMgr();
    switch(destType.intValue()) {
        case QBMetaData.DEST_TABLE:
            {
                destinationTable = qbm.getDestTableForAlias(dest);
                destTableIsTransactional = AcidUtils.isTransactionalTable(destinationTable);
                destTableIsFullAcid = AcidUtils.isFullAcidTable(destinationTable);
                destTableIsTemporary = destinationTable.isTemporary();
                // Is the user trying to insert into an external table?
                checkExternalTable(destinationTable);
                partSpec = qbm.getPartSpecForAlias(dest);
                destinationPath = destinationTable.getPath();
                checkImmutableTable(qb, destinationTable, destinationPath, false);
                // check for partition
                List<FieldSchema> parts = destinationTable.getPartitionKeys();
                if (parts != null && parts.size() > 0) {
                    // table is partitioned
                    if (partSpec == null || partSpec.size() == 0) {
                        // user did NOT specify partition
                        throw new SemanticException(generateErrorMessage(qb.getParseInfo().getDestForClause(dest), ErrorMsg.NEED_PARTITION_ERROR.getMsg()));
                    }
                    dpCtx = qbm.getDPCtx(dest);
                    if (dpCtx == null) {
                        destinationTable.validatePartColumnNames(partSpec, false);
                        dpCtx = new DynamicPartitionCtx(partSpec, conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE));
                        qbm.setDPCtx(dest, dpCtx);
                    }
                }
                // Check for dynamic partitions.
                dpCtx = checkDynPart(qb, qbm, destinationTable, partSpec, dest);
                if (dpCtx != null && dpCtx.getSPPath() != null) {
                    destinationPath = new Path(destinationTable.getPath(), dpCtx.getSPPath());
                }
                isNonNativeTable = destinationTable.isNonNative();
                isMmTable = AcidUtils.isInsertOnlyTable(destinationTable.getParameters());
                AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID;
                // this table_desc does not contain the partitioning columns
                tableDescriptor = Utilities.getTableDesc(destinationTable);
                if (!isNonNativeTable) {
                    if (destTableIsTransactional) {
                        acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
                    }
                }
                isDirectInsert = isDirectInsert(destTableIsFullAcid, acidOp);
                acidOperation = acidOp;
                queryTmpdir = getTmpDir(isNonNativeTable, isMmTable, isDirectInsert, destinationPath);
                moveTaskId = getMoveTaskId();
                if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
                    Utilities.FILE_OP_LOGGER.trace("create filesink w/DEST_TABLE specifying " + queryTmpdir + " from " + destinationPath);
                }
                if (dpCtx != null) {
                    // set the root of the temporary path where dynamic partition columns will populate
                    dpCtx.setRootPath(queryTmpdir);
                }
                // Add NOT NULL constraint check
                input = genConstraintsPlan(dest, qb, input);
                if (!qb.getIsQuery()) {
                    input = genConversionSelectOperator(dest, qb, input, destinationTable.getDeserializer(), dpCtx, parts);
                }
                if (destinationTable.isMaterializedView() && mvRebuildMode == MaterializationRebuildMode.INSERT_OVERWRITE_REBUILD) {
                    // Data organization (DISTRIBUTED, SORTED, CLUSTERED) for materialized view
                    // TODO: We only do this for a full rebuild
                    String sortColsStr = destinationTable.getProperty(Constants.MATERIALIZED_VIEW_SORT_COLUMNS);
                    String distributeColsStr = destinationTable.getProperty(Constants.MATERIALIZED_VIEW_DISTRIBUTE_COLUMNS);
                    if (sortColsStr != null || distributeColsStr != null) {
                        input = genMaterializedViewDataOrgPlan(destinationTable, sortColsStr, distributeColsStr, inputRR, input);
                    }
                } else {
                    // Add sorting/bucketing if needed
                    input = genBucketingSortingDest(dest, input, qb, tableDescriptor, destinationTable, rsCtx);
                }
                idToTableNameMap.put(String.valueOf(destTableId), destinationTable.getTableName());
                currentTableId = destTableId;
                destTableId++;
                // NOTE: specify Dynamic partitions in dest_tab for WriteEntity
                if (!isNonNativeTable || destinationTable.getStorageHandler().commitInMoveTask()) {
                    if (destTableIsTransactional) {
                        acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
                        checkAcidConstraints();
                    } else {
                        lbCtx = constructListBucketingCtx(destinationTable.getSkewedColNames(), destinationTable.getSkewedColValues(), destinationTable.getSkewedColValueLocationMaps(), destinationTable.isStoredAsSubDirectories());
                    }
                    try {
                        if (ctx.getExplainConfig() != null) {
                            // For explain plan, txn won't be opened and doesn't make sense to allocate write id
                            writeId = null;
                        } else {
                            if (isMmTable) {
                                writeId = txnMgr.getTableWriteId(destinationTable.getDbName(), destinationTable.getTableName());
                            } else {
                                writeId = acidOp == Operation.NOT_ACID ? null : txnMgr.getTableWriteId(destinationTable.getDbName(), destinationTable.getTableName());
                            }
                        }
                    } catch (LockException ex) {
                        throw new SemanticException("Failed to allocate write Id", ex);
                    }
                    boolean isReplace = !qb.getParseInfo().isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName());
                    ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, dpCtx, acidOp, isReplace, writeId);
                    if (writeId != null) {
                        ltd.setStmtId(txnMgr.getCurrentStmtId());
                    }
                    ltd.setMoveTaskId(moveTaskId);
                    // For Acid table, Insert Overwrite shouldn't replace the table content. We keep the old
                    // deltas and base and leave them up to the cleaner to clean up
                    boolean isInsertInto = qb.getParseInfo().isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName());
                    LoadFileType loadType;
                    if (isDirectInsert) {
                        loadType = LoadFileType.IGNORE;
                    } else if (!isInsertInto && !destTableIsTransactional) {
                        loadType = LoadFileType.REPLACE_ALL;
                    } else {
                        loadType = LoadFileType.KEEP_EXISTING;
                    }
                    ltd.setLoadFileType(loadType);
                    ltd.setInsertOverwrite(!isInsertInto);
                    ltd.setIsDirectInsert(isDirectInsert);
                    ltd.setLbCtx(lbCtx);
                    loadTableWork.add(ltd);
                } else {
                    // This is a non-native table.
                    // We need to set stats as inaccurate.
                    setStatsForNonNativeTable(destinationTable.getDbName(), destinationTable.getTableName());
                    // true if it is insert overwrite.
                    boolean overwrite = !qb.getParseInfo().isInsertIntoTable(String.format("%s.%s", destinationTable.getDbName(), destinationTable.getTableName()));
                    createPreInsertDesc(destinationTable, overwrite);
                    ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, partSpec == null ? ImmutableMap.of() : partSpec);
                    ltd.setInsertOverwrite(overwrite);
                    ltd.setLoadFileType(overwrite ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING);
                }
                if (destinationTable.isMaterializedView()) {
                    materializedViewUpdateDesc = new MaterializedViewUpdateDesc(destinationTable.getFullyQualifiedName(), false, false, true);
                }
                WriteEntity output = generateTableWriteEntity(dest, destinationTable, partSpec, ltd, dpCtx);
                ctx.getLoadTableOutputMap().put(ltd, output);
                break;
            }
        case QBMetaData.DEST_PARTITION:
            {
                destinationPartition = qbm.getDestPartitionForAlias(dest);
                destinationTable = destinationPartition.getTable();
                destTableIsTransactional = AcidUtils.isTransactionalTable(destinationTable);
                destTableIsFullAcid = AcidUtils.isFullAcidTable(destinationTable);
                checkExternalTable(destinationTable);
                Path partPath = destinationPartition.getDataLocation();
                checkImmutableTable(qb, destinationTable, partPath, true);
                // Previous behavior (HIVE-1707) used to replace the partition's dfs with the table's dfs.
                // The changes in HIVE-19891 appear to no longer support that behavior.
                destinationPath = partPath;
                if (MetaStoreUtils.isArchived(destinationPartition.getTPartition())) {
                    try {
                        String conflictingArchive = ArchiveUtils.conflictingArchiveNameOrNull(db, destinationTable, destinationPartition.getSpec());
                        String message = String.format("Insert conflict with existing archive: %s", conflictingArchive);
                        throw new SemanticException(message);
                    } catch (SemanticException err) {
                        throw err;
                    } catch (HiveException err) {
                        throw new SemanticException(err);
                    }
                }
                isNonNativeTable = destinationTable.isNonNative();
                isMmTable = AcidUtils.isInsertOnlyTable(destinationTable.getParameters());
                AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID;
                // this table_desc does not contain the partitioning columns
                tableDescriptor = Utilities.getTableDesc(destinationTable);
                if (!isNonNativeTable) {
                    if (destTableIsTransactional) {
                        acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
                    }
                }
                isDirectInsert = isDirectInsert(destTableIsFullAcid, acidOp);
                acidOperation = acidOp;
                queryTmpdir = getTmpDir(isNonNativeTable, isMmTable, isDirectInsert, destinationPath);
                moveTaskId = getMoveTaskId();
                if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
                    Utilities.FILE_OP_LOGGER.trace("create filesink w/DEST_PARTITION specifying " + queryTmpdir + " from " + destinationPath);
                }
                // Add NOT NULL constraint check
                input = genConstraintsPlan(dest, qb, input);
                if (!qb.getIsQuery()) {
                    input = genConversionSelectOperator(dest, qb, input, destinationTable.getDeserializer(), dpCtx, null);
                }
                if (destinationTable.isMaterializedView() && mvRebuildMode == MaterializationRebuildMode.INSERT_OVERWRITE_REBUILD) {
                    // Data organization (DISTRIBUTED, SORTED, CLUSTERED) for materialized view
                    // TODO: We only do this for a full rebuild
                    String sortColsStr = destinationTable.getProperty(Constants.MATERIALIZED_VIEW_SORT_COLUMNS);
                    String distributeColsStr = destinationTable.getProperty(Constants.MATERIALIZED_VIEW_DISTRIBUTE_COLUMNS);
                    if (sortColsStr != null || distributeColsStr != null) {
                        input = genMaterializedViewDataOrgPlan(destinationTable, sortColsStr, distributeColsStr, inputRR, input);
                    }
                } else {
                    // Add sorting/bucketing if needed
                    input = genBucketingSortingDest(dest, input, qb, tableDescriptor, destinationTable, rsCtx);
                }
                idToTableNameMap.put(String.valueOf(destTableId), destinationTable.getTableName());
                currentTableId = destTableId;
                destTableId++;
                if (destTableIsTransactional) {
                    acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
                    checkAcidConstraints();
                } else {
                    // Transactional tables can't be list bucketed or have skewed cols
                    lbCtx = constructListBucketingCtx(destinationPartition.getSkewedColNames(), destinationPartition.getSkewedColValues(), destinationPartition.getSkewedColValueLocationMaps(), destinationPartition.isStoredAsSubDirectories());
                }
                try {
                    if (ctx.getExplainConfig() != null) {
                        // For explain plan, txn won't be opened and doesn't make sense to allocate write id
                        writeId = null;
                    } else {
                        if (isMmTable) {
                            writeId = txnMgr.getTableWriteId(destinationTable.getDbName(), destinationTable.getTableName());
                        } else {
                            writeId = (acidOp == Operation.NOT_ACID) ? null : txnMgr.getTableWriteId(destinationTable.getDbName(), destinationTable.getTableName());
                        }
                    }
                } catch (LockException ex) {
                    throw new SemanticException("Failed to allocate write Id", ex);
                }
                ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, destinationPartition.getSpec(), acidOp, writeId);
                if (writeId != null) {
                    ltd.setStmtId(txnMgr.getCurrentStmtId());
                }
                // For the current context for generating File Sink Operator, it is either INSERT INTO or INSERT OVERWRITE.
                // So the next line works.
                boolean isInsertInto = !qb.getParseInfo().isDestToOpTypeInsertOverwrite(dest);
                // For Acid table, Insert Overwrite shouldn't replace the table content. We keep the old
                // deltas and base and leave them up to the cleaner to clean up
                LoadFileType loadType;
                if (isDirectInsert) {
                    loadType = LoadFileType.IGNORE;
                } else if (!isInsertInto && !destTableIsTransactional) {
                    loadType = LoadFileType.REPLACE_ALL;
                } else {
                    loadType = LoadFileType.KEEP_EXISTING;
                }
                ltd.setLoadFileType(loadType);
                ltd.setInsertOverwrite(!isInsertInto);
                ltd.setIsDirectInsert(isDirectInsert);
                ltd.setLbCtx(lbCtx);
                ltd.setMoveTaskId(moveTaskId);
                loadTableWork.add(ltd);
                if (!outputs.add(new WriteEntity(destinationPartition, determineWriteType(ltd, dest)))) {
                    throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES.getMsg(destinationTable.getTableName() + "@" + destinationPartition.getName()));
                }
                break;
            }
        case QBMetaData.DEST_LOCAL_FILE:
            isLocal = true;
        // fall through
        case QBMetaData.DEST_DFS_FILE:
            {
                destinationPath = getDestinationFilePath(qbm.getDestFileForAlias(dest), isMmTable);
                // CTAS case: the file output format and serde are defined by the create
                // table command rather than taking the default value
                List<FieldSchema> fieldSchemas = null;
                List<FieldSchema> partitionColumns = null;
                List<String> partitionColumnNames = null;
                List<FieldSchema> sortColumns = null;
                List<String> sortColumnNames = null;
                List<FieldSchema> distributeColumns = null;
                List<String> distributeColumnNames = null;
                List<ColumnInfo> fileSinkColInfos = null;
                List<ColumnInfo> sortColInfos = null;
                List<ColumnInfo> distributeColInfos = null;
                TableName tableName = null;
                Map<String, String> tblProps = null;
                CreateTableDesc tblDesc = qb.getTableDesc();
                CreateMaterializedViewDesc viewDesc = qb.getViewDesc();
                if (tblDesc != null) {
                    fieldSchemas = new ArrayList<>();
                    partitionColumns = new ArrayList<>();
                    partitionColumnNames = tblDesc.getPartColNames();
                    fileSinkColInfos = new ArrayList<>();
                    destTableIsTemporary = tblDesc.isTemporary();
                    destTableIsMaterialization = tblDesc.isMaterialization();
                    tableName = TableName.fromString(tblDesc.getDbTableName(), null, tblDesc.getDatabaseName());
                    tblProps = tblDesc.getTblProps();
                } else if (viewDesc != null) {
                    fieldSchemas = new ArrayList<>();
                    partitionColumns = new ArrayList<>();
                    partitionColumnNames = viewDesc.getPartColNames();
                    sortColumns = new ArrayList<>();
                    sortColumnNames = viewDesc.getSortColNames();
                    distributeColumns = new ArrayList<>();
                    distributeColumnNames = viewDesc.getDistributeColNames();
                    fileSinkColInfos = new ArrayList<>();
                    sortColInfos = new ArrayList<>();
                    distributeColInfos = new ArrayList<>();
                    destTableIsTemporary = false;
                    destTableIsMaterialization = false;
                    tableName = HiveTableName.ofNullableWithNoDefault(viewDesc.getViewName());
                    tblProps = viewDesc.getTblProps();
                }
                destTableIsTransactional = tblProps != null && AcidUtils.isTablePropertyTransactional(tblProps);
                if (destTableIsTransactional) {
                    try {
                        if (ctx.getExplainConfig() != null) {
                            // For explain plan, txn won't be opened and doesn't make sense to allocate write id
                            writeId = 0L;
                        } else {
                            writeId = txnMgr.getTableWriteId(tableName.getDb(), tableName.getTable());
                        }
                    } catch (LockException ex) {
                        throw new SemanticException("Failed to allocate write Id", ex);
                    }
                    if (AcidUtils.isInsertOnlyTable(tblProps, true)) {
                        isMmTable = isMmCreate = true;
                        if (tblDesc != null) {
                            tblDesc.setInitialMmWriteId(writeId);
                        } else {
                            viewDesc.setInitialMmWriteId(writeId);
                        }
                    }
                }
                if (isLocal) {
                    assert !isMmTable;
                    // for local directory - we always write to map-red intermediate
                    // store and then copy to local fs
                    queryTmpdir = ctx.getMRTmpPath();
                } else {
                    // no copy is required. we may want to revisit this policy in future
                    try {
                        Path qPath = FileUtils.makeQualified(destinationPath, conf);
                        queryTmpdir = isMmTable ? qPath : ctx.getTempDirForFinalJobPath(qPath);
                        if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
                            Utilities.FILE_OP_LOGGER.trace("Setting query directory " + queryTmpdir + " from " + destinationPath + " (" + isMmTable + ")");
                        }
                    } catch (Exception e) {
                        throw new SemanticException("Error creating temporary folder on: " + destinationPath, e);
                    }
                }
                // Check for dynamic partitions.
                final String cols, colTypes;
                final boolean isPartitioned;
                if (dpCtx != null) {
                    throw new SemanticException("Dynamic partition context has already been created, this should not happen");
                }
                if (!CollectionUtils.isEmpty(partitionColumnNames)) {
                    ColsAndTypes ct = deriveFileSinkColTypes(inputRR, partitionColumnNames, sortColumnNames, distributeColumnNames, fieldSchemas, partitionColumns, sortColumns, distributeColumns, fileSinkColInfos, sortColInfos, distributeColInfos);
                    cols = ct.cols;
                    colTypes = ct.colTypes;
                    dpCtx = new DynamicPartitionCtx(partitionColumnNames, conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE));
                    qbm.setDPCtx(dest, dpCtx);
                    // set the root of the temporary path where dynamic partition columns will populate
                    dpCtx.setRootPath(queryTmpdir);
                    isPartitioned = true;
                } else {
                    ColsAndTypes ct = deriveFileSinkColTypes(inputRR, sortColumnNames, distributeColumnNames, fieldSchemas, sortColumns, distributeColumns, sortColInfos, distributeColInfos);
                    cols = ct.cols;
                    colTypes = ct.colTypes;
                    isPartitioned = false;
                }
                // update the create table descriptor with the resulting schema.
                if (tblDesc != null) {
                    tblDesc.setCols(new ArrayList<>(fieldSchemas));
                    tblDesc.setPartCols(new ArrayList<>(partitionColumns));
                } else if (viewDesc != null) {
                    viewDesc.setSchema(new ArrayList<>(fieldSchemas));
                    viewDesc.setPartCols(new ArrayList<>(partitionColumns));
                    if (viewDesc.isOrganized()) {
                        viewDesc.setSortCols(new ArrayList<>(sortColumns));
                        viewDesc.setDistributeCols(new ArrayList<>(distributeColumns));
                    }
                }
                boolean isDestTempFile = true;
                if (ctx.isMRTmpFileURI(destinationPath.toUri().toString()) == false && ctx.isResultCacheDir(destinationPath) == false) {
                    // not a temp dir and not a result cache dir
                    idToTableNameMap.put(String.valueOf(destTableId), destinationPath.toUri().toString());
                    currentTableId = destTableId;
                    destTableId++;
                    isDestTempFile = false;
                }
                if (tblDesc == null) {
                    if (viewDesc != null) {
                        tableDescriptor = PlanUtils.getTableDesc(viewDesc, cols, colTypes);
                    } else if (qb.getIsQuery()) {
                        Class<? extends Deserializer> serdeClass = LazySimpleSerDe.class;
                        String fileFormat = conf.getResultFileFormat().toString();
                        if (SessionState.get().getIsUsingThriftJDBCBinarySerDe()) {
                            serdeClass = ThriftJDBCBinarySerDe.class;
                            fileFormat = ResultFileFormat.SEQUENCEFILE.toString();
                            // Set the fetch formatter to be a no-op for the ListSinkOperator, since we'll
                            // write out formatted thrift objects to SequenceFile
                            conf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, NoOpFetchFormatter.class.getName());
                        } else if (fileFormat.equals(PlanUtils.LLAP_OUTPUT_FORMAT_KEY)) {
                            // If this output format is Llap, check to see if Arrow is requested
                            boolean useArrow = HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_OUTPUT_FORMAT_ARROW);
                            serdeClass = useArrow ? ArrowColumnarBatchSerDe.class : LazyBinarySerDe2.class;
                        }
                        tableDescriptor = PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, fileFormat, serdeClass);
                    } else {
                        tableDescriptor = PlanUtils.getDefaultTableDesc(qb.getDirectoryDesc(), cols, colTypes);
                    }
                } else {
                    tableDescriptor = PlanUtils.getTableDesc(tblDesc, cols, colTypes);
                }
                // if available, set location in table desc properties
                if (tblDesc != null && tblDesc.getLocation() != null && tableDescriptor != null && !tableDescriptor.getProperties().containsKey(hive_metastoreConstants.META_TABLE_LOCATION)) {
                    tableDescriptor.getProperties().setProperty(hive_metastoreConstants.META_TABLE_LOCATION, tblDesc.getLocation());
                }
                // We need a specific rowObjectInspector in this case
                try {
                    specificRowObjectInspector = (StructObjectInspector) tableDescriptor.getDeserializer(conf).getObjectInspector();
                } catch (Exception e) {
                    throw new SemanticException(e.getMessage(), e);
                }
                boolean isDfsDir = (destType == QBMetaData.DEST_DFS_FILE);
                try {
                    destinationTable = tblDesc != null ? tblDesc.toTable(conf) : viewDesc != null ? viewDesc.toTable(conf) : null;
                } catch (HiveException e) {
                    throw new SemanticException(e);
                }
                destTableIsFullAcid = AcidUtils.isFullAcidTable(destinationTable);
                // Data organization (DISTRIBUTED, SORTED, CLUSTERED) for materialized view
                if (viewDesc != null && viewDesc.isOrganized()) {
                    input = genMaterializedViewDataOrgPlan(sortColInfos, distributeColInfos, inputRR, input);
                }
                moveTaskId = getMoveTaskId();
                if (isPartitioned) {
                    // Create a SELECT that may reorder the columns if needed
                    RowResolver rowResolver = new RowResolver();
                    List<ExprNodeDesc> columnExprs = new ArrayList<>();
                    List<String> colNames = new ArrayList<>();
                    Map<String, ExprNodeDesc> colExprMap = new HashMap<>();
                    for (int i = 0; i < fileSinkColInfos.size(); i++) {
                        ColumnInfo ci = fileSinkColInfos.get(i);
                        ExprNodeDesc columnExpr = new ExprNodeColumnDesc(ci);
                        String name = getColumnInternalName(i);
                        rowResolver.put("", name, new ColumnInfo(name, columnExpr.getTypeInfo(), "", false));
                        columnExprs.add(columnExpr);
                        colNames.add(name);
                        colExprMap.put(name, columnExpr);
                    }
                    input = putOpInsertMap(OperatorFactory.getAndMakeChild(new SelectDesc(columnExprs, colNames), new RowSchema(rowResolver.getColumnInfos()), input), rowResolver);
                    input.setColumnExprMap(colExprMap);
                    // If this is a partitioned CTAS or MV statement, we are going to create a LoadTableDesc
                    // object. Although the table does not exist in metastore, we will swap the CreateTableTask
                    // and MoveTask resulting from this LoadTable so in this specific case, first we create
                    // the metastore table, then we move and commit the partitions. At least for the time being,
                    // this order needs to be enforced because metastore expects a table to exist before we can
                    // add any partitions to it.
                    isNonNativeTable = tableDescriptor.isNonNative();
                    if (!isNonNativeTable || destinationTable.getStorageHandler().commitInMoveTask()) {
                        AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID;
                        if (destTableIsTransactional) {
                            acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
                            checkAcidConstraints();
                        }
                        // isReplace = false in case concurrent operation is executed
                        ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, dpCtx, acidOp, false, writeId);
                        if (writeId != null) {
                            ltd.setStmtId(txnMgr.getCurrentStmtId());
                        }
                        ltd.setLoadFileType(LoadFileType.KEEP_EXISTING);
                        ltd.setInsertOverwrite(false);
                        loadTableWork.add(ltd);
                    } else {
                        // This is a non-native table.
                        // We need to set stats as inaccurate.
                        setStatsForNonNativeTable(tableDescriptor.getDbName(), tableDescriptor.getTableName());
                        ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, dpCtx.getPartSpec());
                        ltd.setInsertOverwrite(false);
                        ltd.setLoadFileType(LoadFileType.KEEP_EXISTING);
                    }
                    ltd.setMoveTaskId(moveTaskId);
                    ltd.setMdTable(destinationTable);
                    WriteEntity output = generateTableWriteEntity(dest, destinationTable, dpCtx.getPartSpec(), ltd, dpCtx);
                    ctx.getLoadTableOutputMap().put(ltd, output);
                } else {
                    // Create LFD even for MM CTAS - it's a no-op move, but it still seems to be used for stats.
                    LoadFileDesc loadFileDesc = new LoadFileDesc(tblDesc, viewDesc, queryTmpdir, destinationPath, isDfsDir, cols, colTypes, // there is a change here - prev version had 'transactional', one before 'acid'
                    destTableIsFullAcid ? Operation.INSERT : Operation.NOT_ACID, isMmCreate);
                    loadFileDesc.setMoveTaskId(moveTaskId);
                    loadFileWork.add(loadFileDesc);
                    try {
                        Path qualifiedPath = destinationPath.getFileSystem(conf).makeQualified(destinationPath);
                        if (!outputs.add(new WriteEntity(qualifiedPath, !isDfsDir, isDestTempFile))) {
                            throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES.getMsg(destinationPath.toUri().toString()));
                        }
                    } catch (IOException ex) {
                        throw new SemanticException("Error while getting the full qualified path for the given directory: " + ex.getMessage());
                    }
                }
                break;
            }
        default:
            throw new SemanticException("Unknown destination type: " + destType);
    }
    inputRR = opParseCtx.get(input).getRowResolver();
    List<ColumnInfo> vecCol = new ArrayList<ColumnInfo>();
    if (updating(dest) || deleting(dest)) {
        vecCol.add(new ColumnInfo(VirtualColumn.ROWID.getName(), VirtualColumn.ROWID.getTypeInfo(), "", true));
    } else {
        try {
            // If we already have a specific inspector (view or directory as a target) use that
            // Otherwise use the table deserializer to get the inspector
            StructObjectInspector rowObjectInspector = specificRowObjectInspector != null ? specificRowObjectInspector : (StructObjectInspector) destinationTable.getDeserializer().getObjectInspector();
            List<? extends StructField> fields = rowObjectInspector.getAllStructFieldRefs();
            for (StructField field : fields) {
                vecCol.add(new ColumnInfo(field.getFieldName(), TypeInfoUtils.getTypeInfoFromObjectInspector(field.getFieldObjectInspector()), "", false));
            }
        } catch (Exception e) {
            throw new SemanticException(e.getMessage(), e);
        }
    }
    RowSchema fsRS = new RowSchema(vecCol);
    // The output files of a FileSink can be merged if they are either not being written to a table
    // or are being written to a table which is not bucketed
    // and the table is not sorted
    boolean canBeMerged = (destinationTable == null || !((destinationTable.getNumBuckets() > 0) || (destinationTable.getSortCols() != null && destinationTable.getSortCols().size() > 0)));
    // If this table is working with ACID semantics, turn off merging
    canBeMerged &= !destTableIsFullAcid;
    // Generate the partition columns from the parent input
    if (destType == QBMetaData.DEST_TABLE || destType == QBMetaData.DEST_PARTITION) {
        genPartnCols(dest, input, qb, tableDescriptor, destinationTable, rsCtx);
    }
    FileSinkDesc fileSinkDesc = createFileSinkDesc(dest, tableDescriptor, destinationPartition, // this was 1/4 acid
    destinationPath, // this was 1/4 acid
    currentTableId, // this was 1/4 acid
    destTableIsFullAcid, // this was 1/4 acid
    destTableIsTemporary, destTableIsMaterialization, queryTmpdir, rsCtx, dpCtx, lbCtx, fsRS, canBeMerged, destinationTable, writeId, isMmCreate, destType, qb, isDirectInsert, acidOperation, moveTaskId);
    if (isMmCreate) {
        // Add FSD so that the LoadTask compilation could fix up its path to avoid the move.
        if (tableDesc != null) {
            tableDesc.setWriter(fileSinkDesc);
        } else {
            createVwDesc.setWriter(fileSinkDesc);
        }
    }
    if (fileSinkDesc.getInsertOverwrite()) {
        if (ltd != null) {
            ltd.setInsertOverwrite(true);
        }
    }
    if (null != tableDescriptor && useBatchingSerializer(tableDescriptor.getSerdeClassName())) {
        fileSinkDesc.setIsUsingBatchingSerDe(true);
    } else {
        fileSinkDesc.setIsUsingBatchingSerDe(false);
    }
    Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild(fileSinkDesc, fsRS, input), inputRR);
    // in case of a merge statement.
    if (!isDirectInsert || acidOperation == AcidUtils.Operation.INSERT) {
        handleLineage(ltd, output);
    }
    setWriteIdForSurrogateKeys(ltd, input);
    LOG.debug("Created FileSink Plan for clause: {}dest_path: {} row schema: {}", dest, destinationPath, inputRR);
    FileSinkOperator fso = (FileSinkOperator) output;
    fso.getConf().setTable(destinationTable);
    // and it is an insert overwrite or insert into table
    if (conf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER) && conf.getBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER) && destinationTable != null && (!destinationTable.isNonNative() || destinationTable.getStorageHandler().commitInMoveTask()) && !destTableIsTemporary && !destTableIsMaterialization && ColumnStatsAutoGatherContext.canRunAutogatherStats(fso)) {
        if (destType == QBMetaData.DEST_TABLE) {
            genAutoColumnStatsGatheringPipeline(destinationTable, partSpec, input, qb.getParseInfo().isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName()), false);
        } else if (destType == QBMetaData.DEST_PARTITION) {
            genAutoColumnStatsGatheringPipeline(destinationTable, destinationPartition.getSpec(), input, qb.getParseInfo().isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName()), false);
        } else if (destType == QBMetaData.DEST_LOCAL_FILE || destType == QBMetaData.DEST_DFS_FILE) {
            // CTAS or CMV statement
            genAutoColumnStatsGatheringPipeline(destinationTable, null, input, false, true);
        }
    }
    return output;
}
Also used : LoadFileType(org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType) FileSinkDesc(org.apache.hadoop.hive.ql.plan.FileSinkDesc) LazySimpleSerDe(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe) DynamicPartitionCtx(org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx) ArrayList(java.util.ArrayList) ColumnInfo(org.apache.hadoop.hive.ql.exec.ColumnInfo) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) ListBucketingCtx(org.apache.hadoop.hive.ql.plan.ListBucketingCtx) LinkedList(java.util.LinkedList) ArrayList(java.util.ArrayList) ValidTxnWriteIdList(org.apache.hadoop.hive.common.ValidTxnWriteIdList) ValidTxnList(org.apache.hadoop.hive.common.ValidTxnList) List(java.util.List) ExprNodeDesc(org.apache.hadoop.hive.ql.plan.ExprNodeDesc) RowSchema(org.apache.hadoop.hive.ql.exec.RowSchema) LoadFileDesc(org.apache.hadoop.hive.ql.plan.LoadFileDesc) FileSinkOperator(org.apache.hadoop.hive.ql.exec.FileSinkOperator) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) TableName(org.apache.hadoop.hive.common.TableName) HiveTxnManager(org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager) PreInsertTableDesc(org.apache.hadoop.hive.ql.ddl.table.misc.preinsert.PreInsertTableDesc) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) CreateTableDesc(org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc) TableDesc(org.apache.hadoop.hive.ql.plan.TableDesc) Map(java.util.Map) LinkedHashMap(java.util.LinkedHashMap) TreeMap(java.util.TreeMap) ImmutableMap(com.google.common.collect.ImmutableMap) SortedMap(java.util.SortedMap) HashMap(java.util.HashMap) AcidUtils(org.apache.hadoop.hive.ql.io.AcidUtils) StandardStructObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.StandardStructObjectInspector) StructObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector) AbstractMapJoinOperator(org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator) SelectOperator(org.apache.hadoop.hive.ql.exec.SelectOperator) JoinOperator(org.apache.hadoop.hive.ql.exec.JoinOperator) Operator(org.apache.hadoop.hive.ql.exec.Operator) GroupByOperator(org.apache.hadoop.hive.ql.exec.GroupByOperator) FileSinkOperator(org.apache.hadoop.hive.ql.exec.FileSinkOperator) FilterOperator(org.apache.hadoop.hive.ql.exec.FilterOperator) LimitOperator(org.apache.hadoop.hive.ql.exec.LimitOperator) ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) UnionOperator(org.apache.hadoop.hive.ql.exec.UnionOperator) SMBMapJoinOperator(org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) Operation(org.apache.hadoop.hive.ql.io.AcidUtils.Operation) HiveOperation(org.apache.hadoop.hive.ql.plan.HiveOperation) StructField(org.apache.hadoop.hive.serde2.objectinspector.StructField) NoOpFetchFormatter(org.apache.hadoop.hive.serde2.NoOpFetchFormatter) ExprNodeColumnDesc(org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc) SelectDesc(org.apache.hadoop.hive.ql.plan.SelectDesc) WriteEntity(org.apache.hadoop.hive.ql.hooks.WriteEntity) CalciteSemanticException(org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) Path(org.apache.hadoop.fs.Path) Partition(org.apache.hadoop.hive.ql.metadata.Partition) DummyPartition(org.apache.hadoop.hive.ql.metadata.DummyPartition) SourceTable(org.apache.hadoop.hive.metastore.api.SourceTable) Table(org.apache.hadoop.hive.ql.metadata.Table) IOException(java.io.IOException) 
CreateMaterializedViewDesc(org.apache.hadoop.hive.ql.ddl.view.create.CreateMaterializedViewDesc) SQLUniqueConstraint(org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint) SQLCheckConstraint(org.apache.hadoop.hive.metastore.api.SQLCheckConstraint) SQLDefaultConstraint(org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint) DefaultConstraint(org.apache.hadoop.hive.ql.metadata.DefaultConstraint) SQLNotNullConstraint(org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) IOException(java.io.IOException) CalciteSemanticException(org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) SerDeException(org.apache.hadoop.hive.serde2.SerDeException) PatternSyntaxException(java.util.regex.PatternSyntaxException) FileNotFoundException(java.io.FileNotFoundException) AccessControlException(java.security.AccessControlException) InvalidTableException(org.apache.hadoop.hive.ql.metadata.InvalidTableException) MaterializedViewUpdateDesc(org.apache.hadoop.hive.ql.ddl.view.materialized.update.MaterializedViewUpdateDesc) CreateTableDesc(org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc) Operation(org.apache.hadoop.hive.ql.io.AcidUtils.Operation)
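
The same three-way choice of LoadFileType appears in both the DEST_TABLE and DEST_PARTITION branches of genFileSinkPlan above. A minimal sketch of that selection, pulled out into a hypothetical helper purely for readability (no such method exists in SemanticAnalyzer), looks like this:

// Hypothetical condensation of the loadType selection repeated in genFileSinkPlan above;
// it is not a real SemanticAnalyzer method.
private static LoadFileType chooseLoadFileType(boolean isDirectInsert, boolean isInsertInto,
        boolean destTableIsTransactional) {
    if (isDirectInsert) {
        // Files are written directly to their final ACID location, so the move task must not touch them.
        return LoadFileType.IGNORE;
    }
    if (!isInsertInto && !destTableIsTransactional) {
        // INSERT OVERWRITE on a non-transactional table replaces the old contents.
        return LoadFileType.REPLACE_ALL;
    }
    // INSERT INTO, or an overwrite of a transactional table whose old deltas/base are left to the cleaner.
    return LoadFileType.KEEP_EXISTING;
}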

Example 4 with LoadFileType

Use of org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType in project hive by apache.

From the class ImportSemanticAnalyzer, method addSinglePartition.

private static Task<?> addSinglePartition(ImportTableDesc tblDesc, Table table, Warehouse wh, AlterTableAddPartitionDesc addPartitionDesc, ReplicationSpec replicationSpec, EximUtil.SemanticAnalyzerWrapperContext x, Long writeId, int stmtId, boolean isReplication, String dumpRoot, ReplicationMetricCollector metricCollector) throws MetaException, IOException, HiveException {
    AlterTableAddPartitionDesc.PartitionDesc partSpec = addPartitionDesc.getPartitions().get(0);
    boolean isSkipTrash = false;
    boolean needRecycle = false;
    if (shouldSkipDataCopyInReplScope(tblDesc, replicationSpec) || (tblDesc.isExternal() && tblDesc.getLocation() == null)) {
        x.getLOG().debug("Adding AddPart and skipped data copy for partition " + partSpecToString(partSpec.getPartSpec()));
        // addPartitionDesc already has the right partition location
        @SuppressWarnings("unchecked") Task<?> addPartTask = TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc, isReplication, dumpRoot, metricCollector), x.getConf());
        return addPartTask;
    } else {
        String srcLocation = partSpec.getLocation();
        if (replicationSpec.isInReplicationScope() && !ReplicationSpec.Type.IMPORT.equals(replicationSpec.getReplSpecType())) {
            Path partLocation = new Path(partSpec.getLocation());
            Path dataDirBase = partLocation.getParent();
            String bucketDir = partLocation.getName();
            for (int i = 1; i < partSpec.getPartSpec().size(); i++) {
                bucketDir = dataDirBase.getName() + File.separator + bucketDir;
                dataDirBase = dataDirBase.getParent();
            }
            String relativePartDataPath = EximUtil.DATA_PATH_NAME + File.separator + bucketDir;
            srcLocation = new Path(dataDirBase, relativePartDataPath).toString();
        }
        fixLocationInPartSpec(tblDesc, table, wh, replicationSpec, partSpec, x);
        x.getLOG().debug("adding dependent CopyWork/AddPart/MoveWork for partition " + partSpecToString(partSpec.getPartSpec()) + " with source location: " + srcLocation);
        Path tgtLocation = new Path(partSpec.getLocation());
        LoadFileType loadFileType;
        Path destPath;
        if (replicationSpec.isInReplicationScope()) {
            loadFileType = LoadFileType.IGNORE;
            destPath = tgtLocation;
            isSkipTrash = MetaStoreUtils.isSkipTrash(table.getParameters());
            if (table.isTemporary()) {
                needRecycle = false;
            } else {
                org.apache.hadoop.hive.metastore.api.Database db = x.getHive().getDatabase(table.getDbName());
                needRecycle = db != null && ReplChangeManager.shouldEnableCm(db, table.getTTable());
            }
        } else {
            loadFileType = replicationSpec.isReplace() ? LoadFileType.REPLACE_ALL : LoadFileType.OVERWRITE_EXISTING;
            // In replication scope, the write id will be invalid
            boolean useStagingDirectory = !AcidUtils.isTransactionalTable(table.getParameters()) || replicationSpec.isInReplicationScope();
            destPath = useStagingDirectory ? x.getCtx().getExternalTmpPath(tgtLocation) : new Path(tgtLocation, AcidUtils.deltaSubdir(writeId, writeId, stmtId));
        }
        Path moveTaskSrc = !AcidUtils.isTransactionalTable(table.getParameters()) || replicationSpec.isInReplicationScope() ? destPath : tgtLocation;
        if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
            Utilities.FILE_OP_LOGGER.trace("adding import work for partition with source location: " + srcLocation + "; target: " + tgtLocation + "; copy dest " + destPath + "; mm " + writeId + " for " + partSpecToString(partSpec.getPartSpec()) + ": " + (AcidUtils.isFullAcidTable(table) ? "acid" : (AcidUtils.isInsertOnlyTable(table) ? "mm" : "flat")));
        }
        Task<?> copyTask = null;
        if (replicationSpec.isInReplicationScope()) {
            boolean copyAtLoad = x.getConf().getBoolVar(HiveConf.ConfVars.REPL_RUN_DATA_COPY_TASKS_ON_TARGET);
            copyTask = ReplCopyTask.getLoadCopyTask(replicationSpec, new Path(srcLocation), destPath, x.getConf(), isSkipTrash, needRecycle, copyAtLoad, dumpRoot, metricCollector);
        } else {
            copyTask = TaskFactory.get(new CopyWork(new Path(srcLocation), destPath, false, dumpRoot, metricCollector, isReplication));
        }
        Task<?> addPartTask = null;
        if (x.getEventType() != DumpType.EVENT_COMMIT_TXN) {
            // During replication, by the time we are applying commit transaction event, we expect
            // the partition/s to be already added or altered by previous events. So no need to
            // create add partition event again.
            addPartTask = TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc, isReplication, dumpRoot, metricCollector), x.getConf());
        }
        MoveWork moveWork = new MoveWork(x.getInputs(), x.getOutputs(), null, null, false, dumpRoot, metricCollector, isReplication);
        // See setLoadFileType and setIsAcidIow calls elsewhere for an example.
        if (replicationSpec.isInReplicationScope() && AcidUtils.isTransactionalTable(tblDesc.getTblProps())) {
            LoadMultiFilesDesc loadFilesWork = new LoadMultiFilesDesc(Collections.singletonList(destPath), Collections.singletonList(tgtLocation), true, null, null);
            moveWork.setMultiFilesDesc(loadFilesWork);
            moveWork.setNeedCleanTarget(replicationSpec.isReplace());
        } else {
            LoadTableDesc loadTableWork = new LoadTableDesc(moveTaskSrc, Utilities.getTableDesc(table), partSpec.getPartSpec(), loadFileType, writeId);
            loadTableWork.setStmtId(stmtId);
            loadTableWork.setInheritTableSpecs(false);
            moveWork.setLoadTableWork(loadTableWork);
        }
        if (loadFileType == LoadFileType.IGNORE) {
            // update which is again done in load operations as part of move task.
            if (x.getEventType() == DumpType.EVENT_INSERT) {
                copyTask.addDependentTask(TaskFactory.get(moveWork, x.getConf()));
            } else {
                if (addPartTask != null) {
                    copyTask.addDependentTask(addPartTask);
                }
            }
            return copyTask;
        }
        Task<?> loadPartTask = TaskFactory.get(moveWork, x.getConf());
        copyTask.addDependentTask(loadPartTask);
        if (addPartTask != null) {
            addPartTask.addDependentTask(loadPartTask);
            x.getTasks().add(copyTask);
            return addPartTask;
        }
        return copyTask;
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MoveWork(org.apache.hadoop.hive.ql.plan.MoveWork) AlterTableAddPartitionDesc(org.apache.hadoop.hive.ql.ddl.table.partition.add.AlterTableAddPartitionDesc) LoadFileType(org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType) LoadMultiFilesDesc(org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) DDLWork(org.apache.hadoop.hive.ql.ddl.DDLWork) CopyWork(org.apache.hadoop.hive.ql.plan.CopyWork) Database(org.apache.hadoop.hive.metastore.api.Database)

Example 5 with LoadFileType

Use of org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType in project hive by apache.

From the class ImportSemanticAnalyzer, method loadTable (overload with dumpRoot and metricCollector).

private static Task<?> loadTable(URI fromURI, Table table, boolean replace, Path tgtPath, ReplicationSpec replicationSpec, EximUtil.SemanticAnalyzerWrapperContext x, Long writeId, int stmtId, String dumpRoot, ReplicationMetricCollector metricCollector) throws HiveException {
    assert table != null;
    assert table.getParameters() != null;
    Path dataPath = new Path(fromURI.toString(), EximUtil.DATA_PATH_NAME);
    Path destPath = null, loadPath = null;
    LoadFileType lft;
    boolean isSkipTrash = false;
    boolean needRecycle = false;
    if (replicationSpec.isInReplicationScope()) {
        isSkipTrash = MetaStoreUtils.isSkipTrash(table.getParameters());
        if (table.isTemporary()) {
            needRecycle = false;
        } else {
            org.apache.hadoop.hive.metastore.api.Database db = x.getHive().getDatabase(table.getDbName());
            needRecycle = db != null && ReplChangeManager.shouldEnableCm(db, table.getTTable());
        }
    }
    if (AcidUtils.isTransactionalTable(table)) {
        String mmSubdir = replace ? AcidUtils.baseDir(writeId) : AcidUtils.deltaSubdir(writeId, writeId, stmtId);
        destPath = new Path(tgtPath, mmSubdir);
        /**
         * CopyTask below will copy files from the 'archive' to a delta_x_x in the table/partition
         * directory, i.e. the final destination for these files.  This has to be a copy to preserve
         * the archive.  MoveTask is optimized to do a 'rename' if files are on the same FileSystem.
         * So setting 'loadPath' this way will make
         * {@link Hive#loadTable(Path, String, LoadFileType, boolean, boolean, boolean,
         * boolean, Long, int)}
         * skip the unnecessary file (rename) operation but it will perform other things.
         */
        loadPath = tgtPath;
        lft = LoadFileType.KEEP_EXISTING;
    } else {
        destPath = loadPath = x.getCtx().getExternalTmpPath(tgtPath);
        lft = replace ? LoadFileType.REPLACE_ALL : LoadFileType.OVERWRITE_EXISTING;
    }
    if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
        Utilities.FILE_OP_LOGGER.trace("adding import work for table with source location: " + dataPath + "; table: " + tgtPath + "; copy destination " + destPath + "; mm " + writeId + " for " + table.getTableName() + ": " + (AcidUtils.isFullAcidTable(table) ? "acid" : (AcidUtils.isInsertOnlyTable(table) ? "mm" : "flat")));
    }
    Task<?> copyTask = null;
    if (replicationSpec.isInReplicationScope()) {
        boolean copyAtLoad = x.getConf().getBoolVar(HiveConf.ConfVars.REPL_RUN_DATA_COPY_TASKS_ON_TARGET);
        copyTask = ReplCopyTask.getLoadCopyTask(replicationSpec, dataPath, destPath, x.getConf(), isSkipTrash, needRecycle, copyAtLoad, dumpRoot, metricCollector);
    } else {
        copyTask = TaskFactory.get(new CopyWork(dataPath, destPath, false, dumpRoot, metricCollector, true));
    }
    MoveWork moveWork = new MoveWork(x.getInputs(), x.getOutputs(), null, null, false, dumpRoot, metricCollector, true);
    if (replicationSpec.isInReplicationScope() && AcidUtils.isTransactionalTable(table)) {
        LoadMultiFilesDesc loadFilesWork = new LoadMultiFilesDesc(Collections.singletonList(destPath), Collections.singletonList(tgtPath), true, null, null);
        moveWork.setMultiFilesDesc(loadFilesWork);
        moveWork.setNeedCleanTarget(replace);
    } else {
        LoadTableDesc loadTableWork = new LoadTableDesc(loadPath, Utilities.getTableDesc(table), new TreeMap<>(), lft, writeId);
        loadTableWork.setStmtId(stmtId);
        moveWork.setLoadTableWork(loadTableWork);
    }
    // If importing into an existing table, the FileFormat is checked by
    // ImportSemanticAnalyzer.checkTable()
    Task<?> loadTableTask = TaskFactory.get(moveWork, x.getConf());
    copyTask.addDependentTask(loadTableTask);
    x.getTasks().add(copyTask);
    return loadTableTask;
}
Also used : Path(org.apache.hadoop.fs.Path) MoveWork(org.apache.hadoop.hive.ql.plan.MoveWork) LoadFileType(org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType) LoadMultiFilesDesc(org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) CopyWork(org.apache.hadoop.hive.ql.plan.CopyWork) Database(org.apache.hadoop.hive.metastore.api.Database)
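
Examples 2, 4 and 5 share the same branching when wiring the MoveWork: for a transactional table loaded under replication the data is copied straight to the final location and the move task only registers the files through a LoadMultiFilesDesc, while every other path attaches a LoadTableDesc that carries the chosen LoadFileType. The condensed sketch below uses a hypothetical helper name and only the constructor calls already visible in the examples above.

// Hypothetical helper condensing the MoveWork wiring shared by Examples 2, 4 and 5; it is not part of Hive.
private static void attachLoadWork(MoveWork moveWork, Table table, ReplicationSpec replicationSpec,
        Path copyDestPath, Path targetPath, Path loadPath, LoadFileType loadFileType, Long writeId, int stmtId) {
    if (replicationSpec.isInReplicationScope() && AcidUtils.isTransactionalTable(table)) {
        // Files were copied directly to the final location; the move task only registers them.
        LoadMultiFilesDesc loadFilesWork = new LoadMultiFilesDesc(
                Collections.singletonList(copyDestPath), Collections.singletonList(targetPath), true, null, null);
        moveWork.setMultiFilesDesc(loadFilesWork);
    } else {
        // Data sits in a staging directory (or an MM delta/base dir); the move task applies it
        // using the chosen LoadFileType.
        LoadTableDesc loadTableWork = new LoadTableDesc(loadPath, Utilities.getTableDesc(table),
                new TreeMap<>(), loadFileType, writeId);
        loadTableWork.setStmtId(stmtId);
        moveWork.setLoadTableWork(loadTableWork);
    }
}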

Aggregations

Path (org.apache.hadoop.fs.Path) 5
LoadTableDesc (org.apache.hadoop.hive.ql.plan.LoadTableDesc) 5
LoadFileType (org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType) 5
MoveWork (org.apache.hadoop.hive.ql.plan.MoveWork) 4
CopyWork (org.apache.hadoop.hive.ql.plan.CopyWork) 3
LoadMultiFilesDesc (org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc) 3
ImmutableMap (com.google.common.collect.ImmutableMap) 2
FileNotFoundException (java.io.FileNotFoundException) 2
IOException (java.io.IOException) 2
ArrayList (java.util.ArrayList) 2
HashMap (java.util.HashMap) 2
LinkedList (java.util.LinkedList) 2
List (java.util.List) 2
Map (java.util.Map) 2
Database (org.apache.hadoop.hive.metastore.api.Database) 2
VisibleForTesting (com.google.common.annotations.VisibleForTesting) 1
Preconditions (com.google.common.base.Preconditions) 1
Splitter (com.google.common.base.Splitter) 1
ImmutableList (com.google.common.collect.ImmutableList) 1
Lists (com.google.common.collect.Lists) 1