
Example 1 with FileMergeDesc

Use of org.apache.hadoop.hive.ql.plan.FileMergeDesc in project hive by apache.

In the class GenMapRedUtils, the method createMergeTask:

/**
 * Create a block-level merge task for RCFile tables or a stripe-level merge
 * task for ORC tables.
 *
 * @param fsInputDesc the file sink descriptor whose output is to be merged
 * @param finalName the destination path of the merged output
 * @param hasDynamicPartitions whether the query uses dynamic partitioning
 * @param ctx the compilation operator context
 * @return the merge work as a MapWork; a SemanticException is thrown if the
 *         table is stored as anything other than RCFile or ORC
 */
public static MapWork createMergeTask(FileSinkDesc fsInputDesc, Path finalName, boolean hasDynamicPartitions, CompilationOpContext ctx) throws SemanticException {
    Path inputDir = fsInputDesc.getMergeInputDirName();
    TableDesc tblDesc = fsInputDesc.getTableInfo();
    List<Path> inputDirs = new ArrayList<Path>(1);
    ArrayList<String> inputDirstr = new ArrayList<String>(1);
    // with dynamic partitioning or list bucketing (skewed values stored as
    // directories) the actual input paths are resolved later, so the input
    // directory is only added up front in the plain case
    if (!hasDynamicPartitions && !GenMapRedUtils.isSkewedStoredAsDirs(fsInputDesc)) {
        inputDirs.add(inputDir);
    }
    inputDirstr.add(inputDir.toString());
    // internal input format class for CombineHiveInputFormat
    final Class<? extends InputFormat> internalIFClass;
    if (tblDesc.getInputFileFormatClass().equals(RCFileInputFormat.class)) {
        internalIFClass = RCFileBlockMergeInputFormat.class;
    } else if (tblDesc.getInputFileFormatClass().equals(OrcInputFormat.class)) {
        internalIFClass = OrcFileStripeMergeInputFormat.class;
    } else {
        throw new SemanticException("createMergeTask called on a table with file" + " format other than RCFile or ORCFile");
    }
    if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
        Utilities.FILE_OP_LOGGER.trace("creating mergefilework from " + inputDirs + " to " + finalName);
    }
    // create the merge file work
    MergeFileWork work = new MergeFileWork(inputDirs, finalName, hasDynamicPartitions, tblDesc.getInputFileFormatClass().getName(), tblDesc);
    Map<Path, List<String>> pathToAliases = new LinkedHashMap<>();
    pathToAliases.put(inputDir, inputDirstr);
    work.setMapperCannotSpanPartns(true);
    work.setPathToAliases(pathToAliases);
    PartitionDesc pDesc = new PartitionDesc(tblDesc, null);
    pDesc.setInputFileFormatClass(internalIFClass);
    work.addPathToPartitionInfo(inputDir, pDesc);
    work.setListBucketingCtx(fsInputDesc.getLbCtx());
    // create alias to work which contains the merge operator
    LinkedHashMap<String, Operator<? extends OperatorDesc>> aliasToWork = new LinkedHashMap<String, Operator<? extends OperatorDesc>>();
    Operator<? extends OperatorDesc> mergeOp = null;
    final FileMergeDesc fmd;
    if (tblDesc.getInputFileFormatClass().equals(RCFileInputFormat.class)) {
        fmd = new RCFileMergeDesc();
    } else {
        fmd = new OrcFileMergeDesc();
    }
    fmd.setIsMmTable(fsInputDesc.isMmTable());
    boolean isCompactionTable = AcidUtils.isCompactionTable(tblDesc.getProperties());
    fmd.setIsCompactionTable(isCompactionTable);
    fmd.setWriteId(fsInputDesc.getTableWriteId());
    int stmtId = fsInputDesc.getStatementId();
    fmd.setStmtId(stmtId == -1 ? 0 : stmtId);
    fmd.setDpCtx(fsInputDesc.getDynPartCtx());
    fmd.setOutputPath(finalName);
    fmd.setHasDynamicPartitions(work.hasDynamicPartitions());
    fmd.setListBucketingAlterTableConcatenate(work.isListBucketingAlterTableConcatenate());
    int lbLevel = work.getListBucketingCtx() == null ? 0 : work.getListBucketingCtx().calculateListBucketingLevel();
    fmd.setListBucketingDepth(lbLevel);
    mergeOp = OperatorFactory.get(ctx, fmd);
    aliasToWork.put(inputDir.toString(), mergeOp);
    work.setAliasToWork(aliasToWork);
    return work;
}
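
The returned work keys both maps by the same alias string, the input directory's toString() form. A minimal sanity-check sketch of that invariant, assuming MapWork's standard getPathToAliases()/getAliasToWork() accessors; the helper itself is hypothetical and not part of Hive:

import java.util.List;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.merge.MergeFileWork;

// Hypothetical helper: asserts that pathToAliases and aliasToWork are keyed
// by the same string, the input directory's toString() form.
static void verifyAliasWiring(MergeFileWork work, Path inputDir) {
    List<String> aliases = work.getPathToAliases().get(inputDir);
    if (aliases == null || !aliases.contains(inputDir.toString())) {
        throw new IllegalStateException("input path is not mapped to its alias");
    }
    if (!work.getAliasToWork().containsKey(inputDir.toString())) {
        throw new IllegalStateException("alias has no operator tree");
    }
}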

Example 2 with FileMergeDesc

Use of org.apache.hadoop.hive.ql.plan.FileMergeDesc in project hive by apache.

In the class AlterTableConcatenateOperation, the method getMergeFileWork:

private MergeFileWork getMergeFileWork(CompilationOpContext opContext) {
    List<Path> inputDirList = Lists.newArrayList(desc.getInputDir());
    // merge work only needs input and output.
    MergeFileWork mergeWork = new MergeFileWork(inputDirList, desc.getOutputDir(), desc.getInputFormatClass().getName(), desc.getTableDesc());
    mergeWork.setListBucketingCtx(desc.getLbCtx());
    mergeWork.resolveConcatenateMerge(context.getDb().getConf());
    mergeWork.setMapperCannotSpanPartns(true);
    mergeWork.setSourceTableInputFormat(desc.getInputFormatClass().getName());
    Map<Path, List<String>> pathToAliases = new LinkedHashMap<>();
    // the alias is the list's toString() form (bracketed), matching the
    // aliasToWork key used below
    List<String> inputDirStr = Lists.newArrayList(inputDirList.toString());
    pathToAliases.put(desc.getInputDir(), inputDirStr);
    mergeWork.setPathToAliases(pathToAliases);
    FileMergeDesc fmd = getFileMergeDesc();
    Operator<? extends OperatorDesc> mergeOp = OperatorFactory.get(opContext, fmd);
    Map<String, Operator<? extends OperatorDesc>> aliasToWork = new LinkedHashMap<String, Operator<? extends OperatorDesc>>();
    aliasToWork.put(inputDirList.toString(), mergeOp);
    mergeWork.setAliasToWork(aliasToWork);
    return mergeWork;
}
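
One detail is easy to miss here: the alias is the List's own toString() (a bracketed form), not the bare path, and the same string keys both pathToAliases and aliasToWork, which keeps the two maps consistent. A standalone illustration, using an invented path:

import java.util.List;
import org.apache.hadoop.fs.Path;
import com.google.common.collect.Lists;

public class AliasForm {
    public static void main(String[] args) {
        // Invented path; any single-element list behaves the same way.
        List<Path> dirs = Lists.newArrayList(new Path("/warehouse/t/part=1"));
        // List.toString() wraps elements in brackets, so the alias built in
        // getMergeFileWork above is "[/warehouse/t/part=1]", not the bare path.
        String alias = dirs.toString();
        System.out.println(alias); // prints [/warehouse/t/part=1]
    }
}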

Example 3 with FileMergeDesc

Use of org.apache.hadoop.hive.ql.plan.FileMergeDesc in project hive by apache.

In the class DDLTask, the method mergeFiles:

/**
 * First, make sure the source table/partition is not archived, has no
 * indexes, and is stored as RCFile or ORC; if any of these checks fails,
 * throw an exception.
 *
 * The merge itself is done by building a MergeFileTask (or, on Tez, a
 * TezTask wrapping the MergeFileWork) from the mergeFilesDesc.
 *
 * @param db the Hive database handle
 * @param mergeFilesDesc describes the input/output directories and the table to merge
 * @return the result of executing the merge task (0 on success)
 * @throws HiveException
 */
private int mergeFiles(Hive db, AlterTablePartMergeFilesDesc mergeFilesDesc, DriverContext driverContext) throws HiveException {
    ListBucketingCtx lbCtx = mergeFilesDesc.getLbCtx();
    boolean lbatc = lbCtx == null ? false : lbCtx.isSkewedStoredAsDir();
    int lbd = lbCtx == null ? 0 : lbCtx.calculateListBucketingLevel();
    // merge work only needs input and output.
    MergeFileWork mergeWork = new MergeFileWork(mergeFilesDesc.getInputDir(), mergeFilesDesc.getOutputDir(), mergeFilesDesc.getInputFormatClass().getName(), mergeFilesDesc.getTableDesc());
    LinkedHashMap<Path, ArrayList<String>> pathToAliases = new LinkedHashMap<>();
    ArrayList<String> inputDirstr = new ArrayList<String>(1);
    inputDirstr.add(mergeFilesDesc.getInputDir().toString());
    pathToAliases.put(mergeFilesDesc.getInputDir().get(0), inputDirstr);
    mergeWork.setPathToAliases(pathToAliases);
    mergeWork.setListBucketingCtx(mergeFilesDesc.getLbCtx());
    mergeWork.resolveConcatenateMerge(db.getConf());
    mergeWork.setMapperCannotSpanPartns(true);
    mergeWork.setSourceTableInputFormat(mergeFilesDesc.getInputFormatClass().getName());
    final FileMergeDesc fmd;
    if (mergeFilesDesc.getInputFormatClass().equals(RCFileInputFormat.class)) {
        fmd = new RCFileMergeDesc();
    } else {
        // safe to assume ORC here: the semantic analyzer only admits RCFile or ORC
        fmd = new OrcFileMergeDesc();
    }
    fmd.setDpCtx(null);
    fmd.setHasDynamicPartitions(false);
    fmd.setListBucketingAlterTableConcatenate(lbatc);
    fmd.setListBucketingDepth(lbd);
    fmd.setOutputPath(mergeFilesDesc.getOutputDir());
    CompilationOpContext opContext = driverContext.getCtx().getOpContext();
    Operator<? extends OperatorDesc> mergeOp = OperatorFactory.get(opContext, fmd);
    LinkedHashMap<String, Operator<? extends OperatorDesc>> aliasToWork = new LinkedHashMap<String, Operator<? extends OperatorDesc>>();
    aliasToWork.put(mergeFilesDesc.getInputDir().toString(), mergeOp);
    mergeWork.setAliasToWork(aliasToWork);
    DriverContext driverCxt = new DriverContext();
    Task<?> task;
    if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
        TezWork tezWork = new TezWork(queryState.getQueryId(), conf);
        mergeWork.setName("File Merge");
        tezWork.add(mergeWork);
        task = new TezTask();
        ((TezTask) task).setWork(tezWork);
    } else {
        task = new MergeFileTask();
        ((MergeFileTask) task).setWork(mergeWork);
    }
    // initialize the task and execute
    task.initialize(queryState, getQueryPlan(), driverCxt, opContext);
    subtask = task;
    int ret = task.execute(driverCxt);
    if (subtask.getException() != null) {
        setException(subtask.getException());
    }
    return ret;
}
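
This method is what ALTER TABLE ... CONCATENATE ultimately runs. A minimal client-side sketch that triggers it over JDBC; the connection URL, table, and partition are illustrative:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class ConcatenateExample {
    public static void main(String[] args) throws Exception {
        // Illustrative HiveServer2 URL; assumes an existing RCFile or ORC table.
        try (Connection conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
             Statement st = conn.createStatement()) {
            // Hive plans this as the MergeFileTask (MR) or TezTask built in
            // mergeFiles above, merging the partition's small files.
            st.execute("ALTER TABLE web_logs PARTITION (ds='2024-01-01') CONCATENATE");
        }
    }
}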

Example 4 with FileMergeDesc

Use of org.apache.hadoop.hive.ql.plan.FileMergeDesc in project hive by apache.

In the class AlterTableConcatenateOperation, the method getFileMergeDesc:

private FileMergeDesc getFileMergeDesc() {
    // safe to assume ORC otherwise: the semantic analyzer only admits RCFile or ORC
    FileMergeDesc fmd = (desc.getInputFormatClass().equals(RCFileInputFormat.class)) ? new RCFileMergeDesc() : new OrcFileMergeDesc();
    ListBucketingCtx lbCtx = desc.getLbCtx();
    boolean lbatc = lbCtx == null ? false : lbCtx.isSkewedStoredAsDir();
    int lbd = lbCtx == null ? 0 : lbCtx.calculateListBucketingLevel();
    fmd.setDpCtx(null);
    fmd.setHasDynamicPartitions(false);
    fmd.setListBucketingAlterTableConcatenate(lbatc);
    fmd.setListBucketingDepth(lbd);
    fmd.setOutputPath(desc.getOutputDir());
    return fmd;
}
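
The RCFile-vs-ORC dispatch above also appears in Examples 1 and 3. A hypothetical consolidation of that choice (not part of Hive) to make the pattern explicit:

import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
import org.apache.hadoop.hive.ql.plan.FileMergeDesc;
import org.apache.hadoop.hive.ql.plan.OrcFileMergeDesc;
import org.apache.hadoop.hive.ql.plan.RCFileMergeDesc;

public class MergeDescFactory {
    // Hypothetical factory mirroring the dispatch used in the examples:
    // RCFile gets a block-level merge descriptor; anything else is assumed
    // to be ORC, since the semantic analyzer only admits RCFile or ORC.
    static FileMergeDesc forInputFormat(Class<?> inputFormatClass) {
        return inputFormatClass.equals(RCFileInputFormat.class)
            ? new RCFileMergeDesc()
            : new OrcFileMergeDesc();
    }
}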

Aggregations

FileMergeDesc (org.apache.hadoop.hive.ql.plan.FileMergeDesc): 4
OrcFileMergeDesc (org.apache.hadoop.hive.ql.plan.OrcFileMergeDesc): 4
RCFileMergeDesc (org.apache.hadoop.hive.ql.plan.RCFileMergeDesc): 4
LinkedHashMap (java.util.LinkedHashMap): 3
Path (org.apache.hadoop.fs.Path): 3
MergeFileWork (org.apache.hadoop.hive.ql.io.merge.MergeFileWork): 3
OperatorDesc (org.apache.hadoop.hive.ql.plan.OperatorDesc): 3
ArrayList (java.util.ArrayList): 2
List (java.util.List): 2
Operator (org.apache.hadoop.hive.ql.exec.Operator): 2
ListBucketingCtx (org.apache.hadoop.hive.ql.plan.ListBucketingCtx): 2
SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint): 1
SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint): 1
SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint): 1
SQLUniqueConstraint (org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint): 1
CompilationOpContext (org.apache.hadoop.hive.ql.CompilationOpContext): 1
DriverContext (org.apache.hadoop.hive.ql.DriverContext): 1
DemuxOperator (org.apache.hadoop.hive.ql.exec.DemuxOperator): 1
FileSinkOperator (org.apache.hadoop.hive.ql.exec.FileSinkOperator): 1
JoinOperator (org.apache.hadoop.hive.ql.exec.JoinOperator): 1