Example 1 with CreateTableDesc

Use of org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc in project hive by apache.

The class CreateTableHook, method postAnalyze.

@Override
public void postAnalyze(HiveSemanticAnalyzerHookContext context, List<Task<?>> rootTasks) throws SemanticException {
    if (rootTasks.size() == 0) {
        // No DDL task is created if this is CREATE TABLE IF NOT EXISTS and the table already exists
        return;
    }
    Task<?> t = rootTasks.get(rootTasks.size() - 1);
    if (!(t instanceof DDLTask)) {
        return;
    }
    DDLTask task = (DDLTask) t;
    DDLDesc d = task.getWork().getDDLDesc();
    if (!(d instanceof CreateTableDesc)) {
        return;
    }
    CreateTableDesc desc = (CreateTableDesc) d;
    Map<String, String> tblProps = desc.getTblProps();
    if (tblProps == null) {
        // tblProps will be null if the user didn't specify TBLPROPERTIES in the
        // CREATE TABLE command.
        tblProps = new HashMap<String, String>();
    }
    // First, check whether the user is allowed to create the table.
    String storageHandler = desc.getStorageHandler();
    if (StringUtils.isNotEmpty(storageHandler)) {
        try {
            HiveStorageHandler storageHandlerInst = HCatUtil.getStorageHandler(context.getConf(), desc.getStorageHandler(), desc.getSerName(), desc.getInputFormat(), desc.getOutputFormat());
        // Authorization checks are performed by the storageHandler.getAuthorizationProvider(), if
        // StorageDelegationAuthorizationProvider is used.
        } catch (IOException e) {
            throw new SemanticException(e);
        }
    }
    try {
        Table table = context.getHive().newTable(desc.getDbTableName());
        if (desc.getLocation() != null) {
            table.setDataLocation(new Path(desc.getLocation()));
        }
        if (desc.getStorageHandler() != null) {
            table.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE, desc.getStorageHandler());
        }
        for (Map.Entry<String, String> prop : tblProps.entrySet()) {
            table.setProperty(prop.getKey(), prop.getValue());
        }
        for (Map.Entry<String, String> prop : desc.getSerdeProps().entrySet()) {
            table.setSerdeParam(prop.getKey(), prop.getValue());
        }
        if (HCatAuthUtil.isAuthorizationEnabled(context.getConf())) {
            authorize(table, Privilege.CREATE);
        }
    } catch (HiveException ex) {
        throw new SemanticException(ex);
    }
    desc.setTblProps(tblProps);
    // tableName is an instance field of this hook, populated in preAnalyze().
    context.getConf().set(HCatConstants.HCAT_CREATE_TBL_NAME, tableName);
}
Also used : Path(org.apache.hadoop.fs.Path) HiveStorageHandler(org.apache.hadoop.hive.ql.metadata.HiveStorageHandler) Table(org.apache.hadoop.hive.ql.metadata.Table) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) IOException(java.io.IOException) CreateTableDesc(org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc) DDLTask(org.apache.hadoop.hive.ql.ddl.DDLTask) DDLDesc(org.apache.hadoop.hive.ql.ddl.DDLDesc) HashMap(java.util.HashMap) Map(java.util.Map) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException)
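
The guarded extraction pattern above (empty rootTasks check, instanceof DDLTask, instanceof CreateTableDesc) is the usual way a semantic analyzer hook reaches the CreateTableDesc. Below is a minimal, illustrative sketch of a standalone hook built on that same pattern; the class name com.example.MyCreateTableHook is hypothetical, while the Hive types and accessors (getWork().getDDLDesc(), getDbTableName(), getStorageHandler()) are the ones used in Example 1.

// Illustrative sketch only - not part of the Hive sources. Assumes the hook is registered via the
// hive.semantic.analyzer.hook property; the class and package names are hypothetical.
package com.example;

import java.util.List;

import org.apache.hadoop.hive.ql.ddl.DDLDesc;
import org.apache.hadoop.hive.ql.ddl.DDLTask;
import org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook;
import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;

public class MyCreateTableHook extends AbstractSemanticAnalyzerHook {

    @Override
    public void postAnalyze(HiveSemanticAnalyzerHookContext context, List<Task<?>> rootTasks)
            throws SemanticException {
        if (rootTasks.isEmpty()) {
            // e.g. CREATE TABLE IF NOT EXISTS on an existing table produces no DDL task
            return;
        }
        Task<?> last = rootTasks.get(rootTasks.size() - 1);
        if (!(last instanceof DDLTask)) {
            return;
        }
        DDLDesc ddlDesc = ((DDLTask) last).getWork().getDDLDesc();
        if (!(ddlDesc instanceof CreateTableDesc)) {
            return;
        }
        CreateTableDesc desc = (CreateTableDesc) ddlDesc;
        // Read-only inspection here; Example 2 shows how the desc can also be mutated.
        System.out.println("Creating table " + desc.getDbTableName()
            + (desc.getStorageHandler() != null ? " with storage handler " + desc.getStorageHandler() : ""));
    }
}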

Example 2 with CreateTableDesc

Use of org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc in project hive by apache.

The class DummyCreateTableHook, method postAnalyze.

@Override
public void postAnalyze(HiveSemanticAnalyzerHookContext context, List<Task<?>> rootTasks) throws SemanticException {
    CreateTableDesc desc = (CreateTableDesc) ((DDLTask) rootTasks.get(rootTasks.size() - 1)).getWork().getDDLDesc();
    Map<String, String> tblProps = desc.getTblProps();
    if (tblProps == null) {
        tblProps = new HashMap<String, String>();
    }
    tblProps.put("createdBy", DummyCreateTableHook.class.getName());
    tblProps.put("Message", "Open Source rocks!!");
    desc.setTblProps(tblProps);
}
Also used : CreateTableDesc(org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc) DDLTask(org.apache.hadoop.hive.ql.ddl.DDLTask)
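
Hooks like DummyCreateTableHook are picked up through the hive.semantic.analyzer.hook configuration property, a comma-separated list of hook class names evaluated during compilation. A hedged wiring sketch follows; only that property name comes from Hive, the helper class and the referenced hook class are illustrative.

// Illustrative wiring sketch - only the hive.semantic.analyzer.hook property is part of Hive;
// the helper class and the referenced hook class name are hypothetical.
import org.apache.hadoop.hive.conf.HiveConf;

public class HookWiringSketch {

    public static HiveConf withCreateTableHook() {
        HiveConf conf = new HiveConf();
        // Comma-separated list of HiveSemanticAnalyzerHook implementations to run during compilation.
        conf.set("hive.semantic.analyzer.hook", "com.example.MyCreateTableHook");
        return conf;
    }
}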

Example 3 with CreateTableDesc

Use of org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc in project hive by apache.

The class TaskCompiler, method compile.

@SuppressWarnings("nls")
public void compile(final ParseContext pCtx, final List<Task<?>> rootTasks, final Set<ReadEntity> inputs, final Set<WriteEntity> outputs) throws SemanticException {
    Context ctx = pCtx.getContext();
    GlobalLimitCtx globalLimitCtx = pCtx.getGlobalLimitCtx();
    List<Task<MoveWork>> mvTask = new ArrayList<>();
    List<LoadTableDesc> loadTableWork = pCtx.getLoadTableWork();
    List<LoadFileDesc> loadFileWork = pCtx.getLoadFileWork();
    boolean isCStats = pCtx.getQueryProperties().isAnalyzeRewrite();
    int outerQueryLimit = pCtx.getQueryProperties().getOuterQueryLimit();
    boolean directInsertCtas = false;
    if (pCtx.getCreateTable() != null && pCtx.getCreateTable().getStorageHandler() != null) {
        try {
            directInsertCtas = HiveUtils.getStorageHandler(conf, pCtx.getCreateTable().getStorageHandler()).directInsertCTAS();
        } catch (HiveException e) {
            throw new SemanticException("Failed to load storage handler:  " + e.getMessage());
        }
    }
    if (pCtx.getFetchTask() != null) {
        if (pCtx.getFetchTask().getTblDesc() == null) {
            return;
        }
        pCtx.getFetchTask().getWork().setHiveServerQuery(SessionState.get().isHiveServerQuery());
        TableDesc resultTab = pCtx.getFetchTask().getTblDesc();
        // If the serde is ThriftJDBCBinarySerDe, it requires that NoOpFetchFormatter be used; otherwise
        // either the ThriftFormatter or the DefaultFetchFormatter should be used.
        if (!resultTab.getSerdeClassName().equalsIgnoreCase(ThriftJDBCBinarySerDe.class.getName())) {
            if (SessionState.get().isHiveServerQuery()) {
                conf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, ThriftFormatter.class.getName());
            } else {
                String formatterName = conf.get(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER);
                if (formatterName == null || formatterName.isEmpty()) {
                    conf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, DefaultFetchFormatter.class.getName());
                }
            }
        }
        return;
    }
    if (pCtx.getQueryProperties().isAnalyzeCommand()) {
        LOG.debug("Skipping optimize operator plan for analyze command.");
    } else {
        optimizeOperatorPlan(pCtx);
    }
    /*
     * In case of a select, use a fetch task instead of a move task.
     * If the select is from analyze table column rewrite, don't create a fetch task. Instead create
     * a column stats task later.
     */
    if (pCtx.getQueryProperties().isQuery() && !isCStats) {
        if ((!loadTableWork.isEmpty()) || (loadFileWork.size() != 1)) {
            throw new SemanticException(ErrorMsg.INVALID_LOAD_TABLE_FILE_WORK.getMsg());
        }
        LoadFileDesc loadFileDesc = loadFileWork.get(0);
        String cols = loadFileDesc.getColumns();
        String colTypes = loadFileDesc.getColumnTypes();
        TableDesc resultTab = pCtx.getFetchTableDesc();
        boolean shouldSetOutputFormatter = false;
        if (resultTab == null) {
            ResultFileFormat resFileFormat = conf.getResultFileFormat();
            String fileFormat;
            Class<? extends Deserializer> serdeClass;
            if (SessionState.get().getIsUsingThriftJDBCBinarySerDe() && resFileFormat == ResultFileFormat.SEQUENCEFILE) {
                fileFormat = resFileFormat.toString();
                serdeClass = ThriftJDBCBinarySerDe.class;
                shouldSetOutputFormatter = true;
            } else if (resFileFormat == ResultFileFormat.SEQUENCEFILE) {
                // The file format is changed so that, if the file sink provides a list of files to fetch
                // (instead of a whole directory), listStatus is done on those files (which is what
                // HiveSequenceFileInputFormat does).
                fileFormat = "HiveSequenceFile";
                serdeClass = LazySimpleSerDe.class;
            } else {
                // All other cases we use the defined file format and LazySimpleSerde
                fileFormat = resFileFormat.toString();
                serdeClass = LazySimpleSerDe.class;
            }
            resultTab = PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, fileFormat, serdeClass);
        } else {
            shouldSetOutputFormatter = resultTab.getProperties().getProperty(serdeConstants.SERIALIZATION_LIB).equalsIgnoreCase(ThriftJDBCBinarySerDe.class.getName());
        }
        if (shouldSetOutputFormatter) {
            // Set the fetch formatter to be a no-op for the ListSinkOperator, since we will
            // read formatted thrift objects from the output SequenceFile written by Tasks.
            conf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, NoOpFetchFormatter.class.getName());
        }
        FetchWork fetch = new FetchWork(loadFileDesc.getSourcePath(), resultTab, outerQueryLimit);
        boolean isHiveServerQuery = SessionState.get().isHiveServerQuery();
        fetch.setHiveServerQuery(isHiveServerQuery);
        fetch.setSource(pCtx.getFetchSource());
        fetch.setSink(pCtx.getFetchSink());
        if (isHiveServerQuery && null != resultTab && resultTab.getSerdeClassName().equalsIgnoreCase(ThriftJDBCBinarySerDe.class.getName()) && HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_SERVER2_THRIFT_RESULTSET_SERIALIZE_IN_TASKS)) {
            fetch.setIsUsingThriftJDBCBinarySerDe(true);
        } else {
            fetch.setIsUsingThriftJDBCBinarySerDe(false);
        }
        // The idea here is to keep an object reference both in FileSink and in FetchTask for list of files
        // to be fetched. During Job close file sink will populate the list and fetch task later will use it
        // to fetch the results.
        Collection<Operator<?>> tableScanOps = Lists.<Operator<?>>newArrayList(pCtx.getTopOps().values());
        Set<FileSinkOperator> fsOps = OperatorUtils.findOperators(tableScanOps, FileSinkOperator.class);
        if (fsOps != null && fsOps.size() == 1) {
            FileSinkOperator op = fsOps.iterator().next();
            Set<FileStatus> filesToFetch = new HashSet<>();
            op.getConf().setFilesToFetch(filesToFetch);
            fetch.setFilesToFetch(filesToFetch);
        }
        pCtx.setFetchTask((FetchTask) TaskFactory.get(fetch));
        // For the FetchTask, the limit optimization requires we fetch all the rows
        // in memory and count how many rows we get. It's not practical if the
        // limit factor is too big
        int fetchLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVELIMITOPTMAXFETCH);
        if (globalLimitCtx.isEnable() && globalLimitCtx.getGlobalLimit() > fetchLimit) {
            LOG.info("For FetchTask, LIMIT " + globalLimitCtx.getGlobalLimit() + " > " + fetchLimit + ". Doesn't qualify limit optimization.");
            globalLimitCtx.disableOpt();
        }
        if (outerQueryLimit == 0) {
            // Believe it or not, some tools do generate queries with LIMIT 0 and then expect the
            // query to run quickly. Let's meet their requirement.
            LOG.info("Limit 0. No query execution needed.");
            return;
        }
    } else if (!isCStats) {
        for (LoadTableDesc ltd : loadTableWork) {
            Task<MoveWork> tsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false));
            mvTask.add(tsk);
        }
        boolean oneLoadFileForCtas = true;
        for (LoadFileDesc lfd : loadFileWork) {
            if (pCtx.getQueryProperties().isCTAS() || pCtx.getQueryProperties().isMaterializedView()) {
                if (!oneLoadFileForCtas) {
                    // should not have more than 1 load file for CTAS.
                    throw new SemanticException("One query is not expected to contain multiple CTAS loads statements");
                }
                setLoadFileLocation(pCtx, lfd);
                oneLoadFileForCtas = false;
            }
            mvTask.add(TaskFactory.get(new MoveWork(null, null, null, lfd, false)));
        }
    }
    generateTaskTree(rootTasks, pCtx, mvTask, inputs, outputs);
    // For each task, set the key descriptor for the reducer
    for (Task<?> rootTask : rootTasks) {
        GenMapRedUtils.setKeyAndValueDescForTaskTree(rootTask);
    }
    // If a task contains an operator which instructs bucketizedhiveinputformat to be used, please do so
    for (Task<?> rootTask : rootTasks) {
        setInputFormat(rootTask);
    }
    optimizeTaskPlan(rootTasks, pCtx, ctx);
    /*
     * If the query was the result of analyze table column compute statistics rewrite, create
     * a column stats task instead of a fetch task to persist stats to the metastore.
     * As per HIVE-15903, we will also collect table stats when user computes column stats.
     * That means, if isCStats || !pCtx.getColumnStatsAutoGatherContexts().isEmpty()
     * We need to collect table stats
     * if isCStats, we need to include a basic stats task
     * else it is ColumnStatsAutoGather, which should have a move task with a stats task already.
     */
    if (isCStats || !pCtx.getColumnStatsAutoGatherContexts().isEmpty()) {
        // map from tablename to task (ColumnStatsTask which includes a BasicStatsTask)
        Map<String, StatsTask> map = new LinkedHashMap<>();
        if (isCStats) {
            if (rootTasks == null || rootTasks.size() != 1 || pCtx.getTopOps() == null || pCtx.getTopOps().size() != 1) {
                throw new SemanticException("Can not find correct root task!");
            }
            try {
                Task<?> root = rootTasks.iterator().next();
                StatsTask tsk = (StatsTask) genTableStats(pCtx, pCtx.getTopOps().values().iterator().next(), root, outputs);
                root.addDependentTask(tsk);
                map.put(extractTableFullName(tsk), tsk);
            } catch (HiveException e) {
                throw new SemanticException(e);
            }
            genColumnStatsTask(pCtx.getAnalyzeRewrite(), loadFileWork, map, outerQueryLimit, 0);
        } else {
            Set<Task<?>> leafTasks = new LinkedHashSet<Task<?>>();
            getLeafTasks(rootTasks, leafTasks);
            List<Task<?>> nonStatsLeafTasks = new ArrayList<>();
            for (Task<?> tsk : leafTasks) {
                // map table name to the correct ColumnStatsTask
                if (tsk instanceof StatsTask) {
                    map.put(extractTableFullName((StatsTask) tsk), (StatsTask) tsk);
                } else {
                    nonStatsLeafTasks.add(tsk);
                }
            }
            // add cStatsTask as a dependent of all the nonStatsLeafTasks
            for (Task<?> tsk : nonStatsLeafTasks) {
                for (Task<?> cStatsTask : map.values()) {
                    tsk.addDependentTask(cStatsTask);
                }
            }
            for (ColumnStatsAutoGatherContext columnStatsAutoGatherContext : pCtx.getColumnStatsAutoGatherContexts()) {
                if (!columnStatsAutoGatherContext.isInsertInto()) {
                    genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(), columnStatsAutoGatherContext.getLoadFileWork(), map, outerQueryLimit, 0);
                } else {
                    int numBitVector;
                    try {
                        numBitVector = HiveStatsUtils.getNumBitVectorsForNDVEstimation(conf);
                    } catch (Exception e) {
                        throw new SemanticException(e.getMessage());
                    }
                    genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(), columnStatsAutoGatherContext.getLoadFileWork(), map, outerQueryLimit, numBitVector);
                }
            }
        }
    }
    decideExecMode(rootTasks, ctx, globalLimitCtx);
    // For CTAS, generate a DDL task to create the table, unless this is a materialization or a
    // direct-insert CTAS where the table is created ahead of time by the non-native table
    if (pCtx.getQueryProperties().isCTAS() && !pCtx.getCreateTable().isMaterialization() && !directInsertCtas) {
        // generate a DDL task and make it a dependent task of the leaf
        CreateTableDesc crtTblDesc = pCtx.getCreateTable();
        crtTblDesc.validate(conf);
        Task<?> crtTblTask = TaskFactory.get(new DDLWork(inputs, outputs, crtTblDesc));
        patchUpAfterCTASorMaterializedView(rootTasks, inputs, outputs, crtTblTask, CollectionUtils.isEmpty(crtTblDesc.getPartColNames()));
    } else if (pCtx.getQueryProperties().isMaterializedView()) {
        // generate a DDL task and make it a dependent task of the leaf
        CreateMaterializedViewDesc viewDesc = pCtx.getCreateViewDesc();
        Task<?> crtViewTask = TaskFactory.get(new DDLWork(inputs, outputs, viewDesc));
        patchUpAfterCTASorMaterializedView(rootTasks, inputs, outputs, crtViewTask, CollectionUtils.isEmpty(viewDesc.getPartColNames()));
    } else if (pCtx.getMaterializedViewUpdateDesc() != null) {
        // If there is a materialized view update desc, we introduce it at the end
        // of the task tree.
        MaterializedViewUpdateDesc materializedViewDesc = pCtx.getMaterializedViewUpdateDesc();
        DDLWork ddlWork = new DDLWork(inputs, outputs, materializedViewDesc);
        Set<Task<?>> leafTasks = new LinkedHashSet<Task<?>>();
        getLeafTasks(rootTasks, leafTasks);
        Task<?> materializedViewTask = TaskFactory.get(ddlWork, conf);
        for (Task<?> task : leafTasks) {
            task.addDependentTask(materializedViewTask);
        }
    }
    if (globalLimitCtx.isEnable() && pCtx.getFetchTask() != null) {
        LOG.info("set least row check for FetchTask: " + globalLimitCtx.getGlobalLimit());
        pCtx.getFetchTask().getWork().setLeastNumRows(globalLimitCtx.getGlobalLimit());
    }
    if (globalLimitCtx.isEnable() && globalLimitCtx.getLastReduceLimitDesc() != null) {
        LOG.info("set least row check for LimitDesc: " + globalLimitCtx.getGlobalLimit());
        globalLimitCtx.getLastReduceLimitDesc().setLeastRows(globalLimitCtx.getGlobalLimit());
    }
    Interner<TableDesc> interner = Interners.newStrongInterner();
    // Perform Final chores on generated Map works
    // 1.  Intern the table descriptors
    // 2.  Derive final explain attributes based on previous compilation.
    GenMapRedUtils.finalMapWorkChores(rootTasks, pCtx.getConf(), interner);
}
Also used : FileSinkOperator(org.apache.hadoop.hive.ql.exec.FileSinkOperator) TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) Operator(org.apache.hadoop.hive.ql.exec.Operator) MoveWork(org.apache.hadoop.hive.ql.plan.MoveWork) LinkedHashSet(java.util.LinkedHashSet) DDLTask(org.apache.hadoop.hive.ql.ddl.DDLTask) BasicStatsNoJobTask(org.apache.hadoop.hive.ql.stats.BasicStatsNoJobTask) FetchTask(org.apache.hadoop.hive.ql.exec.FetchTask) Task(org.apache.hadoop.hive.ql.exec.Task) StatsTask(org.apache.hadoop.hive.ql.exec.StatsTask) MoveTask(org.apache.hadoop.hive.ql.exec.MoveTask) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) FileStatus(org.apache.hadoop.fs.FileStatus) LazySimpleSerDe(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe) StatsTask(org.apache.hadoop.hive.ql.exec.StatsTask) ArrayList(java.util.ArrayList) LinkedHashMap(java.util.LinkedHashMap) ThriftFormatter(org.apache.hadoop.hive.serde2.thrift.ThriftFormatter) ThriftJDBCBinarySerDe(org.apache.hadoop.hive.serde2.thrift.ThriftJDBCBinarySerDe) NoOpFetchFormatter(org.apache.hadoop.hive.serde2.NoOpFetchFormatter) HashSet(java.util.HashSet) LinkedHashSet(java.util.LinkedHashSet) Context(org.apache.hadoop.hive.ql.Context) AnalyzeRewriteContext(org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.AnalyzeRewriteContext) LoadFileDesc(org.apache.hadoop.hive.ql.plan.LoadFileDesc) FileSinkOperator(org.apache.hadoop.hive.ql.exec.FileSinkOperator) CreateMaterializedViewDesc(org.apache.hadoop.hive.ql.ddl.view.create.CreateMaterializedViewDesc) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) ResultFileFormat(org.apache.hadoop.hive.conf.HiveConf.ResultFileFormat) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) CreateTableDesc(org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc) MaterializedViewUpdateDesc(org.apache.hadoop.hive.ql.ddl.view.materialized.update.MaterializedViewUpdateDesc) DDLWork(org.apache.hadoop.hive.ql.ddl.DDLWork) DefaultFetchFormatter(org.apache.hadoop.hive.serde2.DefaultFetchFormatter) FetchWork(org.apache.hadoop.hive.ql.plan.FetchWork) TableDesc(org.apache.hadoop.hive.ql.plan.TableDesc) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) CreateTableDesc(org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc)
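
The CTAS branch near the end of compile() is where a CreateTableDesc finally turns into an executable DDL task. The sketch below is a hedged, self-contained illustration of that wiring only; the helper class and method names are hypothetical, while DDLWork, TaskFactory, validate and addDependentTask are used exactly as in Example 3.

// Illustrative sketch - mirrors the CTAS branch of TaskCompiler.compile but is not the Hive method.
import java.util.Set;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.ddl.DDLWork;
import org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.parse.SemanticException;

final class CtasDdlTaskSketch {

    // Validate the CreateTableDesc, wrap it in a DDLWork, and make the resulting DDL task run
    // only after the query task that produces the table's data has finished.
    static Task<?> appendCreateTableTask(CreateTableDesc crtTblDesc, Set<ReadEntity> inputs,
            Set<WriteEntity> outputs, Task<?> queryLeafTask, HiveConf conf) throws SemanticException {
        crtTblDesc.validate(conf);
        Task<?> crtTblTask = TaskFactory.get(new DDLWork(inputs, outputs, crtTblDesc));
        queryLeafTask.addDependentTask(crtTblTask);
        return crtTblTask;
    }
}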

Example 4 with CreateTableDesc

Use of org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc in project hive by apache.

The class SemanticAnalyzer, method analyzeCreateTable.

/**
 * Analyze the create table command. If it is a regular create-table or
 * create-table-like statement, we create a DDLWork task and return null. If it is
 * a create-table-as-select, we record the necessary info, such as the SerDe and
 * storage format, in the QB and return the select statement's AST node, indicating
 * that the rest of the semantic analyzer needs to deal with the select statement
 * with respect to the SerDe and storage format.
 */
ASTNode analyzeCreateTable(ASTNode ast, QB qb, PlannerContext plannerCtx) throws SemanticException {
    TableName qualifiedTabName = getQualifiedTableName((ASTNode) ast.getChild(0));
    final String dbDotTab = qualifiedTabName.getNotEmptyDbTable();
    String likeTableName = null;
    List<FieldSchema> cols = new ArrayList<FieldSchema>();
    List<FieldSchema> partCols = new ArrayList<FieldSchema>();
    List<String> partColNames = new ArrayList<>();
    List<String> bucketCols = new ArrayList<String>();
    List<SQLPrimaryKey> primaryKeys = new ArrayList<SQLPrimaryKey>();
    List<SQLForeignKey> foreignKeys = new ArrayList<SQLForeignKey>();
    List<SQLUniqueConstraint> uniqueConstraints = new ArrayList<>();
    List<SQLNotNullConstraint> notNullConstraints = new ArrayList<>();
    List<SQLDefaultConstraint> defaultConstraints = new ArrayList<>();
    List<SQLCheckConstraint> checkConstraints = new ArrayList<>();
    List<Order> sortCols = new ArrayList<Order>();
    int numBuckets = -1;
    String comment = null;
    String location = null;
    Map<String, String> tblProps = null;
    boolean ifNotExists = false;
    boolean isExt = false;
    boolean isTemporary = false;
    boolean isManaged = false;
    boolean isMaterialization = false;
    boolean isTransactional = false;
    ASTNode selectStmt = null;
    // regular CREATE TABLE
    final int CREATE_TABLE = 0;
    // CREATE TABLE LIKE ... (CTLT)
    final int CTLT = 1;
    // CREATE TABLE AS SELECT ... (CTAS)
    final int CTAS = 2;
    // CREATE TRANSACTIONAL TABLE
    final int ctt = 3;
    int command_type = CREATE_TABLE;
    List<String> skewedColNames = new ArrayList<String>();
    List<List<String>> skewedValues = new ArrayList<List<String>>();
    Map<List<String>, String> listBucketColValuesMapping = new HashMap<List<String>, String>();
    boolean storedAsDirs = false;
    boolean isUserStorageFormat = false;
    boolean partitionTransformSpecExists = false;
    RowFormatParams rowFormatParams = new RowFormatParams();
    StorageFormat storageFormat = new StorageFormat(conf);
    LOG.info("Creating table " + dbDotTab + " position=" + ast.getCharPositionInLine());
    int numCh = ast.getChildCount();
    // set storage handler if default handler is provided in config
    String defaultStorageHandler = HiveConf.getVar(conf, HIVE_DEFAULT_STORAGE_HANDLER);
    if (defaultStorageHandler != null && !defaultStorageHandler.isEmpty()) {
        LOG.info("Default storage handler class detected in config. Using storage handler class if exists: '{}'", defaultStorageHandler);
        storageFormat.setStorageHandler(defaultStorageHandler);
        isUserStorageFormat = true;
    }
    /*
     * Check the 1st-level children and do simple semantic checks: 1) CTLT and
     * CTAS should not coexist. 2) CTLT or CTAS should not coexist with a column
     * list (target table schema). 3) CTAS does not support partitioning (for
     * now).
     */
    for (int num = 1; num < numCh; num++) {
        ASTNode child = (ASTNode) ast.getChild(num);
        if (storageFormat.fillStorageFormat(child)) {
            isUserStorageFormat = true;
            continue;
        }
        switch(child.getToken().getType()) {
            case HiveParser.TOK_IFNOTEXISTS:
                ifNotExists = true;
                break;
            case HiveParser.KW_EXTERNAL:
                isExt = true;
                break;
            case HiveParser.KW_MANAGED:
                isManaged = true;
                isTransactional = true;
                break;
            case HiveParser.KW_TEMPORARY:
                isTemporary = true;
                isMaterialization = MATERIALIZATION_MARKER.equals(child.getText());
                break;
            case HiveParser.KW_TRANSACTIONAL:
                isTransactional = true;
                command_type = ctt;
                break;
            case HiveParser.TOK_LIKETABLE:
                if (child.getChildCount() > 0) {
                    likeTableName = getUnescapedName((ASTNode) child.getChild(0));
                    if (likeTableName != null) {
                        if (command_type == CTAS) {
                            throw new SemanticException(ErrorMsg.CTAS_CTLT_COEXISTENCE.getMsg());
                        }
                        if (cols.size() != 0) {
                            throw new SemanticException(ErrorMsg.CTLT_COLLST_COEXISTENCE.getMsg());
                        }
                    }
                    command_type = CTLT;
                }
                break;
            case HiveParser.TOK_QUERY: // CTAS
                if (command_type == CTLT) {
                    throw new SemanticException(ErrorMsg.CTAS_CTLT_COEXISTENCE.getMsg());
                }
                if (cols.size() != 0) {
                    throw new SemanticException(ErrorMsg.CTAS_COLLST_COEXISTENCE.getMsg());
                }
                if (partCols.size() != 0 || bucketCols.size() != 0) {
                    boolean dynPart = HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING);
                    if (dynPart == false) {
                        throw new SemanticException(ErrorMsg.CTAS_PARCOL_COEXISTENCE.getMsg());
                    } else {
                        // TODO: support dynamic partition for CTAS
                        throw new SemanticException(ErrorMsg.CTAS_PARCOL_COEXISTENCE.getMsg());
                    }
                }
                if (!conf.getBoolVar(ConfVars.HIVE_CTAS_EXTERNAL_TABLES) && isExt) {
                    throw new SemanticException(ErrorMsg.CTAS_EXTTBL_COEXISTENCE.getMsg());
                }
                command_type = CTAS;
                if (plannerCtx != null) {
                    plannerCtx.setCTASToken(child);
                }
                selectStmt = child;
                break;
            case HiveParser.TOK_TABCOLLIST:
                cols = getColumns(child, true, ctx.getTokenRewriteStream(), primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, conf);
                break;
            case HiveParser.TOK_TABLECOMMENT:
                comment = unescapeSQLString(child.getChild(0).getText());
                break;
            case HiveParser.TOK_TABLEPARTCOLS:
                partCols = getColumns(child, false, ctx.getTokenRewriteStream(), primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, conf);
                if (hasConstraints(partCols, defaultConstraints, notNullConstraints, checkConstraints)) {
                    // TODO: these constraints should be supported for partition columns
                    throw new SemanticException(ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("NOT NULL,DEFAULT and CHECK Constraints are not allowed with " + "partition columns. "));
                }
                break;
            case HiveParser.TOK_TABLEPARTCOLSBYSPEC:
                List<PartitionTransformSpec> partitionTransformSpec = PartitionTransform.getPartitionTransformSpec(child);
                if (!SessionStateUtil.addResource(conf, hive_metastoreConstants.PARTITION_TRANSFORM_SPEC, partitionTransformSpec)) {
                    throw new SemanticException("Query state attached to Session state must be not null. " + "Partition transform metadata cannot be saved.");
                }
                partitionTransformSpecExists = true;
                break;
            case HiveParser.TOK_TABLEPARTCOLNAMES:
                partColNames = getColumnNames(child);
                break;
            case HiveParser.TOK_ALTERTABLE_BUCKETS:
                bucketCols = getColumnNames((ASTNode) child.getChild(0));
                if (child.getChildCount() == 2) {
                    numBuckets = Integer.parseInt(child.getChild(1).getText());
                } else {
                    sortCols = getColumnNamesOrder((ASTNode) child.getChild(1));
                    numBuckets = Integer.parseInt(child.getChild(2).getText());
                }
                break;
            case HiveParser.TOK_TABLEROWFORMAT:
                rowFormatParams.analyzeRowFormat(child);
                break;
            case HiveParser.TOK_TABLELOCATION:
                location = unescapeSQLString(child.getChild(0).getText());
                location = EximUtil.relativeToAbsolutePath(conf, location);
                inputs.add(toReadEntity(location));
                break;
            case HiveParser.TOK_TABLEPROPERTIES:
                tblProps = getProps((ASTNode) child.getChild(0));
                addPropertyReadEntry(tblProps, inputs);
                break;
            case HiveParser.TOK_TABLESERIALIZER:
                child = (ASTNode) child.getChild(0);
                storageFormat.setSerde(unescapeSQLString(child.getChild(0).getText()));
                if (child.getChildCount() == 2) {
                    readProps((ASTNode) (child.getChild(1).getChild(0)), storageFormat.getSerdeProps());
                }
                break;
            case HiveParser.TOK_TABLESKEWED:
                /**
                 * Throw an error if the user tries to use the DDL with
                 * hive.internal.ddl.list.bucketing.enable set to false.
                 */
                HiveConf hiveConf = SessionState.get().getConf();
                // skewed column names
                skewedColNames = SkewedTableUtils.analyzeSkewedTableDDLColNames(child);
                // skewed value
                skewedValues = SkewedTableUtils.analyzeDDLSkewedValues(child);
                // stored as directories
                storedAsDirs = analyzeStoredAdDirs(child);
                break;
            default:
                throw new AssertionError("Unknown token: " + child.getToken());
        }
    }
    HiveStorageHandler handler;
    try {
        handler = HiveUtils.getStorageHandler(conf, storageFormat.getStorageHandler());
    } catch (HiveException e) {
        throw new SemanticException("Failed to load storage handler:  " + e.getMessage());
    }
    if (handler != null) {
        if (partitionTransformSpecExists && !handler.supportsPartitionTransform()) {
            throw new SemanticException("Partition transform is not supported for " + handler.getClass().getName());
        }
        String fileFormatPropertyKey = handler.getFileFormatPropertyKey();
        if (fileFormatPropertyKey != null) {
            if (tblProps != null && tblProps.containsKey(fileFormatPropertyKey) && storageFormat.getSerdeProps() != null && storageFormat.getSerdeProps().containsKey(fileFormatPropertyKey)) {
                String fileFormat = tblProps.get(fileFormatPropertyKey);
                throw new SemanticException("Provide only one of the following: STORED BY " + fileFormat + " or WITH SERDEPROPERTIES('" + fileFormatPropertyKey + "'='" + fileFormat + "') or" + " TBLPROPERTIES('" + fileFormatPropertyKey + "'='" + fileFormat + "')");
            }
        }
    }
    if (command_type == CREATE_TABLE || command_type == CTLT || command_type == ctt) {
        queryState.setCommandType(HiveOperation.CREATETABLE);
    } else if (command_type == CTAS) {
        queryState.setCommandType(HiveOperation.CREATETABLE_AS_SELECT);
    } else {
        throw new SemanticException("Unrecognized command.");
    }
    if (isExt && ConstraintsUtils.hasEnabledOrValidatedConstraints(notNullConstraints, defaultConstraints, checkConstraints)) {
        throw new SemanticException(ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("Constraints are disallowed with External tables. " + "Only RELY is allowed."));
    }
    if (checkConstraints != null && !checkConstraints.isEmpty()) {
        ConstraintsUtils.validateCheckConstraint(cols, checkConstraints, ctx.getConf());
    }
    storageFormat.fillDefaultStorageFormat(isExt, false);
    // check for existence of table
    if (ifNotExists) {
        try {
            Table table = getTable(qualifiedTabName, false);
            if (table != null) {
                // table exists
                return null;
            }
        } catch (HiveException e) {
            // should not occur since the second parameter to getTable is false
            throw new IllegalStateException("Unexpected Exception thrown: " + e.getMessage(), e);
        }
    }
    if (isTemporary) {
        if (location == null) {
            // No location was specified: place the temp table in the session's scratch space;
            // it has the same life cycle as the tmp table
            try {
                // Generate a unique ID for temp table path.
                // This path will be fixed for the life of the temp table.
                location = SessionState.generateTempTableLocation(conf);
            } catch (MetaException err) {
                throw new SemanticException("Error while generating temp table path:", err);
            }
        }
    }
    switch(command_type) {
        case CREATE_TABLE: // REGULAR CREATE TABLE DDL
            if (!CollectionUtils.isEmpty(partColNames)) {
                throw new SemanticException("Partition columns can only declared using their name and types in regular CREATE TABLE statements");
            }
            tblProps = validateAndAddDefaultProperties(tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary, isTransactional, isManaged);
            addDbAndTabToOutputs(new String[] { qualifiedTabName.getDb(), qualifiedTabName.getTable() }, TableType.MANAGED_TABLE, isTemporary, tblProps, storageFormat);
            CreateTableDesc crtTblDesc = new CreateTableDesc(qualifiedTabName, isExt, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists, skewedColNames, skewedValues, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
            crtTblDesc.setStoredAsSubDirectories(storedAsDirs);
            crtTblDesc.setNullFormat(rowFormatParams.nullFormat);
            crtTblDesc.validate(conf);
            // outputs is empty, which means this create table happens in the current
            // database.
            rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtTblDesc)));
            break;
        case ctt: // CREATE TRANSACTIONAL TABLE
            if (isExt) {
                throw new SemanticException(qualifiedTabName.getTable() + " cannot be declared transactional because it's an external table");
            }
            tblProps = validateAndAddDefaultProperties(tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary, isTransactional, isManaged);
            addDbAndTabToOutputs(new String[] { qualifiedTabName.getDb(), qualifiedTabName.getTable() }, TableType.MANAGED_TABLE, false, tblProps, storageFormat);
            CreateTableDesc crtTranTblDesc = new CreateTableDesc(qualifiedTabName, isExt, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists, skewedColNames, skewedValues, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
            crtTranTblDesc.setStoredAsSubDirectories(storedAsDirs);
            crtTranTblDesc.setNullFormat(rowFormatParams.nullFormat);
            crtTranTblDesc.validate(conf);
            // outputs is empty, which means this create table happens in the current
            // database.
            rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtTranTblDesc)));
            break;
        case CTLT: // create table like <tbl_name>
            tblProps = validateAndAddDefaultProperties(tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary, isTransactional, isManaged);
            addDbAndTabToOutputs(new String[] { qualifiedTabName.getDb(), qualifiedTabName.getTable() }, TableType.MANAGED_TABLE, isTemporary, tblProps, storageFormat);
            Table likeTable = getTable(likeTableName, false);
            if (likeTable != null) {
                if (isTemporary || isExt) {
                    updateDefaultTblProps(likeTable.getParameters(), tblProps, new ArrayList<>(Arrays.asList(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES)));
                } else {
                    updateDefaultTblProps(likeTable.getParameters(), tblProps, null);
                }
            }
            CreateTableLikeDesc crtTblLikeDesc = new CreateTableLikeDesc(dbDotTab, isExt, isTemporary, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getSerdeProps(), tblProps, ifNotExists, likeTableName, isUserStorageFormat);
            rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtTblLikeDesc)));
            break;
        case CTAS: // create table as select
            if (isTemporary) {
                if (!ctx.isExplainSkipExecution() && !isMaterialization) {
                    SessionState ss = SessionState.get();
                    if (ss == null) {
                        throw new SemanticException("No current SessionState, cannot create temporary table " + qualifiedTabName.getNotEmptyDbTable());
                    }
                    Map<String, Table> tables = SessionHiveMetaStoreClient.getTempTablesForDatabase(qualifiedTabName.getDb(), qualifiedTabName.getTable());
                    if (tables != null && tables.containsKey(qualifiedTabName.getTable())) {
                        throw new SemanticException("Temporary table " + qualifiedTabName.getNotEmptyDbTable() + " already exists");
                    }
                }
            } else {
                // dumpTable is only used to check the conflict for non-temporary tables
                try {
                    Table dumpTable = db.newTable(dbDotTab);
                    if (null != db.getTable(dumpTable.getDbName(), dumpTable.getTableName(), false) && !ctx.isExplainSkipExecution()) {
                        throw new SemanticException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(dbDotTab));
                    }
                } catch (HiveException e) {
                    throw new SemanticException(e);
                }
            }
            if (location != null && location.length() != 0) {
                Path locPath = new Path(location);
                FileSystem curFs = null;
                FileStatus locStats = null;
                try {
                    curFs = locPath.getFileSystem(conf);
                    if (curFs != null) {
                        locStats = curFs.getFileStatus(locPath);
                    }
                    if (locStats != null && locStats.isDir()) {
                        FileStatus[] lStats = curFs.listStatus(locPath);
                        if (lStats != null && lStats.length != 0) {
                            // Don't throw an exception if the target location only contains the staging-dirs
                            for (FileStatus lStat : lStats) {
                                if (!lStat.getPath().getName().startsWith(HiveConf.getVar(conf, HiveConf.ConfVars.STAGINGDIR))) {
                                    throw new SemanticException(ErrorMsg.CTAS_LOCATION_NONEMPTY.getMsg(location));
                                }
                            }
                        }
                    }
                } catch (FileNotFoundException nfe) {
                // we will create the folder if it does not exist.
                } catch (IOException ioE) {
                    LOG.debug("Exception when validate folder", ioE);
                }
            }
            if (!CollectionUtils.isEmpty(partCols)) {
                throw new SemanticException("Partition columns can only declared using their names in CTAS statements");
            }
            tblProps = validateAndAddDefaultProperties(tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary, isTransactional, isManaged);
            tblProps.put(TABLE_IS_CTAS, "true");
            addDbAndTabToOutputs(new String[] { qualifiedTabName.getDb(), qualifiedTabName.getTable() }, TableType.MANAGED_TABLE, isTemporary, tblProps, storageFormat);
            tableDesc = new CreateTableDesc(qualifiedTabName, isExt, isTemporary, cols, partColNames, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists, skewedColNames, skewedValues, true, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
            tableDesc.setMaterialization(isMaterialization);
            tableDesc.setStoredAsSubDirectories(storedAsDirs);
            tableDesc.setNullFormat(rowFormatParams.nullFormat);
            qb.setTableDesc(tableDesc);
            return selectStmt;
        default:
            throw new SemanticException("Unrecognized command.");
    }
    return null;
}
Also used : SessionState(org.apache.hadoop.hive.ql.session.SessionState) FileStatus(org.apache.hadoop.fs.FileStatus) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) FileNotFoundException(java.io.FileNotFoundException) SQLNotNullConstraint(org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) FileSystem(org.apache.hadoop.fs.FileSystem) LinkedList(java.util.LinkedList) ArrayList(java.util.ArrayList) ValidTxnWriteIdList(org.apache.hadoop.hive.common.ValidTxnWriteIdList) ValidTxnList(org.apache.hadoop.hive.common.ValidTxnList) List(java.util.List) HiveConf(org.apache.hadoop.hive.conf.HiveConf) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) Order(org.apache.hadoop.hive.metastore.api.Order) SQLPrimaryKey(org.apache.hadoop.hive.metastore.api.SQLPrimaryKey) HiveStorageHandler(org.apache.hadoop.hive.ql.metadata.HiveStorageHandler) SQLCheckConstraint(org.apache.hadoop.hive.metastore.api.SQLCheckConstraint) TableName(org.apache.hadoop.hive.common.TableName) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) SQLForeignKey(org.apache.hadoop.hive.metastore.api.SQLForeignKey) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) SQLDefaultConstraint(org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint) CreateTableLikeDesc(org.apache.hadoop.hive.ql.ddl.table.create.like.CreateTableLikeDesc) CalciteSemanticException(org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) SQLUniqueConstraint(org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint) Path(org.apache.hadoop.fs.Path) SourceTable(org.apache.hadoop.hive.metastore.api.SourceTable) Table(org.apache.hadoop.hive.ql.metadata.Table) IOException(java.io.IOException) SQLUniqueConstraint(org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint) SQLCheckConstraint(org.apache.hadoop.hive.metastore.api.SQLCheckConstraint) SQLDefaultConstraint(org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint) DefaultConstraint(org.apache.hadoop.hive.ql.metadata.DefaultConstraint) SQLNotNullConstraint(org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) CreateTableDesc(org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc) DDLWork(org.apache.hadoop.hive.ql.ddl.DDLWork)
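
One detail worth pulling out of the CTAS branch above is the target-location check: CTAS refuses to write into a directory that already holds anything other than staging directories. Below is a hedged sketch of that check as a standalone helper; the class and method names are hypothetical, while the FileSystem, FileStatus, ErrorMsg and STAGINGDIR calls mirror the code in Example 4.

// Illustrative helper - not a Hive class. It reproduces the location check from analyzeCreateTable's
// CTAS branch; the IOException handling is simplified to a comment rather than debug logging.
import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.parse.SemanticException;

final class CtasLocationCheckSketch {

    // Throws if the CTAS target location exists and contains files other than staging directories.
    static void ensureLocationUsable(String location, HiveConf conf) throws SemanticException {
        Path locPath = new Path(location);
        try {
            FileSystem fs = locPath.getFileSystem(conf);
            FileStatus locStats = fs.getFileStatus(locPath);
            if (locStats != null && locStats.isDirectory()) {
                String stagingPrefix = HiveConf.getVar(conf, HiveConf.ConfVars.STAGINGDIR);
                for (FileStatus child : fs.listStatus(locPath)) {
                    if (!child.getPath().getName().startsWith(stagingPrefix)) {
                        throw new SemanticException(ErrorMsg.CTAS_LOCATION_NONEMPTY.getMsg(location));
                    }
                }
            }
        } catch (FileNotFoundException nfe) {
            // The directory does not exist yet; it will be created for the new table.
        } catch (IOException ioe) {
            // The analyzer only logs this at debug level and proceeds; we do the same here.
        }
    }
}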

Example 5 with CreateTableDesc

Use of org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc in project hive by apache.

The class SemanticAnalyzer, method genFileSinkPlan.

@SuppressWarnings("nls")
protected Operator genFileSinkPlan(String dest, QB qb, Operator input) throws SemanticException {
    RowResolver inputRR = opParseCtx.get(input).getRowResolver();
    QBMetaData qbm = qb.getMetaData();
    Integer destType = qbm.getDestTypeForAlias(dest);
    // destination table if any
    Table destinationTable = null;
    // true for full ACID table and MM table
    boolean destTableIsTransactional;
    // should the destination table be written to using ACID
    boolean destTableIsFullAcid;
    // should we add files directly to the final path
    boolean isDirectInsert = false;
    AcidUtils.Operation acidOperation = null;
    boolean destTableIsTemporary = false;
    boolean destTableIsMaterialization = false;
    // destination partition if any
    Partition destinationPartition = null;
    // the intermediate destination directory
    Path queryTmpdir = null;
    String moveTaskId = null;
    // the final destination directory
    Path destinationPath = null;
    TableDesc tableDescriptor = null;
    StructObjectInspector specificRowObjectInspector = null;
    int currentTableId = 0;
    boolean isLocal = false;
    SortBucketRSCtx rsCtx = new SortBucketRSCtx();
    DynamicPartitionCtx dpCtx = null;
    LoadTableDesc ltd = null;
    ListBucketingCtx lbCtx = null;
    Map<String, String> partSpec = null;
    boolean isMmTable = false, isMmCreate = false, isNonNativeTable = false;
    Long writeId = null;
    HiveTxnManager txnMgr = getTxnMgr();
    switch(destType.intValue()) {
        case QBMetaData.DEST_TABLE:
            {
                destinationTable = qbm.getDestTableForAlias(dest);
                destTableIsTransactional = AcidUtils.isTransactionalTable(destinationTable);
                destTableIsFullAcid = AcidUtils.isFullAcidTable(destinationTable);
                destTableIsTemporary = destinationTable.isTemporary();
                // Is the user trying to insert into an external table?
                checkExternalTable(destinationTable);
                partSpec = qbm.getPartSpecForAlias(dest);
                destinationPath = destinationTable.getPath();
                checkImmutableTable(qb, destinationTable, destinationPath, false);
                // check for partition
                List<FieldSchema> parts = destinationTable.getPartitionKeys();
                if (parts != null && parts.size() > 0) {
                    // table is partitioned
                    if (partSpec == null || partSpec.size() == 0) {
                        // user did NOT specify partition
                        throw new SemanticException(generateErrorMessage(qb.getParseInfo().getDestForClause(dest), ErrorMsg.NEED_PARTITION_ERROR.getMsg()));
                    }
                    dpCtx = qbm.getDPCtx(dest);
                    if (dpCtx == null) {
                        destinationTable.validatePartColumnNames(partSpec, false);
                        dpCtx = new DynamicPartitionCtx(partSpec, conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE));
                        qbm.setDPCtx(dest, dpCtx);
                    }
                }
                // Check for dynamic partitions.
                dpCtx = checkDynPart(qb, qbm, destinationTable, partSpec, dest);
                if (dpCtx != null && dpCtx.getSPPath() != null) {
                    destinationPath = new Path(destinationTable.getPath(), dpCtx.getSPPath());
                }
                isNonNativeTable = destinationTable.isNonNative();
                isMmTable = AcidUtils.isInsertOnlyTable(destinationTable.getParameters());
                AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID;
                // this table_desc does not contain the partitioning columns
                tableDescriptor = Utilities.getTableDesc(destinationTable);
                if (!isNonNativeTable) {
                    if (destTableIsTransactional) {
                        acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
                    }
                }
                isDirectInsert = isDirectInsert(destTableIsFullAcid, acidOp);
                acidOperation = acidOp;
                queryTmpdir = getTmpDir(isNonNativeTable, isMmTable, isDirectInsert, destinationPath);
                moveTaskId = getMoveTaskId();
                if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
                    Utilities.FILE_OP_LOGGER.trace("create filesink w/DEST_TABLE specifying " + queryTmpdir + " from " + destinationPath);
                }
                if (dpCtx != null) {
                    // set the root of the temporary path under which the dynamic partition columns will be written
                    dpCtx.setRootPath(queryTmpdir);
                }
                // Add NOT NULL constraint check
                input = genConstraintsPlan(dest, qb, input);
                if (!qb.getIsQuery()) {
                    input = genConversionSelectOperator(dest, qb, input, destinationTable.getDeserializer(), dpCtx, parts);
                }
                if (destinationTable.isMaterializedView() && mvRebuildMode == MaterializationRebuildMode.INSERT_OVERWRITE_REBUILD) {
                    // Data organization (DISTRIBUTED, SORTED, CLUSTERED) for materialized view
                    // TODO: We only do this for a full rebuild
                    String sortColsStr = destinationTable.getProperty(Constants.MATERIALIZED_VIEW_SORT_COLUMNS);
                    String distributeColsStr = destinationTable.getProperty(Constants.MATERIALIZED_VIEW_DISTRIBUTE_COLUMNS);
                    if (sortColsStr != null || distributeColsStr != null) {
                        input = genMaterializedViewDataOrgPlan(destinationTable, sortColsStr, distributeColsStr, inputRR, input);
                    }
                } else {
                    // Add sorting/bucketing if needed
                    input = genBucketingSortingDest(dest, input, qb, tableDescriptor, destinationTable, rsCtx);
                }
                idToTableNameMap.put(String.valueOf(destTableId), destinationTable.getTableName());
                currentTableId = destTableId;
                destTableId++;
                // NOTE: specify Dynamic partitions in dest_tab for WriteEntity
                if (!isNonNativeTable || destinationTable.getStorageHandler().commitInMoveTask()) {
                    if (destTableIsTransactional) {
                        acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
                        checkAcidConstraints();
                    } else {
                        lbCtx = constructListBucketingCtx(destinationTable.getSkewedColNames(), destinationTable.getSkewedColValues(), destinationTable.getSkewedColValueLocationMaps(), destinationTable.isStoredAsSubDirectories());
                    }
                    try {
                        if (ctx.getExplainConfig() != null) {
                            // For explain plan, txn won't be opened and doesn't make sense to allocate write id
                            writeId = null;
                        } else {
                            if (isMmTable) {
                                writeId = txnMgr.getTableWriteId(destinationTable.getDbName(), destinationTable.getTableName());
                            } else {
                                writeId = acidOp == Operation.NOT_ACID ? null : txnMgr.getTableWriteId(destinationTable.getDbName(), destinationTable.getTableName());
                            }
                        }
                    } catch (LockException ex) {
                        throw new SemanticException("Failed to allocate write Id", ex);
                    }
                    boolean isReplace = !qb.getParseInfo().isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName());
                    ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, dpCtx, acidOp, isReplace, writeId);
                    if (writeId != null) {
                        ltd.setStmtId(txnMgr.getCurrentStmtId());
                    }
                    ltd.setMoveTaskId(moveTaskId);
                    // For Acid table, Insert Overwrite shouldn't replace the table content. We keep the old
                    // deltas and base and leave them up to the cleaner to clean up
                    boolean isInsertInto = qb.getParseInfo().isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName());
                    LoadFileType loadType;
                    if (isDirectInsert) {
                        loadType = LoadFileType.IGNORE;
                    } else if (!isInsertInto && !destTableIsTransactional) {
                        loadType = LoadFileType.REPLACE_ALL;
                    } else {
                        loadType = LoadFileType.KEEP_EXISTING;
                    }
                    ltd.setLoadFileType(loadType);
                    ltd.setInsertOverwrite(!isInsertInto);
                    ltd.setIsDirectInsert(isDirectInsert);
                    ltd.setLbCtx(lbCtx);
                    loadTableWork.add(ltd);
                } else {
                    // This is a non-native table.
                    // We need to set stats as inaccurate.
                    setStatsForNonNativeTable(destinationTable.getDbName(), destinationTable.getTableName());
                    // true if it is insert overwrite.
                    boolean overwrite = !qb.getParseInfo().isInsertIntoTable(String.format("%s.%s", destinationTable.getDbName(), destinationTable.getTableName()));
                    createPreInsertDesc(destinationTable, overwrite);
                    ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, partSpec == null ? ImmutableMap.of() : partSpec);
                    ltd.setInsertOverwrite(overwrite);
                    ltd.setLoadFileType(overwrite ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING);
                }
                if (destinationTable.isMaterializedView()) {
                    materializedViewUpdateDesc = new MaterializedViewUpdateDesc(destinationTable.getFullyQualifiedName(), false, false, true);
                }
                WriteEntity output = generateTableWriteEntity(dest, destinationTable, partSpec, ltd, dpCtx);
                ctx.getLoadTableOutputMap().put(ltd, output);
                break;
            }
        case QBMetaData.DEST_PARTITION:
            {
                destinationPartition = qbm.getDestPartitionForAlias(dest);
                destinationTable = destinationPartition.getTable();
                destTableIsTransactional = AcidUtils.isTransactionalTable(destinationTable);
                destTableIsFullAcid = AcidUtils.isFullAcidTable(destinationTable);
                checkExternalTable(destinationTable);
                Path partPath = destinationPartition.getDataLocation();
                checkImmutableTable(qb, destinationTable, partPath, true);
                // Previous behavior (HIVE-1707) used to replace the partition's dfs with the table's dfs.
                // The changes in HIVE-19891 appear to no longer support that behavior.
                destinationPath = partPath;
                if (MetaStoreUtils.isArchived(destinationPartition.getTPartition())) {
                    try {
                        String conflictingArchive = ArchiveUtils.conflictingArchiveNameOrNull(db, destinationTable, destinationPartition.getSpec());
                        String message = String.format("Insert conflict with existing archive: %s", conflictingArchive);
                        throw new SemanticException(message);
                    } catch (SemanticException err) {
                        throw err;
                    } catch (HiveException err) {
                        throw new SemanticException(err);
                    }
                }
                isNonNativeTable = destinationTable.isNonNative();
                isMmTable = AcidUtils.isInsertOnlyTable(destinationTable.getParameters());
                AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID;
                // this table_desc does not contain the partitioning columns
                tableDescriptor = Utilities.getTableDesc(destinationTable);
                if (!isNonNativeTable) {
                    if (destTableIsTransactional) {
                        acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
                    }
                }
                isDirectInsert = isDirectInsert(destTableIsFullAcid, acidOp);
                acidOperation = acidOp;
                queryTmpdir = getTmpDir(isNonNativeTable, isMmTable, isDirectInsert, destinationPath);
                moveTaskId = getMoveTaskId();
                if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
                    Utilities.FILE_OP_LOGGER.trace("create filesink w/DEST_PARTITION specifying " + queryTmpdir + " from " + destinationPath);
                }
                // Add NOT NULL constraint check
                input = genConstraintsPlan(dest, qb, input);
                if (!qb.getIsQuery()) {
                    input = genConversionSelectOperator(dest, qb, input, destinationTable.getDeserializer(), dpCtx, null);
                }
                if (destinationTable.isMaterializedView() && mvRebuildMode == MaterializationRebuildMode.INSERT_OVERWRITE_REBUILD) {
                    // Data organization (DISTRIBUTED, SORTED, CLUSTERED) for materialized view
                    // TODO: We only do this for a full rebuild
                    String sortColsStr = destinationTable.getProperty(Constants.MATERIALIZED_VIEW_SORT_COLUMNS);
                    String distributeColsStr = destinationTable.getProperty(Constants.MATERIALIZED_VIEW_DISTRIBUTE_COLUMNS);
                    if (sortColsStr != null || distributeColsStr != null) {
                        input = genMaterializedViewDataOrgPlan(destinationTable, sortColsStr, distributeColsStr, inputRR, input);
                    }
                } else {
                    // Add sorting/bucketing if needed
                    input = genBucketingSortingDest(dest, input, qb, tableDescriptor, destinationTable, rsCtx);
                }
                idToTableNameMap.put(String.valueOf(destTableId), destinationTable.getTableName());
                currentTableId = destTableId;
                destTableId++;
                if (destTableIsTransactional) {
                    acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
                    checkAcidConstraints();
                } else {
                    // Transactional tables can't be list bucketed or have skewed cols
                    lbCtx = constructListBucketingCtx(destinationPartition.getSkewedColNames(), destinationPartition.getSkewedColValues(), destinationPartition.getSkewedColValueLocationMaps(), destinationPartition.isStoredAsSubDirectories());
                }
                try {
                    if (ctx.getExplainConfig() != null) {
                        // For an explain plan no txn is opened, so it doesn't make sense to allocate a write id
                        writeId = null;
                    } else {
                        if (isMmTable) {
                            writeId = txnMgr.getTableWriteId(destinationTable.getDbName(), destinationTable.getTableName());
                        } else {
                            writeId = (acidOp == Operation.NOT_ACID) ? null : txnMgr.getTableWriteId(destinationTable.getDbName(), destinationTable.getTableName());
                        }
                    }
                } catch (LockException ex) {
                    throw new SemanticException("Failed to allocate write Id", ex);
                }
                ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, destinationPartition.getSpec(), acidOp, writeId);
                if (writeId != null) {
                    ltd.setStmtId(txnMgr.getCurrentStmtId());
                }
                // In the current context for generating the FileSink operator, the statement is either
                // INSERT INTO or INSERT OVERWRITE, so negating isDestToOpTypeInsertOverwrite below is correct.
                boolean isInsertInto = !qb.getParseInfo().isDestToOpTypeInsertOverwrite(dest);
                // For an ACID table, Insert Overwrite shouldn't replace the table content. We keep the old
                // deltas and base files and leave them to the cleaner to clean up.
                LoadFileType loadType;
                if (isDirectInsert) {
                    loadType = LoadFileType.IGNORE;
                } else if (!isInsertInto && !destTableIsTransactional) {
                    loadType = LoadFileType.REPLACE_ALL;
                } else {
                    loadType = LoadFileType.KEEP_EXISTING;
                }
                ltd.setLoadFileType(loadType);
                ltd.setInsertOverwrite(!isInsertInto);
                ltd.setIsDirectInsert(isDirectInsert);
                ltd.setLbCtx(lbCtx);
                ltd.setMoveTaskId(moveTaskId);
                loadTableWork.add(ltd);
                if (!outputs.add(new WriteEntity(destinationPartition, determineWriteType(ltd, dest)))) {
                    throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES.getMsg(destinationTable.getTableName() + "@" + destinationPartition.getName()));
                }
                break;
            }
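        // DEST_LOCAL_FILE / DEST_DFS_FILE: INSERT ... DIRECTORY targets as well as CTAS and
        // CREATE MATERIALIZED VIEW. For CTAS/CMV the destination table or view does not exist yet,
        // so its schema and serde come from the CREATE statement (qb.getTableDesc() / qb.getViewDesc())
        // rather than from the metastore.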
        case QBMetaData.DEST_LOCAL_FILE:
            isLocal = true;
        // fall through
        case QBMetaData.DEST_DFS_FILE:
            {
                destinationPath = getDestinationFilePath(qbm.getDestFileForAlias(dest), isMmTable);
                // CTAS case: the file output format and serde are defined by the create
                // table command rather than taking the default value
                List<FieldSchema> fieldSchemas = null;
                List<FieldSchema> partitionColumns = null;
                List<String> partitionColumnNames = null;
                List<FieldSchema> sortColumns = null;
                List<String> sortColumnNames = null;
                List<FieldSchema> distributeColumns = null;
                List<String> distributeColumnNames = null;
                List<ColumnInfo> fileSinkColInfos = null;
                List<ColumnInfo> sortColInfos = null;
                List<ColumnInfo> distributeColInfos = null;
                TableName tableName = null;
                Map<String, String> tblProps = null;
                CreateTableDesc tblDesc = qb.getTableDesc();
                CreateMaterializedViewDesc viewDesc = qb.getViewDesc();
                if (tblDesc != null) {
                    fieldSchemas = new ArrayList<>();
                    partitionColumns = new ArrayList<>();
                    partitionColumnNames = tblDesc.getPartColNames();
                    fileSinkColInfos = new ArrayList<>();
                    destTableIsTemporary = tblDesc.isTemporary();
                    destTableIsMaterialization = tblDesc.isMaterialization();
                    tableName = TableName.fromString(tblDesc.getDbTableName(), null, tblDesc.getDatabaseName());
                    tblProps = tblDesc.getTblProps();
                } else if (viewDesc != null) {
                    fieldSchemas = new ArrayList<>();
                    partitionColumns = new ArrayList<>();
                    partitionColumnNames = viewDesc.getPartColNames();
                    sortColumns = new ArrayList<>();
                    sortColumnNames = viewDesc.getSortColNames();
                    distributeColumns = new ArrayList<>();
                    distributeColumnNames = viewDesc.getDistributeColNames();
                    fileSinkColInfos = new ArrayList<>();
                    sortColInfos = new ArrayList<>();
                    distributeColInfos = new ArrayList<>();
                    destTableIsTemporary = false;
                    destTableIsMaterialization = false;
                    tableName = HiveTableName.ofNullableWithNoDefault(viewDesc.getViewName());
                    tblProps = viewDesc.getTblProps();
                }
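                // tblProps now reflects the CREATE TABLE / CREATE MATERIALIZED VIEW properties, which
                // determine below whether the new table is transactional and/or insert-only (MM).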
                destTableIsTransactional = tblProps != null && AcidUtils.isTablePropertyTransactional(tblProps);
                if (destTableIsTransactional) {
                    try {
                        if (ctx.getExplainConfig() != null) {
                            // For an explain plan no txn is opened, so it doesn't make sense to allocate a write id
                            writeId = 0L;
                        } else {
                            writeId = txnMgr.getTableWriteId(tableName.getDb(), tableName.getTable());
                        }
                    } catch (LockException ex) {
                        throw new SemanticException("Failed to allocate write Id", ex);
                    }
                    if (AcidUtils.isInsertOnlyTable(tblProps, true)) {
                        isMmTable = isMmCreate = true;
                        if (tblDesc != null) {
                            tblDesc.setInitialMmWriteId(writeId);
                        } else {
                            viewDesc.setInitialMmWriteId(writeId);
                        }
                    }
                }
                if (isLocal) {
                    assert !isMmTable;
                    // for local directory - we always write to map-red intermediate
                    // store and then copy to local fs
                    queryTmpdir = ctx.getMRTmpPath();
                } else {
                    // No copy is required; we may want to revisit this policy in the future.
                    try {
                        Path qPath = FileUtils.makeQualified(destinationPath, conf);
                        queryTmpdir = isMmTable ? qPath : ctx.getTempDirForFinalJobPath(qPath);
                        if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
                            Utilities.FILE_OP_LOGGER.trace("Setting query directory " + queryTmpdir + " from " + destinationPath + " (" + isMmTable + ")");
                        }
                    } catch (Exception e) {
                        throw new SemanticException("Error creating temporary folder on: " + destinationPath, e);
                    }
                }
                // Check for dynamic partitions.
                final String cols, colTypes;
                final boolean isPartitioned;
                if (dpCtx != null) {
                    throw new SemanticException("Dynamic partition context has already been created, this should not happen");
                }
                if (!CollectionUtils.isEmpty(partitionColumnNames)) {
                    ColsAndTypes ct = deriveFileSinkColTypes(inputRR, partitionColumnNames, sortColumnNames, distributeColumnNames, fieldSchemas, partitionColumns, sortColumns, distributeColumns, fileSinkColInfos, sortColInfos, distributeColInfos);
                    cols = ct.cols;
                    colTypes = ct.colTypes;
                    dpCtx = new DynamicPartitionCtx(partitionColumnNames, conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE));
                    qbm.setDPCtx(dest, dpCtx);
                    // set the root of the temporary path where dynamic partition columns will be populated
                    dpCtx.setRootPath(queryTmpdir);
                    isPartitioned = true;
                } else {
                    ColsAndTypes ct = deriveFileSinkColTypes(inputRR, sortColumnNames, distributeColumnNames, fieldSchemas, sortColumns, distributeColumns, sortColInfos, distributeColInfos);
                    cols = ct.cols;
                    colTypes = ct.colTypes;
                    isPartitioned = false;
                }
                // update the create table descriptor with the resulting schema.
                if (tblDesc != null) {
                    tblDesc.setCols(new ArrayList<>(fieldSchemas));
                    tblDesc.setPartCols(new ArrayList<>(partitionColumns));
                } else if (viewDesc != null) {
                    viewDesc.setSchema(new ArrayList<>(fieldSchemas));
                    viewDesc.setPartCols(new ArrayList<>(partitionColumns));
                    if (viewDesc.isOrganized()) {
                        viewDesc.setSortCols(new ArrayList<>(sortColumns));
                        viewDesc.setDistributeCols(new ArrayList<>(distributeColumns));
                    }
                }
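                // When this is a CTAS or CREATE MATERIALIZED VIEW, the descriptor now carries the schema
                // derived from the query; it is used below to build the table descriptor and, via toTable(),
                // the destination Table object.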
                boolean isDestTempFile = true;
                if (!ctx.isMRTmpFileURI(destinationPath.toUri().toString()) && !ctx.isResultCacheDir(destinationPath)) {
                    // not a temp dir and not a result cache dir
                    idToTableNameMap.put(String.valueOf(destTableId), destinationPath.toUri().toString());
                    currentTableId = destTableId;
                    destTableId++;
                    isDestTempFile = false;
                }
                if (tblDesc == null) {
                    if (viewDesc != null) {
                        tableDescriptor = PlanUtils.getTableDesc(viewDesc, cols, colTypes);
                    } else if (qb.getIsQuery()) {
                        Class<? extends Deserializer> serdeClass = LazySimpleSerDe.class;
                        String fileFormat = conf.getResultFileFormat().toString();
                        if (SessionState.get().getIsUsingThriftJDBCBinarySerDe()) {
                            serdeClass = ThriftJDBCBinarySerDe.class;
                            fileFormat = ResultFileFormat.SEQUENCEFILE.toString();
                            // Set the fetch formatter to be a no-op for the ListSinkOperator, since we'll
                            // write out formatted thrift objects to SequenceFile
                            conf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, NoOpFetchFormatter.class.getName());
                        } else if (fileFormat.equals(PlanUtils.LLAP_OUTPUT_FORMAT_KEY)) {
                            // If this output format is Llap, check to see if Arrow is requested
                            boolean useArrow = HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_OUTPUT_FORMAT_ARROW);
                            serdeClass = useArrow ? ArrowColumnarBatchSerDe.class : LazyBinarySerDe2.class;
                        }
                        tableDescriptor = PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, fileFormat, serdeClass);
                    } else {
                        tableDescriptor = PlanUtils.getDefaultTableDesc(qb.getDirectoryDesc(), cols, colTypes);
                    }
                } else {
                    tableDescriptor = PlanUtils.getTableDesc(tblDesc, cols, colTypes);
                }
                // if available, set location in table desc properties
                if (tblDesc != null && tblDesc.getLocation() != null && tableDescriptor != null && !tableDescriptor.getProperties().containsKey(hive_metastoreConstants.META_TABLE_LOCATION)) {
                    tableDescriptor.getProperties().setProperty(hive_metastoreConstants.META_TABLE_LOCATION, tblDesc.getLocation());
                }
                // We need a specific rowObjectInspector in this case
                try {
                    specificRowObjectInspector = (StructObjectInspector) tableDescriptor.getDeserializer(conf).getObjectInspector();
                } catch (Exception e) {
                    throw new SemanticException(e.getMessage(), e);
                }
                boolean isDfsDir = (destType == QBMetaData.DEST_DFS_FILE);
                try {
                    destinationTable = tblDesc != null ? tblDesc.toTable(conf) : viewDesc != null ? viewDesc.toTable(conf) : null;
                } catch (HiveException e) {
                    throw new SemanticException(e);
                }
                destTableIsFullAcid = AcidUtils.isFullAcidTable(destinationTable);
                // Data organization (DISTRIBUTED, SORTED, CLUSTERED) for materialized view
                if (viewDesc != null && viewDesc.isOrganized()) {
                    input = genMaterializedViewDataOrgPlan(sortColInfos, distributeColInfos, inputRR, input);
                }
                moveTaskId = getMoveTaskId();
                if (isPartitioned) {
                    // Create a SELECT that may reorder the columns if needed
                    RowResolver rowResolver = new RowResolver();
                    List<ExprNodeDesc> columnExprs = new ArrayList<>();
                    List<String> colNames = new ArrayList<>();
                    Map<String, ExprNodeDesc> colExprMap = new HashMap<>();
                    for (int i = 0; i < fileSinkColInfos.size(); i++) {
                        ColumnInfo ci = fileSinkColInfos.get(i);
                        ExprNodeDesc columnExpr = new ExprNodeColumnDesc(ci);
                        String name = getColumnInternalName(i);
                        rowResolver.put("", name, new ColumnInfo(name, columnExpr.getTypeInfo(), "", false));
                        columnExprs.add(columnExpr);
                        colNames.add(name);
                        colExprMap.put(name, columnExpr);
                    }
                    input = putOpInsertMap(OperatorFactory.getAndMakeChild(new SelectDesc(columnExprs, colNames), new RowSchema(rowResolver.getColumnInfos()), input), rowResolver);
                    input.setColumnExprMap(colExprMap);
                    // If this is a partitioned CTAS or MV statement, we are going to create a LoadTableDesc
                    // object. Although the table does not exist in metastore, we will swap the CreateTableTask
                    // and MoveTask resulting from this LoadTable so in this specific case, first we create
                    // the metastore table, then we move and commit the partitions. At least for the time being,
                    // this order needs to be enforced because metastore expects a table to exist before we can
                    // add any partitions to it.
                    isNonNativeTable = tableDescriptor.isNonNative();
                    if (!isNonNativeTable || destinationTable.getStorageHandler().commitInMoveTask()) {
                        AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID;
                        if (destTableIsTransactional) {
                            acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
                            checkAcidConstraints();
                        }
                        // isReplace is false in case a concurrent operation is executed
                        ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, dpCtx, acidOp, false, writeId);
                        if (writeId != null) {
                            ltd.setStmtId(txnMgr.getCurrentStmtId());
                        }
                        ltd.setLoadFileType(LoadFileType.KEEP_EXISTING);
                        ltd.setInsertOverwrite(false);
                        loadTableWork.add(ltd);
                    } else {
                        // This is a non-native table.
                        // We need to set stats as inaccurate.
                        setStatsForNonNativeTable(tableDescriptor.getDbName(), tableDescriptor.getTableName());
                        ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, dpCtx.getPartSpec());
                        ltd.setInsertOverwrite(false);
                        ltd.setLoadFileType(LoadFileType.KEEP_EXISTING);
                    }
                    ltd.setMoveTaskId(moveTaskId);
                    ltd.setMdTable(destinationTable);
                    WriteEntity output = generateTableWriteEntity(dest, destinationTable, dpCtx.getPartSpec(), ltd, dpCtx);
                    ctx.getLoadTableOutputMap().put(ltd, output);
                } else {
                    // Create LFD even for MM CTAS - it's a no-op move, but it still seems to be used for stats.
                    // Note: an earlier version passed 'transactional' here, and one before that 'acid'.
                    LoadFileDesc loadFileDesc = new LoadFileDesc(tblDesc, viewDesc, queryTmpdir, destinationPath, isDfsDir, cols, colTypes,
                            destTableIsFullAcid ? Operation.INSERT : Operation.NOT_ACID, isMmCreate);
                    loadFileDesc.setMoveTaskId(moveTaskId);
                    loadFileWork.add(loadFileDesc);
                    try {
                        Path qualifiedPath = destinationPath.getFileSystem(conf).makeQualified(destinationPath);
                        if (!outputs.add(new WriteEntity(qualifiedPath, !isDfsDir, isDestTempFile))) {
                            throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES.getMsg(destinationPath.toUri().toString()));
                        }
                    } catch (IOException ex) {
                        throw new SemanticException("Error while getting the full qualified path for the given directory: " + ex.getMessage());
                    }
                }
                break;
            }
        default:
            throw new SemanticException("Unknown destination type: " + destType);
    }
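    // The switch above has resolved the destination path, the table descriptor and, for table and
    // partition targets, the LoadTableDesc. The remaining code derives the output row schema and
    // builds the FileSinkOperator itself.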
    inputRR = opParseCtx.get(input).getRowResolver();
    List<ColumnInfo> vecCol = new ArrayList<ColumnInfo>();
    if (updating(dest) || deleting(dest)) {
        vecCol.add(new ColumnInfo(VirtualColumn.ROWID.getName(), VirtualColumn.ROWID.getTypeInfo(), "", true));
    } else {
        try {
            // If we already have a specific inspector (view or directory as a target) use that
            // Otherwise use the table deserializer to get the inspector
            StructObjectInspector rowObjectInspector = specificRowObjectInspector != null ? specificRowObjectInspector : (StructObjectInspector) destinationTable.getDeserializer().getObjectInspector();
            List<? extends StructField> fields = rowObjectInspector.getAllStructFieldRefs();
            for (StructField field : fields) {
                vecCol.add(new ColumnInfo(field.getFieldName(), TypeInfoUtils.getTypeInfoFromObjectInspector(field.getFieldObjectInspector()), "", false));
            }
        } catch (Exception e) {
            throw new SemanticException(e.getMessage(), e);
        }
    }
    RowSchema fsRS = new RowSchema(vecCol);
    // The output files of a FileSink can be merged if they are either not being written to a table
    // or are being written to a table that is neither bucketed nor sorted
    boolean canBeMerged = (destinationTable == null || !((destinationTable.getNumBuckets() > 0) || (destinationTable.getSortCols() != null && destinationTable.getSortCols().size() > 0)));
    // If this table is working with ACID semantics, turn off merging
    canBeMerged &= !destTableIsFullAcid;
    // Generate the partition columns from the parent input
    if (destType == QBMetaData.DEST_TABLE || destType == QBMetaData.DEST_PARTITION) {
        genPartnCols(dest, input, qb, tableDescriptor, destinationTable, rsCtx);
    }
    FileSinkDesc fileSinkDesc = createFileSinkDesc(dest, tableDescriptor,
            destinationPartition, destinationPath, currentTableId, destTableIsFullAcid, // these four were '1/4 acid'
            destTableIsTemporary, destTableIsMaterialization, queryTmpdir, rsCtx, dpCtx, lbCtx, fsRS,
            canBeMerged, destinationTable, writeId, isMmCreate, destType, qb, isDirectInsert, acidOperation, moveTaskId);
    if (isMmCreate) {
        // Add FSD so that the LoadTask compilation could fix up its path to avoid the move.
        if (tableDesc != null) {
            tableDesc.setWriter(fileSinkDesc);
        } else {
            createVwDesc.setWriter(fileSinkDesc);
        }
    }
    if (fileSinkDesc.getInsertOverwrite()) {
        if (ltd != null) {
            ltd.setInsertOverwrite(true);
        }
    }
    if (null != tableDescriptor && useBatchingSerializer(tableDescriptor.getSerdeClassName())) {
        fileSinkDesc.setIsUsingBatchingSerDe(true);
    } else {
        fileSinkDesc.setIsUsingBatchingSerDe(false);
    }
    Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild(fileSinkDesc, fsRS, input), inputRR);
    // Lineage is handled unless this is a direct insert with a non-INSERT ACID operation, e.g. in case of a merge statement.
    if (!isDirectInsert || acidOperation == AcidUtils.Operation.INSERT) {
        handleLineage(ltd, output);
    }
    setWriteIdForSurrogateKeys(ltd, input);
    LOG.debug("Created FileSink Plan for clause: {}dest_path: {} row schema: {}", dest, destinationPath, inputRR);
    FileSinkOperator fso = (FileSinkOperator) output;
    fso.getConf().setTable(destinationTable);
    // Auto-gather column statistics when stats autogathering is enabled and the destination is a
    // non-temporary table or partition (insert overwrite or insert into), or a CTAS/CMV target
    if (conf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER) && conf.getBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER) && destinationTable != null && (!destinationTable.isNonNative() || destinationTable.getStorageHandler().commitInMoveTask()) && !destTableIsTemporary && !destTableIsMaterialization && ColumnStatsAutoGatherContext.canRunAutogatherStats(fso)) {
        if (destType == QBMetaData.DEST_TABLE) {
            genAutoColumnStatsGatheringPipeline(destinationTable, partSpec, input, qb.getParseInfo().isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName()), false);
        } else if (destType == QBMetaData.DEST_PARTITION) {
            genAutoColumnStatsGatheringPipeline(destinationTable, destinationPartition.getSpec(), input, qb.getParseInfo().isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName()), false);
        } else if (destType == QBMetaData.DEST_LOCAL_FILE || destType == QBMetaData.DEST_DFS_FILE) {
            // CTAS or CMV statement
            genAutoColumnStatsGatheringPipeline(destinationTable, null, input, false, true);
        }
    }
    return output;
}
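The LoadFileType decision above is repeated in the DEST_TABLE and DEST_PARTITION branches. The sketch below restates it as a standalone helper for readability; the class and method names are hypothetical and not part of Hive, only the LoadTableDesc.LoadFileType enum is real.

import org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType;

// Hypothetical helper (illustration only): mirrors the loadType selection in the snippet above.
final class LoadFileTypeChooser {

    static LoadFileType choose(boolean isDirectInsert, boolean isInsertInto, boolean isTransactional) {
        if (isDirectInsert) {
            // Direct insert writes the final files in place, so the move task must leave them alone.
            return LoadFileType.IGNORE;
        }
        if (!isInsertInto && !isTransactional) {
            // INSERT OVERWRITE into a non-transactional table really replaces the existing content.
            return LoadFileType.REPLACE_ALL;
        }
        // Transactional tables keep their old deltas/base for the cleaner; INSERT INTO always appends.
        return LoadFileType.KEEP_EXISTING;
    }
}

Under this reading, an INSERT OVERWRITE into a full-ACID table resolves to KEEP_EXISTING, which matches the comment above about leaving the old deltas and base files to the cleaner.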
Also used : LoadFileType(org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType) FileSinkDesc(org.apache.hadoop.hive.ql.plan.FileSinkDesc) LazySimpleSerDe(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe) DynamicPartitionCtx(org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx) ArrayList(java.util.ArrayList) ColumnInfo(org.apache.hadoop.hive.ql.exec.ColumnInfo) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) ListBucketingCtx(org.apache.hadoop.hive.ql.plan.ListBucketingCtx) LinkedList(java.util.LinkedList) ArrayList(java.util.ArrayList) ValidTxnWriteIdList(org.apache.hadoop.hive.common.ValidTxnWriteIdList) ValidTxnList(org.apache.hadoop.hive.common.ValidTxnList) List(java.util.List) ExprNodeDesc(org.apache.hadoop.hive.ql.plan.ExprNodeDesc) RowSchema(org.apache.hadoop.hive.ql.exec.RowSchema) LoadFileDesc(org.apache.hadoop.hive.ql.plan.LoadFileDesc) FileSinkOperator(org.apache.hadoop.hive.ql.exec.FileSinkOperator) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) TableName(org.apache.hadoop.hive.common.TableName) HiveTxnManager(org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager) PreInsertTableDesc(org.apache.hadoop.hive.ql.ddl.table.misc.preinsert.PreInsertTableDesc) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) CreateTableDesc(org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc) TableDesc(org.apache.hadoop.hive.ql.plan.TableDesc) Map(java.util.Map) LinkedHashMap(java.util.LinkedHashMap) TreeMap(java.util.TreeMap) ImmutableMap(com.google.common.collect.ImmutableMap) SortedMap(java.util.SortedMap) HashMap(java.util.HashMap) AcidUtils(org.apache.hadoop.hive.ql.io.AcidUtils) StandardStructObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.StandardStructObjectInspector) StructObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector) AbstractMapJoinOperator(org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator) SelectOperator(org.apache.hadoop.hive.ql.exec.SelectOperator) JoinOperator(org.apache.hadoop.hive.ql.exec.JoinOperator) Operator(org.apache.hadoop.hive.ql.exec.Operator) GroupByOperator(org.apache.hadoop.hive.ql.exec.GroupByOperator) FileSinkOperator(org.apache.hadoop.hive.ql.exec.FileSinkOperator) FilterOperator(org.apache.hadoop.hive.ql.exec.FilterOperator) LimitOperator(org.apache.hadoop.hive.ql.exec.LimitOperator) ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) UnionOperator(org.apache.hadoop.hive.ql.exec.UnionOperator) SMBMapJoinOperator(org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) Operation(org.apache.hadoop.hive.ql.io.AcidUtils.Operation) HiveOperation(org.apache.hadoop.hive.ql.plan.HiveOperation) StructField(org.apache.hadoop.hive.serde2.objectinspector.StructField) NoOpFetchFormatter(org.apache.hadoop.hive.serde2.NoOpFetchFormatter) ExprNodeColumnDesc(org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc) SelectDesc(org.apache.hadoop.hive.ql.plan.SelectDesc) WriteEntity(org.apache.hadoop.hive.ql.hooks.WriteEntity) CalciteSemanticException(org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) Path(org.apache.hadoop.fs.Path) Partition(org.apache.hadoop.hive.ql.metadata.Partition) DummyPartition(org.apache.hadoop.hive.ql.metadata.DummyPartition) SourceTable(org.apache.hadoop.hive.metastore.api.SourceTable) Table(org.apache.hadoop.hive.ql.metadata.Table) IOException(java.io.IOException) 
CreateMaterializedViewDesc(org.apache.hadoop.hive.ql.ddl.view.create.CreateMaterializedViewDesc) SQLUniqueConstraint(org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint) SQLCheckConstraint(org.apache.hadoop.hive.metastore.api.SQLCheckConstraint) SQLDefaultConstraint(org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint) DefaultConstraint(org.apache.hadoop.hive.ql.metadata.DefaultConstraint) SQLNotNullConstraint(org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) IOException(java.io.IOException) CalciteSemanticException(org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) SerDeException(org.apache.hadoop.hive.serde2.SerDeException) PatternSyntaxException(java.util.regex.PatternSyntaxException) FileNotFoundException(java.io.FileNotFoundException) AccessControlException(java.security.AccessControlException) InvalidTableException(org.apache.hadoop.hive.ql.metadata.InvalidTableException) MaterializedViewUpdateDesc(org.apache.hadoop.hive.ql.ddl.view.materialized.update.MaterializedViewUpdateDesc) CreateTableDesc(org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc) Operation(org.apache.hadoop.hive.ql.io.AcidUtils.Operation)

Aggregations

CreateTableDesc (org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc) 12
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException) 7
Path (org.apache.hadoop.fs.Path) 5
DDLTask (org.apache.hadoop.hive.ql.ddl.DDLTask) 5
HiveStorageHandler (org.apache.hadoop.hive.ql.metadata.HiveStorageHandler) 5
Table (org.apache.hadoop.hive.ql.metadata.Table) 5
IOException (java.io.IOException) 4
ArrayList (java.util.ArrayList) 4
HashMap (java.util.HashMap) 4
LinkedHashMap (java.util.LinkedHashMap) 4
MetaException (org.apache.hadoop.hive.metastore.api.MetaException) 4
FileNotFoundException (java.io.FileNotFoundException) 3
SourceTable (org.apache.hadoop.hive.metastore.api.SourceTable) 3
DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork) 3
CreateMaterializedViewDesc (org.apache.hadoop.hive.ql.ddl.view.create.CreateMaterializedViewDesc) 3
CalciteSemanticException (org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) 3
LoadTableDesc (org.apache.hadoop.hive.ql.plan.LoadTableDesc) 3
TableDesc (org.apache.hadoop.hive.ql.plan.TableDesc) 3
AccessControlException (java.security.AccessControlException) 2
LinkedList (java.util.LinkedList) 2