
Example 6 with CreateTableDesc

use of org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc in project hive by apache.

the class PlanUtils method getDefaultTableDesc.

public static TableDesc getDefaultTableDesc(CreateTableDesc directoryDesc, String cols, String colTypes) {
    // TODO: this should have an option for the directory to inherit from the parent table,
    // including bucketing and list bucketing, for use in compaction when the
    // latter runs inside a transaction.
    TableDesc ret = getDefaultTableDesc(Integer.toString(Utilities.ctrlaCode), cols, colTypes, false);
    if (directoryDesc == null) {
        return ret;
    }
    try {
        Properties properties = ret.getProperties();
        if (directoryDesc.getFieldDelim() != null) {
            properties.setProperty(serdeConstants.FIELD_DELIM, directoryDesc.getFieldDelim());
            properties.setProperty(serdeConstants.SERIALIZATION_FORMAT, directoryDesc.getFieldDelim());
        }
        if (directoryDesc.getLineDelim() != null) {
            properties.setProperty(serdeConstants.LINE_DELIM, directoryDesc.getLineDelim());
        }
        if (directoryDesc.getCollItemDelim() != null) {
            properties.setProperty(serdeConstants.COLLECTION_DELIM, directoryDesc.getCollItemDelim());
        }
        if (directoryDesc.getMapKeyDelim() != null) {
            properties.setProperty(serdeConstants.MAPKEY_DELIM, directoryDesc.getMapKeyDelim());
        }
        if (directoryDesc.getFieldEscape() != null) {
            properties.setProperty(serdeConstants.ESCAPE_CHAR, directoryDesc.getFieldEscape());
        }
        if (directoryDesc.getSerName() != null) {
            properties.setProperty(serdeConstants.SERIALIZATION_LIB, directoryDesc.getSerName());
        }
        if (directoryDesc.getSerdeProps() != null) {
            properties.putAll(directoryDesc.getSerdeProps());
        }
        if (directoryDesc.getInputFormat() != null) {
            ret.setInputFileFormatClass(JavaUtils.loadClass(directoryDesc.getInputFormat()));
        }
        if (directoryDesc.getOutputFormat() != null) {
            ret.setOutputFileFormatClass(JavaUtils.loadClass(directoryDesc.getOutputFormat()));
        }
        if (directoryDesc.getNullFormat() != null) {
            properties.setProperty(serdeConstants.SERIALIZATION_NULL_FORMAT, directoryDesc.getNullFormat());
        }
        if (directoryDesc.getTblProps() != null) {
            properties.putAll(directoryDesc.getTblProps());
        }
    } catch (ClassNotFoundException e) {
        // Mimic the behaviour of the tableDesc creation in CreateTableDesc:
        // return a null table description for the output.
        LOG.warn("Unable to find class in getDefaultTableDesc: " + e.getMessage(), e);
        return null;
    }
    return ret;
}
Also used : CreateTableDesc(org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc) Properties(java.util.Properties)
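
A minimal hedged sketch of how a caller might exercise the method above. The CreateTableDesc setters are the same ones the parser uses in Example 9; the delimiter values and the column/type strings (Hive's comma-separated names, colon-separated types) are illustrative assumptions, not taken from Hive's own call sites.

import org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc;
import org.apache.hadoop.hive.ql.plan.PlanUtils;
import org.apache.hadoop.hive.ql.plan.TableDesc;

public class DefaultTableDescSketch {
    // Stands in for the descriptor the parser builds for
    // INSERT OVERWRITE DIRECTORY ... ROW FORMAT DELIMITED (see Example 9).
    static TableDesc csvDirectoryDesc() {
        CreateTableDesc directoryDesc = new CreateTableDesc();
        directoryDesc.setFieldDelim(",");
        directoryDesc.setLineDelim("\n");
        // Hive encodes columns as comma-separated names and colon-separated types.
        TableDesc desc = PlanUtils.getDefaultTableDesc(directoryDesc, "id,name", "int:string");
        // A null result means an input/output format class could not be loaded.
        return desc;
    }
}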

Example 7 with CreateTableDesc

use of org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc in project hive by apache.

the class MoveTask method checkAndCommitNatively.

/**
 * Checks whether the StorageHandler provides methods for committing changes that should be used
 * instead of the file moves. If possible, the commit is executed here.
 * @param moveWork The {@link MoveWork} we would like to commit
 * @param configuration The Configuration used to instantiate the {@link HiveStorageHandler} for the target table
 * @return Returns <code>true</code> if the commit was successfully executed
 * @throws HiveException If we tried to commit, but there was an error during the process
 */
private static boolean checkAndCommitNatively(MoveWork moveWork, Configuration configuration) throws HiveException {
    String storageHandlerClass = null;
    Properties commitProperties = null;
    boolean overwrite = false;
    if (moveWork.getLoadTableWork() != null) {
        // Get the info from the table data
        TableDesc tableDesc = moveWork.getLoadTableWork().getTable();
        storageHandlerClass = tableDesc.getProperties().getProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE);
        commitProperties = new Properties(tableDesc.getProperties());
        overwrite = moveWork.getLoadTableWork().isInsertOverwrite();
    } else if (moveWork.getLoadFileWork() != null) {
        // Get the info from the create table data
        CreateTableDesc createTableDesc = moveWork.getLoadFileWork().getCtasCreateTableDesc();
        if (createTableDesc != null) {
            storageHandlerClass = createTableDesc.getStorageHandler();
            commitProperties = new Properties();
            commitProperties.put(hive_metastoreConstants.META_TABLE_NAME, createTableDesc.getDbTableName());
        }
    }
    // If the storage handler supports native commits, use that instead of moving files
    if (storageHandlerClass != null) {
        HiveStorageHandler storageHandler = HiveUtils.getStorageHandler(configuration, storageHandlerClass);
        if (storageHandler.commitInMoveTask()) {
            storageHandler.storageHandlerCommit(commitProperties, overwrite);
            return true;
        }
    }
    return false;
}
Also used : HiveStorageHandler(org.apache.hadoop.hive.ql.metadata.HiveStorageHandler) CreateTableDesc(org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc) TableDesc(org.apache.hadoop.hive.ql.plan.TableDesc) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) CreateTableDesc(org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc) Properties(java.util.Properties)
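
The storage-handler side of this contract is not shown above. Here is a hedged sketch, assuming a handler extending Hive's DefaultStorageHandler and overriding exactly the two methods checkAndCommitNatively calls (the class name is hypothetical, and the signatures are inferred from the calls in the example).

import java.util.Properties;

import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler;
import org.apache.hadoop.hive.ql.metadata.HiveException;

public class NativeCommitHandler extends DefaultStorageHandler {
    @Override
    public boolean commitInMoveTask() {
        // Tells MoveTask to call storageHandlerCommit instead of moving files.
        return true;
    }

    @Override
    public void storageHandlerCommit(Properties commitProperties, boolean overwrite) throws HiveException {
        String tableName = commitProperties.getProperty(hive_metastoreConstants.META_TABLE_NAME);
        // Commit the pending writes for tableName; replace existing data when overwrite is true.
    }
}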

Example 8 with CreateTableDesc

use of org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc in project hive by apache.

the class PlanUtils method getTableDesc.

/**
 * Generate a table descriptor from a CreateTableDesc.
 */
public static TableDesc getTableDesc(CreateTableDesc crtTblDesc, String cols, String colTypes) {
    TableDesc ret;
    // Resolve storage handler (if any)
    try {
        HiveStorageHandler storageHandler = null;
        if (crtTblDesc.getStorageHandler() != null) {
            storageHandler = HiveUtils.getStorageHandler(SessionState.getSessionConf(), crtTblDesc.getStorageHandler());
        }
        Class<? extends Deserializer> serdeClass = getDefaultSerDe();
        String separatorCode = Integer.toString(Utilities.ctrlaCode);
        String columns = cols;
        String columnTypes = colTypes;
        boolean lastColumnTakesRestOfTheLine = false;
        if (storageHandler != null) {
            serdeClass = storageHandler.getSerDeClass();
        } else if (crtTblDesc.getSerName() != null) {
            serdeClass = JavaUtils.loadClass(crtTblDesc.getSerName());
        }
        if (crtTblDesc.getFieldDelim() != null) {
            separatorCode = crtTblDesc.getFieldDelim();
        }
        ret = getTableDesc(serdeClass, separatorCode, columns, columnTypes, crtTblDesc.getPartCols(), lastColumnTakesRestOfTheLine);
        // set other table properties
        Properties properties = ret.getProperties();
        if (crtTblDesc.getStorageHandler() != null) {
            properties.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE, crtTblDesc.getStorageHandler());
        }
        if (crtTblDesc.getCollItemDelim() != null) {
            properties.setProperty(serdeConstants.COLLECTION_DELIM, crtTblDesc.getCollItemDelim());
        }
        if (crtTblDesc.getMapKeyDelim() != null) {
            properties.setProperty(serdeConstants.MAPKEY_DELIM, crtTblDesc.getMapKeyDelim());
        }
        if (crtTblDesc.getFieldEscape() != null) {
            properties.setProperty(serdeConstants.ESCAPE_CHAR, crtTblDesc.getFieldEscape());
        }
        if (crtTblDesc.getLineDelim() != null) {
            properties.setProperty(serdeConstants.LINE_DELIM, crtTblDesc.getLineDelim());
        }
        if (crtTblDesc.getNullFormat() != null) {
            properties.setProperty(serdeConstants.SERIALIZATION_NULL_FORMAT, crtTblDesc.getNullFormat());
        }
        if (crtTblDesc.getDbTableName() != null && crtTblDesc.getDatabaseName() != null) {
            properties.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_NAME, crtTblDesc.getDbTableName());
        }
        if (crtTblDesc.getTblProps() != null) {
            properties.putAll(crtTblDesc.getTblProps());
        }
        if (crtTblDesc.getSerdeProps() != null) {
            properties.putAll(crtTblDesc.getSerdeProps());
        }
        // replace the default input & output file format with those found in
        // crtTblDesc
        Class<? extends InputFormat> in_class;
        if (storageHandler != null) {
            in_class = storageHandler.getInputFormatClass();
        } else {
            in_class = JavaUtils.loadClass(crtTblDesc.getInputFormat());
        }
        Class<? extends OutputFormat> out_class;
        if (storageHandler != null) {
            out_class = storageHandler.getOutputFormatClass();
        } else {
            out_class = JavaUtils.loadClass(crtTblDesc.getOutputFormat());
        }
        ret.setInputFileFormatClass(in_class);
        ret.setOutputFileFormatClass(out_class);
    } catch (ClassNotFoundException e) {
        throw new RuntimeException("Unable to find class in getTableDesc: " + e.getMessage(), e);
    } catch (HiveException e) {
        throw new RuntimeException("Error loading storage handler in getTableDesc: " + e.getMessage(), e);
    }
    return ret;
}
Also used : HiveStorageHandler(org.apache.hadoop.hive.ql.metadata.HiveStorageHandler) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) CreateTableDesc(org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc) Properties(java.util.Properties)
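
A brief usage sketch (hypothetical, not a Hive call site): during CTAS planning the compiler hands the analyzed CreateTableDesc plus the flattened column strings to this method and attaches the result to the query's file sink. Imports are as in the sketch under Example 6.

// crtTblDesc would come from the semantic analyzer (qb.getTableDesc() in
// Example 9); the column strings use Hive's "name,name" / "type:type" encoding.
static TableDesc ctasTargetDesc(CreateTableDesc crtTblDesc) {
    TableDesc target = PlanUtils.getTableDesc(crtTblDesc, "id,name", "int:string");
    // target now carries the resolved serde, delimiters, storage handler and
    // input/output formats, ready to attach to the query's FileSinkDesc.
    return target;
}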

Example 9 with CreateTableDesc

use of org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc in project hive by apache.

the class SemanticAnalyzer method getMetaData.

@SuppressWarnings("nls")
private void getMetaData(QB qb, ReadEntity parentInput) throws HiveException {
    LOG.info("Get metadata for source tables");
    // Go over the tables and populate the related structures.
    // We have to materialize the table alias list since we might
    // modify it in the middle for view rewrite.
    List<String> tabAliases = new ArrayList<String>(qb.getTabAliases());
    // Keep track of view alias to view name and read entity,
    // e.g. for a query like 'select * from V3', where V3 -> V2, V2 -> V1, V1 -> T,
    // this keeps track of the full view name and read entity corresponding to aliases V3, V3:V2, V3:V2:V1.
    // This is needed for tracking the dependencies of inputs, along with their parents.
    Map<String, Pair<String, ReadEntity>> aliasToViewInfo = new HashMap<String, Pair<String, ReadEntity>>();
    /*
     * used to capture view to SQ conversions. This is used to check for
     * recursive CTE invocations.
     */
    Map<String, String> sqAliasToCTEName = new HashMap<String, String>();
    for (String alias : tabAliases) {
        String tabName = qb.getTabNameForAlias(alias);
        String cteName = tabName.toLowerCase();
        // Get table details from tabNameToTabObject cache
        Table tab = aliasToCTEs.containsKey(tabName) ? null : getTableObjectByName(tabName, false);
        if (tab != null) {
            Table newTab = tab.makeCopy();
            tab = newTab;
        }
        if (tab == null || tab.getDbName().equals(SessionState.get().getCurrentDatabase())) {
            Table materializedTab = ctx.getMaterializedTable(cteName);
            if (materializedTab == null) {
                // We first look for this alias among the CTEs, and then in the catalog.
                CTEClause cte = findCTEFromName(qb, cteName);
                if (cte != null) {
                    if (!cte.materialize) {
                        addCTEAsSubQuery(qb, cteName, alias);
                        sqAliasToCTEName.put(alias, cteName);
                        continue;
                    }
                    tab = materializeCTE(cteName, cte);
                }
            } else {
                tab = materializedTab;
            }
        }
        if (tab == null) {
            if (tabName.equals(DUMMY_DATABASE + "." + DUMMY_TABLE)) {
                continue;
            }
            ASTNode src = qb.getParseInfo().getSrcForAlias(alias);
            if (null != src) {
                if (src.getChildCount() == 3) {
                    throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg() + " '" + src.getChild(2).getText() + "'");
                }
                throw new SemanticException(ASTErrorUtils.getMsg(ErrorMsg.INVALID_TABLE.getMsg(), src));
            } else {
                throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(alias));
            }
        }
        Pair<String, String> asOf = qb.getAsOfForAlias(alias);
        if (asOf != null) {
            if (!Optional.ofNullable(tab.getStorageHandler()).map(HiveStorageHandler::isTimeTravelAllowed).orElse(false)) {
                throw new SemanticException(ErrorMsg.TIME_TRAVEL_NOT_ALLOWED, alias);
            }
            tab.setAsOfVersion(asOf.getLeft());
            tab.setAsOfTimestamp(asOf.getRight());
        }
        if (tab.isView()) {
            if (qb.getParseInfo().isAnalyzeCommand()) {
                throw new SemanticException(ErrorMsg.ANALYZE_VIEW.getMsg());
            }
            String fullViewName = tab.getFullyQualifiedName();
            // Prevent view cycles
            if (viewsExpanded.contains(fullViewName)) {
                throw new SemanticException("Recursive view " + fullViewName + " detected (cycle: " + StringUtils.join(viewsExpanded, " -> ") + " -> " + fullViewName + ").");
            }
            replaceViewReferenceWithDefinition(qb, tab, tabName, alias);
            // If the view is inside another view, it should have at least one parent
            if (qb.isInsideView() && parentInput == null) {
                parentInput = PlanUtils.getParentViewInfo(getAliasId(alias, qb), viewAliasToInput);
            }
            ReadEntity viewInput = new ReadEntity(tab, parentInput, !qb.isInsideView());
            viewInput = PlanUtils.addInput(inputs, viewInput);
            aliasToViewInfo.put(alias, Pair.of(fullViewName, viewInput));
            String aliasId = getAliasId(alias, qb);
            if (aliasId != null) {
                aliasId = aliasId.replace(SemanticAnalyzer.SUBQUERY_TAG_1, "").replace(SemanticAnalyzer.SUBQUERY_TAG_2, "");
            }
            viewAliasToInput.put(aliasId, viewInput);
            continue;
        }
        if (!InputFormat.class.isAssignableFrom(tab.getInputFormatClass())) {
            throw new SemanticException(generateErrorMessage(qb.getParseInfo().getSrcForAlias(alias), ErrorMsg.INVALID_INPUT_FORMAT_TYPE.getMsg()));
        }
        qb.getMetaData().setSrcForAlias(alias, tab);
        if (qb.getParseInfo().isAnalyzeCommand()) {
            // allow partial partition specification for noscan since noscan is fast.
            TableSpec ts = new TableSpec(db, conf, (ASTNode) ast.getChild(0), true, this.noscan);
            if (ts.specType == SpecType.DYNAMIC_PARTITION) {
                // dynamic partitions
                try {
                    ts.partitions = db.getPartitionsByNames(ts.tableHandle, ts.partSpec);
                } catch (HiveException e) {
                    throw new SemanticException(generateErrorMessage(qb.getParseInfo().getSrcForAlias(alias), "Cannot get partitions for " + ts.partSpec), e);
                }
            }
            tab.setTableSpec(ts);
            qb.getParseInfo().addTableSpec(alias, ts);
        }
        ReadEntity parentViewInfo = PlanUtils.getParentViewInfo(getAliasId(alias, qb), viewAliasToInput);
        // Temporary tables created during the execution are not the input sources
        if (!PlanUtils.isValuesTempTable(alias)) {
            PlanUtils.addInput(inputs, new ReadEntity(tab, parentViewInfo, parentViewInfo == null), mergeIsDirect);
        }
    }
    LOG.info("Get metadata for subqueries");
    // Go over the subqueries and getMetaData for these
    for (String alias : qb.getSubqAliases()) {
        boolean wasView = aliasToViewInfo.containsKey(alias);
        boolean wasCTE = sqAliasToCTEName.containsKey(alias);
        ReadEntity newParentInput = null;
        if (wasView) {
            viewsExpanded.add(aliasToViewInfo.get(alias).getLeft());
            newParentInput = aliasToViewInfo.get(alias).getRight();
        } else if (wasCTE) {
            ctesExpanded.add(sqAliasToCTEName.get(alias));
        }
        QBExpr qbexpr = qb.getSubqForAlias(alias);
        getMetaData(qbexpr, newParentInput);
        if (wasView) {
            viewsExpanded.remove(viewsExpanded.size() - 1);
        } else if (wasCTE) {
            ctesExpanded.remove(ctesExpanded.size() - 1);
        }
    }
    RowFormatParams rowFormatParams = new RowFormatParams();
    StorageFormat storageFormat = new StorageFormat(conf);
    LOG.info("Get metadata for destination tables");
    // Go over all the destination structures and populate the related
    // metadata
    QBParseInfo qbp = qb.getParseInfo();
    for (String name : qbp.getClauseNamesForDest()) {
        ASTNode ast = qbp.getDestForClause(name);
        switch(ast.getToken().getType()) {
            case HiveParser.TOK_TAB:
                {
                    TableSpec ts = new TableSpec(db, conf, ast);
                    if (ts.tableHandle.isView() || (mvRebuildMode == MaterializationRebuildMode.NONE && ts.tableHandle.isMaterializedView())) {
                        throw new SemanticException(ErrorMsg.DML_AGAINST_VIEW.getMsg());
                    }
                    Class<?> outputFormatClass = ts.tableHandle.getOutputFormatClass();
                    if (!ts.tableHandle.isNonNative() && !HiveOutputFormat.class.isAssignableFrom(outputFormatClass)) {
                        throw new SemanticException(ASTErrorUtils.getMsg(ErrorMsg.INVALID_OUTPUT_FORMAT_TYPE.getMsg(), ast, "The class is " + outputFormatClass.toString()));
                    }
                    boolean isTableWrittenTo = qb.getParseInfo().isInsertIntoTable(ts.tableHandle.getDbName(), ts.tableHandle.getTableName());
                    isTableWrittenTo |= (qb.getParseInfo().getInsertOverwriteTables().get(getUnescapedName((ASTNode) ast.getChild(0), ts.tableHandle.getDbName()).toLowerCase()) != null);
                    assert isTableWrittenTo : "Inconsistent data structure detected: we are writing to " + ts.tableHandle + " in " + name + " but it's not in isInsertIntoTable() or getInsertOverwriteTables()";
                    // Disallow update and delete on non-acid tables
                    boolean isFullAcid = AcidUtils.isFullAcidTable(ts.tableHandle);
                    if ((updating(name) || deleting(name)) && !isFullAcid) {
                        if (!AcidUtils.isInsertOnlyTable(ts.tableHandle)) {
                            // here, it means the table itself doesn't support it.
                            throw new SemanticException(ErrorMsg.ACID_OP_ON_NONACID_TABLE, ts.getTableName().getTable());
                        } else {
                            throw new SemanticException(ErrorMsg.ACID_OP_ON_INSERTONLYTRAN_TABLE, ts.getTableName().getTable());
                        }
                    }
                    // Whether the table itself is partitioned is not known at this point.
                    if (ts.specType != SpecType.STATIC_PARTITION) {
                        // This is a table or dynamic partition
                        qb.getMetaData().setDestForAlias(name, ts.tableHandle);
                        // has dynamic as well as static partitions
                        if (ts.partSpec != null && ts.partSpec.size() > 0) {
                            qb.getMetaData().setPartSpecForAlias(name, ts.partSpec);
                        }
                    } else {
                        // This is a partition
                        qb.getMetaData().setDestForAlias(name, ts.partHandle);
                    }
                    if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
                        // Add the table spec for the destination table.
                        qb.getParseInfo().addTableSpec(ts.getTableName().getTable().toLowerCase(), ts);
                    }
                    break;
                }
            case HiveParser.TOK_DIR:
                {
                    // This is a dfs file
                    String fname = stripQuotes(ast.getChild(0).getText());
                    if ((!qb.getParseInfo().getIsSubQ()) && (((ASTNode) ast.getChild(0)).getToken().getType() == HiveParser.TOK_TMP_FILE)) {
                        if (qb.isCTAS() || qb.isMaterializedView()) {
                            qb.setIsQuery(false);
                            ctx.setResDir(null);
                            ctx.setResFile(null);
                            Path location;
                            // If the CTAS or materialized view query specifies a location, use it; else use the db location
                            if (qb.isMaterializedView() && qb.getViewDesc() != null && qb.getViewDesc().getLocation() != null) {
                                location = new Path(qb.getViewDesc().getLocation());
                            } else if (qb.isCTAS() && qb.getTableDesc() != null && qb.getTableDesc().getLocation() != null) {
                                location = new Path(qb.getTableDesc().getLocation());
                            } else {
                                // allocate a temporary output dir on the location of the table
                                String tableName = getUnescapedName((ASTNode) ast.getChild(0));
                                String[] names = Utilities.getDbTableName(tableName);
                                try {
                                    Warehouse wh = new Warehouse(conf);
                                    // Use destination table's db location.
                                    String destTableDb = qb.getTableDesc() != null ? qb.getTableDesc().getDatabaseName() : null;
                                    if (destTableDb == null) {
                                        destTableDb = names[0];
                                    }
                                    boolean useExternal = false;
                                    if (qb.isMaterializedView()) {
                                        useExternal = !AcidUtils.isTransactionalView(qb.getViewDesc()) && !makeAcid();
                                    } else {
                                        useExternal = (qb.getTableDesc() == null || qb.getTableDesc().isTemporary() || qb.getTableDesc().isExternal() || !makeAcid());
                                    }
                                    if (useExternal) {
                                        location = wh.getDatabaseExternalPath(db.getDatabase(destTableDb));
                                    } else {
                                        location = wh.getDatabaseManagedPath(db.getDatabase(destTableDb));
                                    }
                                } catch (MetaException e) {
                                    throw new SemanticException(e);
                                }
                            }
                            try {
                                CreateTableDesc tblDesc = qb.getTableDesc();
                                if (tblDesc != null && tblDesc.isTemporary() && AcidUtils.isInsertOnlyTable(tblDesc.getTblProps(), true)) {
                                    fname = FileUtils.makeQualified(location, conf).toString();
                                } else {
                                    fname = ctx.getExtTmpPathRelTo(FileUtils.makeQualified(location, conf)).toString();
                                }
                            } catch (Exception e) {
                                throw new SemanticException(generateErrorMessage(ast, "Error creating temporary folder on: " + location.toString()), e);
                            }
                            if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
                                TableSpec ts = new TableSpec(db, conf, this.ast);
                                // Add the table spec for the destination table.
                                qb.getParseInfo().addTableSpec(ts.getTableName().getTable().toLowerCase(), ts);
                            }
                        } else {
                            // This is the only place where isQuery is set to true; it defaults to false.
                            qb.setIsQuery(true);
                            Path stagingPath = getStagingDirectoryPathname(qb);
                            fname = stagingPath.toString();
                            ctx.setResDir(stagingPath);
                        }
                    }
                    boolean isDfsFile = true;
                    if (ast.getChildCount() >= 2 && ast.getChild(1).getText().toLowerCase().equals("local")) {
                        isDfsFile = false;
                    }
                    // Set the destination for the SELECT query inside the CTAS
                    qb.getMetaData().setDestForAlias(name, fname, isDfsFile);
                    CreateTableDesc directoryDesc = new CreateTableDesc();
                    boolean directoryDescIsSet = false;
                    int numCh = ast.getChildCount();
                    for (int num = 1; num < numCh; num++) {
                        ASTNode child = (ASTNode) ast.getChild(num);
                        if (child != null) {
                            if (storageFormat.fillStorageFormat(child)) {
                                directoryDesc.setInputFormat(storageFormat.getInputFormat());
                                directoryDesc.setOutputFormat(storageFormat.getOutputFormat());
                                directoryDesc.setSerName(storageFormat.getSerde());
                                directoryDescIsSet = true;
                                continue;
                            }
                            switch(child.getToken().getType()) {
                                case HiveParser.TOK_TABLEROWFORMAT:
                                    rowFormatParams.analyzeRowFormat(child);
                                    directoryDesc.setFieldDelim(rowFormatParams.fieldDelim);
                                    directoryDesc.setLineDelim(rowFormatParams.lineDelim);
                                    directoryDesc.setCollItemDelim(rowFormatParams.collItemDelim);
                                    directoryDesc.setMapKeyDelim(rowFormatParams.mapKeyDelim);
                                    directoryDesc.setFieldEscape(rowFormatParams.fieldEscape);
                                    directoryDesc.setNullFormat(rowFormatParams.nullFormat);
                                    directoryDescIsSet = true;
                                    break;
                                case HiveParser.TOK_TABLESERIALIZER:
                                    ASTNode serdeChild = (ASTNode) child.getChild(0);
                                    storageFormat.setSerde(unescapeSQLString(serdeChild.getChild(0).getText()));
                                    directoryDesc.setSerName(storageFormat.getSerde());
                                    if (serdeChild.getChildCount() > 1) {
                                        directoryDesc.setSerdeProps(new HashMap<String, String>());
                                        readProps((ASTNode) serdeChild.getChild(1).getChild(0), directoryDesc.getSerdeProps());
                                    }
                                    directoryDescIsSet = true;
                                    break;
                            }
                        }
                    }
                    if (directoryDescIsSet) {
                        qb.setDirectoryDesc(directoryDesc);
                    }
                    break;
                }
            default:
                throw new SemanticException(generateErrorMessage(ast, "Unknown Token Type " + ast.getToken().getType()));
        }
    }
}
Also used : Warehouse(org.apache.hadoop.hive.metastore.Warehouse) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) HiveOutputFormat(org.apache.hadoop.hive.ql.io.HiveOutputFormat) Pair(org.apache.commons.lang3.tuple.Pair) CalciteSemanticException(org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) Path(org.apache.hadoop.fs.Path) HiveStorageHandler(org.apache.hadoop.hive.ql.metadata.HiveStorageHandler) SourceTable(org.apache.hadoop.hive.metastore.api.SourceTable) Table(org.apache.hadoop.hive.ql.metadata.Table) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) IOException(java.io.IOException) CalciteSemanticException(org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) SerDeException(org.apache.hadoop.hive.serde2.SerDeException) PatternSyntaxException(java.util.regex.PatternSyntaxException) FileNotFoundException(java.io.FileNotFoundException) AccessControlException(java.security.AccessControlException) InvalidTableException(org.apache.hadoop.hive.ql.metadata.InvalidTableException) ReadEntity(org.apache.hadoop.hive.ql.hooks.ReadEntity) CreateTableDesc(org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc) CombineHiveInputFormat(org.apache.hadoop.hive.ql.io.CombineHiveInputFormat) AcidInputFormat(org.apache.hadoop.hive.ql.io.AcidInputFormat) InputFormat(org.apache.hadoop.mapred.InputFormat) FileInputFormat(org.apache.hadoop.mapreduce.lib.input.FileInputFormat) NullRowsInputFormat(org.apache.hadoop.hive.ql.io.NullRowsInputFormat)
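
To make the TOK_DIR branch concrete, here is a hedged reduction of what the child-node loop effectively produces for a statement like the one in the comments below; the SQL and the helper method are illustrative, not part of SemanticAnalyzer.

// Hedged reduction of the TOK_DIR loop above for a statement such as
//   INSERT OVERWRITE DIRECTORY '/tmp/out'
//   ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
//   SELECT id, name FROM src;
// Only the TOK_TABLEROWFORMAT case fires, so the remaining delimiters stay
// null and are skipped downstream by PlanUtils.getDefaultTableDesc (Example 6).
static void attachCsvDirectoryDesc(QB qb) {
    CreateTableDesc directoryDesc = new CreateTableDesc();
    directoryDesc.setFieldDelim(",");   // from FIELDS TERMINATED BY ','
    // directoryDescIsSet is effectively true, so the descriptor is attached.
    qb.setDirectoryDesc(directoryDesc);
}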

Example 10 with CreateTableDesc

use of org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc in project hive by apache.

the class TaskCompiler method setLoadFileLocation.

private void setLoadFileLocation(final ParseContext pCtx, LoadFileDesc lfd) throws SemanticException {
    // CTAS; make the MoveTask's destination directory the table's destination.
    Long txnId = null;
    // CTAS or CMV cannot be part of a multi-transaction statement
    int stmtId = 0;
    FileSinkDesc dataSink = null;
    String loc = null;
    if (pCtx.getQueryProperties().isCTAS()) {
        CreateTableDesc ctd = pCtx.getCreateTable();
        dataSink = ctd.getAndUnsetWriter();
        txnId = ctd.getInitialMmWriteId();
        loc = ctd.getLocation();
    } else {
        CreateMaterializedViewDesc cmv = pCtx.getCreateViewDesc();
        dataSink = cmv.getAndUnsetWriter();
        txnId = cmv.getInitialMmWriteId();
        loc = cmv.getLocation();
    }
    Path location = (loc == null) ? getDefaultCtasLocation(pCtx) : new Path(loc);
    if (pCtx.getQueryProperties().isCTAS()) {
        CreateTableDesc ctd = pCtx.getCreateTable();
        if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.CREATE_TABLE_AS_EXTERNAL)) {
            // create as external table
            ctd.getTblProps().put(hive_metastoreConstants.CTAS_LEGACY_CONFIG, "true");
        }
        try {
            Table table = ctd.toTable(conf);
            table = db.getTranslateTableDryrun(table.getTTable());
            org.apache.hadoop.hive.metastore.api.Table tTable = table.getTTable();
            if (tTable.getSd() != null && tTable.getSd().getLocation() != null) {
                location = new Path(tTable.getSd().getLocation());
            }
            ctd.getTblProps().remove(hive_metastoreConstants.CTAS_LEGACY_CONFIG);
            ctd.fromTable(tTable);
        } catch (HiveException ex) {
            throw new SemanticException(ex);
        }
        pCtx.setCreateTable(ctd);
    }
    if (txnId != null) {
        dataSink.setDirName(location);
        location = new Path(location, AcidUtils.deltaSubdir(txnId, txnId, stmtId));
        lfd.setSourcePath(location);
        if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
            Utilities.FILE_OP_LOGGER.trace("Setting MM CTAS to " + location);
        }
    }
    if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
        Utilities.FILE_OP_LOGGER.trace("Location for LFD is being set to " + location + "; moving from " + lfd.getSourcePath());
    }
    lfd.setTargetDir(location);
}
Also used : Path(org.apache.hadoop.fs.Path) Table(org.apache.hadoop.hive.ql.metadata.Table) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) FileSinkDesc(org.apache.hadoop.hive.ql.plan.FileSinkDesc) CreateMaterializedViewDesc(org.apache.hadoop.hive.ql.ddl.view.create.CreateMaterializedViewDesc) CreateTableDesc(org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc)
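
A small hedged illustration of the MM (insert-only transactional) branch above, with a made-up warehouse path and write id; AcidUtils.deltaSubdir renders the delta directory name from the write-id range and statement id, roughly as shown in the final comment.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.AcidUtils;

// Illustrative values only; the path construction is the point here.
static Path mmCtasDeltaLocation() {
    Path tableLocation = new Path("hdfs://nn:8020/warehouse/db.db/ctas_target");
    long txnId = 1L;   // ctd.getInitialMmWriteId() in the method above
    int stmtId = 0;    // CTAS/CMV is never part of a multi-transaction statement
    Path delta = new Path(tableLocation, AcidUtils.deltaSubdir(txnId, txnId, stmtId));
    // delta ends in .../ctas_target/delta_0000001_0000001_0000 and becomes the
    // LoadFileDesc source path, while the table location is the final target dir.
    return delta;
}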

Aggregations

CreateTableDesc (org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc) 12
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException) 7
Path (org.apache.hadoop.fs.Path) 5
DDLTask (org.apache.hadoop.hive.ql.ddl.DDLTask) 5
HiveStorageHandler (org.apache.hadoop.hive.ql.metadata.HiveStorageHandler) 5
Table (org.apache.hadoop.hive.ql.metadata.Table) 5
IOException (java.io.IOException) 4
ArrayList (java.util.ArrayList) 4
HashMap (java.util.HashMap) 4
LinkedHashMap (java.util.LinkedHashMap) 4
MetaException (org.apache.hadoop.hive.metastore.api.MetaException) 4
FileNotFoundException (java.io.FileNotFoundException) 3
SourceTable (org.apache.hadoop.hive.metastore.api.SourceTable) 3
DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork) 3
CreateMaterializedViewDesc (org.apache.hadoop.hive.ql.ddl.view.create.CreateMaterializedViewDesc) 3
CalciteSemanticException (org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) 3
LoadTableDesc (org.apache.hadoop.hive.ql.plan.LoadTableDesc) 3
TableDesc (org.apache.hadoop.hive.ql.plan.TableDesc) 3
AccessControlException (java.security.AccessControlException) 2
LinkedList (java.util.LinkedList) 2