
Example 21 with TableName

use of org.apache.hadoop.hive.common.TableName in project hive by apache.

the class SemanticAnalyzer method analyzeCreateTable.

/**
 * Analyze the create table command. If it is a regular create-table or
 * create-table-like statement, we create a DDLWork and return null. If it is
 * a create-table-as-select, we collect the necessary info such as the SerDe
 * and Storage Format, put it in the QB, and return the SELECT statement node,
 * indicating that the rest of the semantic analyzer needs to process the
 * select statement with respect to the SerDe and Storage Format.
 */
ASTNode analyzeCreateTable(ASTNode ast, QB qb, PlannerContext plannerCtx) throws SemanticException {
    TableName qualifiedTabName = getQualifiedTableName((ASTNode) ast.getChild(0));
    final String dbDotTab = qualifiedTabName.getNotEmptyDbTable();
    String likeTableName = null;
    List<FieldSchema> cols = new ArrayList<FieldSchema>();
    List<FieldSchema> partCols = new ArrayList<FieldSchema>();
    List<String> partColNames = new ArrayList<>();
    List<String> bucketCols = new ArrayList<String>();
    List<SQLPrimaryKey> primaryKeys = new ArrayList<SQLPrimaryKey>();
    List<SQLForeignKey> foreignKeys = new ArrayList<SQLForeignKey>();
    List<SQLUniqueConstraint> uniqueConstraints = new ArrayList<>();
    List<SQLNotNullConstraint> notNullConstraints = new ArrayList<>();
    List<SQLDefaultConstraint> defaultConstraints = new ArrayList<>();
    List<SQLCheckConstraint> checkConstraints = new ArrayList<>();
    List<Order> sortCols = new ArrayList<Order>();
    int numBuckets = -1;
    String comment = null;
    String location = null;
    Map<String, String> tblProps = null;
    boolean ifNotExists = false;
    boolean isExt = false;
    boolean isTemporary = false;
    boolean isManaged = false;
    boolean isMaterialization = false;
    boolean isTransactional = false;
    ASTNode selectStmt = null;
    // regular CREATE TABLE
    final int CREATE_TABLE = 0;
    // CREATE TABLE LIKE ... (CTLT)
    final int CTLT = 1;
    // CREATE TABLE AS SELECT ... (CTAS)
    final int CTAS = 2;
    // CREATE TRANSACTIONAL TABLE
    final int ctt = 3;
    int command_type = CREATE_TABLE;
    List<String> skewedColNames = new ArrayList<String>();
    List<List<String>> skewedValues = new ArrayList<List<String>>();
    Map<List<String>, String> listBucketColValuesMapping = new HashMap<List<String>, String>();
    boolean storedAsDirs = false;
    boolean isUserStorageFormat = false;
    boolean partitionTransformSpecExists = false;
    RowFormatParams rowFormatParams = new RowFormatParams();
    StorageFormat storageFormat = new StorageFormat(conf);
    LOG.info("Creating table " + dbDotTab + " position=" + ast.getCharPositionInLine());
    int numCh = ast.getChildCount();
    // set storage handler if default handler is provided in config
    String defaultStorageHandler = HiveConf.getVar(conf, HIVE_DEFAULT_STORAGE_HANDLER);
    if (defaultStorageHandler != null && !defaultStorageHandler.isEmpty()) {
        LOG.info("Default storage handler class detected in config. Using storage handler class if exists: '{}'", defaultStorageHandler);
        storageFormat.setStorageHandler(defaultStorageHandler);
        isUserStorageFormat = true;
    }
    /*
     * Check the 1st-level children and do simple semantic checks:
     * 1) CTLT and CTAS should not coexist.
     * 2) CTLT or CTAS should not coexist with a column list (target table schema).
     * 3) CTAS does not support partitioning (for now).
     */
    for (int num = 1; num < numCh; num++) {
        ASTNode child = (ASTNode) ast.getChild(num);
        if (storageFormat.fillStorageFormat(child)) {
            isUserStorageFormat = true;
            continue;
        }
        switch(child.getToken().getType()) {
            case HiveParser.TOK_IFNOTEXISTS:
                ifNotExists = true;
                break;
            case HiveParser.KW_EXTERNAL:
                isExt = true;
                break;
            case HiveParser.KW_MANAGED:
                isManaged = true;
                isTransactional = true;
                break;
            case HiveParser.KW_TEMPORARY:
                isTemporary = true;
                isMaterialization = MATERIALIZATION_MARKER.equals(child.getText());
                break;
            case HiveParser.KW_TRANSACTIONAL:
                isTransactional = true;
                command_type = ctt;
                break;
            case HiveParser.TOK_LIKETABLE:
                if (child.getChildCount() > 0) {
                    likeTableName = getUnescapedName((ASTNode) child.getChild(0));
                    if (likeTableName != null) {
                        if (command_type == CTAS) {
                            throw new SemanticException(ErrorMsg.CTAS_CTLT_COEXISTENCE.getMsg());
                        }
                        if (cols.size() != 0) {
                            throw new SemanticException(ErrorMsg.CTLT_COLLST_COEXISTENCE.getMsg());
                        }
                    }
                    command_type = CTLT;
                }
                break;
            case HiveParser.TOK_QUERY: // CTAS
                if (command_type == CTLT) {
                    throw new SemanticException(ErrorMsg.CTAS_CTLT_COEXISTENCE.getMsg());
                }
                if (cols.size() != 0) {
                    throw new SemanticException(ErrorMsg.CTAS_COLLST_COEXISTENCE.getMsg());
                }
                if (partCols.size() != 0 || bucketCols.size() != 0) {
                    boolean dynPart = HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING);
                    if (!dynPart) {
                        throw new SemanticException(ErrorMsg.CTAS_PARCOL_COEXISTENCE.getMsg());
                    } else {
                        // TODO: support dynamic partition for CTAS
                        throw new SemanticException(ErrorMsg.CTAS_PARCOL_COEXISTENCE.getMsg());
                    }
                }
                if (!conf.getBoolVar(ConfVars.HIVE_CTAS_EXTERNAL_TABLES) && isExt) {
                    throw new SemanticException(ErrorMsg.CTAS_EXTTBL_COEXISTENCE.getMsg());
                }
                command_type = CTAS;
                if (plannerCtx != null) {
                    plannerCtx.setCTASToken(child);
                }
                selectStmt = child;
                break;
            case HiveParser.TOK_TABCOLLIST:
                cols = getColumns(child, true, ctx.getTokenRewriteStream(), primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, conf);
                break;
            case HiveParser.TOK_TABLECOMMENT:
                comment = unescapeSQLString(child.getChild(0).getText());
                break;
            case HiveParser.TOK_TABLEPARTCOLS:
                partCols = getColumns(child, false, ctx.getTokenRewriteStream(), primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, conf);
                if (hasConstraints(partCols, defaultConstraints, notNullConstraints, checkConstraints)) {
                    // TODO: these constraints should be supported for partition columns
                    throw new SemanticException(ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("NOT NULL,DEFAULT and CHECK Constraints are not allowed with " + "partition columns. "));
                }
                break;
            case HiveParser.TOK_TABLEPARTCOLSBYSPEC:
                List<PartitionTransformSpec> partitionTransformSpec = PartitionTransform.getPartitionTransformSpec(child);
                if (!SessionStateUtil.addResource(conf, hive_metastoreConstants.PARTITION_TRANSFORM_SPEC, partitionTransformSpec)) {
                    throw new SemanticException("Query state attached to Session state must be not null. " + "Partition transform metadata cannot be saved.");
                }
                partitionTransformSpecExists = true;
                break;
            case HiveParser.TOK_TABLEPARTCOLNAMES:
                partColNames = getColumnNames(child);
                break;
            case HiveParser.TOK_ALTERTABLE_BUCKETS:
                bucketCols = getColumnNames((ASTNode) child.getChild(0));
                if (child.getChildCount() == 2) {
                    numBuckets = Integer.parseInt(child.getChild(1).getText());
                } else {
                    sortCols = getColumnNamesOrder((ASTNode) child.getChild(1));
                    numBuckets = Integer.parseInt(child.getChild(2).getText());
                }
                break;
            case HiveParser.TOK_TABLEROWFORMAT:
                rowFormatParams.analyzeRowFormat(child);
                break;
            case HiveParser.TOK_TABLELOCATION:
                location = unescapeSQLString(child.getChild(0).getText());
                location = EximUtil.relativeToAbsolutePath(conf, location);
                inputs.add(toReadEntity(location));
                break;
            case HiveParser.TOK_TABLEPROPERTIES:
                tblProps = getProps((ASTNode) child.getChild(0));
                addPropertyReadEntry(tblProps, inputs);
                break;
            case HiveParser.TOK_TABLESERIALIZER:
                child = (ASTNode) child.getChild(0);
                storageFormat.setSerde(unescapeSQLString(child.getChild(0).getText()));
                if (child.getChildCount() == 2) {
                    readProps((ASTNode) (child.getChild(1).getChild(0)), storageFormat.getSerdeProps());
                }
                break;
            case HiveParser.TOK_TABLESKEWED:
                /**
                 * Throw an error if the user tries to use the DDL with
                 * hive.internal.ddl.list.bucketing.enable set to false.
                 */
                HiveConf hiveConf = SessionState.get().getConf();
                // skewed column names
                skewedColNames = SkewedTableUtils.analyzeSkewedTableDDLColNames(child);
                // skewed value
                skewedValues = SkewedTableUtils.analyzeDDLSkewedValues(child);
                // stored as directories
                storedAsDirs = analyzeStoredAdDirs(child);
                break;
            default:
                throw new AssertionError("Unknown token: " + child.getToken());
        }
    }
    HiveStorageHandler handler;
    try {
        handler = HiveUtils.getStorageHandler(conf, storageFormat.getStorageHandler());
    } catch (HiveException e) {
        throw new SemanticException("Failed to load storage handler:  " + e.getMessage());
    }
    if (handler != null) {
        if (partitionTransformSpecExists && !handler.supportsPartitionTransform()) {
            throw new SemanticException("Partition transform is not supported for " + handler.getClass().getName());
        }
        String fileFormatPropertyKey = handler.getFileFormatPropertyKey();
        if (fileFormatPropertyKey != null) {
            if (tblProps != null && tblProps.containsKey(fileFormatPropertyKey) && storageFormat.getSerdeProps() != null && storageFormat.getSerdeProps().containsKey(fileFormatPropertyKey)) {
                String fileFormat = tblProps.get(fileFormatPropertyKey);
                throw new SemanticException("Provide only one of the following: STORED BY " + fileFormat + " or WITH SERDEPROPERTIES('" + fileFormatPropertyKey + "'='" + fileFormat + "') or" + " TBLPROPERTIES('" + fileFormatPropertyKey + "'='" + fileFormat + "')");
            }
        }
    }
    if (command_type == CREATE_TABLE || command_type == CTLT || command_type == ctt) {
        queryState.setCommandType(HiveOperation.CREATETABLE);
    } else if (command_type == CTAS) {
        queryState.setCommandType(HiveOperation.CREATETABLE_AS_SELECT);
    } else {
        throw new SemanticException("Unrecognized command.");
    }
    if (isExt && ConstraintsUtils.hasEnabledOrValidatedConstraints(notNullConstraints, defaultConstraints, checkConstraints)) {
        throw new SemanticException(ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("Constraints are disallowed with External tables. " + "Only RELY is allowed."));
    }
    if (checkConstraints != null && !checkConstraints.isEmpty()) {
        ConstraintsUtils.validateCheckConstraint(cols, checkConstraints, ctx.getConf());
    }
    storageFormat.fillDefaultStorageFormat(isExt, false);
    // check for existence of table
    if (ifNotExists) {
        try {
            Table table = getTable(qualifiedTabName, false);
            if (table != null) {
                // table exists
                return null;
            }
        } catch (HiveException e) {
            // should not occur since the second parameter to getTable is false
            throw new IllegalStateException("Unexpected Exception thrown: " + e.getMessage(), e);
        }
    }
    if (isTemporary) {
        if (location == null) {
            // it has the same life cycle as the tmp table
            try {
                // Generate a unique ID for temp table path.
                // This path will be fixed for the life of the temp table.
                location = SessionState.generateTempTableLocation(conf);
            } catch (MetaException err) {
                throw new SemanticException("Error while generating temp table path:", err);
            }
        }
    }
    switch(command_type) {
        case CREATE_TABLE: // regular CREATE TABLE DDL
            if (!CollectionUtils.isEmpty(partColNames)) {
                throw new SemanticException("Partition columns can only declared using their name and types in regular CREATE TABLE statements");
            }
            tblProps = validateAndAddDefaultProperties(tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary, isTransactional, isManaged);
            addDbAndTabToOutputs(new String[] { qualifiedTabName.getDb(), qualifiedTabName.getTable() }, TableType.MANAGED_TABLE, isTemporary, tblProps, storageFormat);
            CreateTableDesc crtTblDesc = new CreateTableDesc(qualifiedTabName, isExt, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists, skewedColNames, skewedValues, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
            crtTblDesc.setStoredAsSubDirectories(storedAsDirs);
            crtTblDesc.setNullFormat(rowFormatParams.nullFormat);
            crtTblDesc.validate(conf);
            // outputs is empty, which means this create table happens in the current
            // database.
            rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtTblDesc)));
            break;
        case ctt: // CREATE TRANSACTIONAL TABLE
            if (isExt) {
                throw new SemanticException(qualifiedTabName.getTable() + " cannot be declared transactional because it's an external table");
            }
            tblProps = validateAndAddDefaultProperties(tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary, isTransactional, isManaged);
            addDbAndTabToOutputs(new String[] { qualifiedTabName.getDb(), qualifiedTabName.getTable() }, TableType.MANAGED_TABLE, false, tblProps, storageFormat);
            CreateTableDesc crtTranTblDesc = new CreateTableDesc(qualifiedTabName, isExt, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists, skewedColNames, skewedValues, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
            crtTranTblDesc.setStoredAsSubDirectories(storedAsDirs);
            crtTranTblDesc.setNullFormat(rowFormatParams.nullFormat);
            crtTranTblDesc.validate(conf);
            // outputs is empty, which means this create table happens in the current
            // database.
            rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtTranTblDesc)));
            break;
        case CTLT: // create table like <tbl_name>
            tblProps = validateAndAddDefaultProperties(tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary, isTransactional, isManaged);
            addDbAndTabToOutputs(new String[] { qualifiedTabName.getDb(), qualifiedTabName.getTable() }, TableType.MANAGED_TABLE, isTemporary, tblProps, storageFormat);
            Table likeTable = getTable(likeTableName, false);
            if (likeTable != null) {
                if (isTemporary || isExt) {
                    updateDefaultTblProps(likeTable.getParameters(), tblProps, new ArrayList<>(Arrays.asList(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES)));
                } else {
                    updateDefaultTblProps(likeTable.getParameters(), tblProps, null);
                }
            }
            CreateTableLikeDesc crtTblLikeDesc = new CreateTableLikeDesc(dbDotTab, isExt, isTemporary, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getSerdeProps(), tblProps, ifNotExists, likeTableName, isUserStorageFormat);
            rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtTblLikeDesc)));
            break;
        case CTAS: // create table as select
            if (isTemporary) {
                if (!ctx.isExplainSkipExecution() && !isMaterialization) {
                    SessionState ss = SessionState.get();
                    if (ss == null) {
                        throw new SemanticException("No current SessionState, cannot create temporary table " + qualifiedTabName.getNotEmptyDbTable());
                    }
                    Map<String, Table> tables = SessionHiveMetaStoreClient.getTempTablesForDatabase(qualifiedTabName.getDb(), qualifiedTabName.getTable());
                    if (tables != null && tables.containsKey(qualifiedTabName.getTable())) {
                        throw new SemanticException("Temporary table " + qualifiedTabName.getNotEmptyDbTable() + " already exists");
                    }
                }
            } else {
                // dumpTable is only used to check the conflict for non-temporary tables
                try {
                    Table dumpTable = db.newTable(dbDotTab);
                    if (null != db.getTable(dumpTable.getDbName(), dumpTable.getTableName(), false) && !ctx.isExplainSkipExecution()) {
                        throw new SemanticException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(dbDotTab));
                    }
                } catch (HiveException e) {
                    throw new SemanticException(e);
                }
            }
            if (location != null && location.length() != 0) {
                Path locPath = new Path(location);
                FileSystem curFs = null;
                FileStatus locStats = null;
                try {
                    curFs = locPath.getFileSystem(conf);
                    if (curFs != null) {
                        locStats = curFs.getFileStatus(locPath);
                    }
                    if (locStats != null && locStats.isDir()) {
                        FileStatus[] lStats = curFs.listStatus(locPath);
                        if (lStats != null && lStats.length != 0) {
                            // Don't throw an exception if the target location only contains the staging-dirs
                            for (FileStatus lStat : lStats) {
                                if (!lStat.getPath().getName().startsWith(HiveConf.getVar(conf, HiveConf.ConfVars.STAGINGDIR))) {
                                    throw new SemanticException(ErrorMsg.CTAS_LOCATION_NONEMPTY.getMsg(location));
                                }
                            }
                        }
                    }
                } catch (FileNotFoundException nfe) {
                // we will create the folder if it does not exist.
                } catch (IOException ioE) {
                    LOG.debug("Exception when validate folder", ioE);
                }
            }
            if (!CollectionUtils.isEmpty(partCols)) {
                throw new SemanticException("Partition columns can only declared using their names in CTAS statements");
            }
            tblProps = validateAndAddDefaultProperties(tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary, isTransactional, isManaged);
            tblProps.put(TABLE_IS_CTAS, "true");
            addDbAndTabToOutputs(new String[] { qualifiedTabName.getDb(), qualifiedTabName.getTable() }, TableType.MANAGED_TABLE, isTemporary, tblProps, storageFormat);
            tableDesc = new CreateTableDesc(qualifiedTabName, isExt, isTemporary, cols, partColNames, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists, skewedColNames, skewedValues, true, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
            tableDesc.setMaterialization(isMaterialization);
            tableDesc.setStoredAsSubDirectories(storedAsDirs);
            tableDesc.setNullFormat(rowFormatParams.nullFormat);
            qb.setTableDesc(tableDesc);
            return selectStmt;
        default:
            throw new SemanticException("Unrecognized command.");
    }
    return null;
}
Also used : SessionState(org.apache.hadoop.hive.ql.session.SessionState) FileStatus(org.apache.hadoop.fs.FileStatus) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) FileNotFoundException(java.io.FileNotFoundException) SQLNotNullConstraint(org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) FileSystem(org.apache.hadoop.fs.FileSystem) LinkedList(java.util.LinkedList) ValidTxnWriteIdList(org.apache.hadoop.hive.common.ValidTxnWriteIdList) ValidTxnList(org.apache.hadoop.hive.common.ValidTxnList) List(java.util.List) HiveConf(org.apache.hadoop.hive.conf.HiveConf) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) Order(org.apache.hadoop.hive.metastore.api.Order) SQLPrimaryKey(org.apache.hadoop.hive.metastore.api.SQLPrimaryKey) HiveStorageHandler(org.apache.hadoop.hive.ql.metadata.HiveStorageHandler) SQLCheckConstraint(org.apache.hadoop.hive.metastore.api.SQLCheckConstraint) TableName(org.apache.hadoop.hive.common.TableName) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) SQLForeignKey(org.apache.hadoop.hive.metastore.api.SQLForeignKey) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) SQLDefaultConstraint(org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint) CreateTableLikeDesc(org.apache.hadoop.hive.ql.ddl.table.create.like.CreateTableLikeDesc) CalciteSemanticException(org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) SQLUniqueConstraint(org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint) Path(org.apache.hadoop.fs.Path) SourceTable(org.apache.hadoop.hive.metastore.api.SourceTable) Table(org.apache.hadoop.hive.ql.metadata.Table) IOException(java.io.IOException) DefaultConstraint(org.apache.hadoop.hive.ql.metadata.DefaultConstraint) CreateTableDesc(org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc) DDLWork(org.apache.hadoop.hive.ql.ddl.DDLWork)
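
The analyzer above leans on a handful of TableName accessors (getDb, getTable, getNotEmptyDbTable) and, in the file-sink example further down, on TableName.fromString. The snippet below is a minimal standalone sketch of those calls, not part of the Hive sources quoted here; the class name TableNameSketch, the main method, and the literal names "sales.orders" and "default" are assumptions for illustration only.

import org.apache.hadoop.hive.common.TableName;

public class TableNameSketch {
    public static void main(String[] args) {
        // Resolve a possibly qualified name against a default catalog/database, as
        // genFileSinkPlan does with TableName.fromString(tblDesc.getDbTableName(), null, ...).
        TableName qualified = TableName.fromString("sales.orders", null, "default");
        // getDb()/getTable() are what addDbAndTabToOutputs receives in the analyzer.
        System.out.println(qualified.getDb());              // expected: sales
        System.out.println(qualified.getTable());           // expected: orders
        // getNotEmptyDbTable() is the "db.table" string logged as dbDotTab above.
        System.out.println(qualified.getNotEmptyDbTable()); // expected: sales.orders
    }
}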

Example 22 with TableName

use of org.apache.hadoop.hive.common.TableName in project hive by apache.

the class SemanticAnalyzer method analyzeCreateView.

protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCtx) throws SemanticException {
    TableName qualTabName = getQualifiedTableName((ASTNode) ast.getChild(0));
    final String dbDotTable = qualTabName.getNotEmptyDbTable();
    List<FieldSchema> cols = null;
    boolean ifNotExists = false;
    boolean rewriteEnabled = true;
    String comment = null;
    ASTNode selectStmt = null;
    Map<String, String> tblProps = null;
    List<String> partColNames = null;
    List<String> sortColNames = null;
    List<String> distributeColNames = null;
    String location = null;
    RowFormatParams rowFormatParams = new RowFormatParams();
    StorageFormat storageFormat = new StorageFormat(conf);
    LOG.info("Creating view " + dbDotTable + " position=" + ast.getCharPositionInLine());
    int numCh = ast.getChildCount();
    for (int num = 1; num < numCh; num++) {
        ASTNode child = (ASTNode) ast.getChild(num);
        if (storageFormat.fillStorageFormat(child)) {
            continue;
        }
        switch(child.getToken().getType()) {
            case HiveParser.TOK_IFNOTEXISTS:
                ifNotExists = true;
                break;
            case HiveParser.TOK_REWRITE_DISABLED:
                rewriteEnabled = false;
                break;
            case HiveParser.TOK_QUERY:
                // For CBO
                if (plannerCtx != null) {
                    plannerCtx.setViewToken(child);
                }
                selectStmt = child;
                break;
            case HiveParser.TOK_TABCOLNAME:
                cols = getColumns(child);
                break;
            case HiveParser.TOK_TABLECOMMENT:
                comment = unescapeSQLString(child.getChild(0).getText());
                break;
            case HiveParser.TOK_TABLEPROPERTIES:
                tblProps = getProps((ASTNode) child.getChild(0));
                break;
            case HiveParser.TOK_VIEWPARTCOLS:
                partColNames = getColumnNames((ASTNode) child.getChild(0));
                break;
            case HiveParser.TOK_VIEWCLUSTERCOLS:
                assert distributeColNames == null && sortColNames == null;
                distributeColNames = getColumnNames((ASTNode) child.getChild(0));
                sortColNames = new ArrayList<>(distributeColNames);
                break;
            case HiveParser.TOK_VIEWDISTRIBUTECOLS:
                assert distributeColNames == null;
                distributeColNames = getColumnNames((ASTNode) child.getChild(0));
                break;
            case HiveParser.TOK_VIEWSORTCOLS:
                assert sortColNames == null;
                sortColNames = getColumnNames((ASTNode) child.getChild(0));
                break;
            case HiveParser.TOK_TABLEROWFORMAT:
                rowFormatParams.analyzeRowFormat(child);
                break;
            case HiveParser.TOK_TABLELOCATION:
                location = unescapeSQLString(child.getChild(0).getText());
                location = EximUtil.relativeToAbsolutePath(conf, location);
                inputs.add(toReadEntity(location));
                break;
            case HiveParser.TOK_TABLESERIALIZER:
                child = (ASTNode) child.getChild(0);
                storageFormat.setSerde(unescapeSQLString(child.getChild(0).getText()));
                if (child.getChildCount() == 2) {
                    readProps((ASTNode) (child.getChild(1).getChild(0)), storageFormat.getSerdeProps());
                }
                break;
            default:
                assert false;
        }
    }
    storageFormat.fillDefaultStorageFormat(false, true);
    if (!ifNotExists) {
        // dumpTable is only used to check the conflict for non-temporary tables
        try {
            Table dumpTable = db.newTable(dbDotTable);
            if (null != db.getTable(dumpTable.getDbName(), dumpTable.getTableName(), false) && !ctx.isExplainSkipExecution()) {
                throw new SemanticException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(dbDotTable));
            }
        } catch (HiveException e) {
            throw new SemanticException(e);
        }
    }
    if (partColNames != null && (distributeColNames != null || sortColNames != null)) {
        // Verify that partition columns and data organization columns are not overlapping
        Set<String> partColNamesSet = new HashSet<>(partColNames);
        if (distributeColNames != null) {
            for (String colName : distributeColNames) {
                if (partColNamesSet.contains(colName)) {
                    throw new SemanticException("Same column cannot be present in partition and cluster/distribute clause. " + "Column name: " + colName);
                }
            }
        }
        if (sortColNames != null) {
            for (String colName : sortColNames) {
                if (partColNamesSet.contains(colName)) {
                    throw new SemanticException("Same column cannot be present in partition and cluster/sort clause. " + "Column name: " + colName);
                }
            }
        }
    }
    unparseTranslator.enable();
    if (makeAcid()) {
        if (tblProps == null) {
            tblProps = new HashMap<>();
        }
        tblProps = convertToAcidByDefault(storageFormat, dbDotTable, null, tblProps);
    }
    createVwDesc = new CreateMaterializedViewDesc(dbDotTable, cols, comment, tblProps, partColNames, sortColNames, distributeColNames, ifNotExists, rewriteEnabled, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(), storageFormat.getSerdeProps());
    addDbAndTabToOutputs(new String[] { qualTabName.getDb(), qualTabName.getTable() }, TableType.MATERIALIZED_VIEW, false, tblProps, storageFormat);
    queryState.setCommandType(HiveOperation.CREATE_MATERIALIZED_VIEW);
    qb.setViewDesc(createVwDesc);
    return selectStmt;
}
Also used : SourceTable(org.apache.hadoop.hive.metastore.api.SourceTable) Table(org.apache.hadoop.hive.ql.metadata.Table) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) CreateMaterializedViewDesc(org.apache.hadoop.hive.ql.ddl.view.create.CreateMaterializedViewDesc) SQLUniqueConstraint(org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint) SQLCheckConstraint(org.apache.hadoop.hive.metastore.api.SQLCheckConstraint) SQLDefaultConstraint(org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint) DefaultConstraint(org.apache.hadoop.hive.ql.metadata.DefaultConstraint) SQLNotNullConstraint(org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) TableName(org.apache.hadoop.hive.common.TableName) CalciteSemanticException(org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) HashSet(java.util.HashSet)
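
The partition/data-organization validation in analyzeCreateView is easy to isolate. Below is a minimal sketch of that overlap check using plain Java collections; the class name ViewColumnOverlapCheck, the IllegalArgumentException standing in for Hive's SemanticException, and the sample column names ds and user_id are assumptions for illustration, not Hive code.

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class ViewColumnOverlapCheck {
    // Mirrors the check above: a column may not appear both as a partition
    // column and in the cluster/distribute (or sort) clause.
    static void validate(List<String> partColNames, List<String> distributeColNames) {
        Set<String> partColNamesSet = new HashSet<>(partColNames);
        for (String colName : distributeColNames) {
            if (partColNamesSet.contains(colName)) {
                throw new IllegalArgumentException("Same column cannot be present in partition and cluster/distribute clause. Column name: " + colName);
            }
        }
    }

    public static void main(String[] args) {
        validate(Arrays.asList("ds"), Arrays.asList("user_id")); // passes
        validate(Arrays.asList("ds"), Arrays.asList("ds"));      // throws IllegalArgumentException
    }
}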

Example 23 with TableName

use of org.apache.hadoop.hive.common.TableName in project hive by apache.

the class SemanticAnalyzer method genFileSinkPlan.

@SuppressWarnings("nls")
protected Operator genFileSinkPlan(String dest, QB qb, Operator input) throws SemanticException {
    RowResolver inputRR = opParseCtx.get(input).getRowResolver();
    QBMetaData qbm = qb.getMetaData();
    Integer destType = qbm.getDestTypeForAlias(dest);
    // destination table if any
    Table destinationTable = null;
    // true for full ACID table and MM table
    boolean destTableIsTransactional;
    // should the destination table be written to using ACID
    boolean destTableIsFullAcid;
    // should we add files directly to the final path
    boolean isDirectInsert = false;
    AcidUtils.Operation acidOperation = null;
    boolean destTableIsTemporary = false;
    boolean destTableIsMaterialization = false;
    // destination partition if any
    Partition destinationPartition = null;
    // the intermediate destination directory
    Path queryTmpdir = null;
    String moveTaskId = null;
    // the final destination directory
    Path destinationPath = null;
    TableDesc tableDescriptor = null;
    StructObjectInspector specificRowObjectInspector = null;
    int currentTableId = 0;
    boolean isLocal = false;
    SortBucketRSCtx rsCtx = new SortBucketRSCtx();
    DynamicPartitionCtx dpCtx = null;
    LoadTableDesc ltd = null;
    ListBucketingCtx lbCtx = null;
    Map<String, String> partSpec = null;
    boolean isMmTable = false, isMmCreate = false, isNonNativeTable = false;
    Long writeId = null;
    HiveTxnManager txnMgr = getTxnMgr();
    switch(destType.intValue()) {
        case QBMetaData.DEST_TABLE:
            {
                destinationTable = qbm.getDestTableForAlias(dest);
                destTableIsTransactional = AcidUtils.isTransactionalTable(destinationTable);
                destTableIsFullAcid = AcidUtils.isFullAcidTable(destinationTable);
                destTableIsTemporary = destinationTable.isTemporary();
                // Is the user trying to insert into an external table?
                checkExternalTable(destinationTable);
                partSpec = qbm.getPartSpecForAlias(dest);
                destinationPath = destinationTable.getPath();
                checkImmutableTable(qb, destinationTable, destinationPath, false);
                // check for partition
                List<FieldSchema> parts = destinationTable.getPartitionKeys();
                if (parts != null && parts.size() > 0) {
                    // table is partitioned
                    if (partSpec == null || partSpec.size() == 0) {
                        // user did NOT specify partition
                        throw new SemanticException(generateErrorMessage(qb.getParseInfo().getDestForClause(dest), ErrorMsg.NEED_PARTITION_ERROR.getMsg()));
                    }
                    dpCtx = qbm.getDPCtx(dest);
                    if (dpCtx == null) {
                        destinationTable.validatePartColumnNames(partSpec, false);
                        dpCtx = new DynamicPartitionCtx(partSpec, conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE));
                        qbm.setDPCtx(dest, dpCtx);
                    }
                }
                // Check for dynamic partitions.
                dpCtx = checkDynPart(qb, qbm, destinationTable, partSpec, dest);
                if (dpCtx != null && dpCtx.getSPPath() != null) {
                    destinationPath = new Path(destinationTable.getPath(), dpCtx.getSPPath());
                }
                isNonNativeTable = destinationTable.isNonNative();
                isMmTable = AcidUtils.isInsertOnlyTable(destinationTable.getParameters());
                AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID;
                // this table_desc does not contain the partitioning columns
                tableDescriptor = Utilities.getTableDesc(destinationTable);
                if (!isNonNativeTable) {
                    if (destTableIsTransactional) {
                        acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
                    }
                }
                isDirectInsert = isDirectInsert(destTableIsFullAcid, acidOp);
                acidOperation = acidOp;
                queryTmpdir = getTmpDir(isNonNativeTable, isMmTable, isDirectInsert, destinationPath);
                moveTaskId = getMoveTaskId();
                if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
                    Utilities.FILE_OP_LOGGER.trace("create filesink w/DEST_TABLE specifying " + queryTmpdir + " from " + destinationPath);
                }
                if (dpCtx != null) {
                    // set the root of the temporary path where dynamic partition columns will populate
                    dpCtx.setRootPath(queryTmpdir);
                }
                // Add NOT NULL constraint check
                input = genConstraintsPlan(dest, qb, input);
                if (!qb.getIsQuery()) {
                    input = genConversionSelectOperator(dest, qb, input, destinationTable.getDeserializer(), dpCtx, parts);
                }
                if (destinationTable.isMaterializedView() && mvRebuildMode == MaterializationRebuildMode.INSERT_OVERWRITE_REBUILD) {
                    // Data organization (DISTRIBUTED, SORTED, CLUSTERED) for materialized view
                    // TODO: We only do this for a full rebuild
                    String sortColsStr = destinationTable.getProperty(Constants.MATERIALIZED_VIEW_SORT_COLUMNS);
                    String distributeColsStr = destinationTable.getProperty(Constants.MATERIALIZED_VIEW_DISTRIBUTE_COLUMNS);
                    if (sortColsStr != null || distributeColsStr != null) {
                        input = genMaterializedViewDataOrgPlan(destinationTable, sortColsStr, distributeColsStr, inputRR, input);
                    }
                } else {
                    // Add sorting/bucketing if needed
                    input = genBucketingSortingDest(dest, input, qb, tableDescriptor, destinationTable, rsCtx);
                }
                idToTableNameMap.put(String.valueOf(destTableId), destinationTable.getTableName());
                currentTableId = destTableId;
                destTableId++;
                // NOTE: specify Dynamic partitions in dest_tab for WriteEntity
                if (!isNonNativeTable || destinationTable.getStorageHandler().commitInMoveTask()) {
                    if (destTableIsTransactional) {
                        acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
                        checkAcidConstraints();
                    } else {
                        lbCtx = constructListBucketingCtx(destinationTable.getSkewedColNames(), destinationTable.getSkewedColValues(), destinationTable.getSkewedColValueLocationMaps(), destinationTable.isStoredAsSubDirectories());
                    }
                    try {
                        if (ctx.getExplainConfig() != null) {
                            // For explain plan, txn won't be opened and doesn't make sense to allocate write id
                            writeId = null;
                        } else {
                            if (isMmTable) {
                                writeId = txnMgr.getTableWriteId(destinationTable.getDbName(), destinationTable.getTableName());
                            } else {
                                writeId = acidOp == Operation.NOT_ACID ? null : txnMgr.getTableWriteId(destinationTable.getDbName(), destinationTable.getTableName());
                            }
                        }
                    } catch (LockException ex) {
                        throw new SemanticException("Failed to allocate write Id", ex);
                    }
                    boolean isReplace = !qb.getParseInfo().isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName());
                    ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, dpCtx, acidOp, isReplace, writeId);
                    if (writeId != null) {
                        ltd.setStmtId(txnMgr.getCurrentStmtId());
                    }
                    ltd.setMoveTaskId(moveTaskId);
                    // For Acid table, Insert Overwrite shouldn't replace the table content. We keep the old
                    // deltas and base and leave them up to the cleaner to clean up
                    boolean isInsertInto = qb.getParseInfo().isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName());
                    LoadFileType loadType;
                    if (isDirectInsert) {
                        loadType = LoadFileType.IGNORE;
                    } else if (!isInsertInto && !destTableIsTransactional) {
                        loadType = LoadFileType.REPLACE_ALL;
                    } else {
                        loadType = LoadFileType.KEEP_EXISTING;
                    }
                    ltd.setLoadFileType(loadType);
                    ltd.setInsertOverwrite(!isInsertInto);
                    ltd.setIsDirectInsert(isDirectInsert);
                    ltd.setLbCtx(lbCtx);
                    loadTableWork.add(ltd);
                } else {
                    // This is a non-native table.
                    // We need to set stats as inaccurate.
                    setStatsForNonNativeTable(destinationTable.getDbName(), destinationTable.getTableName());
                    // true if it is insert overwrite.
                    boolean overwrite = !qb.getParseInfo().isInsertIntoTable(String.format("%s.%s", destinationTable.getDbName(), destinationTable.getTableName()));
                    createPreInsertDesc(destinationTable, overwrite);
                    ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, partSpec == null ? ImmutableMap.of() : partSpec);
                    ltd.setInsertOverwrite(overwrite);
                    ltd.setLoadFileType(overwrite ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING);
                }
                if (destinationTable.isMaterializedView()) {
                    materializedViewUpdateDesc = new MaterializedViewUpdateDesc(destinationTable.getFullyQualifiedName(), false, false, true);
                }
                WriteEntity output = generateTableWriteEntity(dest, destinationTable, partSpec, ltd, dpCtx);
                ctx.getLoadTableOutputMap().put(ltd, output);
                break;
            }
        case QBMetaData.DEST_PARTITION:
            {
                destinationPartition = qbm.getDestPartitionForAlias(dest);
                destinationTable = destinationPartition.getTable();
                destTableIsTransactional = AcidUtils.isTransactionalTable(destinationTable);
                destTableIsFullAcid = AcidUtils.isFullAcidTable(destinationTable);
                checkExternalTable(destinationTable);
                Path partPath = destinationPartition.getDataLocation();
                checkImmutableTable(qb, destinationTable, partPath, true);
                // Previous behavior (HIVE-1707) used to replace the partition's dfs with the table's dfs.
                // The changes in HIVE-19891 appear to no longer support that behavior.
                destinationPath = partPath;
                if (MetaStoreUtils.isArchived(destinationPartition.getTPartition())) {
                    try {
                        String conflictingArchive = ArchiveUtils.conflictingArchiveNameOrNull(db, destinationTable, destinationPartition.getSpec());
                        String message = String.format("Insert conflict with existing archive: %s", conflictingArchive);
                        throw new SemanticException(message);
                    } catch (SemanticException err) {
                        throw err;
                    } catch (HiveException err) {
                        throw new SemanticException(err);
                    }
                }
                isNonNativeTable = destinationTable.isNonNative();
                isMmTable = AcidUtils.isInsertOnlyTable(destinationTable.getParameters());
                AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID;
                // this table_desc does not contain the partitioning columns
                tableDescriptor = Utilities.getTableDesc(destinationTable);
                if (!isNonNativeTable) {
                    if (destTableIsTransactional) {
                        acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
                    }
                }
                isDirectInsert = isDirectInsert(destTableIsFullAcid, acidOp);
                acidOperation = acidOp;
                queryTmpdir = getTmpDir(isNonNativeTable, isMmTable, isDirectInsert, destinationPath);
                moveTaskId = getMoveTaskId();
                if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
                    Utilities.FILE_OP_LOGGER.trace("create filesink w/DEST_PARTITION specifying " + queryTmpdir + " from " + destinationPath);
                }
                // Add NOT NULL constraint check
                input = genConstraintsPlan(dest, qb, input);
                if (!qb.getIsQuery()) {
                    input = genConversionSelectOperator(dest, qb, input, destinationTable.getDeserializer(), dpCtx, null);
                }
                if (destinationTable.isMaterializedView() && mvRebuildMode == MaterializationRebuildMode.INSERT_OVERWRITE_REBUILD) {
                    // Data organization (DISTRIBUTED, SORTED, CLUSTERED) for materialized view
                    // TODO: We only do this for a full rebuild
                    String sortColsStr = destinationTable.getProperty(Constants.MATERIALIZED_VIEW_SORT_COLUMNS);
                    String distributeColsStr = destinationTable.getProperty(Constants.MATERIALIZED_VIEW_DISTRIBUTE_COLUMNS);
                    if (sortColsStr != null || distributeColsStr != null) {
                        input = genMaterializedViewDataOrgPlan(destinationTable, sortColsStr, distributeColsStr, inputRR, input);
                    }
                } else {
                    // Add sorting/bucketing if needed
                    input = genBucketingSortingDest(dest, input, qb, tableDescriptor, destinationTable, rsCtx);
                }
                idToTableNameMap.put(String.valueOf(destTableId), destinationTable.getTableName());
                currentTableId = destTableId;
                destTableId++;
                if (destTableIsTransactional) {
                    acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
                    checkAcidConstraints();
                } else {
                    // Transactional tables can't be list bucketed or have skewed cols
                    lbCtx = constructListBucketingCtx(destinationPartition.getSkewedColNames(), destinationPartition.getSkewedColValues(), destinationPartition.getSkewedColValueLocationMaps(), destinationPartition.isStoredAsSubDirectories());
                }
                try {
                    if (ctx.getExplainConfig() != null) {
                        // For explain plan, txn won't be opened and doesn't make sense to allocate write id
                        writeId = null;
                    } else {
                        if (isMmTable) {
                            writeId = txnMgr.getTableWriteId(destinationTable.getDbName(), destinationTable.getTableName());
                        } else {
                            writeId = (acidOp == Operation.NOT_ACID) ? null : txnMgr.getTableWriteId(destinationTable.getDbName(), destinationTable.getTableName());
                        }
                    }
                } catch (LockException ex) {
                    throw new SemanticException("Failed to allocate write Id", ex);
                }
                ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, destinationPartition.getSpec(), acidOp, writeId);
                if (writeId != null) {
                    ltd.setStmtId(txnMgr.getCurrentStmtId());
                }
                // For the current context for generating File Sink Operator, it is either INSERT INTO or INSERT OVERWRITE.
                // So the next line works.
                boolean isInsertInto = !qb.getParseInfo().isDestToOpTypeInsertOverwrite(dest);
                // For Acid table, Insert Overwrite shouldn't replace the table content. We keep the old
                // deltas and base and leave them up to the cleaner to clean up
                LoadFileType loadType;
                if (isDirectInsert) {
                    loadType = LoadFileType.IGNORE;
                } else if (!isInsertInto && !destTableIsTransactional) {
                    loadType = LoadFileType.REPLACE_ALL;
                } else {
                    loadType = LoadFileType.KEEP_EXISTING;
                }
                ltd.setLoadFileType(loadType);
                ltd.setInsertOverwrite(!isInsertInto);
                ltd.setIsDirectInsert(isDirectInsert);
                ltd.setLbCtx(lbCtx);
                ltd.setMoveTaskId(moveTaskId);
                loadTableWork.add(ltd);
                if (!outputs.add(new WriteEntity(destinationPartition, determineWriteType(ltd, dest)))) {
                    throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES.getMsg(destinationTable.getTableName() + "@" + destinationPartition.getName()));
                }
                break;
            }
        case QBMetaData.DEST_LOCAL_FILE:
            isLocal = true;
        // fall through
        case QBMetaData.DEST_DFS_FILE:
            {
                destinationPath = getDestinationFilePath(qbm.getDestFileForAlias(dest), isMmTable);
                // CTAS case: the file output format and serde are defined by the create
                // table command rather than taking the default value
                List<FieldSchema> fieldSchemas = null;
                List<FieldSchema> partitionColumns = null;
                List<String> partitionColumnNames = null;
                List<FieldSchema> sortColumns = null;
                List<String> sortColumnNames = null;
                List<FieldSchema> distributeColumns = null;
                List<String> distributeColumnNames = null;
                List<ColumnInfo> fileSinkColInfos = null;
                List<ColumnInfo> sortColInfos = null;
                List<ColumnInfo> distributeColInfos = null;
                TableName tableName = null;
                Map<String, String> tblProps = null;
                CreateTableDesc tblDesc = qb.getTableDesc();
                CreateMaterializedViewDesc viewDesc = qb.getViewDesc();
                if (tblDesc != null) {
                    fieldSchemas = new ArrayList<>();
                    partitionColumns = new ArrayList<>();
                    partitionColumnNames = tblDesc.getPartColNames();
                    fileSinkColInfos = new ArrayList<>();
                    destTableIsTemporary = tblDesc.isTemporary();
                    destTableIsMaterialization = tblDesc.isMaterialization();
                    tableName = TableName.fromString(tblDesc.getDbTableName(), null, tblDesc.getDatabaseName());
                    tblProps = tblDesc.getTblProps();
                } else if (viewDesc != null) {
                    fieldSchemas = new ArrayList<>();
                    partitionColumns = new ArrayList<>();
                    partitionColumnNames = viewDesc.getPartColNames();
                    sortColumns = new ArrayList<>();
                    sortColumnNames = viewDesc.getSortColNames();
                    distributeColumns = new ArrayList<>();
                    distributeColumnNames = viewDesc.getDistributeColNames();
                    fileSinkColInfos = new ArrayList<>();
                    sortColInfos = new ArrayList<>();
                    distributeColInfos = new ArrayList<>();
                    destTableIsTemporary = false;
                    destTableIsMaterialization = false;
                    tableName = HiveTableName.ofNullableWithNoDefault(viewDesc.getViewName());
                    tblProps = viewDesc.getTblProps();
                }
                destTableIsTransactional = tblProps != null && AcidUtils.isTablePropertyTransactional(tblProps);
                if (destTableIsTransactional) {
                    try {
                        if (ctx.getExplainConfig() != null) {
                            // For explain plan, txn won't be opened and doesn't make sense to allocate write id
                            writeId = 0L;
                        } else {
                            writeId = txnMgr.getTableWriteId(tableName.getDb(), tableName.getTable());
                        }
                    } catch (LockException ex) {
                        throw new SemanticException("Failed to allocate write Id", ex);
                    }
                    if (AcidUtils.isInsertOnlyTable(tblProps, true)) {
                        isMmTable = isMmCreate = true;
                        if (tblDesc != null) {
                            tblDesc.setInitialMmWriteId(writeId);
                        } else {
                            viewDesc.setInitialMmWriteId(writeId);
                        }
                    }
                }
                if (isLocal) {
                    assert !isMmTable;
                    // for local directory - we always write to map-red intermediate
                    // store and then copy to local fs
                    queryTmpdir = ctx.getMRTmpPath();
                } else {
                    // No copy is required. We may want to revisit this policy in the future.
                    try {
                        Path qPath = FileUtils.makeQualified(destinationPath, conf);
                        queryTmpdir = isMmTable ? qPath : ctx.getTempDirForFinalJobPath(qPath);
                        if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
                            Utilities.FILE_OP_LOGGER.trace("Setting query directory " + queryTmpdir + " from " + destinationPath + " (" + isMmTable + ")");
                        }
                    } catch (Exception e) {
                        throw new SemanticException("Error creating temporary folder on: " + destinationPath, e);
                    }
                }
                // Check for dynamic partitions.
                final String cols, colTypes;
                final boolean isPartitioned;
                if (dpCtx != null) {
                    throw new SemanticException("Dynamic partition context has already been created, this should not happen");
                }
                if (!CollectionUtils.isEmpty(partitionColumnNames)) {
                    ColsAndTypes ct = deriveFileSinkColTypes(inputRR, partitionColumnNames, sortColumnNames, distributeColumnNames, fieldSchemas, partitionColumns, sortColumns, distributeColumns, fileSinkColInfos, sortColInfos, distributeColInfos);
                    cols = ct.cols;
                    colTypes = ct.colTypes;
                    dpCtx = new DynamicPartitionCtx(partitionColumnNames, conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE));
                    qbm.setDPCtx(dest, dpCtx);
                    // set the root of the temporary path where dynamic partition columns will populate
                    dpCtx.setRootPath(queryTmpdir);
                    isPartitioned = true;
                } else {
                    ColsAndTypes ct = deriveFileSinkColTypes(inputRR, sortColumnNames, distributeColumnNames, fieldSchemas, sortColumns, distributeColumns, sortColInfos, distributeColInfos);
                    cols = ct.cols;
                    colTypes = ct.colTypes;
                    isPartitioned = false;
                }
                // update the create table descriptor with the resulting schema.
                if (tblDesc != null) {
                    tblDesc.setCols(new ArrayList<>(fieldSchemas));
                    tblDesc.setPartCols(new ArrayList<>(partitionColumns));
                } else if (viewDesc != null) {
                    viewDesc.setSchema(new ArrayList<>(fieldSchemas));
                    viewDesc.setPartCols(new ArrayList<>(partitionColumns));
                    if (viewDesc.isOrganized()) {
                        viewDesc.setSortCols(new ArrayList<>(sortColumns));
                        viewDesc.setDistributeCols(new ArrayList<>(distributeColumns));
                    }
                }
                boolean isDestTempFile = true;
                if (!ctx.isMRTmpFileURI(destinationPath.toUri().toString()) && !ctx.isResultCacheDir(destinationPath)) {
                    // not a temp dir and not a result cache dir
                    idToTableNameMap.put(String.valueOf(destTableId), destinationPath.toUri().toString());
                    currentTableId = destTableId;
                    destTableId++;
                    isDestTempFile = false;
                }
                if (tblDesc == null) {
                    if (viewDesc != null) {
                        tableDescriptor = PlanUtils.getTableDesc(viewDesc, cols, colTypes);
                    } else if (qb.getIsQuery()) {
                        Class<? extends Deserializer> serdeClass = LazySimpleSerDe.class;
                        String fileFormat = conf.getResultFileFormat().toString();
                        if (SessionState.get().getIsUsingThriftJDBCBinarySerDe()) {
                            serdeClass = ThriftJDBCBinarySerDe.class;
                            fileFormat = ResultFileFormat.SEQUENCEFILE.toString();
                            // Set the fetch formatter to be a no-op for the ListSinkOperator, since we'll
                            // write out formatted thrift objects to SequenceFile
                            conf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, NoOpFetchFormatter.class.getName());
                        } else if (fileFormat.equals(PlanUtils.LLAP_OUTPUT_FORMAT_KEY)) {
                            // If this output format is Llap, check to see if Arrow is requested
                            boolean useArrow = HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_OUTPUT_FORMAT_ARROW);
                            serdeClass = useArrow ? ArrowColumnarBatchSerDe.class : LazyBinarySerDe2.class;
                        }
                        tableDescriptor = PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, fileFormat, serdeClass);
                    } else {
                        tableDescriptor = PlanUtils.getDefaultTableDesc(qb.getDirectoryDesc(), cols, colTypes);
                    }
                } else {
                    tableDescriptor = PlanUtils.getTableDesc(tblDesc, cols, colTypes);
                }
                // if available, set location in table desc properties
                if (tblDesc != null && tblDesc.getLocation() != null && tableDescriptor != null && !tableDescriptor.getProperties().containsKey(hive_metastoreConstants.META_TABLE_LOCATION)) {
                    tableDescriptor.getProperties().setProperty(hive_metastoreConstants.META_TABLE_LOCATION, tblDesc.getLocation());
                }
                // We need a specific rowObjectInspector in this case
                try {
                    specificRowObjectInspector = (StructObjectInspector) tableDescriptor.getDeserializer(conf).getObjectInspector();
                } catch (Exception e) {
                    throw new SemanticException(e.getMessage(), e);
                }
                boolean isDfsDir = (destType == QBMetaData.DEST_DFS_FILE);
                try {
                    destinationTable = tblDesc != null ? tblDesc.toTable(conf) : viewDesc != null ? viewDesc.toTable(conf) : null;
                } catch (HiveException e) {
                    throw new SemanticException(e);
                }
                destTableIsFullAcid = AcidUtils.isFullAcidTable(destinationTable);
                // Data organization (DISTRIBUTED, SORTED, CLUSTERED) for materialized view
                if (viewDesc != null && viewDesc.isOrganized()) {
                    input = genMaterializedViewDataOrgPlan(sortColInfos, distributeColInfos, inputRR, input);
                }
                moveTaskId = getMoveTaskId();
                if (isPartitioned) {
                    // Create a SELECT that may reorder the columns if needed
                    RowResolver rowResolver = new RowResolver();
                    List<ExprNodeDesc> columnExprs = new ArrayList<>();
                    List<String> colNames = new ArrayList<>();
                    Map<String, ExprNodeDesc> colExprMap = new HashMap<>();
                    for (int i = 0; i < fileSinkColInfos.size(); i++) {
                        ColumnInfo ci = fileSinkColInfos.get(i);
                        ExprNodeDesc columnExpr = new ExprNodeColumnDesc(ci);
                        String name = getColumnInternalName(i);
                        rowResolver.put("", name, new ColumnInfo(name, columnExpr.getTypeInfo(), "", false));
                        columnExprs.add(columnExpr);
                        colNames.add(name);
                        colExprMap.put(name, columnExpr);
                    }
                    input = putOpInsertMap(OperatorFactory.getAndMakeChild(new SelectDesc(columnExprs, colNames), new RowSchema(rowResolver.getColumnInfos()), input), rowResolver);
                    input.setColumnExprMap(colExprMap);
                    // If this is a partitioned CTAS or MV statement, we are going to create a LoadTableDesc
                    // object. Although the table does not exist in metastore, we will swap the CreateTableTask
                    // and MoveTask resulting from this LoadTable so in this specific case, first we create
                    // the metastore table, then we move and commit the partitions. At least for the time being,
                    // this order needs to be enforced because metastore expects a table to exist before we can
                    // add any partitions to it.
                    isNonNativeTable = tableDescriptor.isNonNative();
                    if (!isNonNativeTable || destinationTable.getStorageHandler().commitInMoveTask()) {
                        AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID;
                        if (destTableIsTransactional) {
                            acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
                            checkAcidConstraints();
                        }
                        // isReplace = false in case concurrent operation is executed
                        ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, dpCtx, acidOp, false, writeId);
                        if (writeId != null) {
                            ltd.setStmtId(txnMgr.getCurrentStmtId());
                        }
                        ltd.setLoadFileType(LoadFileType.KEEP_EXISTING);
                        ltd.setInsertOverwrite(false);
                        loadTableWork.add(ltd);
                    } else {
                        // This is a non-native table.
                        // We need to set stats as inaccurate.
                        setStatsForNonNativeTable(tableDescriptor.getDbName(), tableDescriptor.getTableName());
                        ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, dpCtx.getPartSpec());
                        ltd.setInsertOverwrite(false);
                        ltd.setLoadFileType(LoadFileType.KEEP_EXISTING);
                    }
                    ltd.setMoveTaskId(moveTaskId);
                    ltd.setMdTable(destinationTable);
                    WriteEntity output = generateTableWriteEntity(dest, destinationTable, dpCtx.getPartSpec(), ltd, dpCtx);
                    ctx.getLoadTableOutputMap().put(ltd, output);
                } else {
                    // Create LFD even for MM CTAS - it's a no-op move, but it still seems to be used for stats.
                    // there is a change here - prev version had 'transactional', one before 'acid'
                    LoadFileDesc loadFileDesc = new LoadFileDesc(tblDesc, viewDesc, queryTmpdir, destinationPath, isDfsDir, cols, colTypes,
                            destTableIsFullAcid ? Operation.INSERT : Operation.NOT_ACID, isMmCreate);
                    loadFileDesc.setMoveTaskId(moveTaskId);
                    loadFileWork.add(loadFileDesc);
                    try {
                        Path qualifiedPath = destinationPath.getFileSystem(conf).makeQualified(destinationPath);
                        if (!outputs.add(new WriteEntity(qualifiedPath, !isDfsDir, isDestTempFile))) {
                            throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES.getMsg(destinationPath.toUri().toString()));
                        }
                    } catch (IOException ex) {
                        throw new SemanticException("Error while getting the full qualified path for the given directory: " + ex.getMessage());
                    }
                }
                break;
            }
        default:
            throw new SemanticException("Unknown destination type: " + destType);
    }
    inputRR = opParseCtx.get(input).getRowResolver();
    List<ColumnInfo> vecCol = new ArrayList<ColumnInfo>();
    if (updating(dest) || deleting(dest)) {
        vecCol.add(new ColumnInfo(VirtualColumn.ROWID.getName(), VirtualColumn.ROWID.getTypeInfo(), "", true));
    } else {
        try {
            // If we already have a specific inspector (view or directory as a target) use that
            // Otherwise use the table deserializer to get the inspector
            StructObjectInspector rowObjectInspector = specificRowObjectInspector != null ? specificRowObjectInspector : (StructObjectInspector) destinationTable.getDeserializer().getObjectInspector();
            List<? extends StructField> fields = rowObjectInspector.getAllStructFieldRefs();
            for (StructField field : fields) {
                vecCol.add(new ColumnInfo(field.getFieldName(), TypeInfoUtils.getTypeInfoFromObjectInspector(field.getFieldObjectInspector()), "", false));
            }
        } catch (Exception e) {
            throw new SemanticException(e.getMessage(), e);
        }
    }
    RowSchema fsRS = new RowSchema(vecCol);
    // The output files of a FileSink can be merged if they are either not being written to a table,
    // or are being written to a table that is neither bucketed nor sorted
    boolean canBeMerged = (destinationTable == null || !((destinationTable.getNumBuckets() > 0) || (destinationTable.getSortCols() != null && destinationTable.getSortCols().size() > 0)));
    // If this table is working with ACID semantics, turn off merging
    canBeMerged &= !destTableIsFullAcid;
    // Generate the partition columns from the parent input
    if (destType == QBMetaData.DEST_TABLE || destType == QBMetaData.DEST_PARTITION) {
        genPartnCols(dest, input, qb, tableDescriptor, destinationTable, rsCtx);
    }
    FileSinkDesc fileSinkDesc = createFileSinkDesc(dest, tableDescriptor, destinationPartition,
            destinationPath, currentTableId, destTableIsFullAcid, // this was 1/4 acid
            destTableIsTemporary, destTableIsMaterialization, queryTmpdir, rsCtx, dpCtx, lbCtx, fsRS,
            canBeMerged, destinationTable, writeId, isMmCreate, destType, qb, isDirectInsert, acidOperation, moveTaskId);
    if (isMmCreate) {
        // Add FSD so that the LoadTask compilation could fix up its path to avoid the move.
        if (tableDesc != null) {
            tableDesc.setWriter(fileSinkDesc);
        } else {
            createVwDesc.setWriter(fileSinkDesc);
        }
    }
    if (fileSinkDesc.getInsertOverwrite()) {
        if (ltd != null) {
            ltd.setInsertOverwrite(true);
        }
    }
    if (null != tableDescriptor && useBatchingSerializer(tableDescriptor.getSerdeClassName())) {
        fileSinkDesc.setIsUsingBatchingSerDe(true);
    } else {
        fileSinkDesc.setIsUsingBatchingSerDe(false);
    }
    Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild(fileSinkDesc, fsRS, input), inputRR);
    // Handle lineage unless this is a direct insert whose ACID operation is not INSERT,
    // e.g. the update/delete branches of a merge statement.
    if (!isDirectInsert || acidOperation == AcidUtils.Operation.INSERT) {
        handleLineage(ltd, output);
    }
    setWriteIdForSurrogateKeys(ltd, input);
    LOG.debug("Created FileSink Plan for clause: {}dest_path: {} row schema: {}", dest, destinationPath, inputRR);
    FileSinkOperator fso = (FileSinkOperator) output;
    fso.getConf().setTable(destinationTable);
    // Auto-gather column statistics when hive.stats.autogather and hive.stats.column.autogather are
    // enabled and the destination is a table, a partition, or a CTAS/CMV target
    if (conf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER) && conf.getBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER) && destinationTable != null && (!destinationTable.isNonNative() || destinationTable.getStorageHandler().commitInMoveTask()) && !destTableIsTemporary && !destTableIsMaterialization && ColumnStatsAutoGatherContext.canRunAutogatherStats(fso)) {
        if (destType == QBMetaData.DEST_TABLE) {
            genAutoColumnStatsGatheringPipeline(destinationTable, partSpec, input, qb.getParseInfo().isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName()), false);
        } else if (destType == QBMetaData.DEST_PARTITION) {
            genAutoColumnStatsGatheringPipeline(destinationTable, destinationPartition.getSpec(), input, qb.getParseInfo().isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName()), false);
        } else if (destType == QBMetaData.DEST_LOCAL_FILE || destType == QBMetaData.DEST_DFS_FILE) {
            // CTAS or CMV statement
            genAutoColumnStatsGatheringPipeline(destinationTable, null, input, false, true);
        }
    }
    return output;
}
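
A minimal sketch of a statement that exercises the partitioned branch above (the path where a DynamicPartitionCtx is created and the CreateTableTask is ordered before the MoveTask). It assumes an embedded metastore, a pre-built HiveConf, and a Hive version that supports partitioned CTAS; the table names and the DriverFactory/SessionState wiring are illustrative only, not part of the method shown.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.DriverFactory;
import org.apache.hadoop.hive.ql.IDriver;
import org.apache.hadoop.hive.ql.session.SessionState;

public class PartitionedCtasSketch {
    public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf();
        // non-strict dynamic partitioning is assumed to be required for a partitioned CTAS
        conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
        SessionState.start(conf);
        IDriver driver = DriverFactory.newDriver(conf);
        try {
            // compiling this CTAS reaches genFileSinkPlan with a non-empty partition column list,
            // so the dpCtx/LoadTableDesc branch above is taken
            driver.run("CREATE TABLE sales_by_day PARTITIONED BY (ds) AS SELECT amount, ds FROM sales");
        } finally {
            driver.close();
        }
    }
}
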
Also used : LoadFileType(org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType) FileSinkDesc(org.apache.hadoop.hive.ql.plan.FileSinkDesc) LazySimpleSerDe(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe) DynamicPartitionCtx(org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx) ArrayList(java.util.ArrayList) ColumnInfo(org.apache.hadoop.hive.ql.exec.ColumnInfo) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) ListBucketingCtx(org.apache.hadoop.hive.ql.plan.ListBucketingCtx) LinkedList(java.util.LinkedList) ArrayList(java.util.ArrayList) ValidTxnWriteIdList(org.apache.hadoop.hive.common.ValidTxnWriteIdList) ValidTxnList(org.apache.hadoop.hive.common.ValidTxnList) List(java.util.List) ExprNodeDesc(org.apache.hadoop.hive.ql.plan.ExprNodeDesc) RowSchema(org.apache.hadoop.hive.ql.exec.RowSchema) LoadFileDesc(org.apache.hadoop.hive.ql.plan.LoadFileDesc) FileSinkOperator(org.apache.hadoop.hive.ql.exec.FileSinkOperator) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) TableName(org.apache.hadoop.hive.common.TableName) HiveTxnManager(org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager) PreInsertTableDesc(org.apache.hadoop.hive.ql.ddl.table.misc.preinsert.PreInsertTableDesc) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) CreateTableDesc(org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc) TableDesc(org.apache.hadoop.hive.ql.plan.TableDesc) Map(java.util.Map) LinkedHashMap(java.util.LinkedHashMap) TreeMap(java.util.TreeMap) ImmutableMap(com.google.common.collect.ImmutableMap) SortedMap(java.util.SortedMap) HashMap(java.util.HashMap) AcidUtils(org.apache.hadoop.hive.ql.io.AcidUtils) StandardStructObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.StandardStructObjectInspector) StructObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector) AbstractMapJoinOperator(org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator) SelectOperator(org.apache.hadoop.hive.ql.exec.SelectOperator) JoinOperator(org.apache.hadoop.hive.ql.exec.JoinOperator) Operator(org.apache.hadoop.hive.ql.exec.Operator) GroupByOperator(org.apache.hadoop.hive.ql.exec.GroupByOperator) FileSinkOperator(org.apache.hadoop.hive.ql.exec.FileSinkOperator) FilterOperator(org.apache.hadoop.hive.ql.exec.FilterOperator) LimitOperator(org.apache.hadoop.hive.ql.exec.LimitOperator) ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) UnionOperator(org.apache.hadoop.hive.ql.exec.UnionOperator) SMBMapJoinOperator(org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) Operation(org.apache.hadoop.hive.ql.io.AcidUtils.Operation) HiveOperation(org.apache.hadoop.hive.ql.plan.HiveOperation) StructField(org.apache.hadoop.hive.serde2.objectinspector.StructField) NoOpFetchFormatter(org.apache.hadoop.hive.serde2.NoOpFetchFormatter) ExprNodeColumnDesc(org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc) SelectDesc(org.apache.hadoop.hive.ql.plan.SelectDesc) WriteEntity(org.apache.hadoop.hive.ql.hooks.WriteEntity) CalciteSemanticException(org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) Path(org.apache.hadoop.fs.Path) Partition(org.apache.hadoop.hive.ql.metadata.Partition) DummyPartition(org.apache.hadoop.hive.ql.metadata.DummyPartition) SourceTable(org.apache.hadoop.hive.metastore.api.SourceTable) Table(org.apache.hadoop.hive.ql.metadata.Table) IOException(java.io.IOException) 
CreateMaterializedViewDesc(org.apache.hadoop.hive.ql.ddl.view.create.CreateMaterializedViewDesc) SQLUniqueConstraint(org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint) SQLCheckConstraint(org.apache.hadoop.hive.metastore.api.SQLCheckConstraint) SQLDefaultConstraint(org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint) DefaultConstraint(org.apache.hadoop.hive.ql.metadata.DefaultConstraint) SQLNotNullConstraint(org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) IOException(java.io.IOException) CalciteSemanticException(org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) SerDeException(org.apache.hadoop.hive.serde2.SerDeException) PatternSyntaxException(java.util.regex.PatternSyntaxException) FileNotFoundException(java.io.FileNotFoundException) AccessControlException(java.security.AccessControlException) InvalidTableException(org.apache.hadoop.hive.ql.metadata.InvalidTableException) MaterializedViewUpdateDesc(org.apache.hadoop.hive.ql.ddl.view.materialized.update.MaterializedViewUpdateDesc) CreateTableDesc(org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc) Operation(org.apache.hadoop.hive.ql.io.AcidUtils.Operation)

Example 24 with TableName

use of org.apache.hadoop.hive.common.TableName in project hive by apache.

the class RewriteSemanticAnalyzer method getTable.

/**
 * @param throwException if false, return null if table doesn't exist, else throw
 */
protected static Table getTable(ASTNode tabRef, Hive db, boolean throwException) throws SemanticException {
    TableName tableName;
    switch(tabRef.getType()) {
        case HiveParser.TOK_TABREF:
            tableName = getQualifiedTableName((ASTNode) tabRef.getChild(0));
            break;
        case HiveParser.TOK_TABNAME:
            tableName = getQualifiedTableName(tabRef);
            break;
        default:
            throw raiseWrongType("TOK_TABREF|TOK_TABNAME", tabRef);
    }
    Table mTable;
    try {
        mTable = db.getTable(tableName.getDb(), tableName.getTable(), throwException);
    } catch (InvalidTableException e) {
        LOG.error("Failed to find table " + tableName.getNotEmptyDbTable() + " got exception " + e.getMessage());
        throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName.getNotEmptyDbTable()), e);
    } catch (HiveException e) {
        LOG.error("Failed to find table " + tableName.getNotEmptyDbTable() + " got exception " + e.getMessage());
        throw new SemanticException(e.getMessage(), e);
    }
    return mTable;
}
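
For reference, a tiny sketch of the TableName accessors used by the lookup above; the catalog, database, and table values are made up for illustration, whereas in RewriteSemanticAnalyzer the object comes from getQualifiedTableName on the parsed table reference.

import org.apache.hadoop.hive.common.TableName;

public class TableNameSketch {
    public static void main(String[] args) {
        // hypothetical values; normally built from the AST table reference
        TableName tableName = new TableName("hive", "sales_db", "daily_sales");
        System.out.println(tableName.getDb());              // sales_db
        System.out.println(tableName.getTable());           // daily_sales
        System.out.println(tableName.getNotEmptyDbTable()); // sales_db.daily_sales
    }
}
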
Also used : TableName(org.apache.hadoop.hive.common.TableName) Table(org.apache.hadoop.hive.ql.metadata.Table) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) InvalidTableException(org.apache.hadoop.hive.ql.metadata.InvalidTableException)

Example 25 with TableName

use of org.apache.hadoop.hive.common.TableName in project hive by apache.

the class Hive method dropPartitions.

public List<Partition> dropPartitions(String dbName, String tableName, List<Pair<Integer, byte[]>> partitionExpressions, PartitionDropOptions dropOptions) throws HiveException {
    try {
        Table table = getTable(dbName, tableName);
        if (!dropOptions.deleteData) {
            AcidUtils.TableSnapshot snapshot = AcidUtils.getTableSnapshot(conf, table, true);
            if (snapshot != null) {
                dropOptions.setWriteId(snapshot.getWriteId());
            }
            long txnId = Optional.ofNullable(SessionState.get()).map(ss -> ss.getTxnMgr().getCurrentTxnId()).orElse(0L);
            dropOptions.setTxnId(txnId);
        }
        List<org.apache.hadoop.hive.metastore.api.Partition> partitions = getMSC().dropPartitions(dbName, tableName, partitionExpressions, dropOptions);
        return convertFromMetastore(table, partitions);
    } catch (NoSuchObjectException e) {
        throw new HiveException("Partition or table doesn't exist.", e);
    } catch (Exception e) {
        throw new HiveException(e.getMessage(), e);
    }
}
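
A hedged sketch of calling this overload. It assumes `hive` is an initialized Hive instance and `exprBytes` holds a serialized partition filter expression produced elsewhere; the database and table names are illustrative only.

import java.util.Collections;
import java.util.List;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hive.metastore.PartitionDropOptions;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;

public class DropPartitionsSketch {
    // exprBytes: a serialized partition filter expression (hypothetical input)
    static List<Partition> dropMatchingPartitions(Hive hive, byte[] exprBytes) throws HiveException {
        // deleteData(false) keeps the data files, so the method stamps the current write id / txn id instead
        PartitionDropOptions options = PartitionDropOptions.instance()
                .deleteData(false)
                .ifExists(true);
        List<Pair<Integer, byte[]>> partitionExpressions =
                Collections.singletonList(Pair.of(0, exprBytes));
        return hive.dropPartitions("default", "sales", partitionExpressions, options);
    }
}
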
Also used : MetaStoreUtils.getDefaultCatalog(org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog) CALCITE(org.apache.hadoop.hive.ql.metadata.HiveRelOptMaterialization.RewriteAlgorithm.CALCITE) FileSystem(org.apache.hadoop.fs.FileSystem) FileStatus(org.apache.hadoop.fs.FileStatus) FunctionRegistry(org.apache.hadoop.hive.ql.exec.FunctionRegistry) AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException) Future(java.util.concurrent.Future) StatsSetupConst(org.apache.hadoop.hive.common.StatsSetupConst) Pair(org.apache.commons.lang3.tuple.Pair) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) GetPartitionRequest(org.apache.hadoop.hive.metastore.api.GetPartitionRequest) DefaultConstraintsRequest(org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest) EnumSet(java.util.EnumSet) SQLAllTableConstraints(org.apache.hadoop.hive.metastore.api.SQLAllTableConstraints) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) HadoopShims(org.apache.hadoop.hive.shims.HadoopShims) FileChecksum(org.apache.hadoop.fs.FileChecksum) CheckConstraintsRequest(org.apache.hadoop.hive.metastore.api.CheckConstraintsRequest) WMMapping(org.apache.hadoop.hive.metastore.api.WMMapping) HiveMaterializedViewUtils(org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewUtils) AcidUtils.getFullTableName(org.apache.hadoop.hive.ql.io.AcidUtils.getFullTableName) SetPartitionsStatsRequest(org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest) MetastoreConf(org.apache.hadoop.hive.metastore.conf.MetastoreConf) ForeignKeysRequest(org.apache.hadoop.hive.metastore.api.ForeignKeysRequest) FileUtils(org.apache.hadoop.hive.common.FileUtils) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) ErrorMsg(org.apache.hadoop.hive.ql.ErrorMsg) WMPool(org.apache.hadoop.hive.metastore.api.WMPool) HdfsUtils(org.apache.hadoop.hive.ql.io.HdfsUtils) InsertEventRequestData(org.apache.hadoop.hive.metastore.api.InsertEventRequestData) Role(org.apache.hadoop.hive.metastore.api.Role) CollectionUtils(org.apache.commons.collections4.CollectionUtils) CmRecycleRequest(org.apache.hadoop.hive.metastore.api.CmRecycleRequest) Lists(com.google.common.collect.Lists) MATERIALIZED_VIEW_REWRITING_TIME_WINDOW(org.apache.hadoop.hive.conf.Constants.MATERIALIZED_VIEW_REWRITING_TIME_WINDOW) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Unstable(org.apache.hadoop.hive.common.classification.InterfaceStability.Unstable) SQLUniqueConstraint(org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint) Constants(org.apache.hadoop.hive.conf.Constants) ReplChangeManager(org.apache.hadoop.hive.metastore.ReplChangeManager) GetOpenTxnsInfoResponse(org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse) LazySimpleSerDe(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe) AllTableConstraintsRequest(org.apache.hadoop.hive.metastore.api.AllTableConstraintsRequest) EnvironmentContext(org.apache.hadoop.hive.metastore.api.EnvironmentContext) IOException(java.io.IOException) PrincipalType(org.apache.hadoop.hive.metastore.api.PrincipalType) LoadFileType(org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType) UnknownHostException(java.net.UnknownHostException) ExecutionException(java.util.concurrent.ExecutionException) PartitionWithoutSD(org.apache.hadoop.hive.metastore.api.PartitionWithoutSD) MetaStoreServerUtils(org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils) 
Deserializer(org.apache.hadoop.hive.serde2.Deserializer) Preconditions(com.google.common.base.Preconditions) org.apache.hadoop.hive.metastore.api.hive_metastoreConstants(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) ValidWriteIdList(org.apache.hadoop.hive.common.ValidWriteIdList) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) PrimaryKeysRequest(org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest) ConfVars(org.apache.hadoop.hive.conf.HiveConf.ConfVars) WMFullResourcePlan(org.apache.hadoop.hive.metastore.api.WMFullResourcePlan) HiveMetaException(org.apache.hadoop.hive.metastore.HiveMetaException) SerializationUtilities(org.apache.hadoop.hive.ql.exec.SerializationUtilities) HiveObjectType(org.apache.hadoop.hive.metastore.api.HiveObjectType) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) CompactionType(org.apache.hadoop.hive.metastore.api.CompactionType) GetPartitionNamesPsResponse(org.apache.hadoop.hive.metastore.api.GetPartitionNamesPsResponse) ColumnStatisticsDesc(org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc) SynchronizedMetaStoreClient(org.apache.hadoop.hive.metastore.SynchronizedMetaStoreClient) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) InPlaceUpdate(org.apache.hadoop.hive.common.log.InPlaceUpdate) ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse) PerfLogger(org.apache.hadoop.hive.ql.log.PerfLogger) GetPartitionsByNamesRequest(org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesRequest) WMNullableResourcePlan(org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) FireEventRequest(org.apache.hadoop.hive.metastore.api.FireEventRequest) SQLPrimaryKey(org.apache.hadoop.hive.metastore.api.SQLPrimaryKey) Collectors(java.util.stream.Collectors) SessionState(org.apache.hadoop.hive.ql.session.SessionState) CompactionResponse(org.apache.hadoop.hive.metastore.api.CompactionResponse) IMetaStoreClient(org.apache.hadoop.hive.metastore.IMetaStoreClient) DataConnector(org.apache.hadoop.hive.metastore.api.DataConnector) Entry(java.util.Map.Entry) RetryingMetaStoreClient(org.apache.hadoop.hive.metastore.RetryingMetaStoreClient) AcidUtils(org.apache.hadoop.hive.ql.io.AcidUtils) FilenameUtils(org.apache.commons.io.FilenameUtils) MoreExecutors(com.google.common.util.concurrent.MoreExecutors) SQLCheckConstraint(org.apache.hadoop.hive.metastore.api.SQLCheckConstraint) WMNullablePool(org.apache.hadoop.hive.metastore.api.WMNullablePool) PathFilter(org.apache.hadoop.fs.PathFilter) WriteNotificationLogBatchRequest(org.apache.hadoop.hive.metastore.api.WriteNotificationLogBatchRequest) HashSet(java.util.HashSet) ListBucketingPrunerUtils(org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils) PartitionsByExprRequest(org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest) SERIALIZATION_FORMAT(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT) ImmutableList(com.google.common.collect.ImmutableList) UpdateTransactionalStatsRequest(org.apache.hadoop.hive.metastore.api.UpdateTransactionalStatsRequest) StringUtils(org.apache.hadoop.util.StringUtils) LinkedList(java.util.LinkedList) TApplicationException(org.apache.thrift.TApplicationException) ExecutorService(java.util.concurrent.ExecutorService) AbstractFileMergeOperator(org.apache.hadoop.hive.ql.exec.AbstractFileMergeOperator) PrintStream(java.io.PrintStream) 
DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Logger(org.slf4j.Logger) HiveMetaHookLoader(org.apache.hadoop.hive.metastore.HiveMetaHookLoader) HiveConf(org.apache.hadoop.hive.conf.HiveConf) HiveVersionInfo(org.apache.hive.common.util.HiveVersionInfo) HIVE_LOAD_DYNAMIC_PARTITIONS_SCAN_SPECIFIC_PARTITIONS(org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_LOAD_DYNAMIC_PARTITIONS_SCAN_SPECIFIC_PARTITIONS) SQLDefaultConstraint(org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint) ShimLoader(org.apache.hadoop.hive.shims.ShimLoader) BitSet(java.util.BitSet) PartitionDropOptions(org.apache.hadoop.hive.metastore.PartitionDropOptions) NotNullConstraintsRequest(org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest) Arrays(java.util.Arrays) WMResourcePlan(org.apache.hadoop.hive.metastore.api.WMResourcePlan) GetPartitionResponse(org.apache.hadoop.hive.metastore.api.GetPartitionResponse) CreateTableAutomaticGrant(org.apache.hadoop.hive.ql.session.CreateTableAutomaticGrant) HIVE_WRITE_NOTIFICATION_MAX_BATCH_SIZE(org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_WRITE_NOTIFICATION_MAX_BATCH_SIZE) InetAddress(java.net.InetAddress) UniqueConstraintsRequest(org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) HiveMaterializedViewUtils.extractTable(org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewUtils.extractTable) GetPartitionsPsWithAuthResponse(org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthResponse) Set(java.util.Set) STRING_TYPE_NAME(org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME) Executors(java.util.concurrent.Executors) HiveTxnManager(org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager) HiveMetaStoreClient(org.apache.hadoop.hive.metastore.HiveMetaStoreClient) Callable(java.util.concurrent.Callable) SQLForeignKey(org.apache.hadoop.hive.metastore.api.SQLForeignKey) FsAction(org.apache.hadoop.fs.permission.FsAction) ArrayList(java.util.ArrayList) LinkedHashSet(java.util.LinkedHashSet) Nullable(javax.annotation.Nullable) PrincipalPrivilegeSet(org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet) Materialization(org.apache.hadoop.hive.metastore.api.Materialization) GetPartitionsPsWithAuthRequest(org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthRequest) SOFT_DELETE_TABLE(org.apache.hadoop.hive.common.AcidConstants.SOFT_DELETE_TABLE) FunctionUtils(org.apache.hadoop.hive.ql.exec.FunctionUtils) TException(org.apache.thrift.TException) ValidTxnWriteIdList(org.apache.hadoop.hive.common.ValidTxnWriteIdList) ValidTxnList(org.apache.hadoop.hive.common.ValidTxnList) TableType(org.apache.hadoop.hive.metastore.TableType) HiveObjectRef(org.apache.hadoop.hive.metastore.api.HiveObjectRef) SerDeException(org.apache.hadoop.hive.serde2.SerDeException) ExprNodeGenericFuncDesc(org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc) GetRoleGrantsForPrincipalResponse(org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse) LoggerFactory(org.slf4j.LoggerFactory) RolePrincipalGrant(org.apache.hadoop.hive.metastore.api.RolePrincipalGrant) ByteBuffer(java.nio.ByteBuffer) Warehouse(org.apache.hadoop.hive.metastore.Warehouse) ALL(org.apache.hadoop.hive.ql.metadata.HiveRelOptMaterialization.RewriteAlgorithm.ALL) HiveStatsUtils(org.apache.hadoop.hive.common.HiveStatsUtils) InputFormat(org.apache.hadoop.mapred.InputFormat) Path(org.apache.hadoop.fs.Path) ValidReaderWriteIdList(org.apache.hadoop.hive.common.ValidReaderWriteIdList) 
FireEventRequestData(org.apache.hadoop.hive.metastore.api.FireEventRequestData) Splitter(com.google.common.base.Splitter) PrivilegeBag(org.apache.hadoop.hive.metastore.api.PrivilegeBag) MetaStoreUtils(org.apache.hadoop.hive.metastore.utils.MetaStoreUtils) ImmutableMap(com.google.common.collect.ImmutableMap) ColumnStatisticsObj(org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj) HiveMetaHook(org.apache.hadoop.hive.metastore.HiveMetaHook) AggrStats(org.apache.hadoop.hive.metastore.api.AggrStats) Sets(com.google.common.collect.Sets) FileNotFoundException(java.io.FileNotFoundException) ColumnStatistics(org.apache.hadoop.hive.metastore.api.ColumnStatistics) List(java.util.List) DbTxnManager(org.apache.hadoop.hive.ql.lockmgr.DbTxnManager) DFSUtilClient(org.apache.hadoop.hdfs.DFSUtilClient) MetadataPpdResult(org.apache.hadoop.hive.metastore.api.MetadataPpdResult) Optional(java.util.Optional) HiveMetaStoreUtils(org.apache.hadoop.hive.metastore.HiveMetaStoreUtils) PartitionSpec(org.apache.hadoop.hive.metastore.api.PartitionSpec) HiveObjectPrivilege(org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) GetRoleGrantsForPrincipalRequest(org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest) HashMap(java.util.HashMap) GetTableRequest(org.apache.hadoop.hive.metastore.api.GetTableRequest) LimitedPrivate(org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate) Utilities(org.apache.hadoop.hive.ql.exec.Utilities) TableSnapshot(org.apache.hadoop.hive.ql.io.AcidUtils.TableSnapshot) JDODataStoreException(javax.jdo.JDODataStoreException) ObjectUtils(org.apache.commons.lang3.ObjectUtils) TableName(org.apache.hadoop.hive.common.TableName) SQLNotNullConstraint(org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) WMTrigger(org.apache.hadoop.hive.metastore.api.WMTrigger) Iterator(java.util.Iterator) WMValidateResourcePlanResponse(org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse) PartitionDetails(org.apache.hadoop.hive.ql.exec.Utilities.PartitionDetails) SkewedInfo(org.apache.hadoop.hive.metastore.api.SkewedInfo) Maps(com.google.common.collect.Maps) TimeUnit(java.util.concurrent.TimeUnit) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) META_TABLE_STORAGE(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE) GetPartitionNamesPsRequest(org.apache.hadoop.hive.metastore.api.GetPartitionNamesPsRequest) WriteNotificationLogRequest(org.apache.hadoop.hive.metastore.api.WriteNotificationLogRequest) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Collections(java.util.Collections) Database(org.apache.hadoop.hive.metastore.api.Database) Function(org.apache.hadoop.hive.metastore.api.Function) HiveMaterializedViewUtils.extractTable(org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewUtils.extractTable) TableSnapshot(org.apache.hadoop.hive.ql.io.AcidUtils.TableSnapshot) AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) IOException(java.io.IOException) UnknownHostException(java.net.UnknownHostException) ExecutionException(java.util.concurrent.ExecutionException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) 
HiveMetaException(org.apache.hadoop.hive.metastore.HiveMetaException) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) TApplicationException(org.apache.thrift.TApplicationException) TException(org.apache.thrift.TException) SerDeException(org.apache.hadoop.hive.serde2.SerDeException) FileNotFoundException(java.io.FileNotFoundException) JDODataStoreException(javax.jdo.JDODataStoreException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) AcidUtils(org.apache.hadoop.hive.ql.io.AcidUtils)

Aggregations

TableName (org.apache.hadoop.hive.common.TableName)47 DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork)22 ArrayList (java.util.ArrayList)16 SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException)15 Table (org.apache.hadoop.hive.ql.metadata.Table)14 FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema)11 MetaException (org.apache.hadoop.hive.metastore.api.MetaException)11 HiveTableName (org.apache.hadoop.hive.ql.parse.HiveTableName)10 HashMap (java.util.HashMap)9 HiveException (org.apache.hadoop.hive.ql.metadata.HiveException)9 HashSet (java.util.HashSet)8 IOException (java.io.IOException)7 List (java.util.List)7 Path (org.apache.hadoop.fs.Path)7 SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint)7 SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint)7 SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint)7 SQLUniqueConstraint (org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint)7 ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode)7 SourceTable (org.apache.hadoop.hive.metastore.api.SourceTable)6