
Example 1 with AlterDatabaseDesc

Use of org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc in project hive by apache.

The class DDLTask, method execute:

@Override
public int execute(DriverContext driverContext) {
    if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) {
        return 0;
    }
    // Create the db
    Hive db;
    try {
        db = Hive.get(conf);
        CreateDatabaseDesc createDatabaseDesc = work.getCreateDatabaseDesc();
        if (null != createDatabaseDesc) {
            return createDatabase(db, createDatabaseDesc);
        }
        DropDatabaseDesc dropDatabaseDesc = work.getDropDatabaseDesc();
        if (dropDatabaseDesc != null) {
            return dropDatabase(db, dropDatabaseDesc);
        }
        LockDatabaseDesc lockDatabaseDesc = work.getLockDatabaseDesc();
        if (lockDatabaseDesc != null) {
            return lockDatabase(db, lockDatabaseDesc);
        }
        UnlockDatabaseDesc unlockDatabaseDesc = work.getUnlockDatabaseDesc();
        if (unlockDatabaseDesc != null) {
            return unlockDatabase(db, unlockDatabaseDesc);
        }
        SwitchDatabaseDesc switchDatabaseDesc = work.getSwitchDatabaseDesc();
        if (switchDatabaseDesc != null) {
            return switchDatabase(db, switchDatabaseDesc);
        }
        DescDatabaseDesc descDatabaseDesc = work.getDescDatabaseDesc();
        if (descDatabaseDesc != null) {
            return descDatabase(db, descDatabaseDesc);
        }
        AlterDatabaseDesc alterDatabaseDesc = work.getAlterDatabaseDesc();
        if (alterDatabaseDesc != null) {
            return alterDatabase(db, alterDatabaseDesc);
        }
        CreateTableDesc crtTbl = work.getCreateTblDesc();
        if (crtTbl != null) {
            return createTable(db, crtTbl);
        }
        CreateIndexDesc crtIndex = work.getCreateIndexDesc();
        if (crtIndex != null) {
            return createIndex(db, crtIndex);
        }
        AlterIndexDesc alterIndex = work.getAlterIndexDesc();
        if (alterIndex != null) {
            return alterIndex(db, alterIndex);
        }
        DropIndexDesc dropIdx = work.getDropIdxDesc();
        if (dropIdx != null) {
            return dropIndex(db, dropIdx);
        }
        CreateTableLikeDesc crtTblLike = work.getCreateTblLikeDesc();
        if (crtTblLike != null) {
            return createTableLike(db, crtTblLike);
        }
        DropTableDesc dropTbl = work.getDropTblDesc();
        if (dropTbl != null) {
            dropTableOrPartitions(db, dropTbl);
            return 0;
        }
        AlterTableDesc alterTbl = work.getAlterTblDesc();
        if (alterTbl != null) {
            if (alterTbl.getOp() == AlterTableTypes.DROPCONSTRAINT) {
                return dropConstraint(db, alterTbl);
            } else if (alterTbl.getOp() == AlterTableTypes.ADDCONSTRAINT) {
                return addConstraint(db, alterTbl);
            } else {
                return alterTable(db, alterTbl);
            }
        }
        CreateViewDesc crtView = work.getCreateViewDesc();
        if (crtView != null) {
            return createView(db, crtView);
        }
        AddPartitionDesc addPartitionDesc = work.getAddPartitionDesc();
        if (addPartitionDesc != null) {
            return addPartitions(db, addPartitionDesc);
        }
        RenamePartitionDesc renamePartitionDesc = work.getRenamePartitionDesc();
        if (renamePartitionDesc != null) {
            return renamePartition(db, renamePartitionDesc);
        }
        AlterTableSimpleDesc simpleDesc = work.getAlterTblSimpleDesc();
        if (simpleDesc != null) {
            if (simpleDesc.getType() == AlterTableTypes.TOUCH) {
                return touch(db, simpleDesc);
            } else if (simpleDesc.getType() == AlterTableTypes.ARCHIVE) {
                return archive(db, simpleDesc, driverContext);
            } else if (simpleDesc.getType() == AlterTableTypes.UNARCHIVE) {
                return unarchive(db, simpleDesc);
            } else if (simpleDesc.getType() == AlterTableTypes.COMPACT) {
                return compact(db, simpleDesc);
            }
        }
        MsckDesc msckDesc = work.getMsckDesc();
        if (msckDesc != null) {
            return msck(db, msckDesc);
        }
        DescTableDesc descTbl = work.getDescTblDesc();
        if (descTbl != null) {
            return describeTable(db, descTbl);
        }
        DescFunctionDesc descFunc = work.getDescFunctionDesc();
        if (descFunc != null) {
            return describeFunction(db, descFunc);
        }
        ShowDatabasesDesc showDatabases = work.getShowDatabasesDesc();
        if (showDatabases != null) {
            return showDatabases(db, showDatabases);
        }
        ShowTablesDesc showTbls = work.getShowTblsDesc();
        if (showTbls != null) {
            return showTablesOrViews(db, showTbls);
        }
        ShowColumnsDesc showCols = work.getShowColumnsDesc();
        if (showCols != null) {
            return showColumns(db, showCols);
        }
        ShowTableStatusDesc showTblStatus = work.getShowTblStatusDesc();
        if (showTblStatus != null) {
            return showTableStatus(db, showTblStatus);
        }
        ShowTblPropertiesDesc showTblProperties = work.getShowTblPropertiesDesc();
        if (showTblProperties != null) {
            return showTableProperties(db, showTblProperties);
        }
        ShowFunctionsDesc showFuncs = work.getShowFuncsDesc();
        if (showFuncs != null) {
            return showFunctions(db, showFuncs);
        }
        ShowLocksDesc showLocks = work.getShowLocksDesc();
        if (showLocks != null) {
            return showLocks(db, showLocks);
        }
        ShowCompactionsDesc compactionsDesc = work.getShowCompactionsDesc();
        if (compactionsDesc != null) {
            return showCompactions(db, compactionsDesc);
        }
        ShowTxnsDesc txnsDesc = work.getShowTxnsDesc();
        if (txnsDesc != null) {
            return showTxns(db, txnsDesc);
        }
        AbortTxnsDesc abortTxnsDesc = work.getAbortTxnsDesc();
        if (abortTxnsDesc != null) {
            return abortTxns(db, abortTxnsDesc);
        }
        LockTableDesc lockTbl = work.getLockTblDesc();
        if (lockTbl != null) {
            return lockTable(db, lockTbl);
        }
        UnlockTableDesc unlockTbl = work.getUnlockTblDesc();
        if (unlockTbl != null) {
            return unlockTable(db, unlockTbl);
        }
        ShowPartitionsDesc showParts = work.getShowPartsDesc();
        if (showParts != null) {
            return showPartitions(db, showParts);
        }
        ShowCreateDatabaseDesc showCreateDb = work.getShowCreateDbDesc();
        if (showCreateDb != null) {
            return showCreateDatabase(db, showCreateDb);
        }
        ShowCreateTableDesc showCreateTbl = work.getShowCreateTblDesc();
        if (showCreateTbl != null) {
            return showCreateTable(db, showCreateTbl);
        }
        ShowConfDesc showConf = work.getShowConfDesc();
        if (showConf != null) {
            return showConf(db, showConf);
        }
        RoleDDLDesc roleDDLDesc = work.getRoleDDLDesc();
        if (roleDDLDesc != null) {
            return roleDDL(db, roleDDLDesc);
        }
        GrantDesc grantDesc = work.getGrantDesc();
        if (grantDesc != null) {
            return grantOrRevokePrivileges(db, grantDesc.getPrincipals(), grantDesc.getPrivileges(), grantDesc.getPrivilegeSubjectDesc(), grantDesc.getGrantor(), grantDesc.getGrantorType(), grantDesc.isGrantOption(), true);
        }
        RevokeDesc revokeDesc = work.getRevokeDesc();
        if (revokeDesc != null) {
            return grantOrRevokePrivileges(db, revokeDesc.getPrincipals(), revokeDesc.getPrivileges(), revokeDesc.getPrivilegeSubjectDesc(), null, null, revokeDesc.isGrantOption(), false);
        }
        ShowGrantDesc showGrantDesc = work.getShowGrantDesc();
        if (showGrantDesc != null) {
            return showGrants(db, showGrantDesc);
        }
        GrantRevokeRoleDDL grantOrRevokeRoleDDL = work.getGrantRevokeRoleDDL();
        if (grantOrRevokeRoleDDL != null) {
            return grantOrRevokeRole(db, grantOrRevokeRoleDDL);
        }
        ShowIndexesDesc showIndexes = work.getShowIndexesDesc();
        if (showIndexes != null) {
            return showIndexes(db, showIndexes);
        }
        AlterTablePartMergeFilesDesc mergeFilesDesc = work.getMergeFilesDesc();
        if (mergeFilesDesc != null) {
            return mergeFiles(db, mergeFilesDesc, driverContext);
        }
        AlterTableAlterPartDesc alterPartDesc = work.getAlterTableAlterPartDesc();
        if (alterPartDesc != null) {
            return alterTableAlterPart(db, alterPartDesc);
        }
        TruncateTableDesc truncateTableDesc = work.getTruncateTblDesc();
        if (truncateTableDesc != null) {
            return truncateTable(db, truncateTableDesc);
        }
        AlterTableExchangePartition alterTableExchangePartition = work.getAlterTableExchangePartition();
        if (alterTableExchangePartition != null) {
            return exchangeTablePartition(db, alterTableExchangePartition);
        }
        CacheMetadataDesc cacheMetadataDesc = work.getCacheMetadataDesc();
        if (cacheMetadataDesc != null) {
            return cacheMetadata(db, cacheMetadataDesc);
        }
        InsertTableDesc insertTableDesc = work.getInsertTableDesc();
        if (insertTableDesc != null) {
            return insertCommitWork(db, insertTableDesc);
        }
        PreInsertTableDesc preInsertTableDesc = work.getPreInsertTableDesc();
        if (preInsertTableDesc != null) {
            return preInsertWork(db, preInsertTableDesc);
        }
    } catch (Throwable e) {
        failed(e);
        return 1;
    }
    assert false;
    return 0;
}
Also used : DropDatabaseDesc(org.apache.hadoop.hive.ql.plan.DropDatabaseDesc) LockTableDesc(org.apache.hadoop.hive.ql.plan.LockTableDesc) UnlockTableDesc(org.apache.hadoop.hive.ql.plan.UnlockTableDesc) ShowTblPropertiesDesc(org.apache.hadoop.hive.ql.plan.ShowTblPropertiesDesc) RenamePartitionDesc(org.apache.hadoop.hive.ql.plan.RenamePartitionDesc) ShowDatabasesDesc(org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc) DescFunctionDesc(org.apache.hadoop.hive.ql.plan.DescFunctionDesc) CreateViewDesc(org.apache.hadoop.hive.ql.plan.CreateViewDesc) ShowTxnsDesc(org.apache.hadoop.hive.ql.plan.ShowTxnsDesc) AlterDatabaseDesc(org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc) ShowCompactionsDesc(org.apache.hadoop.hive.ql.plan.ShowCompactionsDesc) ShowGrantDesc(org.apache.hadoop.hive.ql.plan.ShowGrantDesc) GrantDesc(org.apache.hadoop.hive.ql.plan.GrantDesc) ShowCreateTableDesc(org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc) AlterTableSimpleDesc(org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc) DescTableDesc(org.apache.hadoop.hive.ql.plan.DescTableDesc) GrantRevokeRoleDDL(org.apache.hadoop.hive.ql.plan.GrantRevokeRoleDDL) TruncateTableDesc(org.apache.hadoop.hive.ql.plan.TruncateTableDesc) DropTableDesc(org.apache.hadoop.hive.ql.plan.DropTableDesc) ShowTablesDesc(org.apache.hadoop.hive.ql.plan.ShowTablesDesc) Hive(org.apache.hadoop.hive.ql.metadata.Hive) ShowFunctionsDesc(org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc) AbortTxnsDesc(org.apache.hadoop.hive.ql.plan.AbortTxnsDesc) ShowCreateDatabaseDesc(org.apache.hadoop.hive.ql.plan.ShowCreateDatabaseDesc) AlterTableExchangePartition(org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition) CacheMetadataDesc(org.apache.hadoop.hive.ql.plan.CacheMetadataDesc) UnlockDatabaseDesc(org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc) RevokeDesc(org.apache.hadoop.hive.ql.plan.RevokeDesc) AlterTableDesc(org.apache.hadoop.hive.ql.plan.AlterTableDesc) MsckDesc(org.apache.hadoop.hive.ql.plan.MsckDesc) ShowLocksDesc(org.apache.hadoop.hive.ql.plan.ShowLocksDesc) DropIndexDesc(org.apache.hadoop.hive.ql.plan.DropIndexDesc) SwitchDatabaseDesc(org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc) ShowConfDesc(org.apache.hadoop.hive.ql.plan.ShowConfDesc) ShowPartitionsDesc(org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc) CreateIndexDesc(org.apache.hadoop.hive.ql.plan.CreateIndexDesc) CreateTableLikeDesc(org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc) ShowTableStatusDesc(org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc) PreInsertTableDesc(org.apache.hadoop.hive.ql.parse.PreInsertTableDesc) AlterTableAlterPartDesc(org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc) AlterIndexDesc(org.apache.hadoop.hive.ql.plan.AlterIndexDesc) ShowIndexesDesc(org.apache.hadoop.hive.ql.plan.ShowIndexesDesc) RoleDDLDesc(org.apache.hadoop.hive.ql.plan.RoleDDLDesc) AlterTablePartMergeFilesDesc(org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc) CreateTableDesc(org.apache.hadoop.hive.ql.plan.CreateTableDesc) LockDatabaseDesc(org.apache.hadoop.hive.ql.plan.LockDatabaseDesc) CreateDatabaseDesc(org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc) InsertTableDesc(org.apache.hadoop.hive.ql.plan.InsertTableDesc) AddPartitionDesc(org.apache.hadoop.hive.ql.plan.AddPartitionDesc) DescDatabaseDesc(org.apache.hadoop.hive.ql.plan.DescDatabaseDesc) ShowColumnsDesc(org.apache.hadoop.hive.ql.plan.ShowColumnsDesc)
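
The execute method above is a plain dispatcher: the DDLWork it was built with carries at most one non-null descriptor, and the first matching branch runs. The sketch below shows, under stated assumptions, how a caller could hand it an AlterDatabaseDesc. The DDLWork(inputs, outputs, desc) and TaskFactory.get(work, conf) calls are the same ones used in Example 3; the helper name, the database name and the property key/value are illustrative inventions.

import java.io.Serializable;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.DDLWork;

// Hypothetical helper, not part of the Hive sources quoted on this page.
static Task<? extends Serializable> buildAlterDbTask(HiveConf conf) {
    HashSet<ReadEntity> inputs = new HashSet<ReadEntity>();
    HashSet<WriteEntity> outputs = new HashSet<WriteEntity>();
    // Illustrative property; any key/value pair flows through the same way.
    Map<String, String> props = new HashMap<String, String>();
    props.put("repl.last.id", "42");
    // Property-bag constructor, as used in Examples 3 and 4 on this page.
    AlterDatabaseDesc alterDbDesc = new AlterDatabaseDesc("sales_db", props);
    // DDLWork(inputs, outputs, desc) and TaskFactory.get(work, conf) appear verbatim in Example 3;
    // executing the resulting DDLTask ends up in the work.getAlterDatabaseDesc() branch of execute() above.
    return TaskFactory.get(new DDLWork(inputs, outputs, alterDbDesc), conf);
}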

Example 2 with AlterDatabaseDesc

Use of org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc in project hive by apache.

The class DDLSemanticAnalyzer, method analyzeAlterDatabaseOwner:

private void analyzeAlterDatabaseOwner(ASTNode ast) throws SemanticException {
    String dbName = getUnescapedName((ASTNode) ast.getChild(0));
    PrincipalDesc principalDesc = AuthorizationParseUtils.getPrincipalDesc((ASTNode) ast.getChild(1));
    // The syntax should not allow these fields to be null, but let's verify
    String nullCmdMsg = "can't be null in alter database set owner command";
    if (principalDesc.getName() == null) {
        throw new SemanticException("Owner name " + nullCmdMsg);
    }
    if (principalDesc.getType() == null) {
        throw new SemanticException("Owner type " + nullCmdMsg);
    }
    AlterDatabaseDesc alterDesc = new AlterDatabaseDesc(dbName, principalDesc);
    addAlterDbDesc(alterDesc);
}
Also used : PrincipalDesc(org.apache.hadoop.hive.ql.plan.PrincipalDesc) AlterDatabaseDesc(org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc)
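
The helper addAlterDbDesc is called here but its body is not quoted on this page. Below is a minimal sketch of what such a helper can look like, assuming it lives in DDLSemanticAnalyzer and reuses the DDLWork/TaskFactory pattern of Example 3; the getDatabase lookup and the WriteEntity registration are assumptions, not quoted Hive code.

// Assumed reconstruction of addAlterDbDesc; Database is org.apache.hadoop.hive.metastore.api.Database,
// WriteEntity is org.apache.hadoop.hive.ql.hooks.WriteEntity.
private void addAlterDbDesc(AlterDatabaseDesc alterDesc) throws SemanticException {
    // Assumption: look up the target database and record it as a write entity
    // so authorization and locking can see the DDL target.
    Database database = getDatabase(alterDesc.getDatabaseName());
    outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_NO_LOCK));
    // Wrap the descriptor in a DDLWork and register it as a root task,
    // the same pattern Example 3 spells out explicitly.
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterDesc), conf));
}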

Example 3 with AlterDatabaseDesc

Use of org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc in project hive by apache.

The class ReplicationSemanticAnalyzer, method analyzeReplLoad:

/*
   * Example dump dirs we need to be able to handle :
   *
   * for: hive.repl.rootdir = staging/
   * Then, repl dumps will be created in staging/<dumpdir>
   *
   * single-db-dump: staging/blah12345 will contain a db dir for the db specified
   *  blah12345/
   *   default/
   *    _metadata
   *    tbl1/
   *      _metadata
   *      dt=20160907/
   *        _files
   *    tbl2/
   *    tbl3/
   *    unptn_tbl/
   *      _metadata
   *      _files
   *
   * multi-db-dump: staging/bar12347 will contain dirs for each db covered
   * staging/
   *  bar12347/
   *   default/
   *     ...
   *   sales/
   *     ...
   *
   * single table-dump: staging/baz123 will contain a table object dump inside
   * staging/
   *  baz123/
   *    _metadata
   *    dt=20150931/
   *      _files
   *
   * incremental dump : staging/blue123 will contain dirs for each event inside.
   * staging/
   *  blue123/
   *    34/
   *    35/
   *    36/
   */
private void analyzeReplLoad(ASTNode ast) throws SemanticException {
    LOG.debug("ReplSemanticAnalyzer.analyzeReplLoad: " + String.valueOf(dbNameOrPattern) + "." + String.valueOf(tblNameOrPattern) + " from " + String.valueOf(path));
    try {
        Path loadPath = new Path(path);
        final FileSystem fs = loadPath.getFileSystem(conf);
        if (!fs.exists(loadPath)) {
            // supposed dump path does not exist.
            throw new FileNotFoundException(loadPath.toUri().toString());
        }
        // Now, the dumped path can be one of three things:
        // a) It can be a db dump, in which case we expect a set of dirs, each with a
        // db name, and with a _metadata file in each, and table dirs inside that.
        // b) It can be a table dump dir, in which case we expect a _metadata dump of
        // a table in question in the dir, and individual ptn dir hierarchy.
        // c) A dump can be an incremental dump, which means we have several subdirs
        // each of which has the evid as the dir name, and each of which corresponds
        // to an event-level dump. Currently, only CREATE_TABLE and ADD_PARTITION are
        // handled, so all of these dumps will be at a table/ptn level.
        // For incremental repl, we will have individual events which can
        // be other things like roles and fns as well.
        // At this point, all dump dirs should contain a _dumpmetadata file that
        // tells us what is inside that dumpdir.
        DumpMetaData dmd = new DumpMetaData(loadPath);
        boolean evDump = false;
        if (dmd.isIncrementalDump()) {
            LOG.debug("{} contains an incremental dump", loadPath);
            evDump = true;
        } else {
            LOG.debug("{} contains an bootstrap dump", loadPath);
        }
        if ((!evDump) && (tblNameOrPattern != null) && !(tblNameOrPattern.isEmpty())) {
            // not an event dump, and table name pattern specified, this has to be a tbl-level dump
            rootTasks.addAll(analyzeTableLoad(dbNameOrPattern, tblNameOrPattern, path, null, null, null));
            return;
        }
        FileStatus[] srcs = LoadSemanticAnalyzer.matchFilesOrDir(fs, loadPath);
        if (srcs == null || (srcs.length == 0)) {
            LOG.warn("Nothing to load at {}", loadPath.toUri().toString());
            return;
        }
        FileStatus[] dirsInLoadPath = fs.listStatus(loadPath, EximUtil.getDirectoryFilter(fs));
        if ((dirsInLoadPath == null) || (dirsInLoadPath.length == 0)) {
            throw new IllegalArgumentException("No data to load in path " + loadPath.toUri().toString());
        }
        if (!evDump) {
            // not an event dump, not a table dump - thus, a db dump
            if ((dbNameOrPattern != null) && (dirsInLoadPath.length > 1)) {
                LOG.debug("Found multiple dirs when we expected 1:");
                for (FileStatus d : dirsInLoadPath) {
                    LOG.debug("> " + d.getPath().toUri().toString());
                }
                throw new IllegalArgumentException("Multiple dirs in " + loadPath.toUri().toString() + " does not correspond to REPL LOAD expecting to load to a singular destination point.");
            }
            for (FileStatus dir : dirsInLoadPath) {
                analyzeDatabaseLoad(dbNameOrPattern, fs, dir);
            }
        } else {
            // event dump, each subdir is an individual event dump.
            // we need to guarantee that the directory listing we got is in order of evid.
            Arrays.sort(dirsInLoadPath);
            Task<? extends Serializable> evTaskRoot = TaskFactory.get(new DependencyCollectionWork(), conf);
            Task<? extends Serializable> taskChainTail = evTaskRoot;
            int evstage = 0;
            Long lastEvid = null;
            Map<String, Long> dbsUpdated = new ReplicationSpec.ReplStateMap<String, Long>();
            Map<String, Long> tablesUpdated = new ReplicationSpec.ReplStateMap<String, Long>();
            for (FileStatus dir : dirsInLoadPath) {
                LOG.debug("Loading event from {} to {}.{}", dir.getPath().toUri(), dbNameOrPattern, tblNameOrPattern);
                // event loads will behave similar to table loads, with one crucial difference
                // precursor order is strict, and each event must be processed after the previous one.
                // The way we handle this strict order is as follows:
                // First, we start with a taskChainTail which is a dummy noop task (a DependencyCollectionTask)
                // at the head of our event chain. For each event we process, we tell analyzeTableLoad to
                // create tasks that use the taskChainTail as a dependency. Then, we collect all those tasks
                // and introduce a new barrier task(also a DependencyCollectionTask) which depends on all
                // these tasks. Then, this barrier task becomes our new taskChainTail. Thus, we get a set of
                // tasks as follows:
                //
                //                 --->ev1.task1--                          --->ev2.task1--
                //                /               \                        /               \
                //  evTaskRoot-->*---->ev1.task2---*--> ev1.barrierTask-->*---->ev2.task2---*->evTaskChainTail
                //                \               /
                //                 --->ev1.task3--
                //
                // Once this entire chain is generated, we add evTaskRoot to rootTasks, so as to execute the
                // entire chain
                String locn = dir.getPath().toUri().toString();
                DumpMetaData eventDmd = new DumpMetaData(new Path(locn));
                List<Task<? extends Serializable>> evTasks = analyzeEventLoad(dbNameOrPattern, tblNameOrPattern, locn, taskChainTail, dbsUpdated, tablesUpdated, eventDmd);
                LOG.debug("evstage#{} got {} tasks", evstage, evTasks != null ? evTasks.size() : 0);
                if ((evTasks != null) && (!evTasks.isEmpty())) {
                    Task<? extends Serializable> barrierTask = TaskFactory.get(new DependencyCollectionWork(), conf);
                    for (Task<? extends Serializable> t : evTasks) {
                        t.addDependentTask(barrierTask);
                        LOG.debug("Added {}:{} as a precursor of barrier task {}:{}", t.getClass(), t.getId(), barrierTask.getClass(), barrierTask.getId());
                    }
                    LOG.debug("Updated taskChainTail from {}{} to {}{}", taskChainTail.getClass(), taskChainTail.getId(), barrierTask.getClass(), barrierTask.getId());
                    taskChainTail = barrierTask;
                    evstage++;
                    lastEvid = dmd.eventTo;
                }
            }
            if (evstage > 0) {
                if ((tblNameOrPattern != null) && (!tblNameOrPattern.isEmpty())) {
                    // if tblNameOrPattern is specified, then dbNameOrPattern will be too, and
                    // thus, this is a table-level REPL LOAD - only table needs updating.
                    // If any of the individual events logged any other dbs as having changed,
                    // null them out.
                    dbsUpdated.clear();
                    tablesUpdated.clear();
                    tablesUpdated.put(dbNameOrPattern + "." + tblNameOrPattern, lastEvid);
                } else if ((dbNameOrPattern != null) && (!dbNameOrPattern.isEmpty())) {
                    // if dbNameOrPattern is specified and tblNameOrPattern isn't, this is a
                    // db-level update, and thus, the database needs updating.
                    dbsUpdated.clear();
                    dbsUpdated.put(dbNameOrPattern, lastEvid);
                }
            }
            for (String tableName : tablesUpdated.keySet()) {
                // weird - AlterTableDesc requires a HashMap to update props instead of a Map.
                HashMap<String, String> mapProp = new HashMap<String, String>();
                mapProp.put(ReplicationSpec.KEY.CURR_STATE_ID.toString(), tablesUpdated.get(tableName).toString());
                AlterTableDesc alterTblDesc = new AlterTableDesc(AlterTableDesc.AlterTableTypes.ADDPROPS, null, false);
                alterTblDesc.setProps(mapProp);
                alterTblDesc.setOldName(tableName);
                Task<? extends Serializable> updateReplIdTask = TaskFactory.get(new DDLWork(inputs, outputs, alterTblDesc), conf);
                taskChainTail.addDependentTask(updateReplIdTask);
                taskChainTail = updateReplIdTask;
            }
            for (String dbName : dbsUpdated.keySet()) {
                Map<String, String> mapProp = new HashMap<String, String>();
                mapProp.put(ReplicationSpec.KEY.CURR_STATE_ID.toString(), dbsUpdated.get(dbName).toString());
                AlterDatabaseDesc alterDbDesc = new AlterDatabaseDesc(dbName, mapProp);
                Task<? extends Serializable> updateReplIdTask = TaskFactory.get(new DDLWork(inputs, outputs, alterDbDesc), conf);
                taskChainTail.addDependentTask(updateReplIdTask);
                taskChainTail = updateReplIdTask;
            }
            rootTasks.add(evTaskRoot);
        }
    } catch (Exception e) {
        // TODO : simple wrap & rethrow for now, clean up with error codes
        throw new SemanticException(e);
    }
}
Also used : AlterTableDesc(org.apache.hadoop.hive.ql.plan.AlterTableDesc) Task(org.apache.hadoop.hive.ql.exec.Task) Serializable(java.io.Serializable) FileStatus(org.apache.hadoop.fs.FileStatus) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) FileNotFoundException(java.io.FileNotFoundException) FileSystem(org.apache.hadoop.fs.FileSystem) AlterDatabaseDesc(org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc) Path(org.apache.hadoop.fs.Path) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) IOException(java.io.IOException) DependencyCollectionWork(org.apache.hadoop.hive.ql.plan.DependencyCollectionWork) DDLWork(org.apache.hadoop.hive.ql.plan.DDLWork)
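
The barrier pattern that the long comment and the ASCII diagram describe can be isolated into a short sketch. It assumes a hypothetical evTasksPerEvent list standing in for the per-event task lists that analyzeEventLoad produces above; the DependencyCollectionWork, TaskFactory.get and addDependentTask calls are exactly the ones visible in the quoted method.

// evTasksPerEvent is a hypothetical List<List<Task<? extends Serializable>>>,
// one inner list per event, already sorted by event id.
Task<? extends Serializable> evTaskRoot = TaskFactory.get(new DependencyCollectionWork(), conf);
Task<? extends Serializable> taskChainTail = evTaskRoot;

for (List<Task<? extends Serializable>> evTasks : evTasksPerEvent) {
    if (evTasks == null || evTasks.isEmpty()) {
        continue; // nothing was generated for this event
    }
    // All tasks of one event feed a fresh barrier (another DependencyCollectionWork task),
    // and that barrier becomes the dependency root for the next event's tasks.
    Task<? extends Serializable> barrierTask = TaskFactory.get(new DependencyCollectionWork(), conf);
    for (Task<? extends Serializable> t : evTasks) {
        t.addDependentTask(barrierTask);
    }
    // In the original, taskChainTail later also receives the repl-state update tasks.
    taskChainTail = barrierTask;
}
// Executing evTaskRoot walks the whole chain in event order, matching the diagram above.
rootTasks.add(evTaskRoot);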

Example 4 with AlterDatabaseDesc

Use of org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc in project hive by apache.

The class DDLSemanticAnalyzer, method analyzeAlterDatabaseProperties:

private void analyzeAlterDatabaseProperties(ASTNode ast) throws SemanticException {
    String dbName = unescapeIdentifier(ast.getChild(0).getText());
    Map<String, String> dbProps = null;
    for (int i = 1; i < ast.getChildCount(); i++) {
        ASTNode childNode = (ASTNode) ast.getChild(i);
        switch(childNode.getToken().getType()) {
            case HiveParser.TOK_DATABASEPROPERTIES:
                dbProps = DDLSemanticAnalyzer.getProps((ASTNode) childNode.getChild(0));
                break;
            default:
                throw new SemanticException("Unrecognized token in CREATE DATABASE statement");
        }
    }
    AlterDatabaseDesc alterDesc = new AlterDatabaseDesc(dbName, dbProps);
    addAlterDbDesc(alterDesc);
}
Also used : AlterDatabaseDesc(org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc)
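
Two AlterDatabaseDesc constructor forms appear across these examples: the owner form of Example 2 and the property-bag form of Examples 3 and 4. A side-by-side sketch follows, with illustrative database names and values; the ALTER DATABASE statements in the comments are the commands that would normally reach these analyzer methods, and the PrincipalDesc(name, type) constructor is assumed from its import in Example 2 (PrincipalType from org.apache.hadoop.hive.metastore.api).

// Owner form, as constructed in Example 2.
// Typically reached by:  ALTER DATABASE sales_db SET OWNER USER alice;   (illustrative statement)
PrincipalDesc owner = new PrincipalDesc("alice", PrincipalType.USER);
AlterDatabaseDesc setOwner = new AlterDatabaseDesc("sales_db", owner);

// Property-bag form, as constructed in Example 4 above and in the replication path of Example 3.
// Typically reached by:  ALTER DATABASE sales_db SET DBPROPERTIES ('k' = 'v');   (illustrative statement)
Map<String, String> props = new HashMap<String, String>();
props.put("k", "v");
AlterDatabaseDesc setProps = new AlterDatabaseDesc("sales_db", props);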

Aggregations

AlterDatabaseDesc (org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc) 4
AlterTableDesc (org.apache.hadoop.hive.ql.plan.AlterTableDesc) 2
FileNotFoundException (java.io.FileNotFoundException) 1
IOException (java.io.IOException) 1
Serializable (java.io.Serializable) 1
HashMap (java.util.HashMap) 1
LinkedHashMap (java.util.LinkedHashMap) 1
FileStatus (org.apache.hadoop.fs.FileStatus) 1
FileSystem (org.apache.hadoop.fs.FileSystem) 1
Path (org.apache.hadoop.fs.Path) 1
Task (org.apache.hadoop.hive.ql.exec.Task) 1
Hive (org.apache.hadoop.hive.ql.metadata.Hive) 1
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException) 1
AlterTablePartMergeFilesDesc (org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc) 1
PreInsertTableDesc (org.apache.hadoop.hive.ql.parse.PreInsertTableDesc) 1
AbortTxnsDesc (org.apache.hadoop.hive.ql.plan.AbortTxnsDesc) 1
AddPartitionDesc (org.apache.hadoop.hive.ql.plan.AddPartitionDesc) 1
AlterIndexDesc (org.apache.hadoop.hive.ql.plan.AlterIndexDesc) 1
AlterTableAlterPartDesc (org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc) 1
AlterTableExchangePartition (org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition) 1