Use of org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc in project hive by apache.
From the class DDLTask, the method execute:
@Override
public int execute(DriverContext driverContext) {
  if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) {
    return 0;
  }
  // Create the db
  Hive db;
  try {
    db = Hive.get(conf);
    CreateDatabaseDesc createDatabaseDesc = work.getCreateDatabaseDesc();
    if (null != createDatabaseDesc) {
      return createDatabase(db, createDatabaseDesc);
    }
    DropDatabaseDesc dropDatabaseDesc = work.getDropDatabaseDesc();
    if (dropDatabaseDesc != null) {
      return dropDatabase(db, dropDatabaseDesc);
    }
    LockDatabaseDesc lockDatabaseDesc = work.getLockDatabaseDesc();
    if (lockDatabaseDesc != null) {
      return lockDatabase(db, lockDatabaseDesc);
    }
    UnlockDatabaseDesc unlockDatabaseDesc = work.getUnlockDatabaseDesc();
    if (unlockDatabaseDesc != null) {
      return unlockDatabase(db, unlockDatabaseDesc);
    }
    SwitchDatabaseDesc switchDatabaseDesc = work.getSwitchDatabaseDesc();
    if (switchDatabaseDesc != null) {
      return switchDatabase(db, switchDatabaseDesc);
    }
    DescDatabaseDesc descDatabaseDesc = work.getDescDatabaseDesc();
    if (descDatabaseDesc != null) {
      return descDatabase(db, descDatabaseDesc);
    }
    AlterDatabaseDesc alterDatabaseDesc = work.getAlterDatabaseDesc();
    if (alterDatabaseDesc != null) {
      return alterDatabase(db, alterDatabaseDesc);
    }
    CreateTableDesc crtTbl = work.getCreateTblDesc();
    if (crtTbl != null) {
      return createTable(db, crtTbl);
    }
    CreateIndexDesc crtIndex = work.getCreateIndexDesc();
    if (crtIndex != null) {
      return createIndex(db, crtIndex);
    }
    AlterIndexDesc alterIndex = work.getAlterIndexDesc();
    if (alterIndex != null) {
      return alterIndex(db, alterIndex);
    }
    DropIndexDesc dropIdx = work.getDropIdxDesc();
    if (dropIdx != null) {
      return dropIndex(db, dropIdx);
    }
    CreateTableLikeDesc crtTblLike = work.getCreateTblLikeDesc();
    if (crtTblLike != null) {
      return createTableLike(db, crtTblLike);
    }
    DropTableDesc dropTbl = work.getDropTblDesc();
    if (dropTbl != null) {
      dropTableOrPartitions(db, dropTbl);
      return 0;
    }
    AlterTableDesc alterTbl = work.getAlterTblDesc();
    if (alterTbl != null) {
      if (alterTbl.getOp() == AlterTableTypes.DROPCONSTRAINT) {
        return dropConstraint(db, alterTbl);
      } else if (alterTbl.getOp() == AlterTableTypes.ADDCONSTRAINT) {
        return addConstraint(db, alterTbl);
      } else {
        return alterTable(db, alterTbl);
      }
    }
    CreateViewDesc crtView = work.getCreateViewDesc();
    if (crtView != null) {
      return createView(db, crtView);
    }
    AddPartitionDesc addPartitionDesc = work.getAddPartitionDesc();
    if (addPartitionDesc != null) {
      return addPartitions(db, addPartitionDesc);
    }
    RenamePartitionDesc renamePartitionDesc = work.getRenamePartitionDesc();
    if (renamePartitionDesc != null) {
      return renamePartition(db, renamePartitionDesc);
    }
    AlterTableSimpleDesc simpleDesc = work.getAlterTblSimpleDesc();
    if (simpleDesc != null) {
      if (simpleDesc.getType() == AlterTableTypes.TOUCH) {
        return touch(db, simpleDesc);
      } else if (simpleDesc.getType() == AlterTableTypes.ARCHIVE) {
        return archive(db, simpleDesc, driverContext);
      } else if (simpleDesc.getType() == AlterTableTypes.UNARCHIVE) {
        return unarchive(db, simpleDesc);
      } else if (simpleDesc.getType() == AlterTableTypes.COMPACT) {
        return compact(db, simpleDesc);
      }
    }
    MsckDesc msckDesc = work.getMsckDesc();
    if (msckDesc != null) {
      return msck(db, msckDesc);
    }
    DescTableDesc descTbl = work.getDescTblDesc();
    if (descTbl != null) {
      return describeTable(db, descTbl);
    }
    DescFunctionDesc descFunc = work.getDescFunctionDesc();
    if (descFunc != null) {
      return describeFunction(db, descFunc);
    }
    ShowDatabasesDesc showDatabases = work.getShowDatabasesDesc();
    if (showDatabases != null) {
      return showDatabases(db, showDatabases);
    }
    ShowTablesDesc showTbls = work.getShowTblsDesc();
    if (showTbls != null) {
      return showTablesOrViews(db, showTbls);
    }
    ShowColumnsDesc showCols = work.getShowColumnsDesc();
    if (showCols != null) {
      return showColumns(db, showCols);
    }
    ShowTableStatusDesc showTblStatus = work.getShowTblStatusDesc();
    if (showTblStatus != null) {
      return showTableStatus(db, showTblStatus);
    }
    ShowTblPropertiesDesc showTblProperties = work.getShowTblPropertiesDesc();
    if (showTblProperties != null) {
      return showTableProperties(db, showTblProperties);
    }
    ShowFunctionsDesc showFuncs = work.getShowFuncsDesc();
    if (showFuncs != null) {
      return showFunctions(db, showFuncs);
    }
    ShowLocksDesc showLocks = work.getShowLocksDesc();
    if (showLocks != null) {
      return showLocks(db, showLocks);
    }
    ShowCompactionsDesc compactionsDesc = work.getShowCompactionsDesc();
    if (compactionsDesc != null) {
      return showCompactions(db, compactionsDesc);
    }
    ShowTxnsDesc txnsDesc = work.getShowTxnsDesc();
    if (txnsDesc != null) {
      return showTxns(db, txnsDesc);
    }
    AbortTxnsDesc abortTxnsDesc = work.getAbortTxnsDesc();
    if (abortTxnsDesc != null) {
      return abortTxns(db, abortTxnsDesc);
    }
    LockTableDesc lockTbl = work.getLockTblDesc();
    if (lockTbl != null) {
      return lockTable(db, lockTbl);
    }
    UnlockTableDesc unlockTbl = work.getUnlockTblDesc();
    if (unlockTbl != null) {
      return unlockTable(db, unlockTbl);
    }
    ShowPartitionsDesc showParts = work.getShowPartsDesc();
    if (showParts != null) {
      return showPartitions(db, showParts);
    }
    ShowCreateDatabaseDesc showCreateDb = work.getShowCreateDbDesc();
    if (showCreateDb != null) {
      return showCreateDatabase(db, showCreateDb);
    }
    ShowCreateTableDesc showCreateTbl = work.getShowCreateTblDesc();
    if (showCreateTbl != null) {
      return showCreateTable(db, showCreateTbl);
    }
    ShowConfDesc showConf = work.getShowConfDesc();
    if (showConf != null) {
      return showConf(db, showConf);
    }
    RoleDDLDesc roleDDLDesc = work.getRoleDDLDesc();
    if (roleDDLDesc != null) {
      return roleDDL(db, roleDDLDesc);
    }
    GrantDesc grantDesc = work.getGrantDesc();
    if (grantDesc != null) {
      return grantOrRevokePrivileges(db, grantDesc.getPrincipals(), grantDesc.getPrivileges(),
          grantDesc.getPrivilegeSubjectDesc(), grantDesc.getGrantor(), grantDesc.getGrantorType(),
          grantDesc.isGrantOption(), true);
    }
    RevokeDesc revokeDesc = work.getRevokeDesc();
    if (revokeDesc != null) {
      return grantOrRevokePrivileges(db, revokeDesc.getPrincipals(), revokeDesc.getPrivileges(),
          revokeDesc.getPrivilegeSubjectDesc(), null, null, revokeDesc.isGrantOption(), false);
    }
    ShowGrantDesc showGrantDesc = work.getShowGrantDesc();
    if (showGrantDesc != null) {
      return showGrants(db, showGrantDesc);
    }
    GrantRevokeRoleDDL grantOrRevokeRoleDDL = work.getGrantRevokeRoleDDL();
    if (grantOrRevokeRoleDDL != null) {
      return grantOrRevokeRole(db, grantOrRevokeRoleDDL);
    }
    ShowIndexesDesc showIndexes = work.getShowIndexesDesc();
    if (showIndexes != null) {
      return showIndexes(db, showIndexes);
    }
    AlterTablePartMergeFilesDesc mergeFilesDesc = work.getMergeFilesDesc();
    if (mergeFilesDesc != null) {
      return mergeFiles(db, mergeFilesDesc, driverContext);
    }
    AlterTableAlterPartDesc alterPartDesc = work.getAlterTableAlterPartDesc();
    if (alterPartDesc != null) {
      return alterTableAlterPart(db, alterPartDesc);
    }
    TruncateTableDesc truncateTableDesc = work.getTruncateTblDesc();
    if (truncateTableDesc != null) {
      return truncateTable(db, truncateTableDesc);
    }
    AlterTableExchangePartition alterTableExchangePartition = work.getAlterTableExchangePartition();
    if (alterTableExchangePartition != null) {
      return exchangeTablePartition(db, alterTableExchangePartition);
    }
    CacheMetadataDesc cacheMetadataDesc = work.getCacheMetadataDesc();
    if (cacheMetadataDesc != null) {
      return cacheMetadata(db, cacheMetadataDesc);
    }
    InsertTableDesc insertTableDesc = work.getInsertTableDesc();
    if (insertTableDesc != null) {
      return insertCommitWork(db, insertTableDesc);
    }
    PreInsertTableDesc preInsertTableDesc = work.getPreInsertTableDesc();
    if (preInsertTableDesc != null) {
      return preInsertWork(db, preInsertTableDesc);
    }
  } catch (Throwable e) {
    failed(e);
    return 1;
  }
  assert false;
  return 0;
}
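The method is one long null-check dispatch: a DDLWork carries at most one non-null descriptor, and the first match wins. For context, a minimal sketch of the producer side, using only constructor and factory calls visible in the snippets on this page (the helper name makeCreateDbTask and the literal values are hypothetical):

import java.io.Serializable;
import java.util.HashSet;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.DDLWork;

// Hypothetical helper: build a CreateDatabaseDesc, wrap it in a DDLWork, and let
// TaskFactory produce the DDLTask whose execute() above routes the work to
// createDatabase(db, createDatabaseDesc).
private Task<? extends Serializable> makeCreateDbTask(HiveConf conf) {
  CreateDatabaseDesc desc = new CreateDatabaseDesc(
      "sales",              // database name
      "example database",   // comment
      null,                 // location; null lets the metastore choose a default
      true);                // ifNotExists
  DDLWork work = new DDLWork(new HashSet<ReadEntity>(), new HashSet<WriteEntity>(), desc);
  return TaskFactory.get(work, conf);
}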
Use of org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc in project hive by apache.
From the class DDLSemanticAnalyzer, the method analyzeCreateDatabase:
private void analyzeCreateDatabase(ASTNode ast) throws SemanticException {
  String dbName = unescapeIdentifier(ast.getChild(0).getText());
  boolean ifNotExists = false;
  String dbComment = null;
  String dbLocation = null;
  Map<String, String> dbProps = null;
  for (int i = 1; i < ast.getChildCount(); i++) {
    ASTNode childNode = (ASTNode) ast.getChild(i);
    switch (childNode.getToken().getType()) {
    case HiveParser.TOK_IFNOTEXISTS:
      ifNotExists = true;
      break;
    case HiveParser.TOK_DATABASECOMMENT:
      dbComment = unescapeSQLString(childNode.getChild(0).getText());
      break;
    case TOK_DATABASEPROPERTIES:
      dbProps = DDLSemanticAnalyzer.getProps((ASTNode) childNode.getChild(0));
      break;
    case TOK_DATABASELOCATION:
      dbLocation = unescapeSQLString(childNode.getChild(0).getText());
      addLocationToOutputs(dbLocation);
      break;
    default:
      throw new SemanticException("Unrecognized token in CREATE DATABASE statement");
    }
  }
  CreateDatabaseDesc createDatabaseDesc = new CreateDatabaseDesc(dbName, dbComment, dbLocation, ifNotExists);
  if (dbProps != null) {
    createDatabaseDesc.setDatabaseProperties(dbProps);
  }
  Database database = new Database(dbName, dbComment, dbLocation, dbProps);
  outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_NO_LOCK));
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), createDatabaseDesc), conf));
}
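For orientation, a hedged example (all names hypothetical, not from the Hive test suite) of a statement that exercises every branch of the switch above, together with the CreateDatabaseDesc the analyzer would assemble for it:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;

// CREATE DATABASE IF NOT EXISTS sales
//   COMMENT 'quarterly sales data'
//   LOCATION '/warehouse/sales.db'
//   WITH DBPROPERTIES ('owner' = 'etl', 'retention' = '90d');
CreateDatabaseDesc desc = new CreateDatabaseDesc(
    "sales", "quarterly sales data", "/warehouse/sales.db", /* ifNotExists */ true);
Map<String, String> props = new HashMap<String, String>();
props.put("owner", "etl");
props.put("retention", "90d");
desc.setDatabaseProperties(props);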
Use of org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc in project hive by apache.
From the class ReplicationSemanticAnalyzer, the method analyzeDatabaseLoad:
private void analyzeDatabaseLoad(String dbName, FileSystem fs, FileStatus dir) throws SemanticException {
  try {
    // The path passed to us is a db dump location; we load from it as needed.
    // dbName might be null or empty, in which case we keep the original db name for the new
    // database creation.
    // Two steps here: first, read the _metadata file and create a CreateDatabaseDesc from it;
    // then iterate over all subdirs and create a table import for each.
    EximUtil.ReadMetaData rv = new EximUtil.ReadMetaData();
    try {
      rv = EximUtil.readMetaData(fs, new Path(dir.getPath(), EximUtil.METADATA_NAME));
    } catch (IOException e) {
      throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(), e);
    }
    Database dbObj = rv.getDatabase();
    if (dbObj == null) {
      throw new IllegalArgumentException("_metadata file read did not contain a db object - invalid dump.");
    }
    if ((dbName == null) || (dbName.isEmpty())) {
      // We use the specified dbName as long as it is not null/empty; otherwise we fall back to
      // the original name recorded in the thrift object.
      dbName = dbObj.getName();
    }
    CreateDatabaseDesc createDbDesc = new CreateDatabaseDesc();
    createDbDesc.setName(dbName);
    createDbDesc.setComment(dbObj.getDescription());
    createDbDesc.setDatabaseProperties(dbObj.getParameters());
    // Note that we do not set a location - for repl load, we want that auto-created.
    createDbDesc.setIfNotExists(false);
    // If the database already exists, we want that to be an error condition: repl load is not
    // intended to replace an existing db.
    // TODO: we might revisit this in create-drop-recreate cases; it needs some thought.
    Task<? extends Serializable> createDbTask = TaskFactory.get(new DDLWork(inputs, outputs, createDbDesc), conf);
    rootTasks.add(createDbTask);
    FileStatus[] dirsInDbPath = fs.listStatus(dir.getPath(), EximUtil.getDirectoryFilter(fs));
    for (FileStatus tableDir : dirsInDbPath) {
      analyzeTableLoad(dbName, null, tableDir.getPath().toUri().toString(), createDbTask, null, null);
    }
  } catch (Exception e) {
    throw new SemanticException(e);
  }
}
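For orientation, a sketch of the dump layout this loader assumes, inferred from the code above (the concrete paths are hypothetical): one _metadata file describing the database, read via EximUtil.readMetaData, plus one subdirectory per table, each handed to analyzeTableLoad:

// /repl/dump/sales.db/
//   _metadata     <- thrift-serialized Database object; becomes the CreateDatabaseDesc above
//   orders/       <- table dump dir; analyzeTableLoad(...) chains an import after createDbTask
//   customers/    <- likewise, one import task per table directory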
Use of org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc in project hive by apache.
From the class CreateDatabaseHook, the method authorizeDDLWork:
@Override
protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext context, Hive hive, DDLWork work) throws HiveException {
  CreateDatabaseDesc createDb = work.getCreateDatabaseDesc();
  if (createDb != null) {
    Database db = new Database(createDb.getName(), createDb.getComment(),
        createDb.getLocationUri(), createDb.getDatabaseProperties());
    authorize(db, Privilege.CREATE);
  }
}