Usage example of org.apache.hadoop.hive.ql.plan.ShowTablesDesc in the Apache Hive project.
From the class DDLTask, method execute:
/**
 * Entry point for DDL execution. The associated {@code work} object carries
 * at most one populated descriptor; this method checks each getter in turn
 * and dispatches to the matching handler, returning that handler's status.
 *
 * @param driverContext driver-level context; used to detect EXPLAIN ANALYZE
 *        replay and passed through to handlers that need it
 * @return 0 on success, 1 on any failure (the cause is reported via
 *         {@code failed(e)} before returning)
 */
@Override
public int execute(DriverContext driverContext) {
  // EXPLAIN ANALYZE re-runs the plan to collect runtime statistics; DDL side
  // effects must not be repeated, so report success without doing anything.
  if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) {
    return 0;
  }
  // Obtain the Hive metastore handle for this configuration.
  Hive db;
  try {
    db = Hive.get(conf);
    // --- Database-level operations ---
    CreateDatabaseDesc createDatabaseDesc = work.getCreateDatabaseDesc();
    if (null != createDatabaseDesc) {
      return createDatabase(db, createDatabaseDesc);
    }
    DropDatabaseDesc dropDatabaseDesc = work.getDropDatabaseDesc();
    if (dropDatabaseDesc != null) {
      return dropDatabase(db, dropDatabaseDesc);
    }
    LockDatabaseDesc lockDatabaseDesc = work.getLockDatabaseDesc();
    if (lockDatabaseDesc != null) {
      return lockDatabase(db, lockDatabaseDesc);
    }
    UnlockDatabaseDesc unlockDatabaseDesc = work.getUnlockDatabaseDesc();
    if (unlockDatabaseDesc != null) {
      return unlockDatabase(db, unlockDatabaseDesc);
    }
    SwitchDatabaseDesc switchDatabaseDesc = work.getSwitchDatabaseDesc();
    if (switchDatabaseDesc != null) {
      return switchDatabase(db, switchDatabaseDesc);
    }
    DescDatabaseDesc descDatabaseDesc = work.getDescDatabaseDesc();
    if (descDatabaseDesc != null) {
      return descDatabase(db, descDatabaseDesc);
    }
    AlterDatabaseDesc alterDatabaseDesc = work.getAlterDatabaseDesc();
    if (alterDatabaseDesc != null) {
      return alterDatabase(db, alterDatabaseDesc);
    }
    // --- Table / index / view creation and modification ---
    CreateTableDesc crtTbl = work.getCreateTblDesc();
    if (crtTbl != null) {
      return createTable(db, crtTbl);
    }
    CreateIndexDesc crtIndex = work.getCreateIndexDesc();
    if (crtIndex != null) {
      return createIndex(db, crtIndex);
    }
    AlterIndexDesc alterIndex = work.getAlterIndexDesc();
    if (alterIndex != null) {
      return alterIndex(db, alterIndex);
    }
    DropIndexDesc dropIdx = work.getDropIdxDesc();
    if (dropIdx != null) {
      return dropIndex(db, dropIdx);
    }
    CreateTableLikeDesc crtTblLike = work.getCreateTblLikeDesc();
    if (crtTblLike != null) {
      return createTableLike(db, crtTblLike);
    }
    DropTableDesc dropTbl = work.getDropTblDesc();
    if (dropTbl != null) {
      // dropTableOrPartitions has no status result; failures surface as
      // exceptions handled by the catch below.
      dropTableOrPartitions(db, dropTbl);
      return 0;
    }
    AlterTableDesc alterTbl = work.getAlterTblDesc();
    if (alterTbl != null) {
      // Constraint changes have dedicated handlers; every other ALTER TABLE
      // variant goes through the generic alterTable path.
      if (alterTbl.getOp() == AlterTableTypes.DROPCONSTRAINT) {
        return dropConstraint(db, alterTbl);
      } else if (alterTbl.getOp() == AlterTableTypes.ADDCONSTRAINT) {
        return addConstraint(db, alterTbl);
      } else {
        return alterTable(db, alterTbl);
      }
    }
    CreateViewDesc crtView = work.getCreateViewDesc();
    if (crtView != null) {
      return createView(db, crtView);
    }
    AddPartitionDesc addPartitionDesc = work.getAddPartitionDesc();
    if (addPartitionDesc != null) {
      return addPartitions(db, addPartitionDesc);
    }
    RenamePartitionDesc renamePartitionDesc = work.getRenamePartitionDesc();
    if (renamePartitionDesc != null) {
      return renamePartition(db, renamePartitionDesc);
    }
    AlterTableSimpleDesc simpleDesc = work.getAlterTblSimpleDesc();
    if (simpleDesc != null) {
      // "Simple" alters are further dispatched on their sub-type. Note:
      // unmatched sub-types fall through to the trailing assert/return below.
      if (simpleDesc.getType() == AlterTableTypes.TOUCH) {
        return touch(db, simpleDesc);
      } else if (simpleDesc.getType() == AlterTableTypes.ARCHIVE) {
        return archive(db, simpleDesc, driverContext);
      } else if (simpleDesc.getType() == AlterTableTypes.UNARCHIVE) {
        return unarchive(db, simpleDesc);
      } else if (simpleDesc.getType() == AlterTableTypes.COMPACT) {
        return compact(db, simpleDesc);
      }
    }
    MsckDesc msckDesc = work.getMsckDesc();
    if (msckDesc != null) {
      return msck(db, msckDesc);
    }
    // --- Metadata display (DESCRIBE / SHOW ...) operations ---
    DescTableDesc descTbl = work.getDescTblDesc();
    if (descTbl != null) {
      return describeTable(db, descTbl);
    }
    DescFunctionDesc descFunc = work.getDescFunctionDesc();
    if (descFunc != null) {
      return describeFunction(db, descFunc);
    }
    ShowDatabasesDesc showDatabases = work.getShowDatabasesDesc();
    if (showDatabases != null) {
      return showDatabases(db, showDatabases);
    }
    ShowTablesDesc showTbls = work.getShowTblsDesc();
    if (showTbls != null) {
      // Handles both SHOW TABLES and SHOW VIEWS (ShowTablesDesc carries the
      // table type).
      return showTablesOrViews(db, showTbls);
    }
    ShowColumnsDesc showCols = work.getShowColumnsDesc();
    if (showCols != null) {
      return showColumns(db, showCols);
    }
    ShowTableStatusDesc showTblStatus = work.getShowTblStatusDesc();
    if (showTblStatus != null) {
      return showTableStatus(db, showTblStatus);
    }
    ShowTblPropertiesDesc showTblProperties = work.getShowTblPropertiesDesc();
    if (showTblProperties != null) {
      return showTableProperties(db, showTblProperties);
    }
    ShowFunctionsDesc showFuncs = work.getShowFuncsDesc();
    if (showFuncs != null) {
      return showFunctions(db, showFuncs);
    }
    ShowLocksDesc showLocks = work.getShowLocksDesc();
    if (showLocks != null) {
      return showLocks(db, showLocks);
    }
    ShowCompactionsDesc compactionsDesc = work.getShowCompactionsDesc();
    if (compactionsDesc != null) {
      return showCompactions(db, compactionsDesc);
    }
    ShowTxnsDesc txnsDesc = work.getShowTxnsDesc();
    if (txnsDesc != null) {
      return showTxns(db, txnsDesc);
    }
    AbortTxnsDesc abortTxnsDesc = work.getAbortTxnsDesc();
    if (abortTxnsDesc != null) {
      return abortTxns(db, abortTxnsDesc);
    }
    // --- Locking, partitions, SHOW CREATE, configuration ---
    LockTableDesc lockTbl = work.getLockTblDesc();
    if (lockTbl != null) {
      return lockTable(db, lockTbl);
    }
    UnlockTableDesc unlockTbl = work.getUnlockTblDesc();
    if (unlockTbl != null) {
      return unlockTable(db, unlockTbl);
    }
    ShowPartitionsDesc showParts = work.getShowPartsDesc();
    if (showParts != null) {
      return showPartitions(db, showParts);
    }
    ShowCreateDatabaseDesc showCreateDb = work.getShowCreateDbDesc();
    if (showCreateDb != null) {
      return showCreateDatabase(db, showCreateDb);
    }
    ShowCreateTableDesc showCreateTbl = work.getShowCreateTblDesc();
    if (showCreateTbl != null) {
      return showCreateTable(db, showCreateTbl);
    }
    ShowConfDesc showConf = work.getShowConfDesc();
    if (showConf != null) {
      return showConf(db, showConf);
    }
    // --- Authorization (roles, grants, revokes) ---
    RoleDDLDesc roleDDLDesc = work.getRoleDDLDesc();
    if (roleDDLDesc != null) {
      return roleDDL(db, roleDDLDesc);
    }
    GrantDesc grantDesc = work.getGrantDesc();
    if (grantDesc != null) {
      // Final 'true' selects the grant path of the shared grant/revoke helper.
      return grantOrRevokePrivileges(db, grantDesc.getPrincipals(), grantDesc.getPrivileges(), grantDesc.getPrivilegeSubjectDesc(), grantDesc.getGrantor(), grantDesc.getGrantorType(), grantDesc.isGrantOption(), true);
    }
    RevokeDesc revokeDesc = work.getRevokeDesc();
    if (revokeDesc != null) {
      // Revoke path: no grantor information applies, hence the two nulls.
      return grantOrRevokePrivileges(db, revokeDesc.getPrincipals(), revokeDesc.getPrivileges(), revokeDesc.getPrivilegeSubjectDesc(), null, null, revokeDesc.isGrantOption(), false);
    }
    ShowGrantDesc showGrantDesc = work.getShowGrantDesc();
    if (showGrantDesc != null) {
      return showGrants(db, showGrantDesc);
    }
    GrantRevokeRoleDDL grantOrRevokeRoleDDL = work.getGrantRevokeRoleDDL();
    if (grantOrRevokeRoleDDL != null) {
      return grantOrRevokeRole(db, grantOrRevokeRoleDDL);
    }
    ShowIndexesDesc showIndexes = work.getShowIndexesDesc();
    if (showIndexes != null) {
      return showIndexes(db, showIndexes);
    }
    // --- Remaining miscellaneous operations ---
    AlterTablePartMergeFilesDesc mergeFilesDesc = work.getMergeFilesDesc();
    if (mergeFilesDesc != null) {
      return mergeFiles(db, mergeFilesDesc, driverContext);
    }
    AlterTableAlterPartDesc alterPartDesc = work.getAlterTableAlterPartDesc();
    if (alterPartDesc != null) {
      return alterTableAlterPart(db, alterPartDesc);
    }
    TruncateTableDesc truncateTableDesc = work.getTruncateTblDesc();
    if (truncateTableDesc != null) {
      return truncateTable(db, truncateTableDesc);
    }
    AlterTableExchangePartition alterTableExchangePartition = work.getAlterTableExchangePartition();
    if (alterTableExchangePartition != null) {
      return exchangeTablePartition(db, alterTableExchangePartition);
    }
    CacheMetadataDesc cacheMetadataDesc = work.getCacheMetadataDesc();
    if (cacheMetadataDesc != null) {
      return cacheMetadata(db, cacheMetadataDesc);
    }
    InsertTableDesc insertTableDesc = work.getInsertTableDesc();
    if (insertTableDesc != null) {
      return insertCommitWork(db, insertTableDesc);
    }
    PreInsertTableDesc preInsertTableDesc = work.getPreInsertTableDesc();
    if (preInsertTableDesc != null) {
      return preInsertWork(db, preInsertTableDesc);
    }
  } catch (Throwable e) {
    // Catching Throwable (not just Exception) so that even Errors are
    // recorded via failed(e) and mapped to a non-zero task status.
    failed(e);
    return 1;
  }
  // Reached only when no descriptor was populated, i.e. a malformed DDLWork.
  // With assertions enabled (-ea) this throws AssertionError; in production
  // (assertions disabled) the task silently reports success.
  assert false;
  return 0;
}
Usage example of org.apache.hadoop.hive.ql.plan.ShowTablesDesc in the Apache Hive project.
From the class DDLSemanticAnalyzer, method analyzeShowViews:
/**
 * Analyzes a {@code SHOW VIEWS [IN/FROM db] ['pattern']} statement: builds a
 * {@link ShowTablesDesc} restricted to {@code TableType.VIRTUAL_VIEW}, adds
 * the DDL task to {@code rootTasks}, and registers a fetch task that reads
 * the result file.
 *
 * AST child layout:
 *   0 children -> no pattern, current database
 *   1 child    -> view-name pattern only
 *   2 children -> TOK_FROM + database name
 *   3 children -> TOK_FROM + database name + view-name pattern
 *
 * @param ast the SHOW VIEWS AST node
 * @throws SemanticException if the tree is malformed or the database is invalid
 */
private void analyzeShowViews(ASTNode ast) throws SemanticException {
  ShowTablesDesc showViewsDesc;
  String dbName = SessionState.get().getCurrentDatabase();
  String viewNames = null;
  if (ast.getChildCount() > 3) {
    // Consistent with analyzeShowTables: report the offending tree instead
    // of a context-free GENERIC_ERROR, so the user can see what was parsed.
    throw new SemanticException(ErrorMsg.INVALID_AST_TREE.getMsg(ast.toStringTree()));
  }
  switch (ast.getChildCount()) {
  case 1:
    // Uses a pattern
    viewNames = unescapeSQLString(ast.getChild(0).getText());
    showViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, viewNames, TableType.VIRTUAL_VIEW);
    break;
  case 2:
    // Specifies a DB
    assert (ast.getChild(0).getType() == HiveParser.TOK_FROM);
    dbName = unescapeIdentifier(ast.getChild(1).getText());
    validateDatabase(dbName);
    showViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName);
    showViewsDesc.setType(TableType.VIRTUAL_VIEW);
    break;
  case 3:
    // Uses a pattern and specifies a DB
    assert (ast.getChild(0).getType() == HiveParser.TOK_FROM);
    dbName = unescapeIdentifier(ast.getChild(1).getText());
    viewNames = unescapeSQLString(ast.getChild(2).getText());
    validateDatabase(dbName);
    showViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, viewNames, TableType.VIRTUAL_VIEW);
    break;
  default:
    // No pattern or DB
    showViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName);
    showViewsDesc.setType(TableType.VIRTUAL_VIEW);
    break;
  }
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showViewsDesc), conf));
  setFetchTask(createFetchTask(showViewsDesc.getSchema()));
}
Usage example of org.apache.hadoop.hive.ql.plan.ShowTablesDesc in the Apache Hive project.
From the class HCatSemanticAnalyzer, method authorizeDDLWork:
/**
 * HCatalog-side authorization for DDL operations. Inspects whichever
 * descriptor(s) are populated on {@code work} and checks the corresponding
 * storage-level privilege. Operations not handled here are either already
 * enforced by Hive itself or intentionally not enforced (see inline notes).
 *
 * @param cntxt semantic-analyzer hook context (provides the Hive client)
 * @param hive  Hive metastore handle used for table/partition lookups
 * @param work  the DDL work whose populated descriptor drives the checks
 * @throws HiveException if metadata lookup or an authorization check fails
 */
@Override
protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext cntxt, Hive hive, DDLWork work) throws HiveException {
  // DB operations; none of them are enforced by Hive right now.
  ShowDatabasesDesc showDatabases = work.getShowDatabasesDesc();
  if (showDatabases != null) {
    authorize(HiveOperation.SHOWDATABASES.getInputRequiredPrivileges(), HiveOperation.SHOWDATABASES.getOutputRequiredPrivileges());
  }
  DropDatabaseDesc dropDb = work.getDropDatabaseDesc();
  if (dropDb != null) {
    Database db = cntxt.getHive().getDatabase(dropDb.getDatabaseName());
    if (db != null) {
      // if above returned a null, then the db does not exist - probably a
      // "drop database if exists" clause - don't try to authorize then.
      authorize(db, Privilege.DROP);
    }
  }
  DescDatabaseDesc descDb = work.getDescDatabaseDesc();
  if (descDb != null) {
    Database db = cntxt.getHive().getDatabase(descDb.getDatabaseName());
    authorize(db, Privilege.SELECT);
  }
  SwitchDatabaseDesc switchDb = work.getSwitchDatabaseDesc();
  if (switchDb != null) {
    Database db = cntxt.getHive().getDatabase(switchDb.getDatabaseName());
    authorize(db, Privilege.SELECT);
  }
  ShowTablesDesc showTables = work.getShowTblsDesc();
  if (showTables != null) {
    // Fall back to the session's current database when none was specified.
    String dbName = showTables.getDbName() == null ? SessionState.get().getCurrentDatabase() : showTables.getDbName();
    authorize(cntxt.getHive().getDatabase(dbName), Privilege.SELECT);
  }
  ShowTableStatusDesc showTableStatus = work.getShowTblStatusDesc();
  if (showTableStatus != null) {
    String dbName = showTableStatus.getDbName() == null ? SessionState.get().getCurrentDatabase() : showTableStatus.getDbName();
    authorize(cntxt.getHive().getDatabase(dbName), Privilege.SELECT);
  }
  // TODO: add alter database support in HCat
  // Table operations.
  DropTableDesc dropTable = work.getDropTblDesc();
  if (dropTable != null) {
    if (dropTable.getPartSpecs() == null) {
      // drop table is already enforced by Hive. We only check for table level location even if the
      // table is partitioned.
    } else {
      // This is actually an ALTER TABLE DROP PARTITION statement.
      for (DropTableDesc.PartSpec partSpec : dropTable.getPartSpecs()) {
        // partitions are not added as write entries in drop partitions in Hive,
        // so resolve them here and authorize each one individually.
        Table table = hive.getTable(SessionState.get().getCurrentDatabase(), dropTable.getTableName());
        List<Partition> partitions = null;
        try {
          partitions = hive.getPartitionsByFilter(table, partSpec.getPartSpec().getExprString());
        } catch (Exception e) {
          // Wrap any lookup failure so callers see a HiveException with the cause.
          throw new HiveException(e);
        }
        for (Partition part : partitions) {
          authorize(part, Privilege.DROP);
        }
      }
    }
  }
  AlterTableDesc alterTable = work.getAlterTblDesc();
  if (alterTable != null) {
    Table table = hive.getTable(SessionState.get().getCurrentDatabase(), Utilities.getDbTableName(alterTable.getOldName())[1], false);
    Partition part = null;
    if (alterTable.getPartSpec() != null) {
      part = hive.getPartition(table, alterTable.getPartSpec(), false);
    }
    String newLocation = alterTable.getNewLocation();
    /* HCat requires ALTER_DATA privileges for ALTER TABLE LOCATION statements
     * for the old table/partition location and the new location.
     */
    if (alterTable.getOp() == AlterTableDesc.AlterTableTypes.ALTERLOCATION) {
      if (part != null) {
        // authorize for the old
        authorize(part, Privilege.ALTER_DATA);
        // location, and new location: the in-memory object's location is
        // swapped to the target so the same authorize call checks the new path.
        part.setLocation(newLocation);
        authorize(part, Privilege.ALTER_DATA);
      } else {
        // authorize for the old
        authorize(table, Privilege.ALTER_DATA);
        // location, and new location (same swap-and-recheck as above)
        table.getTTable().getSd().setLocation(newLocation);
        authorize(table, Privilege.ALTER_DATA);
      }
    }
    // other alter operations are already supported by Hive
  }
  // we should be careful when authorizing table based on just the
  // table name. If columns have separate authorization domain, it
  // must be honored
  DescTableDesc descTable = work.getDescTblDesc();
  if (descTable != null) {
    String tableName = extractTableName(descTable.getTableName());
    authorizeTable(cntxt.getHive(), tableName, Privilege.SELECT);
  }
  ShowPartitionsDesc showParts = work.getShowPartsDesc();
  if (showParts != null) {
    String tableName = extractTableName(showParts.getTabName());
    authorizeTable(cntxt.getHive(), tableName, Privilege.SELECT);
  }
}
Usage example of org.apache.hadoop.hive.ql.plan.ShowTablesDesc in the Apache Hive project.
From the class DDLSemanticAnalyzer, method analyzeShowTables:
/**
 * Analyzes a {@code SHOW TABLES [IN/FROM db] ['pattern']} statement: builds
 * the matching {@link ShowTablesDesc}, records a read entity on the target
 * database, queues the DDL task, and attaches a fetch task for the output.
 *
 * AST child layout:
 *   0 children -> no pattern, current database
 *   1 child    -> table-name pattern only
 *   2 children -> TOK_FROM + database name
 *   3 children -> TOK_FROM + database name + table-name pattern
 *
 * @param ast the SHOW TABLES AST node
 * @throws SemanticException if the tree is malformed or the database is invalid
 */
private void analyzeShowTables(ASTNode ast) throws SemanticException {
  String dbName = SessionState.get().getCurrentDatabase();
  String pattern = null;
  int childCount = ast.getChildCount();
  if (childCount > 3) {
    throw new SemanticException(ErrorMsg.INVALID_AST_TREE.getMsg(ast.toStringTree()));
  }
  ShowTablesDesc desc;
  if (childCount == 1) {
    // Pattern only, against the current database.
    pattern = unescapeSQLString(ast.getChild(0).getText());
    desc = new ShowTablesDesc(ctx.getResFile(), dbName, pattern);
  } else if (childCount == 2) {
    // Explicit database, no pattern.
    assert (ast.getChild(0).getType() == HiveParser.TOK_FROM);
    dbName = unescapeIdentifier(ast.getChild(1).getText());
    validateDatabase(dbName);
    desc = new ShowTablesDesc(ctx.getResFile(), dbName);
  } else if (childCount == 3) {
    // Explicit database plus a pattern.
    assert (ast.getChild(0).getType() == HiveParser.TOK_FROM);
    dbName = unescapeIdentifier(ast.getChild(1).getText());
    pattern = unescapeSQLString(ast.getChild(2).getText());
    validateDatabase(dbName);
    desc = new ShowTablesDesc(ctx.getResFile(), dbName, pattern);
  } else {
    // Neither a pattern nor a database was given.
    desc = new ShowTablesDesc(ctx.getResFile(), dbName);
  }
  // Record the database as a read entity so authorization hooks see it.
  inputs.add(new ReadEntity(getDatabase(dbName)));
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf));
  setFetchTask(createFetchTask(desc.getSchema()));
}
Aggregations