Use of org.apache.hadoop.hive.ql.plan.DropTableDesc in project hive by apache.
From the class DDLTask, method execute:
@Override
public int execute(DriverContext driverContext) {
  if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) {
    return 0;
  }
  // Create the db
  Hive db;
  try {
    db = Hive.get(conf);
    CreateDatabaseDesc createDatabaseDesc = work.getCreateDatabaseDesc();
    if (null != createDatabaseDesc) {
      return createDatabase(db, createDatabaseDesc);
    }
    DropDatabaseDesc dropDatabaseDesc = work.getDropDatabaseDesc();
    if (dropDatabaseDesc != null) {
      return dropDatabase(db, dropDatabaseDesc);
    }
    LockDatabaseDesc lockDatabaseDesc = work.getLockDatabaseDesc();
    if (lockDatabaseDesc != null) {
      return lockDatabase(db, lockDatabaseDesc);
    }
    UnlockDatabaseDesc unlockDatabaseDesc = work.getUnlockDatabaseDesc();
    if (unlockDatabaseDesc != null) {
      return unlockDatabase(db, unlockDatabaseDesc);
    }
    SwitchDatabaseDesc switchDatabaseDesc = work.getSwitchDatabaseDesc();
    if (switchDatabaseDesc != null) {
      return switchDatabase(db, switchDatabaseDesc);
    }
    DescDatabaseDesc descDatabaseDesc = work.getDescDatabaseDesc();
    if (descDatabaseDesc != null) {
      return descDatabase(db, descDatabaseDesc);
    }
    AlterDatabaseDesc alterDatabaseDesc = work.getAlterDatabaseDesc();
    if (alterDatabaseDesc != null) {
      return alterDatabase(db, alterDatabaseDesc);
    }
    CreateTableDesc crtTbl = work.getCreateTblDesc();
    if (crtTbl != null) {
      return createTable(db, crtTbl);
    }
    CreateTableLikeDesc crtTblLike = work.getCreateTblLikeDesc();
    if (crtTblLike != null) {
      return createTableLike(db, crtTblLike);
    }
    DropTableDesc dropTbl = work.getDropTblDesc();
    if (dropTbl != null) {
      dropTableOrPartitions(db, dropTbl);
      return 0;
    }
    AlterTableDesc alterTbl = work.getAlterTblDesc();
    if (alterTbl != null) {
      if (!allowOperationInReplicationScope(db, alterTbl.getOldName(), null, alterTbl.getReplicationSpec())) {
        // no alter, the table is missing either due to drop/rename which follows the alter.
        // or the existing table is newer than our update.
        LOG.debug("DDLTask: Alter Table is skipped as table {} is newer than update", alterTbl.getOldName());
        return 0;
      }
      if (alterTbl.getOp() == AlterTableTypes.DROPCONSTRAINT) {
        return dropConstraint(db, alterTbl);
      } else if (alterTbl.getOp() == AlterTableTypes.ADDCONSTRAINT) {
        return addConstraints(db, alterTbl);
      } else {
        return alterTable(db, alterTbl);
      }
    }
    CreateViewDesc crtView = work.getCreateViewDesc();
    if (crtView != null) {
      return createView(db, crtView);
    }
    AddPartitionDesc addPartitionDesc = work.getAddPartitionDesc();
    if (addPartitionDesc != null) {
      return addPartitions(db, addPartitionDesc);
    }
    RenamePartitionDesc renamePartitionDesc = work.getRenamePartitionDesc();
    if (renamePartitionDesc != null) {
      return renamePartition(db, renamePartitionDesc);
    }
    AlterTableSimpleDesc simpleDesc = work.getAlterTblSimpleDesc();
    if (simpleDesc != null) {
      if (simpleDesc.getType() == AlterTableTypes.TOUCH) {
        return touch(db, simpleDesc);
      } else if (simpleDesc.getType() == AlterTableTypes.ARCHIVE) {
        return archive(db, simpleDesc, driverContext);
      } else if (simpleDesc.getType() == AlterTableTypes.UNARCHIVE) {
        return unarchive(db, simpleDesc);
      } else if (simpleDesc.getType() == AlterTableTypes.COMPACT) {
        return compact(db, simpleDesc);
      }
    }
    MsckDesc msckDesc = work.getMsckDesc();
    if (msckDesc != null) {
      return msck(db, msckDesc);
    }
    DescTableDesc descTbl = work.getDescTblDesc();
    if (descTbl != null) {
      return describeTable(db, descTbl);
    }
    DescFunctionDesc descFunc = work.getDescFunctionDesc();
    if (descFunc != null) {
      return describeFunction(db, descFunc);
    }
    ShowDatabasesDesc showDatabases = work.getShowDatabasesDesc();
    if (showDatabases != null) {
      return showDatabases(db, showDatabases);
    }
    ShowTablesDesc showTbls = work.getShowTblsDesc();
    if (showTbls != null) {
      return showTablesOrViews(db, showTbls);
    }
    ShowColumnsDesc showCols = work.getShowColumnsDesc();
    if (showCols != null) {
      return showColumns(db, showCols);
    }
    ShowTableStatusDesc showTblStatus = work.getShowTblStatusDesc();
    if (showTblStatus != null) {
      return showTableStatus(db, showTblStatus);
    }
    ShowTblPropertiesDesc showTblProperties = work.getShowTblPropertiesDesc();
    if (showTblProperties != null) {
      return showTableProperties(db, showTblProperties);
    }
    ShowFunctionsDesc showFuncs = work.getShowFuncsDesc();
    if (showFuncs != null) {
      return showFunctions(db, showFuncs);
    }
    ShowLocksDesc showLocks = work.getShowLocksDesc();
    if (showLocks != null) {
      return showLocks(db, showLocks);
    }
    ShowCompactionsDesc compactionsDesc = work.getShowCompactionsDesc();
    if (compactionsDesc != null) {
      return showCompactions(db, compactionsDesc);
    }
    ShowTxnsDesc txnsDesc = work.getShowTxnsDesc();
    if (txnsDesc != null) {
      return showTxns(db, txnsDesc);
    }
    AbortTxnsDesc abortTxnsDesc = work.getAbortTxnsDesc();
    if (abortTxnsDesc != null) {
      return abortTxns(db, abortTxnsDesc);
    }
    LockTableDesc lockTbl = work.getLockTblDesc();
    if (lockTbl != null) {
      return lockTable(db, lockTbl);
    }
    UnlockTableDesc unlockTbl = work.getUnlockTblDesc();
    if (unlockTbl != null) {
      return unlockTable(db, unlockTbl);
    }
    ShowPartitionsDesc showParts = work.getShowPartsDesc();
    if (showParts != null) {
      return showPartitions(db, showParts);
    }
    ShowCreateDatabaseDesc showCreateDb = work.getShowCreateDbDesc();
    if (showCreateDb != null) {
      return showCreateDatabase(db, showCreateDb);
    }
    ShowCreateTableDesc showCreateTbl = work.getShowCreateTblDesc();
    if (showCreateTbl != null) {
      return showCreateTable(db, showCreateTbl);
    }
    ShowConfDesc showConf = work.getShowConfDesc();
    if (showConf != null) {
      return showConf(db, showConf);
    }
    RoleDDLDesc roleDDLDesc = work.getRoleDDLDesc();
    if (roleDDLDesc != null) {
      return roleDDL(db, roleDDLDesc);
    }
    GrantDesc grantDesc = work.getGrantDesc();
    if (grantDesc != null) {
      return grantOrRevokePrivileges(db, grantDesc.getPrincipals(), grantDesc.getPrivileges(),
          grantDesc.getPrivilegeSubjectDesc(), grantDesc.getGrantor(), grantDesc.getGrantorType(),
          grantDesc.isGrantOption(), true);
    }
    RevokeDesc revokeDesc = work.getRevokeDesc();
    if (revokeDesc != null) {
      return grantOrRevokePrivileges(db, revokeDesc.getPrincipals(), revokeDesc.getPrivileges(),
          revokeDesc.getPrivilegeSubjectDesc(), null, null, revokeDesc.isGrantOption(), false);
    }
    ShowGrantDesc showGrantDesc = work.getShowGrantDesc();
    if (showGrantDesc != null) {
      return showGrants(db, showGrantDesc);
    }
    GrantRevokeRoleDDL grantOrRevokeRoleDDL = work.getGrantRevokeRoleDDL();
    if (grantOrRevokeRoleDDL != null) {
      return grantOrRevokeRole(db, grantOrRevokeRoleDDL);
    }
    AlterTablePartMergeFilesDesc mergeFilesDesc = work.getMergeFilesDesc();
    if (mergeFilesDesc != null) {
      return mergeFiles(db, mergeFilesDesc, driverContext);
    }
    AlterTableAlterPartDesc alterPartDesc = work.getAlterTableAlterPartDesc();
    if (alterPartDesc != null) {
      return alterTableAlterPart(db, alterPartDesc);
    }
    TruncateTableDesc truncateTableDesc = work.getTruncateTblDesc();
    if (truncateTableDesc != null) {
      return truncateTable(db, truncateTableDesc);
    }
    AlterTableExchangePartition alterTableExchangePartition = work.getAlterTableExchangePartition();
    if (alterTableExchangePartition != null) {
      return exchangeTablePartition(db, alterTableExchangePartition);
    }
    CacheMetadataDesc cacheMetadataDesc = work.getCacheMetadataDesc();
    if (cacheMetadataDesc != null) {
      return cacheMetadata(db, cacheMetadataDesc);
    }
    InsertTableDesc insertTableDesc = work.getInsertTableDesc();
    if (insertTableDesc != null) {
      return insertCommitWork(db, insertTableDesc);
    }
    PreInsertTableDesc preInsertTableDesc = work.getPreInsertTableDesc();
    if (preInsertTableDesc != null) {
      return preInsertWork(db, preInsertTableDesc);
    }
    KillQueryDesc killQueryDesc = work.getKillQueryDesc();
    if (killQueryDesc != null) {
      return killQuery(db, killQueryDesc);
    }
    if (work.getCreateResourcePlanDesc() != null) {
      return createResourcePlan(db, work.getCreateResourcePlanDesc());
    }
    if (work.getShowResourcePlanDesc() != null) {
      return showResourcePlans(db, work.getShowResourcePlanDesc());
    }
    if (work.getAlterResourcePlanDesc() != null) {
      return alterResourcePlan(db, work.getAlterResourcePlanDesc());
    }
    if (work.getDropResourcePlanDesc() != null) {
      return dropResourcePlan(db, work.getDropResourcePlanDesc());
    }
    if (work.getCreateWMTriggerDesc() != null) {
      return createWMTrigger(db, work.getCreateWMTriggerDesc());
    }
    if (work.getAlterWMTriggerDesc() != null) {
      return alterWMTrigger(db, work.getAlterWMTriggerDesc());
    }
    if (work.getDropWMTriggerDesc() != null) {
      return dropWMTrigger(db, work.getDropWMTriggerDesc());
    }
    if (work.getWmPoolDesc() != null) {
      return createOrAlterWMPool(db, work.getWmPoolDesc());
    }
    if (work.getDropWMPoolDesc() != null) {
      return dropWMPool(db, work.getDropWMPoolDesc());
    }
    if (work.getWmMappingDesc() != null) {
      return createOrAlterWMMapping(db, work.getWmMappingDesc());
    }
    if (work.getDropWMMappingDesc() != null) {
      return dropWMMapping(db, work.getDropWMMappingDesc());
    }
    if (work.getTriggerToPoolMappingDesc() != null) {
      return createOrDropTriggerToPoolMapping(db, work.getTriggerToPoolMappingDesc());
    }
    if (work.getAlterMaterializedViewDesc() != null) {
      return alterMaterializedView(db, work.getAlterMaterializedViewDesc());
    }
  } catch (Throwable e) {
    failed(e);
    return 1;
  }
  assert false;
  return 0;
}
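Every branch of execute follows the same shape: DDLWork exposes one getter per DDL operation, at most one of them returns a non-null descriptor, and the first populated descriptor wins. The trailing assert false records the invariant that the planner never hands the task an empty DDLWork. A minimal standalone sketch of that dispatch shape, using hypothetical Work and descriptor types rather than Hive's real API:

// Hypothetical types; illustrates the one-non-null-descriptor dispatch, not Hive code.
final class DropTableDescSketch { final String tableName; DropTableDescSketch(String t) { tableName = t; } }
final class CreateTableDescSketch { final String tableName; CreateTableDescSketch(String t) { tableName = t; } }

final class WorkSketch {
  DropTableDescSketch dropTblDesc;       // the planner populates exactly one of these
  CreateTableDescSketch createTblDesc;
}

final class DispatcherSketch {
  int execute(WorkSketch work) {
    if (work.createTblDesc != null) {
      return createTable(work.createTblDesc);  // each branch returns immediately
    }
    if (work.dropTblDesc != null) {
      return dropTable(work.dropTblDesc);
    }
    assert false : "planner emitted a Work with no descriptor";
    return 0;
  }

  private int createTable(CreateTableDescSketch d) { System.out.println("create " + d.tableName); return 0; }
  private int dropTable(DropTableDescSketch d) { System.out.println("drop " + d.tableName); return 0; }
}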
Use of org.apache.hadoop.hive.ql.plan.DropTableDesc in project hive by apache.
From the class DDLSemanticAnalyzer, method analyzeDropTable:
private void analyzeDropTable(ASTNode ast, TableType expectedType) throws SemanticException {
  String tableName = getUnescapedName((ASTNode) ast.getChild(0));
  boolean ifExists = (ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null);
  // we want to signal an error if the table/view doesn't exist and we're
  // configured not to fail silently
  boolean throwException = !ifExists && !HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT);
  ReplicationSpec replicationSpec = new ReplicationSpec(ast);
  Table tab = getTable(tableName, throwException);
  if (tab != null) {
    inputs.add(new ReadEntity(tab));
    outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_EXCLUSIVE));
  }
  boolean ifPurge = (ast.getFirstChildWithType(HiveParser.KW_PURGE) != null);
  DropTableDesc dropTblDesc = new DropTableDesc(tableName, expectedType, ifExists, ifPurge, replicationSpec);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc)));
}
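The throwException flag encodes the drop-failure policy: error out only when IF EXISTS is absent and ConfVars.DROPIGNORESNONEXISTENT is off. A standalone restatement of that rule as a hypothetical helper (class and method names are invented for illustration; the expected results in the comments are derived from the code above, not from a live run):

// Hypothetical helper, not Hive's API: mirrors throwException = !ifExists && !ignoreNonExistent.
final class DropPolicySketch {
  static boolean shouldFailOnMissingTable(boolean ifExists, boolean ignoreNonExistent) {
    return !ifExists && !ignoreNonExistent;
  }

  public static void main(String[] args) {
    System.out.println(shouldFailOnMissingTable(false, false)); // true:  DROP TABLE t; fails if t is missing
    System.out.println(shouldFailOnMissingTable(true, false));  // false: DROP TABLE IF EXISTS t; is a silent no-op
    System.out.println(shouldFailOnMissingTable(false, true));  // false: the config suppresses the error even without IF EXISTS
  }
}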
Use of org.apache.hadoop.hive.ql.plan.DropTableDesc in project hive by apache.
From the class DDLSemanticAnalyzer, method analyzeAlterTableDropParts:
private void analyzeAlterTableDropParts(String[] qualified, ASTNode ast, boolean expectView) throws SemanticException {
  boolean ifExists = (ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null)
      || HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT);
  // If the drop has to fail on non-existent partitions, we cannot batch expressions.
  // That is because we actually have to check each separate expression for existence.
  // We could do a small optimization for the case where expr has all columns and all
  // operators are equality, if we assume those would always match one partition (which
  // may not be true with legacy, non-normalized column values). This is probably a
  // popular case but that's kinda hacky. Let's not do it for now.
  boolean canGroupExprs = ifExists;
  boolean mustPurge = (ast.getFirstChildWithType(HiveParser.KW_PURGE) != null);
  ReplicationSpec replicationSpec = new ReplicationSpec(ast);
  Table tab = null;
  try {
    tab = getTable(qualified);
  } catch (SemanticException se) {
    if (replicationSpec.isInReplicationScope()
        && ((se.getCause() instanceof InvalidTableException)
            || (se.getMessage().contains(ErrorMsg.INVALID_TABLE.getMsg())))) {
      // TODO : the contains message check is fragile, we should refactor SemanticException to be
      // queriable for error code, and not simply have a message
      // NOTE : IF_EXISTS might also want to invoke this, but there's a good possibility
      // that IF_EXISTS is stricter about table existence, and applies only to the ptn.
      // Therefore, ignoring IF_EXISTS here.
      // We just return in that case, no drop needed.
      return;
    } else {
      throw se;
    }
  }
  Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs = getFullPartitionSpecs(ast, tab, canGroupExprs);
  if (partSpecs.isEmpty()) {
    // nothing to do
    return;
  }
  validateAlterTableType(tab, AlterTableTypes.DROPPARTITION, expectView);
  ReadEntity re = new ReadEntity(tab);
  re.noLockNeeded();
  inputs.add(re);
  addTableDropPartsOutputs(tab, partSpecs.values(), !ifExists);
  DropTableDesc dropTblDesc = new DropTableDesc(getDotName(qualified), partSpecs,
      expectView ? TableType.VIRTUAL_VIEW : null, mustPurge, replicationSpec);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc)));
}
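The canGroupExprs flag captures the trade-off the comment describes: drop expressions can only be batched into one metastore call when a non-match is tolerated, because a strict drop must resolve each expression separately to know which one failed. A self-contained sketch of that decision, with strings standing in for ExprNodeGenericFuncDesc and a predicate standing in for the metastore existence check (all names hypothetical):

import java.util.Arrays;
import java.util.List;
import java.util.function.Predicate;

final class DropPartsSketch {
  // exists is a stand-in for a per-expression metastore existence lookup.
  static void drop(List<String> exprs, boolean ifExists, Predicate<String> exists) {
    if (ifExists) {
      // Failures tolerated: group everything into one call; unmatched exprs simply drop nothing.
      System.out.println("batched drop: " + exprs);
      return;
    }
    // Must fail on a non-existent partition: check each expression individually.
    for (String expr : exprs) {
      if (!exists.test(expr)) {
        throw new IllegalArgumentException("no partition matches " + expr);
      }
      System.out.println("drop: " + expr);
    }
  }

  public static void main(String[] args) {
    drop(Arrays.asList("ds='2018-01-01'", "ds='2018-01-02'"), true, e -> true);
  }
}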
Use of org.apache.hadoop.hive.ql.plan.DropTableDesc in project hive by apache.
From the class DDLTask, method dropPartitions:
private void dropPartitions(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveException {
  ReplicationSpec replicationSpec = dropTbl.getReplicationSpec();
  if (replicationSpec.isInReplicationScope()) {
    // In replication scope, drop only the partitions this event is allowed to replace,
    // as judged from each partition's replication-state parameter key values.
    for (DropTableDesc.PartSpec partSpec : dropTbl.getPartSpecs()) {
      List<Partition> partitions = new ArrayList<>();
      try {
        db.getPartitionsByExpr(tbl, partSpec.getPartSpec(), conf, partitions);
        for (Partition p : Iterables.filter(partitions, replicationSpec.allowEventReplacementInto())) {
          db.dropPartition(tbl.getDbName(), tbl.getTableName(), p.getValues(), true);
        }
      } catch (NoSuchObjectException e) {
        // ignore NSOE because that means there's nothing to drop.
      } catch (Exception e) {
        throw new HiveException(e.getMessage(), e);
      }
    }
    return;
  }
  // ifExists is currently verified in DDLSemanticAnalyzer
  List<Partition> droppedParts = db.dropPartitions(dropTbl.getTableName(), dropTbl.getPartSpecs(),
      PartitionDropOptions.instance().deleteData(true).ifExists(true).purgeData(dropTbl.getIfPurge()));
  for (Partition partition : droppedParts) {
    console.printInfo("Dropped the partition " + partition.getName());
    // We have already locked the table, don't lock the partitions.
    addIfAbsentByName(new WriteEntity(partition, WriteEntity.WriteType.DDL_NO_LOCK));
  }
}
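The non-replication path collects its flags through PartitionDropOptions, the fluent builder chained in the call above. A generic sketch of the same builder style, under the assumption that it reduces to setters returning this (DropOptions is a hypothetical class, not Hive's):

// Hypothetical fluent options class in the style of the PartitionDropOptions call above.
final class DropOptions {
  boolean deleteData;
  boolean ifExists;
  boolean purgeData;

  static DropOptions instance() { return new DropOptions(); }
  DropOptions deleteData(boolean b) { this.deleteData = b; return this; }
  DropOptions ifExists(boolean b) { this.ifExists = b; return this; }
  DropOptions purgeData(boolean b) { this.purgeData = b; return this; }

  public static void main(String[] args) {
    // Usage mirrors dropPartitions: all three flags set in a single chained expression.
    DropOptions opts = DropOptions.instance().deleteData(true).ifExists(true).purgeData(false);
    System.out.println(opts.deleteData + " " + opts.ifExists + " " + opts.purgeData);
  }
}

Returning this from each setter is what lets dropPartitions pass all of its flags to the metastore call as one value, instead of threading three booleans through the method signature.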