Use of org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc in project hive by apache.
The class AlterTableSetOwnerAnalyzer, method analyzeCommand.
@Override
protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpecFromFramework, ASTNode command)
    throws SemanticException {
  PrincipalDesc ownerPrincipal = AuthorizationParseUtils.getPrincipalDesc((ASTNode) command.getChild(0));
  if (ownerPrincipal.getType() == null) {
    throw new SemanticException("Owner type can't be null in alter table set owner command");
  }
  if (ownerPrincipal.getName() == null) {
    throw new SemanticException("Owner name can't be null in alter table set owner command");
  }

  AlterTableSetOwnerDesc desc = new AlterTableSetOwnerDesc(tableName, ownerPrincipal);
  addInputsOutputsAlterTable(tableName, null, desc, desc.getType(), false);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf));
  setAcidDdlDesc(getTable(tableName), desc);
}
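For context, the PrincipalDesc above is what a statement such as ALTER TABLE t1 SET OWNER USER alice parses into. A minimal sketch of building the same principal directly (the table and user names are illustrative, not from the snippet; the two-argument constructor is the one used elsewhere in this listing):

import org.apache.hadoop.hive.metastore.api.PrincipalType;
import org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc;

// Equivalent of the principal produced for: ALTER TABLE t1 SET OWNER USER alice
PrincipalDesc owner = new PrincipalDesc("alice", PrincipalType.USER);
// Both fields are non-null here, so the analyzer's two guard clauses would pass.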
Use of org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc in project hive by apache.
The class AlterDatabaseSetOwnerAnalyzer, method analyzeInternal.
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  String databaseName = getUnescapedName((ASTNode) root.getChild(0));
  PrincipalDesc principalDesc = AuthorizationParseUtils.getPrincipalDesc((ASTNode) root.getChild(1));
  if (principalDesc.getName() == null) {
    throw new SemanticException("Owner name can't be null in alter database set owner command");
  }
  if (principalDesc.getType() == null) {
    throw new SemanticException("Owner type can't be null in alter database set owner command");
  }

  AlterDatabaseSetOwnerDesc desc = new AlterDatabaseSetOwnerDesc(databaseName, principalDesc, null);
  addAlterDatabaseDesc(desc);
}
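The analyzer assumes a fixed AST shape: child 0 is the database name, child 1 the principal. A sketch of constructing the same descriptor directly, bypassing the parser ("reports_db" and "etl_admin" are illustrative; the third constructor argument is the replication spec, null here just as in the snippet):

// Equivalent of: ALTER DATABASE reports_db SET OWNER ROLE etl_admin
PrincipalDesc owner = new PrincipalDesc("etl_admin", PrincipalType.ROLE);
AlterDatabaseSetOwnerDesc desc = new AlterDatabaseSetOwnerDesc("reports_db", owner, null /* replication spec */);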
Use of org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc in project hive by apache.
The class AlterDataConnectorSetOwnerAnalyzer, method analyzeInternal.
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  String connectorName = getUnescapedName((ASTNode) root.getChild(0));
  PrincipalDesc principalDesc = AuthorizationParseUtils.getPrincipalDesc((ASTNode) root.getChild(1));
  if (principalDesc.getName() == null) {
    throw new SemanticException("Owner name can't be null in alter connector set owner command");
  }
  if (principalDesc.getType() == null) {
    throw new SemanticException("Owner type can't be null in alter connector set owner command");
  }

  AlterDataConnectorSetOwnerDesc desc = new AlterDataConnectorSetOwnerDesc(connectorName, principalDesc);
  addAlterDataConnectorDesc(desc);
}
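All three analyzers above repeat the same name/type null checks, only varying the command name in the error message. A hypothetical helper (not part of Hive; the method name and "command" parameter are illustrative) that would factor out the validation:

// Hypothetical shared validation; returns the principal so it can be used inline.
static PrincipalDesc requireCompletePrincipal(PrincipalDesc principal, String command) throws SemanticException {
  if (principal.getName() == null) {
    throw new SemanticException("Owner name can't be null in " + command);
  }
  if (principal.getType() == null) {
    throw new SemanticException("Owner type can't be null in " + command);
  }
  return principal;
}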
Use of org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc in project hive by apache.
The class AlterDatabaseHandler, method handle.
@Override
public List<Task<?>> handle(Context context) throws SemanticException {
  AlterDatabaseMessage msg = deserializer.getAlterDatabaseMessage(context.dmd.getPayload());
  String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName;
  try {
    Database oldDb = msg.getDbObjBefore();
    Database newDb = msg.getDbObjAfter();
    AbstractAlterDatabaseDesc alterDbDesc;
    if ((oldDb.getOwnerType() == newDb.getOwnerType()) && oldDb.getOwnerName().equalsIgnoreCase(newDb.getOwnerName())) {
      // If owner information is unchanged, then DB properties would've changed
      Map<String, String> newDbProps = new HashMap<>();
      Map<String, String> dbProps = newDb.getParameters();
      for (Map.Entry<String, String> entry : dbProps.entrySet()) {
        String key = entry.getKey();
        // Ignore the keys which are local to source warehouse
        if (key.startsWith(Utils.BOOTSTRAP_DUMP_STATE_KEY_PREFIX)
            || key.equals(ReplicationSpec.KEY.CURR_STATE_ID_SOURCE.toString())
            || key.equals(ReplicationSpec.KEY.CURR_STATE_ID_TARGET.toString())
            || key.equals(ReplUtils.REPL_CHECKPOINT_KEY)
            || key.equals(ReplChangeManager.SOURCE_OF_REPLICATION)
            || key.equals(ReplUtils.REPL_FIRST_INC_PENDING_FLAG)
            || key.equals(ReplConst.REPL_FAILOVER_ENDPOINT)) {
          continue;
        }
        newDbProps.put(key, entry.getValue());
      }
      alterDbDesc = new AlterDatabaseSetPropertiesDesc(actualDbName, newDbProps, context.eventOnlyReplicationSpec());
    } else {
      alterDbDesc = new AlterDatabaseSetOwnerDesc(actualDbName,
          new PrincipalDesc(newDb.getOwnerName(), newDb.getOwnerType()), context.eventOnlyReplicationSpec());
    }
    Task<DDLWork> alterDbTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, alterDbDesc, true,
        context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
    context.log.debug("Added alter database task : {}:{}", alterDbTask.getId(), actualDbName);
    // Only database object is updated
    updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, null, null);
    return Collections.singletonList(alterDbTask);
  } catch (Exception e) {
    throw (e instanceof SemanticException) ? (SemanticException) e : new SemanticException("Error reading message members", e);
  }
}
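The branch hinges on whether the replicated ALTER DATABASE event changed the owner; anything else is replayed as a properties change. A sketch of that predicate pulled out on its own (a hypothetical helper, not part of Hive):

// True when the event left the owner untouched, so only properties need to
// be replayed. Mirrors the condition in handle() above.
static boolean ownerUnchanged(Database before, Database after) {
  return before.getOwnerType() == after.getOwnerType()
      && before.getOwnerName().equalsIgnoreCase(after.getOwnerName());
}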
Use of org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc in project hive by apache.
The class TestHiveAuthorizationTaskFactory, method testRevokeRoleRole.
/**
* REVOKE ROLE ... FROM ROLE ...
*/
@Test
public void testRevokeRoleRole() throws Exception {
  DDLWork work = analyze("REVOKE ROLE " + ROLE + " FROM ROLE " + ROLE);
  RevokeRoleDesc grantDesc = (RevokeRoleDesc) work.getDDLDesc();
  Assert.assertNotNull("Grant should not be null", grantDesc);
  Assert.assertFalse("With admin option is not specified", grantDesc.isGrantOption());
  Assert.assertEquals(currentUser, grantDesc.getGrantor());
  for (String role : ListSizeMatcher.inList(grantDesc.getRoles()).ofSize(1)) {
    Assert.assertEquals(ROLE, role);
  }
  for (PrincipalDesc principal : ListSizeMatcher.inList(grantDesc.getPrincipals()).ofSize(1)) {
    Assert.assertEquals(PrincipalType.ROLE, principal.getType());
    Assert.assertEquals(ROLE, principal.getName());
  }
}
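The second loop body could equivalently compare against a directly constructed PrincipalDesc; a sketch, assuming PrincipalDesc does not override equals, so fields are compared individually (ROLE is the test's existing constant):

// Expected principal for REVOKE ROLE ... FROM ROLE <ROLE>
PrincipalDesc expected = new PrincipalDesc(ROLE, PrincipalType.ROLE);
Assert.assertEquals(expected.getType(), principal.getType());
Assert.assertEquals(expected.getName(), principal.getName());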