use of org.apache.hadoop.hive.ql.ddl.DDLDesc in project hive by apache.
the class AbstractWMMappingAnalyzer method analyzeInternal.
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
if (root.getChildCount() < 4 || root.getChildCount() > 5) {
throw new SemanticException("Invalid syntax for create or alter mapping.");
}
String resourcePlanName = unescapeIdentifier(root.getChild(0).getText());
String entityType = root.getChild(1).getText();
String entityName = PlanUtils.stripQuotes(root.getChild(2).getText());
String poolPath = root.getChild(3).getType() == HiveParser.TOK_UNMANAGED ? null : // Null path => unmanaged
WMUtils.poolPath(root.getChild(3));
Integer ordering = root.getChildCount() == 5 ? Integer.valueOf(root.getChild(4).getText()) : null;
DDLDesc desc = getDesc(resourcePlanName, entityType, entityName, poolPath, ordering);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
DDLUtils.addServiceOutput(conf, getOutputs());
}
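The analyzer above is a template method: analyzeInternal does the shared argument parsing and validation, while the concrete create/alter subclasses supply getDesc to build the right descriptor. A minimal, self-contained sketch of that pattern follows; every class name in it is a hypothetical stand-in, not one of Hive's real classes.
import java.util.Arrays;
import java.util.List;

abstract class AbstractMappingAnalyzer {
    // Shared parsing/validation, loosely analogous to analyzeInternal above.
    final Object analyze(List<String> args) {
        if (args.size() < 4 || args.size() > 5) {
            throw new IllegalArgumentException("Invalid syntax for create or alter mapping.");
        }
        String plan = args.get(0);
        String entityType = args.get(1);
        String entityName = args.get(2);
        String poolPath = "UNMANAGED".equals(args.get(3)) ? null : args.get(3); // null path => unmanaged
        Integer ordering = args.size() == 5 ? Integer.valueOf(args.get(4)) : null;
        return getDesc(plan, entityType, entityName, poolPath, ordering);
    }

    // Subclasses decide whether the result describes a "create" or an "alter" mapping.
    protected abstract Object getDesc(String plan, String type, String name, String pool, Integer ordering);
}

class CreateMappingAnalyzer extends AbstractMappingAnalyzer {
    @Override
    protected Object getDesc(String plan, String type, String name, String pool, Integer ordering) {
        return "CREATE mapping " + type + "/" + name + " -> " + pool + " in plan " + plan;
    }
}

public class MappingAnalyzerDemo {
    public static void main(String[] args) {
        AbstractMappingAnalyzer analyzer = new CreateMappingAnalyzer();
        System.out.println(analyzer.analyze(Arrays.asList("rp1", "USER", "alice", "pool1", "3")));
    }
}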
use of org.apache.hadoop.hive.ql.ddl.DDLDesc in project hive by apache.
the class CreateTableHook method postAnalyze.
@Override
public void postAnalyze(HiveSemanticAnalyzerHookContext context, List<Task<?>> rootTasks) throws SemanticException {
if (rootTasks.size() == 0) {
// No DDL task will have been created if this is a CREATE TABLE IF NOT EXISTS
return;
}
Task<?> t = rootTasks.get(rootTasks.size() - 1);
if (!(t instanceof DDLTask)) {
return;
}
DDLTask task = (DDLTask) t;
DDLDesc d = task.getWork().getDDLDesc();
if (!(d instanceof CreateTableDesc)) {
return;
}
CreateTableDesc desc = (CreateTableDesc) d;
Map<String, String> tblProps = desc.getTblProps();
if (tblProps == null) {
// tblProps will be null if the user did not specify TBLPROPERTIES in the
// CREATE TABLE command.
tblProps = new HashMap<String, String>();
}
// first check whether the user is allowed to create the table.
String storageHandler = desc.getStorageHandler();
if (StringUtils.isNotEmpty(storageHandler)) {
try {
HiveStorageHandler storageHandlerInst = HCatUtil.getStorageHandler(context.getConf(), desc.getStorageHandler(), desc.getSerName(), desc.getInputFormat(), desc.getOutputFormat());
// Authorization checks are performed by the storageHandler.getAuthorizationProvider(), if
// StorageDelegationAuthorizationProvider is used.
} catch (IOException e) {
throw new SemanticException(e);
}
}
try {
Table table = context.getHive().newTable(desc.getDbTableName());
if (desc.getLocation() != null) {
table.setDataLocation(new Path(desc.getLocation()));
}
if (desc.getStorageHandler() != null) {
table.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE, desc.getStorageHandler());
}
for (Map.Entry<String, String> prop : tblProps.entrySet()) {
table.setProperty(prop.getKey(), prop.getValue());
}
for (Map.Entry<String, String> prop : desc.getSerdeProps().entrySet()) {
table.setSerdeParam(prop.getKey(), prop.getValue());
}
if (HCatAuthUtil.isAuthorizationEnabled(context.getConf())) {
authorize(table, Privilege.CREATE);
}
} catch (HiveException ex) {
throw new SemanticException(ex);
}
desc.setTblProps(tblProps);
// tableName is a field of this hook, populated earlier in preAnalyze
context.getConf().set(HCatConstants.HCAT_CREATE_TBL_NAME, tableName);
}
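postAnalyze above is a chain of guards: bail out if there is no task, if the tail task is not a DDLTask, or if its descriptor is not a CreateTableDesc, and only then apply the HCat-specific table properties. A small, self-contained sketch of that tail-unwrapping idea follows, using stand-in Task/Work classes rather than Hive's API.
import java.util.Collections;
import java.util.List;
import java.util.Optional;

public class TailDescSketch {
    static class CreateTableDesc { }
    static class Work { final Object desc; Work(Object d) { desc = d; } Object getDDLDesc() { return desc; } }
    static class Task { final Work work; Task(Work w) { work = w; } Work getWork() { return work; } }

    // Mirrors the guards above: empty task list or an unexpected descriptor type both bail out.
    static <T> Optional<T> tailDesc(List<Task> rootTasks, Class<T> expected) {
        if (rootTasks.isEmpty()) {
            return Optional.empty(); // e.g. CREATE TABLE IF NOT EXISTS may produce no task
        }
        Object desc = rootTasks.get(rootTasks.size() - 1).getWork().getDDLDesc();
        return expected.isInstance(desc) ? Optional.of(expected.cast(desc)) : Optional.empty();
    }

    public static void main(String[] args) {
        List<Task> tasks = Collections.singletonList(new Task(new Work(new CreateTableDesc())));
        System.out.println(tailDesc(tasks, CreateTableDesc.class).isPresent()); // prints: true
    }
}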
use of org.apache.hadoop.hive.ql.ddl.DDLDesc in project hive by apache.
the class HCatSemanticAnalyzer method authorizeDDLWork.
@Override
protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext cntxt, Hive hive, DDLWork work) throws HiveException {
DDLDesc ddlDesc = work.getDDLDesc();
if (ddlDesc instanceof ShowDatabasesDesc) {
authorize(HiveOperation.SHOWDATABASES.getInputRequiredPrivileges(), HiveOperation.SHOWDATABASES.getOutputRequiredPrivileges());
} else if (ddlDesc instanceof DropDatabaseDesc) {
DropDatabaseDesc dropDb = (DropDatabaseDesc) ddlDesc;
Database db = cntxt.getHive().getDatabase(dropDb.getDatabaseName());
if (db != null) {
// if above returned a null, then the db does not exist - probably a
// "drop database if exists" clause - don't try to authorize then.
authorize(db, Privilege.DROP);
}
} else if (ddlDesc instanceof DescDatabaseDesc) {
DescDatabaseDesc descDb = (DescDatabaseDesc) ddlDesc;
Database db = cntxt.getHive().getDatabase(descDb.getDatabaseName());
authorize(db, Privilege.SELECT);
} else if (ddlDesc instanceof SwitchDatabaseDesc) {
SwitchDatabaseDesc switchDb = (SwitchDatabaseDesc) ddlDesc;
Database db = cntxt.getHive().getDatabase(switchDb.getDatabaseName());
authorize(db, Privilege.SELECT);
} else if (ddlDesc instanceof ShowTablesDesc) {
ShowTablesDesc showTables = (ShowTablesDesc) ddlDesc;
String dbName = showTables.getDbName() == null ? SessionState.get().getCurrentDatabase() : showTables.getDbName();
authorize(cntxt.getHive().getDatabase(dbName), Privilege.SELECT);
} else if (ddlDesc instanceof DescTableDesc) {
// be careful when authorizing a table based only on its name: if columns
// have a separate authorization domain, that must be honored
DescTableDesc descTable = (DescTableDesc) ddlDesc;
String tableName = extractTableName(descTable.getDbTableName());
authorizeTable(cntxt.getHive(), tableName, Privilege.SELECT);
} else if (ddlDesc instanceof ShowTableStatusDesc) {
ShowTableStatusDesc showTableStatus = (ShowTableStatusDesc) ddlDesc;
String dbName = showTableStatus.getDbName() == null ? SessionState.get().getCurrentDatabase() : showTableStatus.getDbName();
authorize(cntxt.getHive().getDatabase(dbName), Privilege.SELECT);
} else if (ddlDesc instanceof AlterTableDropPartitionDesc) {
AlterTableDropPartitionDesc dropPartition = (AlterTableDropPartitionDesc) ddlDesc;
// this is actually an ALTER TABLE DROP PARTITION statement
for (AlterTableDropPartitionDesc.PartitionDesc partSpec : dropPartition.getPartSpecs()) {
// partitions are not added as write entries in drop partitions in Hive
Table table = hive.getTable(SessionState.get().getCurrentDatabase(), dropPartition.getTableName());
List<Partition> partitions = null;
try {
partitions = hive.getPartitionsByFilter(table, partSpec.getPartSpec().getExprString());
} catch (Exception e) {
throw new HiveException(e);
}
for (Partition part : partitions) {
authorize(part, Privilege.DROP);
}
}
} else if (ddlDesc instanceof ShowPartitionsDesc) {
ShowPartitionsDesc showParts = (ShowPartitionsDesc) ddlDesc;
String tableName = extractTableName(showParts.getTabName());
authorizeTable(cntxt.getHive(), tableName, Privilege.SELECT);
} else if (ddlDesc instanceof AlterTableSetLocationDesc) {
AlterTableSetLocationDesc alterTable = (AlterTableSetLocationDesc) ddlDesc;
Table table = hive.getTable(SessionState.get().getCurrentDatabase(), Utilities.getDbTableName(alterTable.getDbTableName())[1], false);
Partition part = null;
if (alterTable.getPartitionSpec() != null) {
part = hive.getPartition(table, alterTable.getPartitionSpec(), false);
}
String newLocation = alterTable.getLocation();
/* HCat requires ALTER_DATA privileges for ALTER TABLE SET LOCATION statements,
 * on both the old table/partition location and the new location.
 */
if (part != null) {
// authorize for the old location...
authorize(part, Privilege.ALTER_DATA);
// ...and for the new location
part.setLocation(newLocation);
authorize(part, Privilege.ALTER_DATA);
} else {
// authorize for the old location...
authorize(table, Privilege.ALTER_DATA);
// ...and for the new location
table.getTTable().getSd().setLocation(newLocation);
authorize(table, Privilege.ALTER_DATA);
}
}
}
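authorizeDDLWork above dispatches on the concrete DDLDesc subtype and maps each statement kind to the privilege it needs (and, for SET LOCATION, authorizes both the old and the new location). The condensed, self-contained sketch below mirrors that instanceof-dispatch shape with illustrative stand-in descriptor and privilege types, not Hive's.
public class DdlAuthSketch {
    enum Privilege { SELECT, CREATE, DROP, ALTER_DATA }

    // Stand-in descriptor classes, not Hive's DDLDesc hierarchy.
    static class ShowTablesDesc { String dbName = "default"; }
    static class DropDatabaseDesc { String databaseName = "tmp_db"; }

    // Mirrors the instanceof-based dispatch in authorizeDDLWork above.
    static void authorizeDDLWork(Object ddlDesc) {
        if (ddlDesc instanceof ShowTablesDesc) {
            ShowTablesDesc showTables = (ShowTablesDesc) ddlDesc;
            authorize(showTables.dbName, Privilege.SELECT);
        } else if (ddlDesc instanceof DropDatabaseDesc) {
            DropDatabaseDesc dropDb = (DropDatabaseDesc) ddlDesc;
            authorize(dropDb.databaseName, Privilege.DROP);
        }
        // unhandled descriptor types fall through without extra checks, as above
    }

    static void authorize(String entity, Privilege privilege) {
        System.out.println("authorize " + privilege + " on " + entity);
    }

    public static void main(String[] args) {
        authorizeDDLWork(new ShowTablesDesc());
        authorizeDDLWork(new DropDatabaseDesc());
    }
}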
use of org.apache.hadoop.hive.ql.ddl.DDLDesc in project hive by apache.
the class CreateDatabaseHook method authorizeDDLWork.
@Override
protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext cntxt, Hive hive, DDLWork work) throws HiveException {
DDLDesc ddlDesc = work.getDDLDesc();
if (ddlDesc instanceof CreateDatabaseDesc) {
CreateDatabaseDesc createDb = (CreateDatabaseDesc) ddlDesc;
Database db = new Database(createDb.getName(), createDb.getComment(), createDb.getLocationUri(), createDb.getDatabaseProperties());
authorize(db, Privilege.CREATE);
}
}
use of org.apache.hadoop.hive.ql.ddl.DDLDesc in project hive by apache.
the class TaskCompiler method patchUpAfterCTASorMaterializedView.
private void patchUpAfterCTASorMaterializedView(List<Task<?>> rootTasks, Set<ReadEntity> inputs, Set<WriteEntity> outputs, Task<?> createTask, boolean createTaskAfterMoveTask) {
// clear the mapredWork output file from outputs for CTAS
// DDLWork at the tail of the chain will have the output
Iterator<WriteEntity> outIter = outputs.iterator();
while (outIter.hasNext()) {
switch(outIter.next().getType()) {
case DFS_DIR:
case LOCAL_DIR:
outIter.remove();
break;
default:
break;
}
}
// find all leaf tasks and make the DDLTask a dependent task of all of them
Set<Task<?>> leaves = new LinkedHashSet<>();
getLeafTasks(rootTasks, leaves);
assert (leaves.size() > 0);
// Target task is supposed to be the last task
Task<?> targetTask = createTask;
for (Task<?> task : leaves) {
if (task instanceof StatsTask) {
// StatsTask requires the table to already exist
for (Task<?> parentOfStatsTask : task.getParentTasks()) {
if (parentOfStatsTask instanceof MoveTask && !createTaskAfterMoveTask) {
// For partitioned CTAS, we need to create the table before the move task
// as we need to create the partitions in metastore and for that we should
// have already registered the table
interleaveTask(parentOfStatsTask, createTask);
} else {
parentOfStatsTask.addDependentTask(createTask);
}
}
for (Task<?> parentOfCrtTblTask : createTask.getParentTasks()) {
parentOfCrtTblTask.removeDependentTask(task);
}
createTask.addDependentTask(task);
targetTask = task;
} else if (task instanceof MoveTask && !createTaskAfterMoveTask) {
// For partitioned CTAS, we need to create the table before the move task
// as we need to create the partitions in metastore and for that we should
// have already registered the table
interleaveTask(task, createTask);
targetTask = task;
} else {
task.addDependentTask(createTask);
}
}
// Add task to insert / delete materialized view from registry if needed
if (createTask instanceof DDLTask) {
DDLTask ddlTask = (DDLTask) createTask;
DDLWork work = ddlTask.getWork();
DDLDesc desc = work.getDDLDesc();
if (desc instanceof CreateMaterializedViewDesc) {
CreateMaterializedViewDesc createViewDesc = (CreateMaterializedViewDesc) desc;
String tableName = createViewDesc.getViewName();
boolean retrieveAndInclude = createViewDesc.isRewriteEnabled();
MaterializedViewUpdateDesc materializedViewUpdateDesc = new MaterializedViewUpdateDesc(tableName, retrieveAndInclude, false, false);
DDLWork ddlWork = new DDLWork(inputs, outputs, materializedViewUpdateDesc);
targetTask.addDependentTask(TaskFactory.get(ddlWork, conf));
} else if (desc instanceof AlterMaterializedViewRewriteDesc) {
AlterMaterializedViewRewriteDesc alterMVRewriteDesc = (AlterMaterializedViewRewriteDesc) desc;
String tableName = alterMVRewriteDesc.getMaterializedViewName();
boolean retrieveAndInclude = alterMVRewriteDesc.isRewriteEnable();
boolean disableRewrite = !alterMVRewriteDesc.isRewriteEnable();
MaterializedViewUpdateDesc materializedViewUpdateDesc = new MaterializedViewUpdateDesc(tableName, retrieveAndInclude, disableRewrite, false);
DDLWork ddlWork = new DDLWork(inputs, outputs, materializedViewUpdateDesc);
targetTask.addDependentTask(TaskFactory.get(ddlWork, conf));
}
}
}
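The patch-up above drops the query's directory outputs, collects the leaf tasks of the plan, and rewires the DAG so the CTAS or materialized-view create task (plus any materialized view registry update) runs only after the query tasks finish. A toy, self-contained illustration of the leaf-finding and rewiring step follows; the Task class in it is a stand-in, not Hive's.
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

public class CtasPatchUpDemo {
    // Toy task node with dependent ("child") tasks, loosely mirroring Task<?> above.
    static class Task {
        final String name;
        final List<Task> children = new ArrayList<>();
        Task(String name) { this.name = name; }
        void addDependentTask(Task t) { children.add(t); }
    }

    // Depth-first walk collecting tasks that have no dependents (the leaves of the DAG).
    static void getLeafTasks(List<Task> tasks, Set<Task> leaves) {
        for (Task t : tasks) {
            if (t.children.isEmpty()) {
                leaves.add(t);
            } else {
                getLeafTasks(t.children, leaves);
            }
        }
    }

    public static void main(String[] args) {
        Task query = new Task("query");
        Task move = new Task("move");
        query.addDependentTask(move);

        Task createTable = new Task("create-table");
        Set<Task> leaves = new LinkedHashSet<>();
        getLeafTasks(Collections.singletonList(query), leaves);
        for (Task leaf : leaves) {
            leaf.addDependentTask(createTable); // the table is created only after every leaf finishes
        }
        System.out.println(createTable.name + " now depends on " + leaves.size() + " leaf task(s)");
    }
}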