Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
The class DDLSemanticAnalyzer, method analyzeDropDatabase.
private void analyzeDropDatabase(ASTNode ast) throws SemanticException {
  String dbName = unescapeIdentifier(ast.getChild(0).getText());
  boolean ifExists = false;
  boolean ifCascade = false;
  if (null != ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS)) {
    ifExists = true;
  }
  if (null != ast.getFirstChildWithType(HiveParser.TOK_CASCADE)) {
    ifCascade = true;
  }
  Database database = getDatabase(dbName, !ifExists);
  if (database == null) {
    return;
  }
  // if cascade=true, then we need to authorize the drop table action as well
  if (ifCascade) {
    // add the tables as well to outputs
    List<String> tableNames;
    // get names of all tables under this dbName
    try {
      tableNames = db.getAllTables(dbName);
    } catch (HiveException e) {
      throw new SemanticException(e);
    }
    // add tables to outputs
    if (tableNames != null) {
      for (String tableName : tableNames) {
        Table table = getTable(dbName, tableName, true);
        // We want no lock here, as the database lock will cover the tables,
        // and putting a lock will actually cause us to deadlock on ourselves.
        outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK));
      }
    }
  }
  inputs.add(new ReadEntity(database));
  outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_EXCLUSIVE));
  DropDatabaseDesc dropDatabaseDesc = new DropDatabaseDesc(dbName, ifExists, ifCascade, new ReplicationSpec());
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropDatabaseDesc)));
}
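The analyzer resolves the Database up front so IF EXISTS can short-circuit, and under CASCADE it registers every table as an output before the database itself. A minimal standalone sketch of the same existence check and cascade enumeration, done directly against the metastore client rather than inside the analyzer; the database name is hypothetical and a reachable metastore is assumed:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;

public class DropDatabaseSketch {
  public static void main(String[] args) throws Exception {
    HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    try {
      String dbName = "demo_db"; // hypothetical database name
      boolean ifExists = true;
      boolean cascade = true;
      Database database;
      try {
        database = client.getDatabase(dbName);
      } catch (NoSuchObjectException e) {
        if (ifExists) {
          return; // IF EXISTS: a missing database is not an error
        }
        throw e;
      }
      System.out.println("dropping " + database.getName() + " at " + database.getLocationUri());
      if (cascade) {
        // Enumerate the tables first, mirroring how the analyzer records
        // each one as an output before dropping the database.
        for (String tableName : client.getAllTables(dbName)) {
          System.out.println("cascade will drop " + dbName + "." + tableName);
        }
      }
      // deleteData=true, ignoreUnknownDb=ifExists, cascade as requested
      client.dropDatabase(dbName, true, ifExists, cascade);
    } finally {
      client.close();
    }
  }
}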
Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
The class Utils, method resetDbBootstrapDumpState.
public static void resetDbBootstrapDumpState(Hive hiveDb, String dbName, String uniqueKey) throws HiveException {
  Database database = hiveDb.getDatabase(dbName);
  if (database != null) {
    Map<String, String> params = database.getParameters();
    if ((params != null) && params.containsKey(uniqueKey)) {
      params.remove(uniqueKey);
      database.setParameters(params);
      hiveDb.alterDatabase(dbName, database);
      LOG.info("REPL DUMP:: Reset property for Database: {}, Property: {}", dbName, uniqueKey);
    }
  }
}
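The same get-modify-alter pattern works in the other direction. A minimal sketch that sets a dump-state property via the metastore client instead of the Hive wrapper; the key and marker value are illustrative, not Hive's actual constants:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Database;

public class SetDbDumpStateSketch {
  public static void setDbBootstrapDumpState(HiveMetaStoreClient client, String dbName, String uniqueKey)
      throws Exception {
    Database database = client.getDatabase(dbName);
    Map<String, String> params = database.getParameters();
    if (params == null) {
      params = new HashMap<>(); // a freshly created Database may carry no parameters
    }
    params.put(uniqueKey, "ACTIVE"); // illustrative marker value
    database.setParameters(params);
    client.alterDatabase(dbName, database); // persist the updated parameter map
  }

  public static void main(String[] args) throws Exception {
    // "bootstrap.dump.state.demo" is a made-up key for the example.
    setDbBootstrapDumpState(new HiveMetaStoreClient(new HiveConf()), "demo_db", "bootstrap.dump.state.demo");
  }
}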
Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
The class AlterDatabaseHandler, method handle.
@Override
public List<Task<? extends Serializable>> handle(Context context) throws SemanticException {
  if (!context.isTableNameEmpty()) {
    throw new SemanticException("Alter Database are not supported for table-level replication");
  }
  AlterDatabaseMessage msg = deserializer.getAlterDatabaseMessage(context.dmd.getPayload());
  String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName;
  try {
    Database oldDb = msg.getDbObjBefore();
    Database newDb = msg.getDbObjAfter();
    AlterDatabaseDesc alterDbDesc;
    if ((oldDb.getOwnerType() == newDb.getOwnerType()) && oldDb.getOwnerName().equalsIgnoreCase(newDb.getOwnerName())) {
      // If owner information is unchanged, then DB properties would've changed
      Map<String, String> newDbProps = new HashMap<>();
      Map<String, String> dbProps = newDb.getParameters();
      for (Map.Entry<String, String> entry : dbProps.entrySet()) {
        String key = entry.getKey();
        // Ignore the keys which are local to source warehouse
        if (key.startsWith(Utils.BOOTSTRAP_DUMP_STATE_KEY_PREFIX) || key.equals(ReplicationSpec.KEY.CURR_STATE_ID.toString())) {
          continue;
        }
        newDbProps.put(key, entry.getValue());
      }
      alterDbDesc = new AlterDatabaseDesc(actualDbName, newDbProps, context.eventOnlyReplicationSpec());
    } else {
      alterDbDesc = new AlterDatabaseDesc(actualDbName, new PrincipalDesc(newDb.getOwnerName(), newDb.getOwnerType()), context.eventOnlyReplicationSpec());
    }
    Task<DDLWork> alterDbTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, alterDbDesc));
    context.log.debug("Added alter database task : {}:{}", alterDbTask.getId(), actualDbName);
    // Only database object is updated
    updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, null, null);
    return Collections.singletonList(alterDbTask);
  } catch (Exception e) {
    throw (e instanceof SemanticException) ? (SemanticException) e : new SemanticException("Error reading message members", e);
  }
}
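The filtering step is the interesting part: parameters that only make sense on the source warehouse must not leak into the replica. A standalone sketch of that filter; the two key strings are assumptions standing in for Utils.BOOTSTRAP_DUMP_STATE_KEY_PREFIX and ReplicationSpec.KEY.CURR_STATE_ID, which may differ by Hive version:

import java.util.HashMap;
import java.util.Map;

public final class ReplicaPropsFilter {
  // Assumed stand-ins for the constants used by the handler above.
  private static final String BOOTSTRAP_DUMP_STATE_KEY_PREFIX = "bootstrap.dump.state";
  private static final String CURR_STATE_ID = "repl.last.id";

  static Map<String, String> filterSourceLocalProps(Map<String, String> dbProps) {
    Map<String, String> newDbProps = new HashMap<>();
    for (Map.Entry<String, String> entry : dbProps.entrySet()) {
      String key = entry.getKey();
      // Keys local to the source warehouse are dropped, not replicated.
      if (key.startsWith(BOOTSTRAP_DUMP_STATE_KEY_PREFIX) || key.equals(CURR_STATE_ID)) {
        continue;
      }
      newDbProps.put(key, entry.getValue());
    }
    return newDbProps;
  }
}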
Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
The class ImportSemanticAnalyzer, method fixLocationInPartSpec.
/**
 * Helper method to set location properly in partSpec
 */
private static void fixLocationInPartSpec(FileSystem fs, ImportTableDesc tblDesc, Table table, Warehouse wh,
    ReplicationSpec replicationSpec, AddPartitionDesc.OnePartitionDesc partSpec,
    EximUtil.SemanticAnalyzerWrapperContext x) throws MetaException, HiveException, IOException {
  Path tgtPath = null;
  if (tblDesc.getLocation() == null) {
    if (table.getDataLocation() != null) {
      tgtPath = new Path(table.getDataLocation().toString(), Warehouse.makePartPath(partSpec.getPartSpec()));
    } else {
      Database parentDb = x.getHive().getDatabase(tblDesc.getDatabaseName());
      tgtPath = new Path(wh.getDefaultTablePath(parentDb, tblDesc.getTableName()), Warehouse.makePartPath(partSpec.getPartSpec()));
    }
  } else {
    tgtPath = new Path(tblDesc.getLocation(), Warehouse.makePartPath(partSpec.getPartSpec()));
  }
  FileSystem tgtFs = FileSystem.get(tgtPath.toUri(), x.getConf());
  checkTargetLocationEmpty(tgtFs, tgtPath, replicationSpec, x.getLOG());
  partSpec.setLocation(tgtPath.toString());
}
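The path arithmetic itself is just Warehouse.makePartPath appended to whichever base location wins. A minimal sketch of the first branch (the table already has a data location); the namenode URI and partition spec are made up for the example:

import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.Warehouse;

public class PartPathSketch {
  public static void main(String[] args) throws Exception {
    Path tableLocation = new Path("hdfs://nn:8020/warehouse/demo_db.db/events"); // hypothetical
    Map<String, String> partSpec = new LinkedHashMap<>(); // insertion order = partition column order
    partSpec.put("ds", "2024-01-01");
    partSpec.put("hr", "07");
    // Same resolution as above: <table location>/<key1=val1>/<key2=val2>
    Path tgtPath = new Path(tableLocation.toString(), Warehouse.makePartPath(partSpec));
    System.out.println(tgtPath); // .../events/ds=2024-01-01/hr=07
  }
}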
Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
The class MacroSemanticAnalyzer, method addEntities.
private void addEntities() throws SemanticException {
  Database database = getDatabase(Warehouse.DEFAULT_DATABASE_NAME);
  // This restricts macro creation to privileged users.
  outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_NO_LOCK));
}
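For completeness, a minimal sketch that fetches the same default database object outside the analyzer, via the metastore client; a reachable metastore is assumed:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Database;

public class DefaultDbSketch {
  public static void main(String[] args) throws Exception {
    HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    try {
      // "default" is the value behind Warehouse.DEFAULT_DATABASE_NAME.
      Database database = client.getDatabase("default");
      System.out.println(database.getName() + " -> " + database.getLocationUri());
    } finally {
      client.close();
    }
  }
}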