Use of org.apache.hadoop.hive.ql.parse.repl.load.MetaData in project hive by apache.
The class CreateDatabaseHandler, method handle:
@Override
public List<Task<?>> handle(Context context) throws SemanticException {
  MetaData metaData;
  try {
    FileSystem fs = FileSystem.get(new Path(context.location).toUri(), context.hiveConf);
    metaData = EximUtil.readMetaData(fs, new Path(context.location, EximUtil.METADATA_NAME));
  } catch (IOException e) {
    throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(), e);
  }
  Database db = metaData.getDatabase();
  String destinationDBName = context.dbName == null ? db.getName() : context.dbName;
  // Create the target database with the description and parameters from the dump.
  CreateDatabaseDesc createDatabaseDesc =
      new CreateDatabaseDesc(destinationDBName, db.getDescription(), null, null, true, db.getParameters());
  Task<DDLWork> createDBTask = TaskFactory.get(
      new DDLWork(new HashSet<>(), new HashSet<>(), createDatabaseDesc, true,
          context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
  if (!db.getParameters().isEmpty()) {
    // Re-apply the dumped parameters as a dependent task, in case the database already existed.
    AlterDatabaseSetPropertiesDesc alterDbDesc = new AlterDatabaseSetPropertiesDesc(
        destinationDBName, db.getParameters(), context.eventOnlyReplicationSpec());
    Task<DDLWork> alterDbProperties = TaskFactory.get(
        new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc, true,
            context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
    createDBTask.addDependentTask(alterDbProperties);
  }
  if (StringUtils.isNotEmpty(db.getOwnerName())) {
    // Restore the owner recorded in the dump.
    AlterDatabaseSetOwnerDesc alterDbOwner = new AlterDatabaseSetOwnerDesc(
        destinationDBName, new PrincipalDesc(db.getOwnerName(), db.getOwnerType()),
        context.eventOnlyReplicationSpec());
    Task<DDLWork> alterDbTask = TaskFactory.get(
        new DDLWork(new HashSet<>(), new HashSet<>(), alterDbOwner, true,
            context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
    createDBTask.addDependentTask(alterDbTask);
  }
  updatedMetadata.set(context.dmd.getEventTo().toString(), destinationDBName, null, null);
  return Collections.singletonList(createDBTask);
}
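The read pattern at the top (resolve the FileSystem from the dump location, then read the _metadata file) recurs in the other handlers below. A minimal sketch of a shared helper, assuming the same imports as the snippet above; the method name readDumpMetadata is illustrative, not part of Hive:

// Hedged sketch: a reusable form of the metadata-read pattern shown above.
// Hive itself inlines this logic at each call site.
private static MetaData readDumpMetadata(String location, HiveConf conf) throws SemanticException {
  try {
    Path metadataPath = new Path(location, EximUtil.METADATA_NAME);
    FileSystem fs = FileSystem.get(metadataPath.toUri(), conf);
    return EximUtil.readMetaData(fs, metadataPath);
  } catch (IOException e) {
    throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(), e);
  }
}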
Use of org.apache.hadoop.hive.ql.parse.repl.load.MetaData in project hive by apache.
The class TableHandler, method handle:
@Override
public List<Task<?>> handle(Context context) throws SemanticException {
  try {
    List<Task<?>> importTasks = new ArrayList<>();
    boolean isExternal = false, isLocationSet = false;
    String parsedLocation = null;
    DumpType eventType = context.dmd.getDumpType();
    Tuple tuple = extract(context);
    MetaData rv = EximUtil.getMetaDataFromLocation(context.location, context.hiveConf);
    if (tuple.isExternalTable) {
      // External tables are restored at a location derived from the dumped table's location.
      isLocationSet = true;
      isExternal = true;
      Table table = new Table(rv.getTable());
      parsedLocation = ReplExternalTables.externalTableLocation(context.hiveConf, table.getSd().getLocation());
    }
    context.nestedContext.setConf(context.hiveConf);
    EximUtil.SemanticAnalyzerWrapperContext x = new EximUtil.SemanticAnalyzerWrapperContext(
        context.hiveConf, context.db, readEntitySet, writeEntitySet, importTasks, context.log,
        context.nestedContext);
    x.setEventType(eventType);
    // REPL LOAD is never partition level; it is always DB or table level, so pass null for the partition spec.
    if (TableType.VIRTUAL_VIEW.name().equals(rv.getTable().getTableType())) {
      importTasks.add(ReplLoadTask.createViewTask(rv, context.dbName, context.hiveConf,
          context.getDumpDirectory(), context.getMetricCollector()));
    } else {
      ImportSemanticAnalyzer.prepareImport(false, isLocationSet, isExternal, false,
          (context.precursor != null), parsedLocation, null, context.dbName, null, context.location,
          x, updatedMetadata, context.getTxnMgr(), tuple.writeId, rv,
          context.getDumpDirectory(), context.getMetricCollector());
    }
    // If prepareImport opened a transaction, its task must run before every import task.
    Task<?> openTxnTask = x.getOpenTxnTask();
    if (openTxnTask != null && !importTasks.isEmpty()) {
      for (Task<?> t : importTasks) {
        openTxnTask.addDependentTask(t);
      }
      importTasks.add(openTxnTask);
    }
    return importTasks;
  } catch (RuntimeException e) {
    throw e;
  } catch (Exception e) {
    throw new SemanticException(e);
  }
}
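The transaction wiring at the end is the subtle part: the open-txn task is made the parent of each import task and then appended to the list, so the returned list carries the whole graph with the open-txn task as its effective root. A minimal sketch of that re-rooting pattern in isolation; the method name is illustrative:

// Hedged sketch: re-root a list of tasks under a prerequisite task,
// mirroring how the open-transaction task is wired above.
private static void rootUnder(Task<?> prerequisite, List<Task<?>> tasks) {
  for (Task<?> t : tasks) {
    prerequisite.addDependentTask(t);
  }
  tasks.add(prerequisite);
}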
Use of org.apache.hadoop.hive.ql.parse.repl.load.MetaData in project hive by apache.
The class ImportSemanticAnalyzer, method analyzeInternal:
@Override
public void analyzeInternal(ASTNode ast) throws SemanticException {
  try {
    Tree fromTree = ast.getChild(0);
    boolean isLocationSet = false;
    boolean isExternalSet = false;
    boolean isPartSpecSet = false;
    String parsedLocation = null;
    String parsedTableName = null;
    String parsedDbName = null;
    LinkedHashMap<String, String> parsedPartSpec = new LinkedHashMap<>();
    // waitOnPrecursor determines whether the non-existence of a dependent
    // object is an error. For regular imports it is; for now, the only thing
    // it affects is whether the target database must already exist.
    boolean waitOnPrecursor = false;
    for (int i = 1; i < ast.getChildCount(); ++i) {
      ASTNode child = (ASTNode) ast.getChild(i);
      switch (child.getToken().getType()) {
        case HiveParser.KW_EXTERNAL:
          isExternalSet = true;
          break;
        case HiveParser.TOK_TABLELOCATION:
          isLocationSet = true;
          parsedLocation = EximUtil.relativeToAbsolutePath(conf, unescapeSQLString(child.getChild(0).getText()));
          break;
        case HiveParser.TOK_TAB:
          ASTNode tableNameNode = (ASTNode) child.getChild(0);
          Map.Entry<String, String> dbTablePair = getDbTableNamePair(tableNameNode);
          parsedDbName = dbTablePair.getKey();
          parsedTableName = dbTablePair.getValue();
          // Collect partition metadata if a partition spec was given.
          if (child.getChildCount() == 2) {
            @SuppressWarnings("unused") ASTNode partspec = (ASTNode) child.getChild(1);
            isPartSpecSet = true;
            parsePartitionSpec(child, parsedPartSpec);
          }
          break;
      }
    }
    if (StringUtils.isEmpty(parsedDbName)) {
      parsedDbName = SessionState.get().getCurrentDatabase();
    }
    // Parsing of the statement is done; on to the import logic.
    EximUtil.SemanticAnalyzerWrapperContext x =
        new EximUtil.SemanticAnalyzerWrapperContext(conf, db, inputs, outputs, rootTasks, LOG, ctx);
    MetaData rv = EximUtil.getMetaDataFromLocation(fromTree.getText(), x.getConf());
    tableExists = prepareImport(true, isLocationSet, isExternalSet, isPartSpecSet, waitOnPrecursor,
        parsedLocation, parsedTableName, parsedDbName, parsedPartSpec, fromTree.getText(),
        x, null, getTxnMgr(), 0, rv);
  } catch (SemanticException e) {
    throw e;
  } catch (Exception e) {
    throw new SemanticException(ErrorMsg.IMPORT_SEMANTIC_ERROR.getMsg(), e);
  }
}
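For reference, the token cases in the loop above correspond to the clauses of Hive's documented IMPORT statement: KW_EXTERNAL to the EXTERNAL keyword, TOK_TABLELOCATION to the LOCATION clause, and TOK_TAB to the table name with its optional partition spec. The following statement shapes are illustrative examples of what this analyzer handles:

IMPORT FROM '/dump/path';
IMPORT TABLE target_db.t PARTITION (ds='2024-01-01') FROM '/dump/path';
IMPORT EXTERNAL TABLE t FROM '/dump/path' LOCATION '/warehouse/ext/t';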
Use of org.apache.hadoop.hive.ql.parse.repl.load.MetaData in project hive by apache.
The class LoadFunction, method isFunctionAlreadyLoaded:
private boolean isFunctionAlreadyLoaded(Path funcDumpRoot) throws HiveException, IOException {
  Path metadataPath = new Path(funcDumpRoot, EximUtil.METADATA_NAME);
  FileSystem fs = FileSystem.get(metadataPath.toUri(), context.hiveConf);
  MetaData metadata = EximUtil.readMetaData(fs, metadataPath);
  Function function;
  try {
    String dbName = StringUtils.isBlank(dbNameToLoadIn) ? metadata.function.getDbName() : dbNameToLoadIn;
    function = context.hiveDb.getFunction(dbName, metadata.function.getFunctionName());
  } catch (HiveException e) {
    if (e.getCause() instanceof NoSuchObjectException) {
      // The function does not exist at the target, so it has not been loaded yet.
      return false;
    }
    throw e;
  }
  return (function != null);
}
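A hedged sketch of how such a check typically guards a replay; the surrounding method and flow are illustrative, not LoadFunction's actual code path:

// Hedged usage sketch: skip re-creating a function when replaying a dump.
private void createFunctionIfNotLoaded(Path funcDumpRoot) throws HiveException, IOException {
  if (isFunctionAlreadyLoaded(funcDumpRoot)) {
    return; // idempotent replay: the function already exists at the target
  }
  // ... otherwise build the task that re-creates the function from the dump.
}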