Use of org.apache.hadoop.hive.ql.parse.ReplicationSpec in project hive by apache.
The class BootStrapReplicationSpecFunction, method fromMetaStore.
@Override
public ReplicationSpec fromMetaStore() throws HiveException {
  try {
    long currentReplicationState = (this.currentNotificationId > 0)
        ? this.currentNotificationId
        : db.getMSC().getCurrentNotificationEventId().getEventId();
    ReplicationSpec replicationSpec =
        new ReplicationSpec(true, false, "replv2", "will-be-set", false, false);
    replicationSpec.setCurrentReplicationState(String.valueOf(currentReplicationState));
    return replicationSpec;
  } catch (Exception e) {
    throw new SemanticException(e);
  }
}
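A minimal sketch (not from the Hive codebase) of how a spec built this way behaves. The constructor call mirrors the one in fromMetaStore() above, and the accessors are ones ReplicationSpec exposes in the snippets on this page; the class name and the event id value are hypothetical.

import org.apache.hadoop.hive.ql.parse.ReplicationSpec;

public class BootstrapSpecSketch {
  public static void main(String[] args) {
    // Same construction as fromMetaStore(): in replication scope, not
    // metadata-only, event state "replv2", placeholder current state.
    ReplicationSpec spec = new ReplicationSpec(true, false, "replv2", "will-be-set", false, false);
    // fromMetaStore() then replaces the placeholder with the metastore's
    // current notification event id (42 is a hypothetical value).
    spec.setCurrentReplicationState(String.valueOf(42L));
    System.out.println(spec.isInReplicationScope());       // true
    System.out.println(spec.getCurrentReplicationState()); // 42
  }
}

Capturing the current notification event id at bootstrap time is what lets a subsequent incremental replication cycle resume from that event.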
Use of org.apache.hadoop.hive.ql.parse.ReplicationSpec in project hive by apache.
The class DropDatabaseOperation, method execute.
@Override
public int execute() throws HiveException {
  try {
    String dbName = desc.getDatabaseName();
    ReplicationSpec replicationSpec = desc.getReplicationSpec();
    if (replicationSpec.isInReplicationScope()) {
      Database database = context.getDb().getDatabase(dbName);
      if (database == null || !replicationSpec.allowEventReplacementInto(database.getParameters())) {
        return 0;
      }
    }
    // isCasdade (sic) is the method name as declared on DropDatabaseDesc.
    context.getDb().dropDatabase(dbName, true, desc.getIfExists(), desc.isCasdade());
    if (LlapHiveUtils.isLlapMode(context.getConf())) {
      ProactiveEviction.Request.Builder llapEvictRequestBuilder = ProactiveEviction.Request.Builder.create();
      llapEvictRequestBuilder.addDb(dbName);
      ProactiveEviction.evict(context.getConf(), llapEvictRequestBuilder.build());
    }
    // Unregister the functions as well
    if (desc.isCasdade()) {
      FunctionRegistry.unregisterPermanentFunctions(dbName);
    }
  } catch (NoSuchObjectException ex) {
    throw new HiveException(ex, ErrorMsg.DATABASE_NOT_EXISTS, desc.getDatabaseName());
  }
  return 0;
}
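A hedged sketch of the replication guard above, extracted as a standalone predicate. DropGuardSketch and mayDrop are hypothetical names, and the "repl.last.id" parameter key is an assumption (ReplicationSpec.KEY.CURR_STATE_ID); only the ReplicationSpec calls themselves come from the snippet.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.ql.parse.ReplicationSpec;

public class DropGuardSketch {
  // Hypothetical helper mirroring the guard in execute(): a replayed drop
  // event may proceed only if the database still exists and the event is
  // not older than the state already replicated into it.
  static boolean mayDrop(ReplicationSpec spec, Database database) {
    if (!spec.isInReplicationScope()) {
      return true; // a plain (non-replication) DROP DATABASE always proceeds
    }
    return database != null && spec.allowEventReplacementInto(database.getParameters());
  }

  public static void main(String[] args) {
    // Event replication state "100" is a hypothetical value, passed in the
    // same six-argument form the first snippet uses.
    ReplicationSpec spec = new ReplicationSpec(true, false, "100", "100", false, false);
    Database database = new Database();
    Map<String, String> params = new HashMap<>();
    // Assumed key: the last event id replicated into this database.
    params.put("repl.last.id", "50");
    database.setParameters(params);
    System.out.println(mayDrop(spec, database)); // expected true: event 100 is newer than state 50
  }
}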
Use of org.apache.hadoop.hive.ql.parse.ReplicationSpec in project hive by apache.
The class DropDatabaseAnalyzer, method analyzeInternal.
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  String databaseName = unescapeIdentifier(root.getChild(0).getText());
  boolean ifExists = root.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null;
  boolean cascade = root.getFirstChildWithType(HiveParser.TOK_CASCADE) != null;
  Database database = getDatabase(databaseName, !ifExists);
  if (database == null) {
    return;
  }
  // if cascade=true, then we need to authorize the drop table action as well, and add the tables to the outputs
  if (cascade) {
    try {
      for (Table table : db.getAllTableObjects(databaseName)) {
        // We want no lock here, as the database lock will cover the tables,
        // and putting a lock will actually cause us to deadlock on ourselves.
        outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK));
      }
    } catch (HiveException e) {
      throw new SemanticException(e);
    }
  }
  inputs.add(new ReadEntity(database));
  outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_EXCLUSIVE));
  DropDatabaseDesc desc = new DropDatabaseDesc(databaseName, ifExists, cascade, new ReplicationSpec());
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
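Note that this analyzer always attaches a default, no-arg ReplicationSpec to the desc. A short sketch of what that default is expected to mean for the execute() guard shown earlier (assumption: the no-arg constructor yields a spec outside replication scope; the class name is hypothetical):

import org.apache.hadoop.hive.ql.parse.ReplicationSpec;

public class DefaultSpecSketch {
  public static void main(String[] args) {
    // The default spec the analyzer builds for a user-issued DROP DATABASE.
    ReplicationSpec spec = new ReplicationSpec();
    // Expected to print false for the no-arg constructor, so
    // DropDatabaseOperation.execute() skips the event-replacement check
    // and drops the database unconditionally.
    System.out.println(spec.isInReplicationScope());
  }
}

In other words, the event-replacement guard in DropDatabaseOperation only comes into play for descs built on the replication load path, not for this analyzer's output.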
Use of org.apache.hadoop.hive.ql.parse.ReplicationSpec in project hive by apache.
The class LoadTable, method newTableTasks.
private void newTableTasks(ImportTableDesc tblDesc, Task<?> tblRootTask, TableLocationTuple tuple)
    throws Exception {
  Table table = tblDesc.toTable(context.hiveConf);
  ReplicationSpec replicationSpec = event.replicationSpec();
  if (!tblDesc.isExternal()) {
    tblDesc.setLocation(null);
  }
  Task<?> createTableTask = tblDesc.getCreateTableTask(new HashSet<>(), new HashSet<>(), context.hiveConf,
      true, (new Path(context.dumpDirectory)).getParent().toString(), metricCollector, true);
  if (tblRootTask == null) {
    tblRootTask = createTableTask;
  } else {
    tblRootTask.addDependentTask(createTableTask);
  }
  if (replicationSpec.isMetadataOnly()) {
    tracker.addTask(tblRootTask);
    return;
  }
  Task<?> parentTask = createTableTask;
  if (replicationSpec.isTransactionalTableDump()) {
    List<String> partNames = isPartitioned(tblDesc) ? event.partitions(tblDesc) : null;
    ReplTxnWork replTxnWork = new ReplTxnWork(tblDesc.getDatabaseName(), tblDesc.getTableName(), partNames,
        replicationSpec.getValidWriteIdList(), ReplTxnWork.OperationType.REPL_WRITEID_STATE,
        (new Path(context.dumpDirectory)).getParent().toString(), metricCollector);
    Task<?> replTxnTask = TaskFactory.get(replTxnWork, context.hiveConf);
    parentTask.addDependentTask(replTxnTask);
    parentTask = replTxnTask;
  }
  boolean shouldCreateLoadTableTask =
      (!isPartitioned(tblDesc) && !TableType.EXTERNAL_TABLE.equals(table.getTableType()))
      || tuple.isConvertedFromManagedToExternal;
  if (shouldCreateLoadTableTask) {
    LOG.debug("adding dependent ReplTxnTask/CopyWork/MoveWork for table");
    Task<?> loadTableTask = loadTableTask(table, replicationSpec, table.getDataLocation(), event.dataPath());
    parentTask.addDependentTask(loadTableTask);
  }
  tracker.addTask(tblRootTask);
}
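The shouldCreateLoadTableTask predicate carries the interesting branching here. Below is a self-contained restatement of it (LoadTableDecisionSketch and its static method are hypothetical, not Hive API; only the boolean expression comes from the snippet above):

import org.apache.hadoop.hive.metastore.TableType;

public class LoadTableDecisionSketch {
  // Restates the predicate from newTableTasks(): table data is loaded inline
  // only for non-partitioned managed tables, or when the table was converted
  // from managed to external during the dump. Per the surrounding code,
  // partitioned tables have their data loaded per partition instead.
  static boolean shouldCreateLoadTableTask(boolean isPartitioned, TableType tableType,
      boolean convertedFromManagedToExternal) {
    return (!isPartitioned && !TableType.EXTERNAL_TABLE.equals(tableType))
        || convertedFromManagedToExternal;
  }

  public static void main(String[] args) {
    System.out.println(shouldCreateLoadTableTask(false, TableType.MANAGED_TABLE, false));  // true
    System.out.println(shouldCreateLoadTableTask(true, TableType.MANAGED_TABLE, false));   // false
    System.out.println(shouldCreateLoadTableTask(false, TableType.EXTERNAL_TABLE, true));  // true
  }
}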