Example usage of org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException in the Apache Accumulo project: the call method of the CleanUp class.
/**
 * Final step of table deletion: removes every remaining trace of the table.
 *
 * <p>Phases, in order: clear pending migrations; scan the metadata table for other
 * tables referencing this table's files (e.g. via clone); delete the table's metadata
 * entries; delete problem reports; delete the table's directories iff no references
 * were found; remove the table from ZooKeeper and the table-list cache; drop table
 * permissions; release the FATE reservations on the table and namespace.
 *
 * <p>Each phase logs and continues on failure so cleanup proceeds as far as possible.
 *
 * @param tid the FATE transaction id holding the reservations released at the end
 * @param manager the manager providing context, volume manager, and table manager
 * @return always {@code null}; this is the last Repo in the delete-table operation
 */
@Override
public Repo<Manager> call(long tid, Manager manager) {
  manager.clearMigrations(tableId);

  // Files under this table's directory may be referenced by other tables (e.g. after
  // a clone). Count such references; if any exist the files must not be deleted.
  final String tableDirToken = "/" + tableId + "/";
  int refCount = 0;
  try {
    AccumuloClient client = manager.getContext();
    try (BatchScanner bs =
        client.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 8)) {
      // Scan all tablet metadata EXCEPT this table's own range.
      Range allTables = TabletsSection.getRange();
      Range tableRange = TabletsSection.getRange(tableId);
      Range beforeTable =
          new Range(allTables.getStartKey(), true, tableRange.getStartKey(), false);
      Range afterTable =
          new Range(tableRange.getEndKey(), false, allTables.getEndKey(), true);
      bs.setRanges(Arrays.asList(beforeTable, afterTable));
      bs.fetchColumnFamily(DataFileColumnFamily.NAME);
      // Server-side grep so non-matching entries are filtered before crossing the wire.
      IteratorSetting cfg = new IteratorSetting(40, "grep", GrepIterator.class);
      GrepIterator.setTerm(cfg, tableDirToken);
      bs.addScanIterator(cfg);
      for (Entry<Key,Value> entry : bs) {
        if (entry.getKey().getColumnQualifier().toString().contains(tableDirToken)) {
          refCount++;
        }
      }
    }
  } catch (Exception e) {
    // Conservative fallback: treat a failed scan as "references may exist" (nonzero)
    // so the file deletion below is skipped rather than risking data loss.
    refCount = -1;
    log.error("Failed to scan {} looking for references to deleted table {}",
        MetadataTable.NAME, tableId, e);
  }

  // remove metadata table entries
  try {
    // Intentionally do not pass manager lock. If manager loses lock, this operation may
    // complete before manager can kill itself.
    // If the manager lock passed to deleteTable, it is possible that the delete mutations
    // will be dropped. If the delete operations
    // are dropped and the operation completes, then the deletes will not be repeated.
    MetadataTableUtil.deleteTable(tableId, refCount != 0, manager.getContext(), null);
  } catch (Exception e) {
    log.error("error deleting {} from metadata table", tableId, e);
  }

  // remove any problem reports the table may have
  try {
    ProblemReports.getInstance(manager.getContext()).deleteProblemReports(tableId);
  } catch (Exception e) {
    log.error("Failed to delete problem reports for table {}", tableId, e);
  }

  if (refCount == 0) {
    // No other table references this table's files; delete the map files.
    try {
      VolumeManager fs = manager.getVolumeManager();
      for (String dir : manager.getContext().getTablesDirs()) {
        fs.deleteRecursively(new Path(dir, tableId.canonical()));
      }
    } catch (IOException e) {
      log.error("Unable to remove deleted table directory", e);
    } catch (IllegalArgumentException exception) {
      if (exception.getCause() instanceof UnknownHostException) {
        /* Thrown if HDFS encounters a DNS problem in some edge cases */
        log.error("Unable to remove deleted table directory", exception);
      } else {
        throw exception;
      }
    }
  }

  // remove table from zookeeper
  try {
    manager.getTableManager().removeTable(tableId);
    manager.getContext().clearTableListCache();
  } catch (Exception e) {
    log.error("Failed to find table id in zookeeper", e);
  }

  // remove any permissions associated with this table
  try {
    AuditedSecurityOperation.getInstance(manager.getContext())
        .deleteTable(manager.getContext().rpcCreds(), tableId, namespaceId);
  } catch (ThriftSecurityException e) {
    log.error("{}", e.getMessage(), e);
  }

  // Release the reservations taken by earlier steps of this FATE operation.
  Utils.unreserveTable(manager, tableId, tid, true);
  Utils.unreserveNamespace(manager, namespaceId, tid, false);

  // Use the class's shared logger (was: a freshly created LoggerFactory logger)
  // and SLF4J parameterized logging, consistent with the rest of this method.
  log.debug("Deleted table {}", tableId);

  return null;
}
End of example.