Search in sources :

Example 61 with ThriftSecurityException

Use of org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException in the Apache Accumulo project.

Source: the call method of the CleanUp class.

@Override
public Repo<Manager> call(long tid, Manager manager) {
    manager.clearMigrations(tableId);
    // Count references to this table's files from OTHER tables' metadata.
    // refCount == 0  -> no other table shares files; safe to delete them.
    // refCount == -1 -> the scan failed; treated as "referenced" below so
    //                   files are preserved rather than risked.
    int refCount = 0;
    try {
        // look for other tables that references this table's files
        AccumuloClient client = manager.getContext();
        try (BatchScanner bs = client.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 8)) {
            // Scan the whole tablets section EXCEPT this table's own range,
            // split into the ranges before and after it.
            Range allTables = TabletsSection.getRange();
            Range tableRange = TabletsSection.getRange(tableId);
            Range beforeTable = new Range(allTables.getStartKey(), true, tableRange.getStartKey(), false);
            Range afterTable = new Range(tableRange.getEndKey(), false, allTables.getEndKey(), true);
            bs.setRanges(Arrays.asList(beforeTable, afterTable));
            bs.fetchColumnFamily(DataFileColumnFamily.NAME);
            // Server-side grep narrows candidates; the client-side contains()
            // check below re-verifies each hit before counting it.
            IteratorSetting cfg = new IteratorSetting(40, "grep", GrepIterator.class);
            GrepIterator.setTerm(cfg, "/" + tableId + "/");
            bs.addScanIterator(cfg);
            for (Entry<Key, Value> entry : bs) {
                if (entry.getKey().getColumnQualifier().toString().contains("/" + tableId + "/")) {
                    refCount++;
                }
            }
        }
    } catch (Exception e) {
        // Could not determine reference count; keep the files (see above).
        refCount = -1;
        log.error("Failed to scan " + MetadataTable.NAME + " looking for references to deleted table " + tableId, e);
    }
    // remove metadata table entries
    try {
        // Intentionally do not pass manager lock. If manager loses lock, this operation may complete
        // before manager can kill itself.
        // If the manager lock passed to deleteTable, it is possible that the delete mutations will be
        // dropped. If the delete operations
        // are dropped and the operation completes, then the deletes will not be repeated.
        MetadataTableUtil.deleteTable(tableId, refCount != 0, manager.getContext(), null);
    } catch (Exception e) {
        // Best-effort: log and continue with the remaining cleanup steps.
        log.error("error deleting " + tableId + " from metadata table", e);
    }
    // remove any problem reports the table may have
    try {
        ProblemReports.getInstance(manager.getContext()).deleteProblemReports(tableId);
    } catch (Exception e) {
        log.error("Failed to delete problem reports for table " + tableId, e);
    }
    if (refCount == 0) {
        // delete the map files (only safe when no other table references them)
        try {
            VolumeManager fs = manager.getVolumeManager();
            for (String dir : manager.getContext().getTablesDirs()) {
                fs.deleteRecursively(new Path(dir, tableId.canonical()));
            }
        } catch (IOException e) {
            log.error("Unable to remove deleted table directory", e);
        } catch (IllegalArgumentException exception) {
            if (exception.getCause() instanceof UnknownHostException) {
                /* Thrown if HDFS encounters a DNS problem in some edge cases */
                log.error("Unable to remove deleted table directory", exception);
            } else {
                // Not the known DNS edge case; propagate.
                throw exception;
            }
        }
    }
    // remove table from zookeeper
    try {
        manager.getTableManager().removeTable(tableId);
        manager.getContext().clearTableListCache();
    } catch (Exception e) {
        log.error("Failed to find table id in zookeeper", e);
    }
    // remove any permissions associated with this table
    try {
        AuditedSecurityOperation.getInstance(manager.getContext()).deleteTable(manager.getContext().rpcCreds(), tableId, namespaceId);
    } catch (ThriftSecurityException e) {
        log.error("{}", e.getMessage(), e);
    }
    Utils.unreserveTable(manager, tableId, tid, true);
    Utils.unreserveNamespace(manager, namespaceId, tid, false);
    // Use the class's existing logger with SLF4J parameterized logging instead
    // of creating a fresh Logger instance (was LoggerFactory.getLogger(...)),
    // consistent with every other log call in this method.
    log.debug("Deleted table {}", tableId);
    return null;
}
Also used : AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) Path(org.apache.hadoop.fs.Path) VolumeManager(org.apache.accumulo.server.fs.VolumeManager) UnknownHostException(java.net.UnknownHostException) BatchScanner(org.apache.accumulo.core.client.BatchScanner) IOException(java.io.IOException) Range(org.apache.accumulo.core.data.Range) ThriftSecurityException(org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException) ThriftSecurityException(org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException) IOException(java.io.IOException) UnknownHostException(java.net.UnknownHostException) IteratorSetting(org.apache.accumulo.core.client.IteratorSetting) Value(org.apache.accumulo.core.data.Value) Key(org.apache.accumulo.core.data.Key)

Aggregations

ThriftSecurityException (org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException)61 AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException)33 TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException)28 TException (org.apache.thrift.TException)25 ThriftTableOperationException (org.apache.accumulo.core.clientImpl.thrift.ThriftTableOperationException)20 IOException (java.io.IOException)19 ArrayList (java.util.ArrayList)14 AccumuloException (org.apache.accumulo.core.client.AccumuloException)14 TableId (org.apache.accumulo.core.data.TableId)14 TKeyExtent (org.apache.accumulo.core.dataImpl.thrift.TKeyExtent)14 NamespaceNotFoundException (org.apache.accumulo.core.client.NamespaceNotFoundException)13 KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent)13 NamespaceId (org.apache.accumulo.core.data.NamespaceId)11 Tablet (org.apache.accumulo.tserver.tablet.Tablet)10 NoNodeException (org.apache.zookeeper.KeeperException.NoNodeException)10 HashSet (java.util.HashSet)9 NotServingTabletException (org.apache.accumulo.core.tabletserver.thrift.NotServingTabletException)9 TabletClientService (org.apache.accumulo.core.tabletserver.thrift.TabletClientService)9 HashMap (java.util.HashMap)8 Map (java.util.Map)8