Example 1 with BatchedListEntries

Use of org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries in project hadoop by apache.

From the class CacheManager, the method listCacheDirectives:

public BatchedListEntries<CacheDirectiveEntry> listCacheDirectives(long prevId, CacheDirectiveInfo filter, FSPermissionChecker pc) throws IOException {
    assert namesystem.hasReadLock();
    final int NUM_PRE_ALLOCATED_ENTRIES = 16;
    String filterPath = null;
    if (filter.getPath() != null) {
        filterPath = validatePath(filter);
    }
    if (filter.getReplication() != null) {
        throw new InvalidRequestException("Filtering by replication is unsupported.");
    }
    // Querying for a single ID
    final Long id = filter.getId();
    if (id != null) {
        if (!directivesById.containsKey(id)) {
            throw new InvalidRequestException("Did not find requested id " + id);
        }
        // Since we use a tailMap on directivesById, setting prev to id-1 gets
        // us the directive with the id (if present)
        prevId = id - 1;
    }
    ArrayList<CacheDirectiveEntry> replies = new ArrayList<CacheDirectiveEntry>(NUM_PRE_ALLOCATED_ENTRIES);
    int numReplies = 0;
    SortedMap<Long, CacheDirective> tailMap = directivesById.tailMap(prevId + 1);
    for (Entry<Long, CacheDirective> cur : tailMap.entrySet()) {
        if (numReplies >= maxListCacheDirectivesNumResponses) {
            return new BatchedListEntries<CacheDirectiveEntry>(replies, true);
        }
        CacheDirective curDirective = cur.getValue();
        CacheDirectiveInfo info = cur.getValue().toInfo();
        // If we are filtering by a single ID and this entry does not match it,
        // we have passed the requested item and should break out.
        if (id != null && !(info.getId().equals(id))) {
            break;
        }
        if (filter.getPool() != null && !info.getPool().equals(filter.getPool())) {
            continue;
        }
        if (filterPath != null && !info.getPath().toUri().getPath().equals(filterPath)) {
            continue;
        }
        boolean hasPermission = true;
        if (pc != null) {
            try {
                pc.checkPermission(curDirective.getPool(), FsAction.READ);
            } catch (AccessControlException e) {
                hasPermission = false;
            }
        }
        if (hasPermission) {
            replies.add(new CacheDirectiveEntry(info, cur.getValue().toStats()));
            numReplies++;
        }
    }
    return new BatchedListEntries<CacheDirectiveEntry>(replies, false);
}
Also used: ArrayList (java.util.ArrayList), AccessControlException (org.apache.hadoop.security.AccessControlException), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), CacheDirective (org.apache.hadoop.hdfs.protocol.CacheDirective), BatchedListEntries (org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries), CacheDirectiveEntry (org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry), InvalidRequestException (org.apache.hadoop.fs.InvalidRequestException)
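
A minimal client-side sketch of how a batch like the one built above could be consumed: BatchedRemoteIterator pages through BatchedListEntries results by re-issuing the request with the id of the last element seen, assuming its standard makeRequest/elementToPrevKey contract. The DirectiveSource interface below is a hypothetical stand-in for the server call, not a real Hadoop API.

import java.io.IOException;
import org.apache.hadoop.fs.BatchedRemoteIterator;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;

public class CacheDirectivePager extends BatchedRemoteIterator<Long, CacheDirectiveEntry> {

    /** Hypothetical abstraction over the listCacheDirectives call shown above. */
    public interface DirectiveSource {
        BatchedEntries<CacheDirectiveEntry> list(long prevId) throws IOException;
    }

    private final DirectiveSource source;

    public CacheDirectivePager(DirectiveSource source) {
        // Start the cursor before the first possible directive id.
        super(0L);
        this.source = source;
    }

    @Override
    public BatchedEntries<CacheDirectiveEntry> makeRequest(Long prevId) throws IOException {
        // Each batch resumes after the last id seen so far.
        return source.list(prevId);
    }

    @Override
    public Long elementToPrevKey(CacheDirectiveEntry entry) {
        // The directive id doubles as the paging cursor for the next request.
        return entry.getInfo().getId();
    }
}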

Example 2 with BatchedListEntries

Use of org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries in project hadoop by apache.

From the class ClientNamenodeProtocolTranslatorPB, the method listEncryptionZones:

@Override
public BatchedEntries<EncryptionZone> listEncryptionZones(long id) throws IOException {
    final ListEncryptionZonesRequestProto req = ListEncryptionZonesRequestProto.newBuilder().setId(id).build();
    try {
        EncryptionZonesProtos.ListEncryptionZonesResponseProto response = rpcProxy.listEncryptionZones(null, req);
        List<EncryptionZone> elements = Lists.newArrayListWithCapacity(response.getZonesCount());
        for (EncryptionZoneProto p : response.getZonesList()) {
            elements.add(PBHelperClient.convert(p));
        }
        return new BatchedListEntries<>(elements, response.getHasMore());
    } catch (ServiceException e) {
        throw ProtobufHelper.getRemoteException(e);
    }
}
Also used: EncryptionZone (org.apache.hadoop.hdfs.protocol.EncryptionZone), ServiceException (com.google.protobuf.ServiceException), ListEncryptionZonesRequestProto (org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto), BatchedListEntries (org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries), EncryptionZoneProto (org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.EncryptionZoneProto)
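
A short sketch of how a caller might drain all zones through this translator: the id of the last zone in each batch becomes the cursor for the next request, and paging stops once hasMore() reports false. It assumes only the listEncryptionZones signature overridden above via ClientProtocol; it is an illustrative loop, not code from the project.

import java.io.IOException;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;

public class EncryptionZonePaging {
    // Walks every encryption zone by repeatedly asking for the batch that
    // follows the last id seen, until the server reports no more entries.
    static void printAllZones(ClientProtocol namenode) throws IOException {
        long lastId = 0;
        boolean hasMore = true;
        while (hasMore) {
            BatchedEntries<EncryptionZone> batch = namenode.listEncryptionZones(lastId);
            for (int i = 0; i < batch.size(); i++) {
                EncryptionZone zone = batch.get(i);
                System.out.println(zone.getPath());
                // The zone id becomes the cursor for the next request.
                lastId = zone.getId();
            }
            hasMore = batch.hasMore() && batch.size() > 0;
        }
    }
}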

Example 3 with BatchedListEntries

Use of org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries in project hadoop by apache.

From the class EncryptionZoneManager, the method listEncryptionZones:

/**
   * Cursor-based listing of encryption zones.
   * <p/>
   * Called while holding the FSDirectory lock.
   */
BatchedListEntries<EncryptionZone> listEncryptionZones(long prevId) throws IOException {
    assert dir.hasReadLock();
    if (!hasCreatedEncryptionZone()) {
        return new BatchedListEntries<EncryptionZone>(Lists.newArrayList(), false);
    }
    NavigableMap<Long, EncryptionZoneInt> tailMap = encryptionZones.tailMap(prevId, false);
    final int numResponses = Math.min(maxListEncryptionZonesResponses, tailMap.size());
    final List<EncryptionZone> zones = Lists.newArrayListWithExpectedSize(numResponses);
    int count = 0;
    for (EncryptionZoneInt ezi : tailMap.values()) {
        /*
       Skip EZs that are only present in snapshots. Re-resolve the path to 
       see if the path's current inode ID matches EZ map's INode ID.
       
       INode#getFullPathName simply calls getParent recursively, so will return
       the INode's parents at the time it was snapshotted. It will not 
       contain a reference INode.
      */
        final String pathName = getFullPathName(ezi);
        INode inode = dir.getInode(ezi.getINodeId());
        INode lastINode = null;
        if (inode.getParent() != null || inode.isRoot()) {
            INodesInPath iip = dir.getINodesInPath(pathName, DirOp.READ_LINK);
            lastINode = iip.getLastINode();
        }
        if (lastINode == null || lastINode.getId() != ezi.getINodeId()) {
            continue;
        }
        // Add the EZ to the result list
        zones.add(new EncryptionZone(ezi.getINodeId(), pathName, ezi.getSuite(), ezi.getVersion(), ezi.getKeyName()));
        count++;
        if (count >= numResponses) {
            break;
        }
    }
    final boolean hasMore = (numResponses < tailMap.size());
    return new BatchedListEntries<EncryptionZone>(zones, hasMore);
}
Also used: EncryptionZone (org.apache.hadoop.hdfs.protocol.EncryptionZone), BatchedListEntries (org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries)
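
All three examples rely on the same small contract: BatchedListEntries pairs one page of results with a hasMore flag that tells the caller whether to request another batch. A minimal standalone demonstration of that contract, using the constructor shown above plus the get/size/hasMore accessors assumed from the standard BatchedEntries interface:

import java.util.Arrays;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;

public class BatchedListEntriesDemo {
    public static void main(String[] args) {
        // One page of three elements, with more pages still available server-side.
        BatchedListEntries<String> page =
            new BatchedListEntries<>(Arrays.asList("a", "b", "c"), true);

        for (int i = 0; i < page.size(); i++) {
            // Prints a, b, c.
            System.out.println(page.get(i));
        }
        // true: the caller should request another batch.
        System.out.println(page.hasMore());
    }
}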

Aggregations

BatchedListEntries (org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries): 3 uses
EncryptionZone (org.apache.hadoop.hdfs.protocol.EncryptionZone): 2 uses
ServiceException (com.google.protobuf.ServiceException): 1 use
ArrayList (java.util.ArrayList): 1 use
InvalidRequestException (org.apache.hadoop.fs.InvalidRequestException): 1 use
CacheDirective (org.apache.hadoop.hdfs.protocol.CacheDirective): 1 use
CacheDirectiveEntry (org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry): 1 use
CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo): 1 use
EncryptionZoneProto (org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.EncryptionZoneProto): 1 use
ListEncryptionZonesRequestProto (org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto): 1 use
AccessControlException (org.apache.hadoop.security.AccessControlException): 1 use