
Example 1 with DirectoryListing

Use of org.apache.hadoop.hdfs.protocol.DirectoryListing in project hadoop by apache.

The class NameNodeRpcServer, method getListing.

// ClientProtocol
@Override
public DirectoryListing getListing(String src, byte[] startAfter, boolean needLocation) throws IOException {
    checkNNStartup();
    DirectoryListing files = namesystem.getListing(src, startAfter, needLocation);
    if (files != null) {
        metrics.incrGetListingOps();
        metrics.incrFilesInGetListingOps(files.getPartialListing().length);
    }
    return files;
}
Also used: DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing)
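
The NameNode caps each response at dfs.ls.limit entries, so clients page through large directories by feeding the last returned name back in as startAfter. A minimal paging sketch, assuming an already-constructed DFSClient instance named dfsClient (DFSClient.listPaths wraps this RPC):

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

// Page through a directory, using the last returned name as the cursor.
byte[] cursor = HdfsFileStatus.EMPTY_NAME;
DirectoryListing batch;
do {
    batch = dfsClient.listPaths("/tmp1", cursor);
    if (batch == null) {
        break; // the directory no longer exists
    }
    for (HdfsFileStatus stat : batch.getPartialListing()) {
        System.out.println(stat.getLocalName());
    }
    cursor = batch.getLastName();
} while (batch.hasMore());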

Example 2 with DirectoryListing

Use of org.apache.hadoop.hdfs.protocol.DirectoryListing in project hadoop by apache.

The class TestINodeFile, method testLocationLimitInListingOps.

@Test
public void testLocationLimitInListingOps() throws Exception {
    final Configuration conf = new Configuration();
    // 3 blocks * 3 replicas
    conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 9);
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        final DistributedFileSystem hdfs = cluster.getFileSystem();
        ArrayList<String> source = new ArrayList<String>();
        // tmp1 holds files with 3 blocks, 3 replicas
        // tmp2 holds files with 3 blocks, 1 replica
        hdfs.mkdirs(new Path("/tmp1"));
        hdfs.mkdirs(new Path("/tmp2"));
        source.add("f1");
        source.add("f2");
        int numEntries = source.size();
        for (int j = 0; j < numEntries; j++) {
            DFSTestUtil.createFile(hdfs, new Path("/tmp1/" + source.get(j)), 4096, 3 * 1024 - 100, 1024, (short) 3, 0);
        }
        byte[] start = HdfsFileStatus.EMPTY_NAME;
        for (int j = 0; j < numEntries; j++) {
            DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp1", start, true);
            assertTrue(dl.getPartialListing().length == 1);
            for (int i = 0; i < dl.getPartialListing().length; i++) {
                source.remove(dl.getPartialListing()[i].getLocalName());
            }
            start = dl.getLastName();
        }
        // Verify we have listed all entries in the directory.
        assertTrue(source.size() == 0);
        // Now create 6 files, each with 3 locations. Should take 2 iterations of 3
        source.add("f1");
        source.add("f2");
        source.add("f3");
        source.add("f4");
        source.add("f5");
        source.add("f6");
        numEntries = source.size();
        for (int j = 0; j < numEntries; j++) {
            DFSTestUtil.createFile(hdfs, new Path("/tmp2/" + source.get(j)), 4096, 3 * 1024 - 100, 1024, (short) 1, 0);
        }
        start = HdfsFileStatus.EMPTY_NAME;
        for (int j = 0; j < numEntries / 3; j++) {
            DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp2", start, true);
            assertTrue(dl.getPartialListing().length == 3);
            for (int i = 0; i < dl.getPartialListing().length; i++) {
                source.remove(dl.getPartialListing()[i].getLocalName());
            }
            start = dl.getLastName();
        }
        // Verify we have listed all entries in tmp2.
        assertTrue(source.size() == 0);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), ArrayList (java.util.ArrayList), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Test (org.junit.Test)
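
The test works because, with needLocation set, the NameNode charges each file approximately locatedBlockCount() * replication against the dfs.ls.limit budget of 9 (see the getListing implementation in Example 5 below): a 3-block file with replication 3 costs 9 and is returned alone, while a 3-block file with replication 1 costs 3, so three fit per batch. A back-of-the-envelope helper, not part of the Hadoop sources, that mirrors this arithmetic:

// Hypothetical helper mirroring the NameNode's location budget. The budget is
// checked before each entry is added, so the batch size is a ceiling division
// and at least one file is always returned.
static int filesPerBatch(int lsLimit, int blocksPerFile, int replication) {
    int costPerFile = blocksPerFile * replication; // approximate #locations per file
    return (lsLimit + costPerFile - 1) / costPerFile;
}

filesPerBatch(9, 3, 3) evaluates to 1 (the /tmp1 loop above) and filesPerBatch(9, 3, 1) to 3 (the /tmp2 loop).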

Example 3 with DirectoryListing

Use of org.apache.hadoop.hdfs.protocol.DirectoryListing in project hadoop by apache.

The class RpcProgramNfs3, method readdir.

public READDIR3Response readdir(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    READDIR3Response response = new READDIR3Response(Nfs3Status.NFS3_OK);
    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
        response.setStatus(Nfs3Status.NFS3ERR_ACCES);
        return response;
    }
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    READDIR3Request request;
    try {
        request = READDIR3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid READDIR request");
        return new READDIR3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    FileHandle handle = request.getHandle();
    long cookie = request.getCookie();
    if (cookie < 0) {
        LOG.error("Invalid READDIR request, with negative cookie: " + cookie);
        return new READDIR3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    long count = request.getCount();
    if (count <= 0) {
        LOG.info("Nonpositive count in invalid READDIR request: " + count);
        return new READDIR3Response(Nfs3Status.NFS3_OK);
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS READDIR fileId: " + handle.getFileId() + " cookie: " + cookie + " count: " + count + " client: " + remoteAddress);
    }
    HdfsFileStatus dirStatus;
    DirectoryListing dlisting;
    Nfs3FileAttributes postOpAttr;
    long dotdotFileId = 0;
    try {
        String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
        dirStatus = dfsClient.getFileInfo(dirFileIdPath);
        if (dirStatus == null) {
            LOG.info("Can't get path for fileId: " + handle.getFileId());
            return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
        }
        if (!dirStatus.isDir()) {
            LOG.error("Can't readdir for regular file, fileId: " + handle.getFileId());
            return new READDIR3Response(Nfs3Status.NFS3ERR_NOTDIR);
        }
        long cookieVerf = request.getCookieVerf();
        if ((cookieVerf != 0) && (cookieVerf != dirStatus.getModificationTime())) {
            if (aixCompatMode) {
                // The AIX NFS client misinterprets RFC-1813 and will repeatedly send
                // the same cookieverf value even across VFS-level readdir calls,
                // instead of getting a new cookieverf for every VFS-level readdir
                // call, and reusing the cookieverf only in the event that multiple
                // incremental NFS-level readdir calls must be made to fetch all of
                // the directory entries. This means that whenever a readdir call is
                // made by an AIX NFS client for a given directory, and that directory
                // is subsequently modified, thus changing its mtime, no later readdir
                // calls will succeed from AIX for that directory until the FS is
                // unmounted/remounted. See HDFS-6549 for more info.
                LOG.warn("AIX compatibility mode enabled, ignoring cookieverf " + "mismatches.");
            } else {
                LOG.error("CookieVerf mismatch. request cookieVerf: " + cookieVerf + " dir cookieVerf: " + dirStatus.getModificationTime());
                return new READDIR3Response(Nfs3Status.NFS3ERR_BAD_COOKIE, Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug));
            }
        }
        if (cookie == 0) {
            // Get dotdot fileId
            String dotdotFileIdPath = dirFileIdPath + "/..";
            HdfsFileStatus dotdotStatus = dfsClient.getFileInfo(dotdotFileIdPath);
            if (dotdotStatus == null) {
                // This should not happen
                throw new IOException("Can't get path for handle path: " + dotdotFileIdPath);
            }
            dotdotFileId = dotdotStatus.getFileId();
        }
        // Get the list from the resume point
        byte[] startAfter;
        if (cookie == 0) {
            startAfter = HdfsFileStatus.EMPTY_NAME;
        } else {
            String inodeIdPath = Nfs3Utils.getFileIdPath(cookie);
            startAfter = inodeIdPath.getBytes(Charset.forName("UTF-8"));
        }
        dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
        postOpAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
        if (postOpAttr == null) {
            LOG.error("Can't get path for fileId: " + handle.getFileId());
            return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
        }
    } catch (IOException e) {
        LOG.warn("Exception ", e);
        int status = mapErrorStatus(e);
        return new READDIR3Response(status);
    }
    /**
     * Set up the dirents in the response. fileId is used as the cookie, with
     * one exception; otherwise the Linux client can either hang on the "ls"
     * command (on RHEL) or report "Too many levels of symbolic links" (Ubuntu).
     *
     * The problem is that only two items are returned, "." and "..", when the
     * namespace is empty. Both of them are "/" with the same cookie (the root
     * fileId), and the Linux client doesn't treat such a directory as a real
     * directory. Even though the NFS protocol specifies that the cookie is
     * opaque data, the Linux client doesn't tolerate an empty directory
     * returning the same cookie for both "." and "..".
     *
     * The workaround is to use 0 as the cookie for "." and to always return
     * "." as the first entry in the readdir/readdirplus response.
     */
    HdfsFileStatus[] fstatus = dlisting.getPartialListing();
    int n = (int) Math.min(fstatus.length, count - 2);
    boolean eof = (n >= fstatus.length) && !dlisting.hasMore();
    Entry3[] entries;
    if (cookie == 0) {
        entries = new Entry3[n + 2];
        entries[0] = new READDIR3Response.Entry3(postOpAttr.getFileId(), ".", 0);
        entries[1] = new READDIR3Response.Entry3(dotdotFileId, "..", dotdotFileId);
        for (int i = 2; i < n + 2; i++) {
            entries[i] = new READDIR3Response.Entry3(fstatus[i - 2].getFileId(), fstatus[i - 2].getLocalName(), fstatus[i - 2].getFileId());
        }
    } else {
        // Resume from the last readdir call. If the cookie is "..", the
        // result list covers the whole directory content, since HDFS uses
        // the name as the resume point.
        entries = new Entry3[n];
        for (int i = 0; i < n; i++) {
            entries[i] = new READDIR3Response.Entry3(fstatus[i].getFileId(), fstatus[i].getLocalName(), fstatus[i].getFileId());
        }
    }
    DirList3 dirList = new READDIR3Response.DirList3(entries, eof);
    return new READDIR3Response(Nfs3Status.NFS3_OK, postOpAttr, dirStatus.getModificationTime(), dirList);
}
Also used: DFSClient (org.apache.hadoop.hdfs.DFSClient), DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing), Entry3 (org.apache.hadoop.nfs.nfs3.response.READDIR3Response.Entry3), READDIR3Response (org.apache.hadoop.nfs.nfs3.response.READDIR3Response), FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle), Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes), IOException (java.io.IOException), DirList3 (org.apache.hadoop.nfs.nfs3.response.READDIR3Response.DirList3), READDIR3Request (org.apache.hadoop.nfs.nfs3.request.READDIR3Request), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)
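
Note that count budgets the number of dirents, with two slots always reserved for the synthesized "." and ".." entries, and that eof is reported only when this batch drains the partial listing and HDFS has no further entries. A standalone restatement of that arithmetic (an assumed helper, not part of the Hadoop sources):

// Assumed helper restating the sizing logic above: two slots are reserved
// for "." and "..", and eof requires both conditions to hold.
static boolean isEof(int partialListingLength, long count, boolean hasMore) {
    int n = (int) Math.min(partialListingLength, count - 2);
    return (n >= partialListingLength) && !hasMore;
}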

Example 4 with DirectoryListing

Use of org.apache.hadoop.hdfs.protocol.DirectoryListing in project hadoop by apache.

The class RpcProgramNfs3, method readdirplus.

@VisibleForTesting
READDIRPLUS3Response readdirplus(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_ACCES);
    }
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_SERVERFAULT);
    }
    READDIRPLUS3Request request = null;
    try {
        request = READDIRPLUS3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid READDIRPLUS request");
        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    FileHandle handle = request.getHandle();
    long cookie = request.getCookie();
    if (cookie < 0) {
        LOG.error("Invalid READDIRPLUS request, with negative cookie: " + cookie);
        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    long dirCount = request.getDirCount();
    if (dirCount <= 0) {
        LOG.info("Nonpositive dircount in invalid READDIRPLUS request: " + dirCount);
        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    int maxCount = request.getMaxCount();
    if (maxCount <= 0) {
        LOG.info("Nonpositive maxcount in invalid READDIRPLUS request: " + maxCount);
        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS READDIRPLUS fileId: " + handle.getFileId() + " cookie: " + cookie + " dirCount: " + dirCount + " maxCount: " + maxCount + " client: " + remoteAddress);
    }
    HdfsFileStatus dirStatus;
    DirectoryListing dlisting;
    Nfs3FileAttributes postOpDirAttr;
    long dotdotFileId = 0;
    HdfsFileStatus dotdotStatus = null;
    try {
        String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
        dirStatus = dfsClient.getFileInfo(dirFileIdPath);
        if (dirStatus == null) {
            LOG.info("Can't get path for fileId: " + handle.getFileId());
            return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
        }
        if (!dirStatus.isDir()) {
            LOG.error("Can't readdirplus for regular file, fileId: " + handle.getFileId());
            return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_NOTDIR);
        }
        long cookieVerf = request.getCookieVerf();
        if ((cookieVerf != 0) && (cookieVerf != dirStatus.getModificationTime())) {
            if (aixCompatMode) {
                // The AIX NFS client misinterprets RFC-1813 and will repeatedly send
                // the same cookieverf value even across VFS-level readdir calls,
                // instead of getting a new cookieverf for every VFS-level readdir
                // call. This means that whenever a readdir call is made by an AIX NFS
                // client for a given directory, and that directory is subsequently
                // modified, thus changing its mtime, no later readdir calls will
                // succeed for that directory from AIX until the FS is
                // unmounted/remounted. See HDFS-6549 for more info.
                LOG.warn("AIX compatibility mode enabled, ignoring cookieverf " + "mismatches.");
            } else {
                LOG.error("cookieverf mismatch. request cookieverf: " + cookieVerf + " dir cookieverf: " + dirStatus.getModificationTime());
                return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_BAD_COOKIE, Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug), 0, null);
            }
        }
        if (cookie == 0) {
            // Get dotdot fileId
            String dotdotFileIdPath = dirFileIdPath + "/..";
            dotdotStatus = dfsClient.getFileInfo(dotdotFileIdPath);
            if (dotdotStatus == null) {
                // This should not happen
                throw new IOException("Can't get path for handle path: " + dotdotFileIdPath);
            }
            dotdotFileId = dotdotStatus.getFileId();
        }
        // Get the list from the resume point
        byte[] startAfter;
        if (cookie == 0) {
            startAfter = HdfsFileStatus.EMPTY_NAME;
        } else {
            String inodeIdPath = Nfs3Utils.getFileIdPath(cookie);
            startAfter = inodeIdPath.getBytes(Charset.forName("UTF-8"));
        }
        dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
        postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
        if (postOpDirAttr == null) {
            LOG.info("Can't get path for fileId: " + handle.getFileId());
            return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
        }
    } catch (IOException e) {
        LOG.warn("Exception ", e);
        int status = mapErrorStatus(e);
        return new READDIRPLUS3Response(status);
    }
    // Set up the dirents in the response
    HdfsFileStatus[] fstatus = dlisting.getPartialListing();
    int n = (int) Math.min(fstatus.length, dirCount - 2);
    boolean eof = (n >= fstatus.length) && !dlisting.hasMore();
    READDIRPLUS3Response.EntryPlus3[] entries;
    if (cookie == 0) {
        entries = new READDIRPLUS3Response.EntryPlus3[n + 2];
        entries[0] = new READDIRPLUS3Response.EntryPlus3(postOpDirAttr.getFileId(), ".", 0, postOpDirAttr, new FileHandle(postOpDirAttr.getFileId()));
        entries[1] = new READDIRPLUS3Response.EntryPlus3(dotdotFileId, "..", dotdotFileId, Nfs3Utils.getNfs3FileAttrFromFileStatus(dotdotStatus, iug), new FileHandle(dotdotFileId));
        for (int i = 2; i < n + 2; i++) {
            long fileId = fstatus[i - 2].getFileId();
            FileHandle childHandle = new FileHandle(fileId);
            Nfs3FileAttributes attr;
            try {
                attr = writeManager.getFileAttr(dfsClient, childHandle, iug);
            } catch (IOException e) {
                LOG.error("Can't get file attributes for fileId: " + fileId, e);
                continue;
            }
            entries[i] = new READDIRPLUS3Response.EntryPlus3(fileId, fstatus[i - 2].getLocalName(), fileId, attr, childHandle);
        }
    } else {
        // Resume from the last readdirplus call. If the cookie is "..", the
        // result list covers the whole directory content, since HDFS uses
        // the name as the resume point.
        entries = new READDIRPLUS3Response.EntryPlus3[n];
        for (int i = 0; i < n; i++) {
            long fileId = fstatus[i].getFileId();
            FileHandle childHandle = new FileHandle(fileId);
            Nfs3FileAttributes attr;
            try {
                attr = writeManager.getFileAttr(dfsClient, childHandle, iug);
            } catch (IOException e) {
                LOG.error("Can't get file attributes for fileId: " + fileId, e);
                continue;
            }
            entries[i] = new READDIRPLUS3Response.EntryPlus3(fileId, fstatus[i].getLocalName(), fileId, attr, childHandle);
        }
    }
    DirListPlus3 dirListPlus = new READDIRPLUS3Response.DirListPlus3(entries, eof);
    return new READDIRPLUS3Response(Nfs3Status.NFS3_OK, postOpDirAttr, dirStatus.getModificationTime(), dirListPlus);
}
Also used: DFSClient (org.apache.hadoop.hdfs.DFSClient), DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing), FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle), Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes), READDIRPLUS3Response (org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response), IOException (java.io.IOException), READDIRPLUS3Request (org.apache.hadoop.nfs.nfs3.request.READDIRPLUS3Request), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), DirListPlus3 (org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response.DirListPlus3), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
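
READDIRPLUS carries two budgets where READDIR carries one: dirCount caps the number of entry names, while maxCount, per RFC-1813, bounds the byte size of the full reply including per-entry attributes and file handles (in the code above it is only validated). The dirent sizing itself matches readdir; a hypothetical helper restating it:

// Hypothetical helper restating the dirent sizing above: dirCount budgets
// entry names (two slots reserved for "." and ".."), and the first call
// (cookie == 0) additionally emits the "." and ".." entries themselves.
static int entriesReturned(int partialListingLength, long dirCount, long cookie) {
    int n = (int) Math.min(partialListingLength, dirCount - 2);
    return (cookie == 0) ? n + 2 : n;
}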

Example 5 with DirectoryListing

Use of org.apache.hadoop.hdfs.protocol.DirectoryListing in project hadoop by apache.

The class FSDirStatAndListingOp, method getListing.

/**
   * Get a partial listing of the indicated directory
   *
   * We will stop when any of the following conditions is met:
   * 1) this.lsLimit files have been added
   * 2) needLocation is true AND enough files have been added such
   * that at least this.lsLimit block locations are in the response
   *
   * @param fsd FSDirectory
   * @param iip the INodesInPath instance containing all the INodes along the
   *            path
   * @param startAfter the name to start listing after
   * @param needLocation whether block locations should be returned
   * @param includeStoragePolicy whether the storage policy should be returned
   * @return a partial listing starting after startAfter
   */
private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip, byte[] startAfter, boolean needLocation, boolean includeStoragePolicy) throws IOException {
    if (FSDirectory.isExactReservedName(iip.getPathComponents())) {
        return getReservedListing(fsd);
    }
    fsd.readLock();
    try {
        if (iip.isDotSnapshotDir()) {
            return getSnapshotsListing(fsd, iip, startAfter);
        }
        final int snapshot = iip.getPathSnapshotId();
        final INode targetNode = iip.getLastINode();
        if (targetNode == null) {
            return null;
        }
        byte parentStoragePolicy = includeStoragePolicy ? targetNode.getStoragePolicyID() : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
        if (!targetNode.isDirectory()) {
            // the target is a file: return a single-entry listing for it
            return new DirectoryListing(new HdfsFileStatus[] { createFileStatus(fsd, iip, null, parentStoragePolicy, needLocation) }, 0);
        }
        final INodeDirectory dirInode = targetNode.asDirectory();
        final ReadOnlyList<INode> contents = dirInode.getChildrenList(snapshot);
        int startChild = INodeDirectory.nextChild(contents, startAfter);
        int totalNumChildren = contents.size();
        int numOfListing = Math.min(totalNumChildren - startChild, fsd.getLsLimit());
        int locationBudget = fsd.getLsLimit();
        int listingCnt = 0;
        HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
        for (int i = 0; i < numOfListing && locationBudget > 0; i++) {
            INode child = contents.get(startChild + i);
            byte childStoragePolicy = (includeStoragePolicy && !child.isSymlink()) ? getStoragePolicyID(child.getLocalStoragePolicyID(), parentStoragePolicy) : parentStoragePolicy;
            listing[i] = createFileStatus(fsd, iip, child, childStoragePolicy, needLocation);
            listingCnt++;
            if (listing[i] instanceof HdfsLocatedFileStatus) {
                // Once we hit lsLimit locations, stop.
                // This helps to prevent excessively large response payloads.
                // Approximate #locations with locatedBlockCount() * repl_factor
                LocatedBlocks blks = ((HdfsLocatedFileStatus) listing[i]).getBlockLocations();
                locationBudget -= (blks == null) ? 0 : blks.locatedBlockCount() * listing[i].getReplication();
            }
        }
        // truncate return array if necessary
        if (listingCnt < numOfListing) {
            listing = Arrays.copyOf(listing, listingCnt);
        }
        return new DirectoryListing(listing, totalNumChildren - startChild - listingCnt);
    } finally {
        fsd.readUnlock();
    }
}
Also used: DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing), HdfsLocatedFileStatus (org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks)
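
Application code rarely drives this RPC directly; on HDFS the public FileSystem iterators page through these partial listings transparently, re-issuing getListing with the previous batch's last name. A minimal sketch against the stable public API (the path and configuration are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

// Each exhausted batch triggers another getListing call under the hood,
// so memory use stays bounded even for very large directories.
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
RemoteIterator<FileStatus> it = fs.listStatusIterator(new Path("/tmp1"));
while (it.hasNext()) {
    System.out.println(it.next().getPath());
}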

Aggregations

DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing): 16 uses
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 11 uses
IOException (java.io.IOException): 5 uses
Path (org.apache.hadoop.fs.Path): 5 uses
FileNotFoundException (java.io.FileNotFoundException): 4 uses
ArrayList (java.util.ArrayList): 4 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 4 uses
HdfsLocatedFileStatus (org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus): 4 uses
Test (org.junit.Test): 4 uses
FileStatus (org.apache.hadoop.fs.FileStatus): 3 uses
DFSClient (org.apache.hadoop.hdfs.DFSClient): 3 uses
Configuration (org.apache.hadoop.conf.Configuration): 2 uses
LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus): 2 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 2 uses
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 2 uses
Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes): 2 uses
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 1 use
URI (java.net.URI): 1 use
CountDownLatch (java.util.concurrent.CountDownLatch): 1 use
BlockLocation (org.apache.hadoop.fs.BlockLocation): 1 use