
Example 11 with DirectoryListing

use of org.apache.hadoop.hdfs.protocol.DirectoryListing in project hadoop by apache.

the class Hdfs method listStatus.

@Override
public FileStatus[] listStatus(Path f) throws IOException, UnresolvedLinkException {
    String src = getUriPath(f);
    // fetch the first batch of entries in the directory
    DirectoryListing thisListing = dfs.listPaths(src, HdfsFileStatus.EMPTY_NAME);
    if (thisListing == null) {
        // the directory does not exist
        throw new FileNotFoundException("File " + f + " does not exist.");
    }
    HdfsFileStatus[] partialListing = thisListing.getPartialListing();
    if (!thisListing.hasMore()) {
        // got all entries of the directory
        FileStatus[] stats = new FileStatus[partialListing.length];
        for (int i = 0; i < partialListing.length; i++) {
            stats[i] = partialListing[i].makeQualified(getUri(), f);
        }
        return stats;
    }
    // The directory has more entries than fit in one batch, so fetch the rest.
    // Estimate the total number of entries in the directory.
    int totalNumEntries = partialListing.length + thisListing.getRemainingEntries();
    ArrayList<FileStatus> listing = new ArrayList<FileStatus>(totalNumEntries);
    // add the first batch of entries to the array list
    for (HdfsFileStatus fileStatus : partialListing) {
        listing.add(fileStatus.makeQualified(getUri(), f));
    }
    // now fetch more entries
    do {
        thisListing = dfs.listPaths(src, thisListing.getLastName());
        if (thisListing == null) {
            // the directory is deleted
            throw new FileNotFoundException("File " + f + " does not exist.");
        }
        partialListing = thisListing.getPartialListing();
        for (HdfsFileStatus fileStatus : partialListing) {
            listing.add(fileStatus.makeQualified(getUri(), f));
        }
    } while (thisListing.hasMore());
    return listing.toArray(new FileStatus[listing.size()]);
}
Also used : DirectoryListing(org.apache.hadoop.hdfs.protocol.DirectoryListing) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) HdfsLocatedFileStatus(org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus) FileNotFoundException(java.io.FileNotFoundException) ArrayList(java.util.ArrayList)
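
The same batch-and-resume loop can also be driven directly against a DFSClient. Below is a minimal sketch, assuming a NameNode reachable at hdfs://localhost:8020 and a /user directory; both are placeholders for illustration, not part of the example above.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

public class PagedListingSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // placeholder NameNode address; adjust for a real cluster
        try (DFSClient client = new DFSClient(URI.create("hdfs://localhost:8020"), conf)) {
            byte[] startAfter = HdfsFileStatus.EMPTY_NAME;
            DirectoryListing batch;
            do {
                // fetch one batch, resuming after the last name returned so far
                batch = client.listPaths("/user", startAfter);
                if (batch == null) {
                    break; // the directory was deleted between batches
                }
                for (HdfsFileStatus status : batch.getPartialListing()) {
                    System.out.println(status.getLocalName());
                }
                startAfter = batch.getLastName();
            } while (batch.hasMore());
        }
    }
}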

Example 12 with DirectoryListing

use of org.apache.hadoop.hdfs.protocol.DirectoryListing in project hadoop by apache.

the class FSDirStatAndListingOp method getSnapshotsListing.

/**
   * Get a listing of all the snapshots of a snapshottable directory
   */
private static DirectoryListing getSnapshotsListing(FSDirectory fsd, INodesInPath iip, byte[] startAfter) throws IOException {
    Preconditions.checkState(fsd.hasReadLock());
    Preconditions.checkArgument(iip.isDotSnapshotDir(), "%s does not end with %s", iip.getPath(), HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
    // drop off the null .snapshot component
    iip = iip.getParentINodesInPath();
    final String dirPath = iip.getPath();
    final INode node = iip.getLastINode();
    final INodeDirectory dirNode = INodeDirectory.valueOf(node, dirPath);
    final DirectorySnapshottableFeature sf = dirNode.getDirectorySnapshottableFeature();
    if (sf == null) {
        throw new SnapshotException("Directory is not a snapshottable directory: " + dirPath);
    }
    final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
    int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
    skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
    int numOfListing = Math.min(snapshots.size() - skipSize, fsd.getLsLimit());
    final HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
    for (int i = 0; i < numOfListing; i++) {
        Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
        listing[i] = createFileStatus(fsd, iip, sRoot, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, false);
    }
    return new DirectoryListing(listing, snapshots.size() - skipSize - numOfListing);
}
Also used : DirectoryListing(org.apache.hadoop.hdfs.protocol.DirectoryListing) Snapshot(org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) DirectorySnapshottableFeature(org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature) SnapshotException(org.apache.hadoop.hdfs.protocol.SnapshotException)
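
The skipSize arithmetic above relies on the usual binary-search return convention: a non-negative index when startAfter is found (resume one past it), and -(insertionPoint) - 1 when it is not (resume at the insertion point). A small self-contained sketch of the same arithmetic using java.util.Arrays.binarySearch, with made-up snapshot names:

import java.util.Arrays;

public class SkipSizeSketch {
    public static void main(String[] args) {
        String[] snapshots = {"s1", "s2", "s3", "s4"}; // sorted, as the snapshot list is
        int idx = Arrays.binarySearch(snapshots, "s2");
        // found: skip everything up to and including the match; not found: skip to the insertion point
        int skipSize = idx < 0 ? -idx - 1 : idx + 1;
        System.out.println(Arrays.toString(
            Arrays.copyOfRange(snapshots, skipSize, snapshots.length))); // prints [s3, s4]
    }
}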

Example 13 with DirectoryListing

use of org.apache.hadoop.hdfs.protocol.DirectoryListing in project hadoop by apache.

the class FSNamesystem method getListing.

/**
   * Get a partial listing of the indicated directory
   *
   * @param src the directory name
   * @param startAfter the name to start after
   * @param needLocation if blockLocations need to be returned
   * @return a partial listing starting after startAfter
   * 
   * @throws AccessControlException if access is denied
   * @throws UnresolvedLinkException if a symbolic link is encountered
   * @throws IOException if another I/O error occurred
   */
DirectoryListing getListing(String src, byte[] startAfter, boolean needLocation) throws IOException {
    checkOperation(OperationCategory.READ);
    final String operationName = "listStatus";
    DirectoryListing dl = null;
    readLock();
    try {
        checkOperation(NameNode.OperationCategory.READ);
        dl = getListingInt(dir, src, startAfter, needLocation);
    } catch (AccessControlException e) {
        logAuditEvent(false, operationName, src);
        throw e;
    } finally {
        readUnlock(operationName);
    }
    logAuditEvent(true, operationName, src);
    return dl;
}
Also used : DirectoryListing(org.apache.hadoop.hdfs.protocol.DirectoryListing) AccessControlException(org.apache.hadoop.security.AccessControlException) SnapshotAccessControlException(org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)
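
When needLocation is true, each returned entry is an HdfsLocatedFileStatus carrying block locations. For HDFS, one public call that is ultimately served by this code path is FileSystem#listLocatedStatus; a minimal sketch, assuming the default Configuration points at an HDFS cluster and a /user directory exists (both placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class LocatedListingSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        RemoteIterator<LocatedFileStatus> it = fs.listLocatedStatus(new Path("/user"));
        while (it.hasNext()) {
            LocatedFileStatus status = it.next();
            // block locations come back with the listing, avoiding a second RPC per file
            System.out.println(status.getPath() + " blocks=" + status.getBlockLocations().length);
        }
    }
}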

Example 14 with DirectoryListing

use of org.apache.hadoop.hdfs.protocol.DirectoryListing in project hadoop by apache.

the class NamenodeFsck method checkDir.

private void checkDir(String path, Result replRes, Result ecRes) throws IOException {
    if (snapshottableDirs != null && snapshottableDirs.contains(path)) {
        String snapshotPath = (path.endsWith(Path.SEPARATOR) ? path : path + Path.SEPARATOR) + HdfsConstants.DOT_SNAPSHOT_DIR;
        HdfsFileStatus snapshotFileInfo = namenode.getRpcServer().getFileInfo(snapshotPath);
        check(snapshotPath, snapshotFileInfo, replRes, ecRes);
    }
    byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;
    DirectoryListing thisListing;
    if (showFiles) {
        out.println(path + " <dir>");
    }
    totalDirs++;
    do {
        assert lastReturnedName != null;
        thisListing = namenode.getRpcServer().getListing(path, lastReturnedName, false);
        if (thisListing == null) {
            return;
        }
        HdfsFileStatus[] files = thisListing.getPartialListing();
        for (int i = 0; i < files.length; i++) {
            check(path, files[i], replRes, ecRes);
        }
        lastReturnedName = thisListing.getLastName();
    } while (thisListing.hasMore());
}
Also used : DirectoryListing(org.apache.hadoop.hdfs.protocol.DirectoryListing) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus)
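
checkDir pages through the listing by hand, passing getLastName() back as the cursor for the next RPC. Application code normally uses FileSystem#listStatusIterator instead, which hides the cursor handling; a minimal sketch with a placeholder path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class IteratorListingSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // the iterator advances through the directory without materializing it all at once
        RemoteIterator<FileStatus> it = fs.listStatusIterator(new Path("/user"));
        while (it.hasNext()) {
            FileStatus status = it.next();
            System.out.println((status.isDirectory() ? "<dir> " : "") + status.getPath());
        }
    }
}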

Example 15 with DirectoryListing

use of org.apache.hadoop.hdfs.protocol.DirectoryListing in project hadoop by apache.

the class TestErasureCodingPolicies method testReplicatedFileUnderECDir.

/**
   * For pre-existing files (with replicated blocks) in an EC dir, getListing
   * should report them as non-EC.
   */
@Test
public void testReplicatedFileUnderECDir() throws IOException {
    final Path dir = new Path("/ec");
    final Path replicatedFile = new Path(dir, "replicatedFile");
    // create a file with replicated blocks
    DFSTestUtil.createFile(fs, replicatedFile, 0, (short) 3, 0L);
    // set ec policy on dir
    fs.setErasureCodingPolicy(dir, StripedFileTestUtil.getDefaultECPolicy().getName());
    // create a file which should be using ec
    final Path ecSubDir = new Path(dir, "ecSubDir");
    final Path ecFile = new Path(ecSubDir, "ecFile");
    DFSTestUtil.createFile(fs, ecFile, 0, (short) 1, 0L);
    assertNull(fs.getClient().getFileInfo(replicatedFile.toString()).getErasureCodingPolicy());
    assertNotNull(fs.getClient().getFileInfo(ecFile.toString()).getErasureCodingPolicy());
    // list "/ec"
    DirectoryListing listing = fs.getClient().listPaths(dir.toString(), new byte[0], false);
    HdfsFileStatus[] files = listing.getPartialListing();
    assertEquals(2, files.length);
    // the listing is always sorted according to the local name
    assertEquals(ecSubDir.getName(), files[0].getLocalName()); // ecSubDir
    assertNotNull(files[0].getErasureCodingPolicy());
    assertEquals(replicatedFile.getName(), files[1].getLocalName()); // replicatedFile
    assertNull(files[1].getErasureCodingPolicy());
    // list "/ec/ecSubDir"
    files = fs.getClient().listPaths(ecSubDir.toString(), new byte[0], false).getPartialListing();
    assertEquals(1, files.length);
    assertEquals(ecFile.getName(), files[0].getLocalName()); // ecFile
    assertNotNull(files[0].getErasureCodingPolicy());
    // list "/"
    files = fs.getClient().listPaths("/", new byte[0], false).getPartialListing();
    assertEquals(1, files.length);
    assertEquals(dir.getName(), files[0].getLocalName()); // ec
    assertNotNull(files[0].getErasureCodingPolicy());
    // rename "/ec/ecSubDir/ecFile" to "/ecFile"
    assertTrue(fs.rename(ecFile, new Path("/ecFile")));
    files = fs.getClient().listPaths("/", new byte[0], false).getPartialListing();
    assertEquals(2, files.length);
    assertEquals(dir.getName(), files[0].getLocalName()); // ec
    assertNotNull(files[0].getErasureCodingPolicy());
    assertEquals(ecFile.getName(), files[1].getLocalName());
    assertNotNull(files[1].getErasureCodingPolicy());
}
Also used : Path(org.apache.hadoop.fs.Path) DirectoryListing(org.apache.hadoop.hdfs.protocol.DirectoryListing) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) Test(org.junit.Test)
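
The behaviour this test verifies, that a file written before the directory gets an EC policy stays replicated while a file written afterwards is erasure coded, can also be observed through the public DistributedFileSystem API. A minimal sketch, assuming an HDFS 3.x cluster as the default filesystem with the RS-6-3-1024k policy enabled (the address, paths, and policy name are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class EcListingSketch {
    public static void main(String[] args) throws Exception {
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(new Configuration());
        Path dir = new Path("/ec");
        dfs.mkdirs(dir);
        dfs.create(new Path(dir, "replicatedFile")).close(); // written before the policy is set
        dfs.setErasureCodingPolicy(dir, "RS-6-3-1024k");      // policy name must be enabled on the cluster
        dfs.create(new Path(dir, "ecFile")).close();          // written after, so it is erasure coded
        System.out.println(dfs.getFileStatus(new Path(dir, "replicatedFile")).isErasureCoded()); // false
        System.out.println(dfs.getFileStatus(new Path(dir, "ecFile")).isErasureCoded());         // true
    }
}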

Aggregations

DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing) 16
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus) 11
IOException (java.io.IOException) 5
Path (org.apache.hadoop.fs.Path) 5
FileNotFoundException (java.io.FileNotFoundException) 4
ArrayList (java.util.ArrayList) 4
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) 4
HdfsLocatedFileStatus (org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus) 4
Test (org.junit.Test) 4
FileStatus (org.apache.hadoop.fs.FileStatus) 3
DFSClient (org.apache.hadoop.hdfs.DFSClient) 3
Configuration (org.apache.hadoop.conf.Configuration) 2
LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus) 2
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 2
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle) 2
Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) 2
VisibleForTesting (com.google.common.annotations.VisibleForTesting) 1
URI (java.net.URI) 1
CountDownLatch (java.util.concurrent.CountDownLatch) 1
BlockLocation (org.apache.hadoop.fs.BlockLocation) 1