Example 16 with DirectoryListing

use of org.apache.hadoop.hdfs.protocol.DirectoryListing in project hadoop by apache.

From the class TestINodeFile, method testFilesInGetListingOps:

@Test
public void testFilesInGetListingOps() throws Exception {
    final Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        final DistributedFileSystem hdfs = cluster.getFileSystem();
        final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
        hdfs.mkdirs(new Path("/tmp"));
        DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);
        DFSTestUtil.createFile(hdfs, new Path("/tmp/f2"), 0, (short) 1, 0);
        DFSTestUtil.createFile(hdfs, new Path("/tmp/f3"), 0, (short) 1, 0);
        // An empty startAfter token returns all three files in the directory
        DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp", HdfsFileStatus.EMPTY_NAME, false);
        assertTrue(dl.getPartialListing().length == 3);
        // startAfter = "f2" skips f1 and f2, so only f3 is returned
        String f2 = "f2";
        dl = cluster.getNameNodeRpc().getListing("/tmp", f2.getBytes(), false);
        assertTrue(dl.getPartialListing().length == 1);
        // startAfter may also be given as a /.reserved/.inodes path, which resolves to the same file
        INode f2INode = fsdir.getINode("/tmp/f2");
        String f2InodePath = "/.reserved/.inodes/" + f2INode.getId();
        dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(), false);
        assertTrue(dl.getPartialListing().length == 1);
        // Test the deleted startAfter file
        hdfs.delete(new Path("/tmp/f2"), false);
        try {
            dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(), false);
            fail("Didn't get exception for the deleted startAfter token.");
        } catch (IOException e) {
            assertTrue(e instanceof DirectoryListingStartAfterNotFoundException);
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
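
The startAfter token exercised above is also the mechanism for paging through large directories: each DirectoryListing reports whether more entries remain (hasMore) and which name to resume from (getLastName). The following sketch is not part of the Hadoop test; ListingPager is a hypothetical helper, assuming the caller already holds a ClientProtocol handle such as cluster.getNameNodeRpc(). It drains a directory by feeding each page's getLastName() back in as the next startAfter token.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

// Hypothetical helper, not taken from the test above.
public class ListingPager {

    // Collects every entry of a directory by repeatedly calling getListing,
    // resuming each call from the last name returned by the previous page.
    public static List<HdfsFileStatus> listAll(ClientProtocol namenode, String dir) throws IOException {
        List<HdfsFileStatus> results = new ArrayList<>();
        // An empty startAfter token starts the listing at the beginning;
        // getListing returns null if the directory does not exist, so this
        // sketch simply returns an empty list in that case.
        DirectoryListing page = namenode.getListing(dir, HdfsFileStatus.EMPTY_NAME, false);
        while (page != null) {
            for (HdfsFileStatus status : page.getPartialListing()) {
                results.add(status);
            }
            if (!page.hasMore()) {
                break;
            }
            // The name of the last returned entry becomes the next startAfter token.
            page = namenode.getListing(dir, page.getLastName(), false);
        }
        return results;
    }
}

DFSClient and DistributedFileSystem implement essentially this loop internally when listing directories that span more than one RPC response.
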
Also used: Path (org.apache.hadoop.fs.Path), DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), DirectoryListingStartAfterNotFoundException (org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Test (org.junit.Test)

Aggregations

DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing): 16 uses
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 11 uses
IOException (java.io.IOException): 5 uses
Path (org.apache.hadoop.fs.Path): 5 uses
FileNotFoundException (java.io.FileNotFoundException): 4 uses
ArrayList (java.util.ArrayList): 4 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 4 uses
HdfsLocatedFileStatus (org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus): 4 uses
Test (org.junit.Test): 4 uses
FileStatus (org.apache.hadoop.fs.FileStatus): 3 uses
DFSClient (org.apache.hadoop.hdfs.DFSClient): 3 uses
Configuration (org.apache.hadoop.conf.Configuration): 2 uses
LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus): 2 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 2 uses
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 2 uses
Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes): 2 uses
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 1 use
URI (java.net.URI): 1 use
CountDownLatch (java.util.concurrent.CountDownLatch): 1 use
BlockLocation (org.apache.hadoop.fs.BlockLocation): 1 use