
Example 6 with FSDirectory

use of org.apache.hadoop.hdfs.server.namenode.FSDirectory in project hadoop by apache.

the class TestFileAppend4 method testAppendInsufficientLocations.

/**
   * Test that an append with no locations fails with an exception
   * showing insufficient locations.
   */
@Test(timeout = 60000)
public void testAppendInsufficientLocations() throws Exception {
    Configuration conf = new Configuration();
    // lower heartbeat interval for fast recognition of DN death
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    DistributedFileSystem fileSystem = null;
    try {
        // create a file with replication 2
        fileSystem = cluster.getFileSystem();
        Path f = new Path("/testAppend");
        FSDataOutputStream create = fileSystem.create(f, (short) 2);
        create.write("/testAppend".getBytes());
        create.close();
        // Check for replications
        DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
        // Shut down all DNs that have the last block location for the file
        LocatedBlocks lbs = fileSystem.dfs.getNamenode().getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
        List<DataNode> dnsOfCluster = cluster.getDataNodes();
        DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().getLocations();
        for (DataNode dn : dnsOfCluster) {
            for (DatanodeInfo loc : dnsWithLocations) {
                if (dn.getDatanodeId().equals(loc)) {
                    dn.shutdown();
                    DFSTestUtil.waitForDatanodeDeath(dn);
                }
            }
        }
        // Wait till 0 replication is recognized
        DFSTestUtil.waitReplication(fileSystem, f, (short) 0);
        // Append to the file; at this point the remaining live DNs do not
        // have the block.
        try {
            fileSystem.append(f);
            fail("Append should fail because insufficient locations");
        } catch (IOException e) {
            LOG.info("Expected exception: ", e);
        }
        FSDirectory dir = cluster.getNamesystem().getFSDirectory();
        final INodeFile inode = INodeFile.valueOf(dir.getINode("/testAppend"), "/testAppend");
        assertTrue("File should remain closed", !inode.isUnderConstruction());
    } finally {
        if (null != fileSystem) {
            fileSystem.close();
        }
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), Configuration (org.apache.hadoop.conf.Configuration), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory), IOException (java.io.IOException), INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
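
The closing assertion of this test is a generally useful pattern for verifying NameNode-side state in MiniDFSCluster tests. A minimal sketch of it as a reusable helper, assuming a running cluster (the class and method names are ours, not Hadoop's):

import java.io.IOException;

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;

import static org.junit.Assert.assertFalse;

class NamenodeStateAsserts {
    // Assert that a file's inode is closed (not under construction),
    // e.g. after an append attempt was rejected.
    static void assertFileClosed(MiniDFSCluster cluster, String path)
            throws IOException {
        FSDirectory dir = cluster.getNamesystem().getFSDirectory();
        INodeFile inode = INodeFile.valueOf(dir.getINode(path), path);
        assertFalse("File " + path + " should not be under construction",
                inode.isUnderConstruction());
    }
}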

Example 7 with FSDirectory

use of org.apache.hadoop.hdfs.server.namenode.FSDirectory in project hadoop by apache.

the class TestRenameWithSnapshots method testRenameUndo_6.

/**
   * Test the rename undo when removing the dst node fails.
   */
@Test
public void testRenameUndo_6() throws Exception {
    final Path test = new Path("/test");
    final Path dir1 = new Path(test, "dir1");
    final Path dir2 = new Path(test, "dir2");
    final Path sub_dir2 = new Path(dir2, "subdir");
    final Path subsub_dir2 = new Path(sub_dir2, "subdir");
    hdfs.mkdirs(dir1);
    hdfs.mkdirs(subsub_dir2);
    final Path foo = new Path(dir1, "foo");
    hdfs.mkdirs(foo);
    SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
    SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
    // set ns quota of dir2 to 4; current usage is 3 (dir2 itself, sub_dir2,
    // and subsub_dir2), so the remaining quota is 1
    hdfs.setQuota(dir2, 4, Long.MAX_VALUE - 1);
    FSDirectory fsdir2 = Mockito.spy(fsdir);
    Mockito.doThrow(new RuntimeException("fake exception")).when(fsdir2).removeLastINode((INodesInPath) Mockito.anyObject());
    Whitebox.setInternalState(fsn, "dir", fsdir2);
    // Rename /test/dir1/foo to subsub_dir2 with OVERWRITE. Removing the
    // existing destination node subsub_dir2 is mocked to fail, so the
    // rename must be undone.
    try {
        hdfs.rename(foo, subsub_dir2, Rename.OVERWRITE);
        fail("Expect QuotaExceedException");
    } catch (Exception e) {
        String msg = "fake exception";
        GenericTestUtils.assertExceptionContains(msg, e);
    }
    // check the undo
    assertTrue(hdfs.exists(foo));
    INodeDirectory dir1Node = fsdir2.getINode4Write(dir1.toString()).asDirectory();
    List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID));
    assertEquals(1, childrenList.size());
    INode fooNode = childrenList.get(0);
    assertTrue(fooNode.asDirectory().isWithSnapshot());
    assertSame(dir1Node, fooNode.getParent());
    List<DirectoryDiff> diffList = dir1Node.getDiffs().asList();
    assertEquals(1, diffList.size());
    DirectoryDiff diff = diffList.get(0);
    assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
    assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
    // check dir2
    INodeDirectory dir2Node = fsdir2.getINode4Write(dir2.toString()).asDirectory();
    assertTrue(dir2Node.isSnapshottable());
    QuotaCounts counts = dir2Node.computeQuotaUsage(fsdir.getBlockStoragePolicySuite());
    assertEquals(3, counts.getNameSpace());
    assertEquals(0, counts.getStorageSpace());
    childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory().getChildrenList(Snapshot.CURRENT_STATE_ID));
    assertEquals(1, childrenList.size());
    INode subdir2Node = childrenList.get(0);
    assertSame(dir2Node, subdir2Node.getParent());
    assertSame(subdir2Node, fsdir2.getINode4Write(sub_dir2.toString()));
    INode subsubdir2Node = fsdir2.getINode4Write(subsub_dir2.toString());
    assertTrue(subsubdir2Node.getClass() == INodeDirectory.class);
    assertSame(subdir2Node, subsubdir2Node.getParent());
    diffList = (dir2Node).getDiffs().asList();
    assertEquals(1, diffList.size());
    diff = diffList.get(0);
    assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
    assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
Also used: Path (org.apache.hadoop.fs.Path), INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath), INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory), INode (org.apache.hadoop.hdfs.server.namenode.INode), DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff), QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts), FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory), NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException), IOException (java.io.IOException), Test (org.junit.Test)
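
The heart of this test is fault injection on the live directory tree: spy the real FSDirectory with Mockito, force exactly one internal call to throw, then patch the spy into the FSNamesystem so the next rename hits the fault. Isolated as a sketch under the same assumptions as the test (fsn and fsdir come from the running mini-cluster; "dir" is the FSNamesystem field being patched):

// All calls pass through to the real FSDirectory except the stubbed one.
FSDirectory spyDir = Mockito.spy(fsdir);
// Fail the single step under test: removing the last inode on a path.
Mockito.doThrow(new RuntimeException("fake exception"))
        .when(spyDir).removeLastINode((INodesInPath) Mockito.anyObject());
// Swap the spy into the namesystem; subsequent operations use it.
Whitebox.setInternalState(fsn, "dir", spyDir);

A test doing this should restore the original afterwards with Whitebox.setInternalState(fsn, "dir", fsdir), so later tests run against an unmocked namesystem.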

Example 8 with FSDirectory

use of org.apache.hadoop.hdfs.server.namenode.FSDirectory in project hadoop by apache.

the class TestRenameWithSnapshots method testRenameUndo_5.

/**
   * Test rename when the operation will exceed the quota in the dst tree.
   */
@Test
public void testRenameUndo_5() throws Exception {
    final Path test = new Path("/test");
    final Path dir1 = new Path(test, "dir1");
    final Path dir2 = new Path(test, "dir2");
    final Path subdir2 = new Path(dir2, "subdir2");
    hdfs.mkdirs(dir1);
    hdfs.mkdirs(subdir2);
    final Path foo = new Path(dir1, "foo");
    final Path bar = new Path(foo, "bar");
    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
    SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
    SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
    // set ns quota of dir2 to 4; current usage is 2 (dir2 itself and
    // subdir2), so the remaining quota is 2
    hdfs.setQuota(dir2, 4, Long.MAX_VALUE - 1);
    final Path foo2 = new Path(subdir2, foo.getName());
    FSDirectory fsdir2 = Mockito.spy(fsdir);
    Mockito.doThrow(new NSQuotaExceededException("fake exception")).when(fsdir2).addLastINode((INodesInPath) Mockito.anyObject(), (INode) Mockito.anyObject(), (FsPermission) Mockito.anyObject(), Mockito.anyBoolean());
    Whitebox.setInternalState(fsn, "dir", fsdir2);
    // rename /test/dir1/foo to /test/dir2/subdir2/foo. 
    // FSDirectory#verifyQuota4Rename will pass since the remaining quota is 2.
    // However, the rename operation will fail since we let addLastINode throw
    // NSQuotaExceededException
    boolean rename = hdfs.rename(foo, foo2);
    assertFalse(rename);
    // check the undo
    assertTrue(hdfs.exists(foo));
    assertTrue(hdfs.exists(bar));
    INodeDirectory dir1Node = fsdir2.getINode4Write(dir1.toString()).asDirectory();
    List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID));
    assertEquals(1, childrenList.size());
    INode fooNode = childrenList.get(0);
    assertTrue(fooNode.asDirectory().isWithSnapshot());
    INode barNode = fsdir2.getINode4Write(bar.toString());
    assertTrue(barNode.getClass() == INodeFile.class);
    assertSame(fooNode, barNode.getParent());
    List<DirectoryDiff> diffList = dir1Node.getDiffs().asList();
    assertEquals(1, diffList.size());
    DirectoryDiff diff = diffList.get(0);
    assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
    assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
    // check dir2
    INodeDirectory dir2Node = fsdir2.getINode4Write(dir2.toString()).asDirectory();
    assertTrue(dir2Node.isSnapshottable());
    QuotaCounts counts = dir2Node.computeQuotaUsage(fsdir.getBlockStoragePolicySuite());
    assertEquals(2, counts.getNameSpace());
    assertEquals(0, counts.getStorageSpace());
    childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory().getChildrenList(Snapshot.CURRENT_STATE_ID));
    assertEquals(1, childrenList.size());
    INode subdir2Node = childrenList.get(0);
    assertSame(dir2Node, subdir2Node.getParent());
    assertSame(subdir2Node, fsdir2.getINode4Write(subdir2.toString()));
    diffList = dir2Node.getDiffs().asList();
    assertEquals(1, diffList.size());
    diff = diffList.get(0);
    assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
    assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
Also used: Path (org.apache.hadoop.fs.Path), INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath), INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory), INode (org.apache.hadoop.hdfs.server.namenode.INode), DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff), QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts), FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory), NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException), INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile), Test (org.junit.Test)
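
Note the contrast with Example 7: there the fault fired while removing the existing destination (removeLastINode) and surfaced to the caller as an exception, whereas here it fires while adding the renamed inode under the destination (addLastINode), FSDirectory rolls the half-done rename back internally, and failure is reported through the boolean return value. The caller-side check therefore reduces to these three assertions from the test:

assertFalse(hdfs.rename(foo, foo2)); // rename failed and was undone internally
assertTrue(hdfs.exists(foo));        // source subtree fully restored
assertTrue(hdfs.exists(bar));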

Example 9 with FSDirectory

use of org.apache.hadoop.hdfs.server.namenode.FSDirectory in project hadoop by apache.

the class TestSnapshotManager method testSnapshotLimits.

/**
   * Test that the global limit on snapshots is honored.
   */
@Test(timeout = 10000)
public void testSnapshotLimits() throws Exception {
    // Setup mock objects for SnapshotManager.createSnapshot.
    //
    INodeDirectory ids = mock(INodeDirectory.class);
    FSDirectory fsdir = mock(FSDirectory.class);
    INodesInPath iip = mock(INodesInPath.class);
    SnapshotManager sm = spy(new SnapshotManager(fsdir));
    doReturn(ids).when(sm).getSnapshottableRoot((INodesInPath) anyObject());
    doReturn(testMaxSnapshotLimit).when(sm).getMaxSnapshotID();
    // Create testMaxSnapshotLimit snapshots. These should all succeed.
    //
    for (Integer i = 0; i < testMaxSnapshotLimit; ++i) {
        sm.createSnapshot(iip, "dummy", i.toString());
    }
    // Attempt to create one more snapshot. This should fail due to
    // snapshot ID rollover.
    //
    try {
        sm.createSnapshot(iip, "dummy", "shouldFailSnapshot");
        Assert.fail("Expected SnapshotException not thrown");
    } catch (SnapshotException se) {
        Assert.assertTrue(StringUtils.toLowerCase(se.getMessage()).contains("rollover"));
    }
    // Delete a snapshot to free up a slot.
    //
    sm.deleteSnapshot(iip, "", mock(INode.ReclaimContext.class));
    // Attempt to create a snapshot again. It should still fail due
    // to snapshot ID rollover.
    //
    try {
        sm.createSnapshot(iip, "dummy", "shouldFailSnapshot2");
        Assert.fail("Expected SnapshotException not thrown");
    } catch (SnapshotException se) {
        Assert.assertTrue(StringUtils.toLowerCase(se.getMessage()).contains("rollover"));
    }
}
Also used: INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory), INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath), FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory), SnapshotException (org.apache.hadoop.hdfs.protocol.SnapshotException), Test (org.junit.Test)
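
The second failure after deleteSnapshot is the interesting assertion: snapshot IDs come from a counter that only moves forward, so deleting a snapshot frees a slot in the snapshot list but not in the ID space. A toy model of that allocation behavior (names are ours; SnapshotManager's real fields differ):

class ToySnapshotIds {
    private int snapshotCounter = 0; // monotonically increasing, never reused
    private final int maxSnapshotId;

    ToySnapshotIds(int maxSnapshotId) {
        this.maxSnapshotId = maxSnapshotId;
    }

    int allocate() {
        if (snapshotCounter >= maxSnapshotId) {
            // corresponds to the "rollover" SnapshotException in the test
            throw new IllegalStateException("snapshot ID rollover");
        }
        return ++snapshotCounter;
    }

    void delete(int id) {
        // removes the snapshot, but deliberately does not reclaim the ID,
        // which is why the test still fails after deleteSnapshot()
    }
}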

Example 10 with FSDirectory

use of org.apache.hadoop.hdfs.server.namenode.FSDirectory in project hadoop by apache.

the class TestOfflineImageViewerWithStripedBlocks method testFileSize.

private void testFileSize(int numBytes) throws IOException, UnresolvedLinkException, SnapshotAccessControlException {
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    File orgFsimage = null;
    Path file = new Path("/eczone/striped");
    FSDataOutputStream out = fs.create(file, true);
    byte[] bytes = DFSTestUtil.generateSequentialBytes(0, numBytes);
    out.write(bytes);
    out.close();
    // Write results to the fsimage file
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    fs.saveNamespace();
    // Determine location of fsimage file
    orgFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil.getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
    if (orgFsimage == null) {
        throw new RuntimeException("Didn't generate or can't find fsimage");
    }
    FSImageLoader loader = FSImageLoader.load(orgFsimage.getAbsolutePath());
    String fileStatus = loader.getFileStatus("/eczone/striped");
    long expectedFileSize = bytes.length;
    // Verify space consumed present in BlockInfoStriped
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
    assertEquals(StripedFileTestUtil.getDefaultECPolicy().getId(), fileNode.getErasureCodingPolicyID());
    assertTrue("Invalid block size", fileNode.getBlocks().length > 0);
    long actualFileSize = 0;
    for (BlockInfo blockInfo : fileNode.getBlocks()) {
        assertTrue("Didn't find block striped information", blockInfo instanceof BlockInfoStriped);
        actualFileSize += blockInfo.getNumBytes();
    }
    assertEquals("Wrongly computed file size contains striped blocks", expectedFileSize, actualFileSize);
    // Verify space consumed present in filestatus
    String EXPECTED_FILE_SIZE = "\"length\":" + String.valueOf(expectedFileSize);
    assertTrue("Wrongly computed file size contains striped blocks, file status:" + fileStatus + ". Expected file size is : " + EXPECTED_FILE_SIZE, fileStatus.contains(EXPECTED_FILE_SIZE));
}
Also used: Path (org.apache.hadoop.fs.Path), BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), File (java.io.File), INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile)
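
Two details are easy to miss here. First, saveNamespace() requires the NameNode to be in safe mode, hence the enter/leave calls around it; the sequence for forcing a fresh fsimage to disk, usable from any DistributedFileSystem handle with superuser rights, is:

fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false); // checkpointing requires safe mode
fs.saveNamespace();                                   // write a new fsimage
fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false); // resume normal operation

Second, FSImageLoader.getFileStatus returns the file status as a JSON string, which is why the final assertion checks for the "length":<size> substring rather than comparing numbers.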

Aggregations

FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory): 10 uses
Test (org.junit.Test): 7 uses
Path (org.apache.hadoop.fs.Path): 6 uses
IOException (java.io.IOException): 4 uses
INode (org.apache.hadoop.hdfs.server.namenode.INode): 4 uses
INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory): 4 uses
INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile): 4 uses
INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath): 4 uses
Configuration (org.apache.hadoop.conf.Configuration): 2 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 2 uses
NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException): 2 uses
QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts): 2 uses
DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff): 2 uses
File (java.io.File): 1 use
InterruptedIOException (java.io.InterruptedIOException): 1 use
Date (java.util.Date): 1 use
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 1 use
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 1 use
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 1 use
Block (org.apache.hadoop.hdfs.protocol.Block): 1 use