Example 21 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

Class TestRenameWithSnapshots, method testRenameMoreThanOnceAcrossSnapDirs.

/**
   * Test renaming a dir and a file multiple times across snapshottable
   * directories: /dir1/foo -> /dir2/foo -> /dir3/foo -> /dir2/foo -> /dir1/foo
   *
   * Snapshots are created only at the beginning (before the renames).
   */
@Test
public void testRenameMoreThanOnceAcrossSnapDirs() throws Exception {
    final Path sdir1 = new Path("/dir1");
    final Path sdir2 = new Path("/dir2");
    final Path sdir3 = new Path("/dir3");
    hdfs.mkdirs(sdir1);
    hdfs.mkdirs(sdir2);
    hdfs.mkdirs(sdir3);
    final Path foo_dir1 = new Path(sdir1, "foo");
    final Path bar1_dir1 = new Path(foo_dir1, "bar1");
    final Path bar2_dir1 = new Path(sdir1, "bar");
    DFSTestUtil.createFile(hdfs, bar1_dir1, BLOCKSIZE, REPL, SEED);
    DFSTestUtil.createFile(hdfs, bar2_dir1, BLOCKSIZE, REPL, SEED);
    SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
    SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
    SnapshotTestHelper.createSnapshot(hdfs, sdir3, "s3");
    // 1. /dir1/foo -> /dir2/foo, /dir1/bar -> /dir2/bar
    final Path foo_dir2 = new Path(sdir2, "foo");
    hdfs.rename(foo_dir1, foo_dir2);
    final Path bar2_dir2 = new Path(sdir2, "bar");
    hdfs.rename(bar2_dir1, bar2_dir2);
    // restart the cluster and check fsimage
    restartClusterAndCheckImage(true);
    // modification on /dir2/foo and /dir2/bar
    final Path bar1_dir2 = new Path(foo_dir2, "bar1");
    hdfs.setReplication(bar1_dir2, REPL_1);
    hdfs.setReplication(bar2_dir2, REPL_1);
    // check
    final Path bar1_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo/bar1");
    final Path bar2_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "bar");
    final Path bar1_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo/bar1");
    final Path bar2_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "bar");
    assertTrue(hdfs.exists(bar1_s1));
    assertTrue(hdfs.exists(bar2_s1));
    assertFalse(hdfs.exists(bar1_s2));
    assertFalse(hdfs.exists(bar2_s2));
    FileStatus statusBar1 = hdfs.getFileStatus(bar1_s1);
    assertEquals(REPL, statusBar1.getReplication());
    statusBar1 = hdfs.getFileStatus(bar1_dir2);
    assertEquals(REPL_1, statusBar1.getReplication());
    FileStatus statusBar2 = hdfs.getFileStatus(bar2_s1);
    assertEquals(REPL, statusBar2.getReplication());
    statusBar2 = hdfs.getFileStatus(bar2_dir2);
    assertEquals(REPL_1, statusBar2.getReplication());
    // 2. /dir2/foo -> /dir3/foo, /dir2/bar -> /dir3/bar
    final Path foo_dir3 = new Path(sdir3, "foo");
    hdfs.rename(foo_dir2, foo_dir3);
    final Path bar2_dir3 = new Path(sdir3, "bar");
    hdfs.rename(bar2_dir2, bar2_dir3);
    // restart the cluster and check fsimage
    restartClusterAndCheckImage(true);
    // modification on /dir3/foo and /dir3/bar
    final Path bar1_dir3 = new Path(foo_dir3, "bar1");
    hdfs.setReplication(bar1_dir3, REPL_2);
    hdfs.setReplication(bar2_dir3, REPL_2);
    // check
    final Path bar1_s3 = SnapshotTestHelper.getSnapshotPath(sdir3, "s3", "foo/bar1");
    final Path bar2_s3 = SnapshotTestHelper.getSnapshotPath(sdir3, "s3", "bar");
    assertTrue(hdfs.exists(bar1_s1));
    assertTrue(hdfs.exists(bar2_s1));
    assertFalse(hdfs.exists(bar1_s2));
    assertFalse(hdfs.exists(bar2_s2));
    assertFalse(hdfs.exists(bar1_s3));
    assertFalse(hdfs.exists(bar2_s3));
    statusBar1 = hdfs.getFileStatus(bar1_s1);
    assertEquals(REPL, statusBar1.getReplication());
    statusBar1 = hdfs.getFileStatus(bar1_dir3);
    assertEquals(REPL_2, statusBar1.getReplication());
    statusBar2 = hdfs.getFileStatus(bar2_s1);
    assertEquals(REPL, statusBar2.getReplication());
    statusBar2 = hdfs.getFileStatus(bar2_dir3);
    assertEquals(REPL_2, statusBar2.getReplication());
    // 3. /dir3/foo -> /dir2/foo, /dir3/bar -> /dir2/bar
    hdfs.rename(foo_dir3, foo_dir2);
    hdfs.rename(bar2_dir3, bar2_dir2);
    // restart the cluster and check fsimage
    restartClusterAndCheckImage(true);
    // modification on /dir2/foo
    hdfs.setReplication(bar1_dir2, REPL);
    hdfs.setReplication(bar2_dir2, REPL);
    // check
    assertTrue(hdfs.exists(bar1_s1));
    assertTrue(hdfs.exists(bar2_s1));
    assertFalse(hdfs.exists(bar1_s2));
    assertFalse(hdfs.exists(bar2_s2));
    assertFalse(hdfs.exists(bar1_s3));
    assertFalse(hdfs.exists(bar2_s3));
    statusBar1 = hdfs.getFileStatus(bar1_s1);
    assertEquals(REPL, statusBar1.getReplication());
    statusBar1 = hdfs.getFileStatus(bar1_dir2);
    assertEquals(REPL, statusBar1.getReplication());
    statusBar2 = hdfs.getFileStatus(bar2_s1);
    assertEquals(REPL, statusBar2.getReplication());
    statusBar2 = hdfs.getFileStatus(bar2_dir2);
    assertEquals(REPL, statusBar2.getReplication());
    // 4. /dir2/foo -> /dir1/foo, /dir2/bar -> /dir1/bar
    hdfs.rename(foo_dir2, foo_dir1);
    hdfs.rename(bar2_dir2, bar2_dir1);
    // check the internal details
    INodeReference fooRef = fsdir.getINode4Write(foo_dir1.toString()).asReference();
    INodeReference.WithCount fooWithCount = (WithCount) fooRef.getReferredINode();
    // only 2 references: one in deleted list of sdir1, one in created list of
    // sdir1
    assertEquals(2, fooWithCount.getReferenceCount());
    INodeDirectory foo = fooWithCount.asDirectory();
    assertEquals(1, foo.getDiffs().asList().size());
    INodeDirectory sdir1Node = fsdir.getINode(sdir1.toString()).asDirectory();
    Snapshot s1 = sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
    assertEquals(s1.getId(), foo.getDirectoryWithSnapshotFeature().getLastSnapshotId());
    INodeFile bar1 = fsdir.getINode4Write(bar1_dir1.toString()).asFile();
    assertEquals(1, bar1.getDiffs().asList().size());
    assertEquals(s1.getId(), bar1.getDiffs().getLastSnapshotId());
    INodeReference barRef = fsdir.getINode4Write(bar2_dir1.toString()).asReference();
    INodeReference.WithCount barWithCount = (WithCount) barRef.getReferredINode();
    assertEquals(2, barWithCount.getReferenceCount());
    INodeFile bar = barWithCount.asFile();
    assertEquals(1, bar.getDiffs().asList().size());
    assertEquals(s1.getId(), bar.getDiffs().getLastSnapshotId());
    // restart the cluster and check fsimage
    restartClusterAndCheckImage(true);
    // delete foo
    hdfs.delete(foo_dir1, true);
    restartClusterAndCheckImage(true);
    hdfs.delete(bar2_dir1, true);
    // restart the cluster and check fsimage
    restartClusterAndCheckImage(true);
    // check
    assertTrue(hdfs.exists(bar1_s1));
    assertTrue(hdfs.exists(bar2_s1));
    assertFalse(hdfs.exists(bar1_s2));
    assertFalse(hdfs.exists(bar2_s2));
    assertFalse(hdfs.exists(bar1_s3));
    assertFalse(hdfs.exists(bar2_s3));
    assertFalse(hdfs.exists(foo_dir1));
    assertFalse(hdfs.exists(bar1_dir1));
    assertFalse(hdfs.exists(bar2_dir1));
    statusBar1 = hdfs.getFileStatus(bar1_s1);
    assertEquals(REPL, statusBar1.getReplication());
    statusBar2 = hdfs.getFileStatus(bar2_s1);
    assertEquals(REPL, statusBar2.getReplication());
    final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
    fooRef = fsdir.getINode(foo_s1.toString()).asReference();
    fooWithCount = (WithCount) fooRef.getReferredINode();
    assertEquals(1, fooWithCount.getReferenceCount());
    barRef = fsdir.getINode(bar2_s1.toString()).asReference();
    barWithCount = (WithCount) barRef.getReferredINode();
    assertEquals(1, barWithCount.getReferenceCount());
}
Also used: Path (org.apache.hadoop.fs.Path), INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath), INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory), FileStatus (org.apache.hadoop.fs.FileStatus), WithCount (org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount), INodeReference (org.apache.hadoop.hdfs.server.namenode.INodeReference), INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile), Test (org.junit.Test)
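The test above checks the rename chain through NameNode internals (fsdir, INodeReference, diff lists). The user-visible invariant underneath it fits in a much smaller program. The sketch below is hypothetical, not part of the Hadoop test suite; the class name, paths, and snapshot name are illustrative. It shows that a file renamed out of a snapshottable directory stays reachable through the snapshot path.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class RenameAcrossSnapshotRootsSketch {
    public static void main(String[] args) throws Exception {
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
        try {
            cluster.waitActive();
            DistributedFileSystem hdfs = cluster.getFileSystem();
            Path dir1 = new Path("/dir1");
            Path dir2 = new Path("/dir2");
            hdfs.mkdirs(dir1);
            hdfs.mkdirs(dir2);
            Path file = new Path(dir1, "foo");
            hdfs.create(file).close();
            hdfs.allowSnapshot(dir1);
            hdfs.createSnapshot(dir1, "s1");
            // Move the file across the two directories; only dir1 is snapshotted.
            hdfs.rename(file, new Path(dir2, "foo"));
            // The snapshot copy must still resolve although the live file moved.
            Path inSnapshot = new Path(dir1, ".snapshot/s1/foo");
            System.out.println("visible in snapshot: " + hdfs.exists(inSnapshot));
        } finally {
            cluster.shutdown();
        }
    }
}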

Example 22 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

Class TestRenameWithSnapshots, method testRenameUndo_7.

/**
   * Test rename to an invalid name (xxx/.snapshot)
   */
@Test
public void testRenameUndo_7() throws Exception {
    final Path root = new Path("/");
    final Path foo = new Path(root, "foo");
    final Path bar = new Path(foo, "bar");
    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
    // create a snapshot on root
    SnapshotTestHelper.createSnapshot(hdfs, root, snap1);
    // rename bar to /foo/.snapshot which is invalid
    final Path invalid = new Path(foo, HdfsConstants.DOT_SNAPSHOT_DIR);
    try {
        hdfs.rename(bar, invalid);
        fail("expect exception since invalid name is used for rename");
    } catch (Exception e) {
        GenericTestUtils.assertExceptionContains("\"" + HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name", e);
    }
    // check
    INodeDirectory rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
    INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
    ReadOnlyList<INode> children = fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
    assertEquals(1, children.size());
    List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
    assertEquals(1, diffList.size());
    DirectoryDiff diff = diffList.get(0);
    // this diff is generated while renaming
    Snapshot s1 = rootNode.getSnapshot(DFSUtil.string2Bytes(snap1));
    assertEquals(s1.getId(), diff.getSnapshotId());
    // after undo, the diff should be empty
    assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
    assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
    // bar was converted to filewithsnapshot while renaming
    INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
    assertSame(barNode, children.get(0));
    assertSame(fooNode, barNode.getParent());
    List<FileDiff> barDiffList = barNode.getDiffs().asList();
    assertEquals(1, barDiffList.size());
    FileDiff barDiff = barDiffList.get(0);
    assertEquals(s1.getId(), barDiff.getSnapshotId());
    // restart cluster multiple times to make sure the fsimage and edits log are
    // correct. Note that when loading fsimage, foo and bar will be converted 
    // back to normal INodeDirectory and INodeFile since they do not store any 
    // snapshot data
    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    hdfs.saveNamespace();
    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    cluster.shutdown();
    cluster = new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPL).build();
    cluster.waitActive();
    restartClusterAndCheckImage(true);
}
Also used: Path (org.apache.hadoop.fs.Path), INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath), INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory), INode (org.apache.hadoop.hdfs.server.namenode.INode), DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff), NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException), IOException (java.io.IOException), INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile), Test (org.junit.Test)
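The reserved-name rule this test exercises is visible without any NameNode internals. A hypothetical standalone sketch (class name and paths are illustrative): any rename whose destination ends in a ".snapshot" component is rejected, and the namespace is left as it was.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class ReservedNameRenameSketch {
    public static void main(String[] args) throws Exception {
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
        try {
            cluster.waitActive();
            DistributedFileSystem hdfs = cluster.getFileSystem();
            Path foo = new Path("/foo");
            Path bar = new Path(foo, "bar");
            hdfs.mkdirs(foo);
            hdfs.create(bar).close();
            // ".snapshot" is reserved in every directory, so this must fail.
            Path invalid = new Path(foo, HdfsConstants.DOT_SNAPSHOT_DIR);
            try {
                hdfs.rename(bar, invalid);
                System.out.println("unexpected: rename succeeded");
            } catch (Exception e) {
                System.out.println("rejected as expected: " + e.getMessage());
            }
            // The source is untouched after the failed rename.
            System.out.println("bar still present: " + hdfs.exists(bar));
        } finally {
            cluster.shutdown();
        }
    }
}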

Example 23 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

Class TestRenameWithSnapshots, method testRenameDirAndDeleteSnapshot_5.

/**
   * This test demonstrates that {@link INodeDirectory#removeChild} and
   * {@link INodeDirectory#addChild} should use {@link INode#isInLatestSnapshot}
   * to check if the added/removed child should be recorded in snapshots.
   */
@Test
public void testRenameDirAndDeleteSnapshot_5() throws Exception {
    final Path dir1 = new Path("/dir1");
    final Path dir2 = new Path("/dir2");
    final Path dir3 = new Path("/dir3");
    hdfs.mkdirs(dir1);
    hdfs.mkdirs(dir2);
    hdfs.mkdirs(dir3);
    final Path foo = new Path(dir1, "foo");
    hdfs.mkdirs(foo);
    SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
    final Path bar = new Path(foo, "bar");
    // create file bar, and foo will become an INodeDirectory with snapshot
    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
    // delete snapshot s1. now foo is not in any snapshot
    hdfs.deleteSnapshot(dir1, "s1");
    SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
    // rename /dir1/foo to /dir2/foo
    final Path foo2 = new Path(dir2, foo.getName());
    hdfs.rename(foo, foo2);
    // rename /dir2/foo/bar to /dir3/foo/bar
    final Path bar2 = new Path(dir2, "foo/bar");
    final Path bar3 = new Path(dir3, "bar");
    hdfs.rename(bar2, bar3);
    // delete /dir2/foo. Since it is not in any snapshot, we will call its 
    // destroy function. If we do not use isInLatestSnapshot in removeChild and
    // addChild methods in INodeDirectory (with snapshot), the file bar will be 
    // stored in the deleted list of foo, and will be destroyed.
    hdfs.delete(foo2, true);
    // check if /dir3/bar still exists
    assertTrue(hdfs.exists(bar3));
    INodeFile barNode = (INodeFile) fsdir.getINode4Write(bar3.toString());
    assertSame(fsdir.getINode4Write(dir3.toString()), barNode.getParent());
}
Also used: Path (org.apache.hadoop.fs.Path), INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath), INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile), Test (org.junit.Test)
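The scenario can be reproduced in a simplified, standalone form. The sketch below is hypothetical (names and paths illustrative) and skips the rename of foo itself, keeping only the sequence that matters: foo once carried snapshot state, the snapshot is gone, a child is moved out, and then foo is destroyed; the moved child must survive.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DeleteAfterRenameSketch {
    public static void main(String[] args) throws Exception {
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
        try {
            cluster.waitActive();
            DistributedFileSystem hdfs = cluster.getFileSystem();
            Path dir1 = new Path("/dir1");
            Path dir3 = new Path("/dir3");
            Path foo = new Path(dir1, "foo");
            hdfs.mkdirs(foo);
            hdfs.mkdirs(dir3);
            // Snapshot dir1, add a child under foo, then drop the snapshot:
            // foo has carried snapshot state but is now in no snapshot.
            hdfs.allowSnapshot(dir1);
            hdfs.createSnapshot(dir1, "s1");
            Path bar = new Path(foo, "bar");
            hdfs.create(bar).close();
            hdfs.deleteSnapshot(dir1, "s1");
            // Move bar out, then destroy foo; bar must not go down with it.
            Path bar3 = new Path(dir3, "bar");
            hdfs.rename(bar, bar3);
            hdfs.delete(foo, true);
            System.out.println("bar survives: " + hdfs.exists(bar3));
        } finally {
            cluster.shutdown();
        }
    }
}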

Example 24 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

Class TestSnapshotBlocksMap, method assertBlockCollection.

// Resolve the path to its INodeFile and verify each of the file's blocks
// against the BlockManager via the two-arg overload of this helper.
static INodeFile assertBlockCollection(String path, int numBlocks, final FSDirectory dir, final BlockManager blkManager) throws Exception {
    final INodeFile file = INodeFile.valueOf(dir.getINode(path), path);
    assertEquals(numBlocks, file.getBlocks().length);
    for (BlockInfo b : file.getBlocks()) {
        assertBlockCollection(blkManager, file, b);
    }
    return file;
}
Also used: BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile)
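For context, here is a hedged standalone sketch of what the helper verifies: resolve a path to its INodeFile and confirm that each block the inode lists is the instance registered in the BlockManager's blocks map. The identity check via getStoredBlock stands in for the two-argument assertBlockCollection overload, whose body is not shown in this example; the class name, path, and file size are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;

public class BlockCollectionSketch {
    public static void main(String[] args) throws Exception {
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
        try {
            cluster.waitActive();
            DistributedFileSystem hdfs = cluster.getFileSystem();
            Path file = new Path("/file0");
            // One 1024-byte file, replication 1, seed 0.
            DFSTestUtil.createFile(hdfs, file, 1024, (short) 1, 0L);
            FSDirectory dir = cluster.getNamesystem().getFSDirectory();
            BlockManager bm = cluster.getNamesystem().getBlockManager();
            // Same resolution step the helper performs.
            INodeFile inode = INodeFile.valueOf(dir.getINode(file.toString()), file.toString());
            for (BlockInfo b : inode.getBlocks()) {
                // Each listed block should be the stored instance in the blocks map.
                System.out.println("stored: " + (bm.getStoredBlock(b) == b));
            }
        } finally {
            cluster.shutdown();
        }
    }
}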

Example 25 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

Class TestRenameWithSnapshots, method testRenameAndAppend.

/**
   * Rename a file and then append to the same file.
   */
@Test
public void testRenameAndAppend() throws Exception {
    final Path sdir1 = new Path("/dir1");
    final Path sdir2 = new Path("/dir2");
    hdfs.mkdirs(sdir1);
    hdfs.mkdirs(sdir2);
    final Path foo = new Path(sdir1, "foo");
    DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
    SnapshotTestHelper.createSnapshot(hdfs, sdir1, snap1);
    final Path foo2 = new Path(sdir2, "foo");
    hdfs.rename(foo, foo2);
    INode fooRef = fsdir.getINode4Write(foo2.toString());
    assertTrue(fooRef instanceof INodeReference.DstReference);
    FSDataOutputStream out = hdfs.append(foo2);
    try {
        byte[] content = new byte[1024];
        (new Random()).nextBytes(content);
        out.write(content);
        fooRef = fsdir.getINode4Write(foo2.toString());
        assertTrue(fooRef instanceof INodeReference.DstReference);
        INodeFile fooNode = fooRef.asFile();
        assertTrue(fooNode.isWithSnapshot());
        assertTrue(fooNode.isUnderConstruction());
    } finally {
        if (out != null) {
            out.close();
        }
    }
    fooRef = fsdir.getINode4Write(foo2.toString());
    assertTrue(fooRef instanceof INodeReference.DstReference);
    INodeFile fooNode = fooRef.asFile();
    assertTrue(fooNode.isWithSnapshot());
    assertFalse(fooNode.isUnderConstruction());
    restartClusterAndCheckImage(true);
}
Also used: Path (org.apache.hadoop.fs.Path), INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath), INode (org.apache.hadoop.hdfs.server.namenode.INode), Random (java.util.Random), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), INodeReference (org.apache.hadoop.hdfs.server.namenode.INodeReference), INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile), Test (org.junit.Test)
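The explicit try/finally around the append stream exists so the test can inspect the inode while the stream is still open. When no mid-append inspection is needed, the same rename-then-append flow is shorter with try-with-resources. A hypothetical standalone sketch, with illustrative paths and sizes:

import java.util.Random;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class RenameThenAppendSketch {
    public static void main(String[] args) throws Exception {
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
        try {
            cluster.waitActive();
            DistributedFileSystem hdfs = cluster.getFileSystem();
            Path dir1 = new Path("/dir1");
            Path dir2 = new Path("/dir2");
            hdfs.mkdirs(dir1);
            hdfs.mkdirs(dir2);
            Path foo = new Path(dir1, "foo");
            DFSTestUtil.createFile(hdfs, foo, 1024, (short) 1, 0L);
            hdfs.allowSnapshot(dir1);
            hdfs.createSnapshot(dir1, "s1");
            Path foo2 = new Path(dir2, "foo");
            hdfs.rename(foo, foo2);
            byte[] content = new byte[1024];
            new Random().nextBytes(content);
            // try-with-resources closes the stream even if write() throws.
            try (FSDataOutputStream out = hdfs.append(foo2)) {
                out.write(content);
            }
            System.out.println("length after append: " + hdfs.getFileStatus(foo2).getLen());
        } finally {
            cluster.shutdown();
        }
    }
}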

Aggregations

INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile): 35
Path (org.apache.hadoop.fs.Path): 24
Test (org.junit.Test): 23
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 14
INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory): 10
INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath): 9
INode (org.apache.hadoop.hdfs.server.namenode.INode): 7
FileStatus (org.apache.hadoop.fs.FileStatus): 6
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6
INodeReference (org.apache.hadoop.hdfs.server.namenode.INodeReference): 5
TestINodeFile (org.apache.hadoop.hdfs.server.namenode.TestINodeFile): 5
IOException (java.io.IOException): 4
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 4
Block (org.apache.hadoop.hdfs.protocol.Block): 4
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 4
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 4
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo): 4
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 3
FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory): 3
DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff): 3