
Example 41 with INode

Use of org.apache.hadoop.hdfs.server.namenode.INode in project hadoop by apache.

From the class TestRenameWithSnapshots, method testRenameFromNonSDir2SDir.

/**
 * Test rename from a non-snapshottable dir to a snapshottable dir
 */
@Test(timeout = 60000)
public void testRenameFromNonSDir2SDir() throws Exception {
    final Path sdir1 = new Path("/dir1");
    final Path sdir2 = new Path("/dir2");
    hdfs.mkdirs(sdir1);
    hdfs.mkdirs(sdir2);
    final Path foo = new Path(sdir1, "foo");
    final Path bar = new Path(foo, "bar");
    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
    // make /dir2 snapshottable by taking a snapshot of it
    SnapshotTestHelper.createSnapshot(hdfs, sdir2, snap1);
    // rename /dir1/foo (non-snapshottable source) into the snapshottable /dir2
    final Path newfoo = new Path(sdir2, "foo");
    hdfs.rename(foo, newfoo);
    // the renamed inode should still be a plain INodeDirectory
    INode fooNode = fsdir.getINode4Write(newfoo.toString());
    assertTrue(fooNode instanceof INodeDirectory);
}
Also used : Path(org.apache.hadoop.fs.Path) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath) INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) INode(org.apache.hadoop.hdfs.server.namenode.INode) Test(org.junit.Test)
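
The snippet refers to fixture fields (hdfs, fsdir, snap1, BLOCKSIZE, REPL, SEED) that are declared elsewhere in TestRenameWithSnapshots and are not shown on this page. Below is a minimal sketch of such a fixture, assuming a MiniDFSCluster-based setup; the class name and the constant values are illustrative assumptions, not the project's verbatim code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.junit.After;
import org.junit.Before;

public class RenameWithSnapshotsFixtureSketch {
    // illustrative values; the real test class defines its own constants
    private static final long SEED = 0;
    private static final short REPL = 3;
    private static final long BLOCKSIZE = 1024;
    private static final String snap1 = "snap1";

    private Configuration conf;
    private MiniDFSCluster cluster;
    private FSDirectory fsdir;
    private DistributedFileSystem hdfs;

    @Before
    public void setUp() throws Exception {
        conf = new Configuration();
        cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(REPL).format(true).build();
        cluster.waitActive();
        // fsdir gives direct access to the NameNode's in-memory namespace tree,
        // which is what getINode4Write() inspects in the test above
        fsdir = cluster.getNamesystem().getFSDirectory();
        hdfs = cluster.getFileSystem();
    }

    @After
    public void tearDown() throws Exception {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}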

Example 42 with INode

Use of org.apache.hadoop.hdfs.server.namenode.INode in project hadoop by apache.

From the class TestRollingUpgradeRollback, method testRollbackCommand.

@Test
public void testRollbackCommand() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    final Path foo = new Path("/foo");
    final Path bar = new Path("/bar");
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final DFSAdmin dfsadmin = new DFSAdmin(conf);
        dfs.mkdirs(foo);
        // start rolling upgrade
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        Assert.assertEquals(0, dfsadmin.run(new String[] { "-rollingUpgrade", "prepare" }));
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        // create new directory
        dfs.mkdirs(bar);
        // check NNStorage
        NNStorage storage = cluster.getNamesystem().getFSImage().getStorage();
        // (startSegment, mkdir, endSegment)
        checkNNStorage(storage, 3, -1);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
    NameNode nn = null;
    try {
        nn = NameNode.createNameNode(new String[] { "-rollingUpgrade", "rollback" }, conf);
        // make sure /foo is still there, but /bar is not
        INode fooNode = nn.getNamesystem().getFSDirectory().getINode4Write(foo.toString());
        Assert.assertNotNull(fooNode);
        INode barNode = nn.getNamesystem().getFSDirectory().getINode4Write(bar.toString());
        Assert.assertNull(barNode);
        // check the details of NNStorage
        NNStorage storage = nn.getNamesystem().getFSImage().getStorage();
        // (startSegment, upgrade marker, mkdir, endSegment)
        checkNNStorage(storage, 3, 7);
    } finally {
        if (nn != null) {
            nn.stop();
            nn.join();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) INode(org.apache.hadoop.hdfs.server.namenode.INode) Configuration(org.apache.hadoop.conf.Configuration) NNStorage(org.apache.hadoop.hdfs.server.namenode.NNStorage) DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin) Test(org.junit.Test)
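
checkNNStorage(storage, imageTxId, trashEndTxId) is a private helper of the test and is not included on this page. The sketch below is a rough, hypothetical stand-in showing the kind of check such a helper performs: it verifies that every edits storage directory contains a finalized segment covering txids 1..imageTxId, which is what the (startSegment, mkdir, endSegment) comments above describe. The method name and assertions are assumptions; only the NNStorage/Storage file-name and directory-iterator APIs are real HDFS calls.

// class-level imports assumed for this sketch
import java.io.File;
import java.util.Iterator;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.junit.Assert;

// Hypothetical helper (would live inside the test class), not the project's checkNNStorage.
private static void checkFinalizedEdits(NNStorage storage, long imageTxId) {
    // e.g. "edits_0000000000000000001-0000000000000000003" for imageTxId = 3
    String finalizedName = NNStorage.getFinalizedEditsFileName(1, imageTxId);
    for (Iterator<StorageDirectory> it =
            storage.dirIterator(NameNodeDirType.EDITS); it.hasNext(); ) {
        StorageDirectory sd = it.next();
        File finalized = new File(sd.getCurrentDir(), finalizedName);
        Assert.assertTrue("missing " + finalized, finalized.exists());
    }
}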

Aggregations (types used alongside INode across the indexed examples, with usage counts):

INode (org.apache.hadoop.hdfs.server.namenode.INode): 42
Test (org.junit.Test): 23
Path (org.apache.hadoop.fs.Path): 22
INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory): 21
INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath): 15
INodeReference (org.apache.hadoop.hdfs.server.namenode.INodeReference): 11
DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff): 11
INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile): 8
QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts): 8
ArrayList (java.util.ArrayList): 6
IOException (java.io.IOException): 5
WithCount (org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount): 5
ChildrenDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff): 5
FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory): 4
NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException): 3
DiffReportEntry (org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry): 2
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 2
SnapshotAndINode (org.apache.hadoop.hdfs.server.namenode.INodeDirectory.SnapshotAndINode): 2
UnsupportedEncodingException (java.io.UnsupportedEncodingException): 1
ArrayDeque (java.util.ArrayDeque): 1