Example 1 with Snapshot

Use of org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot in project hadoop by apache.

From the class TestFSImageWithSnapshot, the method testSnapshotOnRoot:

/**
   * Test taking a snapshot on the root directory.
   */
@Test
public void testSnapshotOnRoot() throws Exception {
    final Path root = new Path("/");
    hdfs.allowSnapshot(root);
    hdfs.createSnapshot(root, "s1");
    cluster.shutdown();
    cluster = new MiniDFSCluster.Builder(conf).format(false).numDataNodes(NUM_DATANODES).build();
    cluster.waitActive();
    fsn = cluster.getNamesystem();
    hdfs = cluster.getFileSystem();
    // save namespace and restart cluster
    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    hdfs.saveNamespace();
    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    cluster.shutdown();
    cluster = new MiniDFSCluster.Builder(conf).format(false).numDataNodes(NUM_DATANODES).build();
    cluster.waitActive();
    fsn = cluster.getNamesystem();
    hdfs = cluster.getFileSystem();
    INodeDirectory rootNode = fsn.dir.getINode4Write(root.toString()).asDirectory();
    assertTrue("The children list of root should be empty", rootNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
    // one snapshot on root: s1
    List<DirectoryDiff> diffList = rootNode.getDiffs().asList();
    assertEquals(1, diffList.size());
    Snapshot s1 = rootNode.getSnapshot(DFSUtil.string2Bytes("s1"));
    assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
    // check SnapshotManager's snapshottable directory list
    assertEquals(1, fsn.getSnapshotManager().getNumSnapshottableDirs());
    SnapshottableDirectoryStatus[] sdirs = fsn.getSnapshotManager().getSnapshottableDirListing(null);
    assertEquals(root, sdirs[0].getFullPath());
    // save namespace and restart cluster
    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    hdfs.saveNamespace();
    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    cluster.shutdown();
    cluster = new MiniDFSCluster.Builder(conf).format(false).numDataNodes(NUM_DATANODES).build();
    cluster.waitActive();
    fsn = cluster.getNamesystem();
    hdfs = cluster.getFileSystem();
}
Also used: Path (org.apache.hadoop.fs.Path), Snapshot (org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot), DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff), SnapshottableDirectoryStatus (org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus), Test (org.junit.Test)
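The save-namespace-and-restart sequence above appears three times in the test. A minimal sketch of how it could be factored into a helper (the name saveNamespaceAndRestart is hypothetical; the conf, cluster, fsn, and hdfs fields are the same ones used by the test above):

    /** Hypothetical helper: save the namespace and restart the cluster,
     *  reusing the conf, cluster, fsn and hdfs fields from the test above. */
    private void saveNamespaceAndRestart() throws Exception {
        // Saving the namespace requires the NameNode to be in safe mode.
        hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        hdfs.saveNamespace();
        hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        // Restart without formatting so the saved FSImage is loaded back.
        cluster.shutdown();
        cluster = new MiniDFSCluster.Builder(conf).format(false)
            .numDataNodes(NUM_DATANODES).build();
        cluster.waitActive();
        fsn = cluster.getNamesystem();
        hdfs = cluster.getFileSystem();
    }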

Example 2 with Snapshot

Use of org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot in project hadoop by apache.

From the class TestSnapshotPathINodes, the method testSnapshotPathINodesAfterModification:

/** 
   * Test INodesInPath resolution for a snapshot file while modifying the
   * file after the snapshot is taken.
   */
@Test(timeout = 15000)
public void testSnapshotPathINodesAfterModification() throws Exception {
    // First check the INode for /TestSnapshot/sub1/file1
    byte[][] components = INode.getPathComponents(file1.toString());
    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
    // The number of inodes should be equal to components.length
    assertEquals(nodesInPath.length(), components.length);
    // The last INode should be associated with file1
    assertEquals(nodesInPath.getINode(components.length - 1).getFullPathName(), file1.toString());
    // record the modification time of the inode
    final long modTime = nodesInPath.getINode(nodesInPath.length() - 1).getModificationTime();
    // Create a snapshot for the dir, and check the inodes for the path
    // pointing to a snapshot file
    hdfs.allowSnapshot(sub1);
    hdfs.createSnapshot(sub1, "s3");
    // Modify file1
    DFSTestUtil.appendFile(hdfs, file1, "the content for appending");
    // Check the INodes for snapshot of file1
    String snapshotPath = sub1.toString() + "/.snapshot/s3/file1";
    components = INode.getPathComponents(snapshotPath);
    INodesInPath ssNodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
    // Length of ssNodesInPath should be (components.length - 1), since we
    // ignore ".snapshot"
    assertEquals(ssNodesInPath.length(), components.length - 1);
    final Snapshot s3 = getSnapshot(ssNodesInPath, "s3", 3);
    assertSnapshot(ssNodesInPath, true, s3, 3);
    // Check the INode for snapshot of file1
    INode snapshotFileNode = ssNodesInPath.getLastINode();
    assertEquals(snapshotFileNode.getLocalName(), file1.getName());
    assertTrue(snapshotFileNode.asFile().isWithSnapshot());
    // The modification time of the snapshot INode should be the same with the
    // original INode before modification
    assertEquals(modTime, snapshotFileNode.getModificationTime(ssNodesInPath.getPathSnapshotId()));
    // Check the INode for /TestSnapshot/sub1/file1 again
    components = INode.getPathComponents(file1.toString());
    INodesInPath newNodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
    assertSnapshot(newNodesInPath, false, s3, -1);
    // The number of inodes should be equal to components.length
    assertEquals(newNodesInPath.length(), components.length);
    // The last INode should be associated with file1
    final int last = components.length - 1;
    assertEquals(newNodesInPath.getINode(last).getFullPathName(), file1.toString());
    // The modification time of the INode for file1 should have been changed
    Assert.assertFalse(modTime == newNodesInPath.getINode(last).getModificationTime());
    hdfs.deleteSnapshot(sub1, "s3");
    hdfs.disallowSnapshot(sub1);
}
Also used: Snapshot (org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot), Test (org.junit.Test)
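The same invariant is visible through the public FileSystem API: after the append, the live file grows while the snapshot copy keeps the pre-append length and modification time. A minimal client-side sketch, assuming the /TestSnapshot/sub1 directory and snapshot s3 from the test (the class name is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SnapshotModTimeCheck {
        public static void main(String[] args) throws Exception {
            FileSystem fs = FileSystem.get(new Configuration());
            Path current = new Path("/TestSnapshot/sub1/file1");
            // Snapshot copies are addressed through the reserved ".snapshot" dir.
            Path inSnapshot = new Path("/TestSnapshot/sub1/.snapshot/s3/file1");
            FileStatus liveStatus = fs.getFileStatus(current);
            FileStatus snapStatus = fs.getFileStatus(inSnapshot);
            // The live file reflects the append; the snapshot copy does not.
            System.out.println("live len=" + liveStatus.getLen()
                + " snap len=" + snapStatus.getLen());
            System.out.println("live mtime=" + liveStatus.getModificationTime()
                + " snap mtime=" + snapStatus.getModificationTime());
        }
    }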

Example 3 with Snapshot

Use of org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot in project hadoop by apache.

From the class TestSnapshotPathINodes, the method testSnapshotPathINodesWithAddedFile:

/**
   * Test INodesInPath resolution for a snapshot file while adding a new
   * file after the snapshot is taken.
   */
@Test(timeout = 15000)
public void testSnapshotPathINodesWithAddedFile() throws Exception {
    // Create a snapshot for the dir, and check the inodes for the path
    // pointing to a snapshot file
    hdfs.allowSnapshot(sub1);
    hdfs.createSnapshot(sub1, "s4");
    // Add a new file /TestSnapshot/sub1/file3
    final Path file3 = new Path(sub1, "file3");
    DFSTestUtil.createFile(hdfs, file3, 1024, REPLICATION, seed);
    Snapshot s4;
    {
        // Check the inodes for /TestSnapshot/sub1/.snapshot/s4/file3
        String snapshotPath = sub1.toString() + "/.snapshot/s4/file3";
        byte[][] components = INode.getPathComponents(snapshotPath);
        INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
        // Length of inodes should be (components.length - 1), since we will ignore
        // ".snapshot" 
        assertEquals(nodesInPath.length(), components.length - 1);
        // The number of non-null inodes should be components.length - 2, since
        // snapshot of file3 does not exist
        assertEquals(getNumNonNull(nodesInPath), components.length - 2);
        s4 = getSnapshot(nodesInPath, "s4", 3);
        // SnapshotRootIndex should still be 3: {root, TestSnapshot, sub1, s4, null}
        assertSnapshot(nodesInPath, true, s4, 3);
        // Check the last INode in inodes, which should be null
        assertNull(nodesInPath.getINode(nodesInPath.length() - 1));
    }
    // Check the inodes for /TestSnapshot/sub1/file3
    byte[][] components = INode.getPathComponents(file3.toString());
    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
    // The number of inodes should be equal to components.length
    assertEquals(nodesInPath.length(), components.length);
    // The returned nodesInPath should be non-snapshot
    assertSnapshot(nodesInPath, false, s4, -1);
    // The last INode should be associated with file3
    assertEquals(nodesInPath.getINode(components.length - 1).getFullPathName(), file3.toString());
    assertEquals(nodesInPath.getINode(components.length - 2).getFullPathName(), sub1.toString());
    assertEquals(nodesInPath.getINode(components.length - 3).getFullPathName(), dir.toString());
    hdfs.deleteSnapshot(sub1, "s4");
    hdfs.disallowSnapshot(sub1);
}
Also used: Path (org.apache.hadoop.fs.Path), Snapshot (org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot), Test (org.junit.Test)
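From a client's point of view, the null INode above simply means the path does not exist under ".snapshot": a file created after the snapshot was taken has no snapshot copy. A small sketch, assuming the sub1/s4/file3 names from the test (the class name is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SnapshotMissingFileCheck {
        public static void main(String[] args) throws Exception {
            FileSystem fs = FileSystem.get(new Configuration());
            // file3 was created after snapshot s4 was taken, so it exists in
            // the live namespace but has no entry in the snapshot view.
            Path live = new Path("/TestSnapshot/sub1/file3");
            Path snap = new Path("/TestSnapshot/sub1/.snapshot/s4/file3");
            System.out.println(fs.exists(live)); // true
            System.out.println(fs.exists(snap)); // false
        }
    }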

Example 4 with Snapshot

Use of org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot in project hadoop by apache.

From the class TestSnapshotPathINodes, the method testSnapshotPathINodesAfterDeletion:

/** 
   * Test INodesInPath resolution for a snapshot file after deleting the
   * original file.
   */
@Test(timeout = 15000)
public void testSnapshotPathINodesAfterDeletion() throws Exception {
    // Create a snapshot for the dir, and check the inodes for the path
    // pointing to a snapshot file
    hdfs.allowSnapshot(sub1);
    hdfs.createSnapshot(sub1, "s2");
    // Delete the original file /TestSnapshot/sub1/file1
    hdfs.delete(file1, false);
    final Snapshot snapshot;
    {
        // Resolve the path for the snapshot file
        // /TestSnapshot/sub1/.snapshot/s2/file1
        String snapshotPath = sub1.toString() + "/.snapshot/s2/file1";
        byte[][] components = INode.getPathComponents(snapshotPath);
        INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
        // Length of inodes should be (components.length - 1), since we will ignore
        // ".snapshot" 
        assertEquals(nodesInPath.length(), components.length - 1);
        // SnapshotRootIndex should be 3: {root, TestSnapshot, sub1, s2, file1}
        snapshot = getSnapshot(nodesInPath, "s2", 3);
        assertSnapshot(nodesInPath, true, snapshot, 3);
        // Check the INode for file1 (snapshot file)
        final INode inode = nodesInPath.getLastINode();
        assertEquals(file1.getName(), inode.getLocalName());
        assertTrue(inode.asFile().isWithSnapshot());
    }
    // Check the INodes for path /TestSnapshot/sub1/file1
    byte[][] components = INode.getPathComponents(file1.toString());
    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
    // The length of inodes should be equal to components.length
    assertEquals(nodesInPath.length(), components.length);
    // The number of non-null elements should be components.length - 1 since
    // file1 has been deleted
    assertEquals(getNumNonNull(nodesInPath), components.length - 1);
    // The returned nodesInPath should be non-snapshot
    assertSnapshot(nodesInPath, false, snapshot, -1);
    // The last INode should be null, and the one before should be associated
    // with sub1
    assertNull(nodesInPath.getINode(components.length - 1));
    assertEquals(nodesInPath.getINode(components.length - 2).getFullPathName(), sub1.toString());
    assertEquals(nodesInPath.getINode(components.length - 3).getFullPathName(), dir.toString());
    hdfs.deleteSnapshot(sub1, "s2");
    hdfs.disallowSnapshot(sub1);
}
Also used: Snapshot (org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot), Test (org.junit.Test)
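Because the snapshot copy survives the delete, it can be used to restore the file. A minimal sketch, assuming the sub1/s2/file1 names from the test; it uses the standard FileUtil.copy client-side copy to write the snapshot contents back into the live namespace (the class name is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FileUtil;
    import org.apache.hadoop.fs.Path;

    public class RestoreFromSnapshot {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(conf);
            // The deleted file is still readable through the snapshot path.
            Path snapshotCopy = new Path("/TestSnapshot/sub1/.snapshot/s2/file1");
            Path restored = new Path("/TestSnapshot/sub1/file1");
            // Copy it back into the live namespace (deleteSource=false;
            // snapshot contents are read-only in any case).
            FileUtil.copy(fs, snapshotCopy, fs, restored, false, conf);
        }
    }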

Example 5 with Snapshot

Use of org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot in project hadoop by apache.

From the class FSDirSnapshotOp, the method getSnapshotFiles:

/** Get a collection of full snapshot paths given a file and a list of
   * snapshottable directories.
   * @param fsd the FSDirectory used to check whether each candidate path exists
   * @param lsf a list of snapshottable directory features
   * @param file full path of the file
   * @return collection of full paths of the snapshot copies of the file
   */
static Collection<String> getSnapshotFiles(FSDirectory fsd, List<DirectorySnapshottableFeature> lsf, String file) throws IOException {
    ArrayList<String> snaps = new ArrayList<>();
    for (DirectorySnapshottableFeature sf : lsf) {
        // for each snapshottable dir e.g. /dir1, /dir2
        final ReadOnlyList<Snapshot> lsnap = sf.getSnapshotList();
        for (Snapshot s : lsnap) {
            // for each snapshot name under snapshottable dir
            // e.g. /dir1/.snapshot/s1, /dir1/.snapshot/s2
            final String dirName = s.getRoot().getRootFullPathName();
            if (!file.startsWith(dirName)) {
                // file not in current snapshot root dir, no need to check other snaps
                break;
            }
            String snapname = s.getRoot().getFullPathName();
            if (dirName.equals(Path.SEPARATOR)) {
                // handle rootDir
                snapname += Path.SEPARATOR;
            }
            snapname += file.substring(file.indexOf(dirName) + dirName.length());
            if (fsd.getFSNamesystem().getFileInfo(snapname, true) != null) {
                snaps.add(snapname);
            }
        }
    }
    return snaps;
}
Also used: Snapshot (org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot), ChunkedArrayList (org.apache.hadoop.util.ChunkedArrayList), ArrayList (java.util.ArrayList), DirectorySnapshottableFeature (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature)
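The string arithmetic in the loop is easiest to see on a concrete case. A hypothetical trace (the directory, snapshot, and file names are made up for illustration), assuming a snapshottable directory /dir1 with snapshot s1 and a file /dir1/sub/file:

    public class SnapshotPathTrace {
        public static void main(String[] args) {
            // Hypothetical values, matching the variables in the loop above:
            String dirName = "/dir1";                // snapshottable root
            String snapname = "/dir1/.snapshot/s1";  // snapshot root full path
            String file = "/dir1/sub/file";          // file being looked up
            // file.indexOf(dirName) == 0, so the appended suffix is
            // everything in `file` after the snapshottable root:
            snapname += file.substring(file.indexOf(dirName) + dirName.length());
            // Prints "/dir1/.snapshot/s1/sub/file"; getSnapshotFiles() keeps
            // the path only if getFileInfo() confirms it exists.
            System.out.println(snapname);
        }
    }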

Aggregations

Snapshot (org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot): 8
Test (org.junit.Test): 5
Path (org.apache.hadoop.fs.Path): 3
DirectorySnapshottableFeature (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature): 2
FileNotFoundException (java.io.FileNotFoundException): 1
ArrayList (java.util.ArrayList): 1
DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing): 1
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 1
SnapshotException (org.apache.hadoop.hdfs.protocol.SnapshotException): 1
SnapshottableDirectoryStatus (org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus): 1
DirectoryWithSnapshotFeature (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature): 1
DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff): 1
ChunkedArrayList (org.apache.hadoop.util.ChunkedArrayList): 1