Example 1 with DirectoryDiffList

Use of org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList in project hadoop by apache.

From the class TestSnapshotDeletion, the method testDeleteEarliestSnapshot2 is shown below.
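The method references fixture state (cluster, fsn, fsdir, blockmanager, hdfs, the dir and sub paths, and the seed/size/replication constants) that TestSnapshotDeletion initializes in its setUp; the excerpt omits that code. A minimal sketch of such a setup, with illustrative values rather than the exact ones from the Hadoop source:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.junit.Before;

// illustrative test constants; the real class defines its own values
protected static final long seed = 0;
protected static final short REPLICATION = 3;
protected static final short REPLICATION_1 = 2;
protected static final long BLOCKSIZE = 1024;

protected final Path dir = new Path("/TestSnapshot");
protected final Path sub = new Path(dir, "sub1");

protected MiniDFSCluster cluster;
protected FSNamesystem fsn;
protected FSDirectory fsdir;
protected BlockManager blockmanager;
protected DistributedFileSystem hdfs;

@Before
public void setUp() throws Exception {
    Configuration conf = new Configuration();
    // bring up an in-process HDFS cluster with one datanode per replica
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
        .format(true).build();
    cluster.waitActive();
    fsn = cluster.getNamesystem();
    fsdir = fsn.getFSDirectory();
    blockmanager = fsn.getBlockManager();
    hdfs = cluster.getFileSystem();
}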

/**
   * Test deleting the earliest (first) snapshot. In this more complicated 
   * scenario, the snapshots are taken across directories.
   * <pre>
   * The test covers the following scenarios:
   * 1. delete the first diff in the diff list of a directory
   * 2. delete the first diff in the diff list of a file
   * </pre>
   * Also, the recursive cleanTree process should cover both INodeFile and 
   * INodeDirectory.
   */
@Test(timeout = 300000)
public void testDeleteEarliestSnapshot2() throws Exception {
    Path noChangeDir = new Path(sub, "noChangeDir");
    Path noChangeFile = new Path(noChangeDir, "noChangeFile");
    Path metaChangeFile = new Path(noChangeDir, "metaChangeFile");
    Path metaChangeDir = new Path(noChangeDir, "metaChangeDir");
    Path toDeleteFile = new Path(metaChangeDir, "toDeleteFile");
    DFSTestUtil.createFile(hdfs, noChangeFile, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.createFile(hdfs, metaChangeFile, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.createFile(hdfs, toDeleteFile, BLOCKSIZE, REPLICATION, seed);
    final INodeFile toDeleteFileNode = TestSnapshotBlocksMap.assertBlockCollection(toDeleteFile.toString(), 1, fsdir, blockmanager);
    BlockInfo[] blocks = toDeleteFileNode.getBlocks();
    // create snapshot s0 on dir
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
    checkQuotaUsageComputation(dir, 7, 3 * BLOCKSIZE * REPLICATION);
    // delete /TestSnapshot/sub/noChangeDir/metaChangeDir/toDeleteFile
    hdfs.delete(toDeleteFile, true);
    // the deletion adds diff of toDeleteFile and metaChangeDir
    checkQuotaUsageComputation(dir, 7, 3 * BLOCKSIZE * REPLICATION);
    // change metadata of /TestSnapshot/sub/noChangeDir/metaChangeDir and
    // /TestSnapshot/sub/noChangeDir/metaChangeFile
    hdfs.setReplication(metaChangeFile, REPLICATION_1);
    hdfs.setOwner(metaChangeDir, "unknown", "unknown");
    checkQuotaUsageComputation(dir, 7, 3 * BLOCKSIZE * REPLICATION);
    // create snapshot s1 on dir
    hdfs.createSnapshot(dir, "s1");
    checkQuotaUsageComputation(dir, 7, 3 * BLOCKSIZE * REPLICATION);
    // delete snapshot s0
    hdfs.deleteSnapshot(dir, "s0");
    // namespace: remove toDeleteFile and its diff, metaChangeFile's diff, 
    // metaChangeDir's diff, dir's diff. diskspace: remove toDeleteFile, and 
    // metaChangeFile's replication factor decreases
    checkQuotaUsageComputation(dir, 6, 2 * BLOCKSIZE * REPLICATION - BLOCKSIZE);
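    // with s0 deleted, toDeleteFile has no remaining reference (it existed
    // only in s0), so its blocks should no longer map back to any inode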
    for (BlockInfo b : blocks) {
        assertEquals(INVALID_INODE_ID, b.getBlockCollectionId());
    }
    // check 1. there is no snapshot s0
    final INodeDirectory dirNode = fsdir.getINode(dir.toString()).asDirectory();
    Snapshot snapshot0 = dirNode.getSnapshot(DFSUtil.string2Bytes("s0"));
    assertNull(snapshot0);
    Snapshot snapshot1 = dirNode.getSnapshot(DFSUtil.string2Bytes("s1"));
    DirectoryDiffList diffList = dirNode.getDiffs();
    assertEquals(1, diffList.asList().size());
    assertEquals(snapshot1.getId(), diffList.getLast().getSnapshotId());
    diffList = fsdir.getINode(metaChangeDir.toString()).asDirectory().getDiffs();
    assertEquals(0, diffList.asList().size());
    // check 2. noChangeDir and noChangeFile are still there
    final INodeDirectory noChangeDirNode = (INodeDirectory) fsdir.getINode(noChangeDir.toString());
    assertEquals(INodeDirectory.class, noChangeDirNode.getClass());
    final INodeFile noChangeFileNode = (INodeFile) fsdir.getINode(noChangeFile.toString());
    assertEquals(INodeFile.class, noChangeFileNode.getClass());
    TestSnapshotBlocksMap.assertBlockCollection(noChangeFile.toString(), 1, fsdir, blockmanager);
    // check 3: current metadata of metaChangeFile and metaChangeDir
    FileStatus status = hdfs.getFileStatus(metaChangeDir);
    assertEquals("unknown", status.getOwner());
    assertEquals("unknown", status.getGroup());
    status = hdfs.getFileStatus(metaChangeFile);
    assertEquals(REPLICATION_1, status.getReplication());
    TestSnapshotBlocksMap.assertBlockCollection(metaChangeFile.toString(), 1, fsdir, blockmanager);
    // check 4: no snapshot copy for toDeleteFile
    try {
        hdfs.getFileStatus(toDeleteFile);
        fail("should throw FileNotFoundException");
    } catch (FileNotFoundException e) {
        GenericTestUtils.assertExceptionContains("File does not exist: " + toDeleteFile.toString(), e);
    }
    final Path toDeleteFileInSnapshot = SnapshotTestHelper.getSnapshotPath(dir, "s0", toDeleteFile.toString().substring(dir.toString().length()));
    try {
        hdfs.getFileStatus(toDeleteFileInSnapshot);
        fail("should throw FileNotFoundException");
    } catch (FileNotFoundException e) {
        GenericTestUtils.assertExceptionContains("File does not exist: " + toDeleteFileInSnapshot.toString(), e);
    }
}
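For orientation, the snapshot operations this test drives are plain DistributedFileSystem API calls; SnapshotTestHelper.createSnapshot essentially wraps the same calls with extra verification. A minimal standalone sketch of the lifecycle (the paths and snapshot names here are illustrative):

// a directory must be made snapshottable before snapshots can be taken
hdfs.allowSnapshot(dir);
hdfs.createSnapshot(dir, "s0");

// snapshot contents stay readable under the reserved .snapshot path
FileStatus st = hdfs.getFileStatus(new Path(dir, ".snapshot/s0/sub1/noChangeFile"));

// deleting the earliest snapshot triggers the diff-list cleanup verified
// above: diffs are merged forward or dropped, and unreferenced blocks
// are reclaimed
hdfs.deleteSnapshot(dir, "s0");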
Also used :
Path (org.apache.hadoop.fs.Path)
FileStatus (org.apache.hadoop.fs.FileStatus)
FileNotFoundException (java.io.FileNotFoundException)
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)
INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory)
INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile)
DirectoryDiffList (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList)
Test (org.junit.Test)
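The list above is not exhaustive; the method additionally depends on roughly the following imports. The static-import location of INVALID_INODE_ID is an assumption (it may differ across Hadoop versions):

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.fail;
// assumed location of INVALID_INODE_ID; check your Hadoop version
import static org.apache.hadoop.hdfs.server.namenode.INodeId.INVALID_INODE_ID;

import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
import org.apache.hadoop.test.GenericTestUtils;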
