
Example 1 with QuotaCounts

Use of org.apache.hadoop.hdfs.server.namenode.QuotaCounts in the Apache Hadoop project.

The method cleanFile of the class FileWithSnapshotFeature:

public void cleanFile(INode.ReclaimContext reclaimContext, final INodeFile file, final int snapshotId, int priorSnapshotId, byte storagePolicyId) {
    if (snapshotId == Snapshot.CURRENT_STATE_ID) {
        // delete the current file while the file has snapshot feature
        if (!isCurrentFileDeleted()) {
            file.recordModification(priorSnapshotId);
            deleteCurrentFile();
        }
        final BlockStoragePolicy policy = reclaimContext.storagePolicySuite().getPolicy(storagePolicyId);
        QuotaCounts old = file.storagespaceConsumed(policy);
        collectBlocksAndClear(reclaimContext, file);
        QuotaCounts current = file.storagespaceConsumed(policy);
        reclaimContext.quotaDelta().add(old.subtract(current));
    } else {
        // delete the snapshot
        priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
        diffs.deleteSnapshotDiff(reclaimContext, snapshotId, priorSnapshotId, file);
    }
}
Also used: BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy), QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts)
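
The quota bookkeeping above relies on QuotaCounts being a simple additive value object: usage is measured before and after the blocks are collected, and the difference is credited back through reclaimContext.quotaDelta(). Below is a minimal standalone sketch of that delta computation, assuming the Builder/subtract API used in these examples; the numbers are invented, and in the Hadoop sources subtract mutates the receiver and returns it.

import org.apache.hadoop.hdfs.server.namenode.QuotaCounts;

public class QuotaDeltaSketch {
    public static void main(String[] args) {
        // usage before the deleted file's blocks are collected (made-up values)
        QuotaCounts old = new QuotaCounts.Builder()
                .nameSpace(1).storageSpace(3 * 1024).build();
        // usage after collectBlocksAndClear() has released some blocks
        QuotaCounts current = new QuotaCounts.Builder()
                .nameSpace(1).storageSpace(1024).build();
        // the freed space; this is the delta cleanFile() adds to quotaDelta()
        QuotaCounts freed = old.subtract(current);
        System.out.println(freed.getNameSpace());    // 0
        System.out.println(freed.getStorageSpace()); // 2048
    }
}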

Example 2 with QuotaCounts

Use of org.apache.hadoop.hdfs.server.namenode.QuotaCounts in the Apache Hadoop project.

The method computeQuotaUsage4CurrentDirectory of the class DirectoryWithSnapshotFeature:

public QuotaCounts computeQuotaUsage4CurrentDirectory(BlockStoragePolicySuite bsps, byte storagePolicyId) {
    final QuotaCounts counts = new QuotaCounts.Builder().build();
    for (DirectoryDiff d : diffs) {
        for (INode deleted : d.getChildrenDiff().getList(ListType.DELETED)) {
            final byte childPolicyId = deleted.getStoragePolicyIDForQuota(storagePolicyId);
            counts.add(deleted.computeQuotaUsage(bsps, childPolicyId, false, Snapshot.CURRENT_STATE_ID));
        }
    }
    return counts;
}
Also used: INode (org.apache.hadoop.hdfs.server.namenode.INode), QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts)
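
Each child in a diff's DELETED list exists only in snapshots, so its quota usage is charged against the current directory. The accumulation is just repeated add() on a zero-initialized QuotaCounts; a short sketch with made-up per-child namespace counts:

import org.apache.hadoop.hdfs.server.namenode.QuotaCounts;

public class QuotaAccumulationSketch {
    public static void main(String[] args) {
        // a fresh Builder yields a QuotaCounts of zero in every dimension
        QuotaCounts counts = new QuotaCounts.Builder().build();
        // pretend three deleted children reported these namespace counts
        for (long ns : new long[] { 1, 1, 2 }) {
            counts.add(new QuotaCounts.Builder().nameSpace(ns).build());
        }
        System.out.println(counts.getNameSpace()); // 4
    }
}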

Example 3 with QuotaCounts

Use of org.apache.hadoop.hdfs.server.namenode.QuotaCounts in the Apache Hadoop project.

The method testRenameDirAndDeleteSnapshot_4 of the class TestRenameWithSnapshots:

/**
   * After the following operations:
   * Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir
   * again -> delete snapshot s on dst tree
   * 
   * Make sure we only delete the snapshot s under the renamed dir.
   */
@Test
public void testRenameDirAndDeleteSnapshot_4() throws Exception {
    final Path sdir1 = new Path("/dir1");
    final Path sdir2 = new Path("/dir2");
    final Path foo = new Path(sdir1, "foo");
    final Path bar = new Path(foo, "bar");
    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
    hdfs.mkdirs(sdir2);
    SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
    SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
    final Path foo2 = new Path(sdir2, "foo");
    hdfs.rename(foo, foo2);
    // create two new files under foo2
    final Path bar2 = new Path(foo2, "bar2");
    DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
    final Path bar3 = new Path(foo2, "bar3");
    DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);
    // create a new snapshot on sdir2
    hdfs.createSnapshot(sdir2, "s3");
    // rename foo2 again
    hdfs.rename(foo2, foo);
    // delete snapshot s3
    hdfs.deleteSnapshot(sdir2, "s3");
    // check
    final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString()).asDirectory();
    // sdir1 + s1 + foo_s1 (foo) + foo (foo + s1 + bar~bar3)
    QuotaCounts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
    assertEquals(7, q1.getNameSpace());
    final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString()).asDirectory();
    QuotaCounts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
    assertEquals(1, q2.getNameSpace());
    final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", foo.getName());
    final INode fooRef = fsdir.getINode(foo_s1.toString());
    assertTrue(fooRef instanceof INodeReference.WithName);
    INodeReference.WithCount wc = (WithCount) fooRef.asReference().getReferredINode();
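    // 2 references: the WithName node preserved in snapshot s1, plus the
    // current DstReference (checked as fooRef2 below)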
    assertEquals(2, wc.getReferenceCount());
    INodeDirectory fooNode = wc.getReferredINode().asDirectory();
    ReadOnlyList<INode> children = fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
    assertEquals(3, children.size());
    assertEquals(bar.getName(), children.get(0).getLocalName());
    assertEquals(bar2.getName(), children.get(1).getLocalName());
    assertEquals(bar3.getName(), children.get(2).getLocalName());
    List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
    assertEquals(1, diffList.size());
    Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
    assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
    ChildrenDiff diff = diffList.get(0).getChildrenDiff();
    // bar2 and bar3 in the created list
    assertEquals(2, diff.getList(ListType.CREATED).size());
    assertEquals(0, diff.getList(ListType.DELETED).size());
    final INode fooRef2 = fsdir.getINode4Write(foo.toString());
    assertTrue(fooRef2 instanceof INodeReference.DstReference);
    INodeReference.WithCount wc2 = (WithCount) fooRef2.asReference().getReferredINode();
    assertSame(wc, wc2);
    assertSame(fooRef2, wc.getParentReference());
    restartClusterAndCheckImage(true);
}
Also used: Path (org.apache.hadoop.fs.Path), INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath), INode (org.apache.hadoop.hdfs.server.namenode.INode), WithCount (org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount), ChildrenDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff), INodeReference (org.apache.hadoop.hdfs.server.namenode.INodeReference), INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory), DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff), QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts), Test (org.junit.Test)
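
The assertions above read files back through snapshot paths such as /dir1/.snapshot/s1/foo. As far as I can tell, SnapshotTestHelper.getSnapshotPath is a convenience over the reserved .snapshot directory; a cluster-free sketch of the equivalent path construction (the equivalence is my reading of the helper, not something shown above):

import org.apache.hadoop.fs.Path;

public class SnapshotPathSketch {
    public static void main(String[] args) {
        Path sdir1 = new Path("/dir1");
        // believed equivalent to SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo")
        Path foo_s1 = new Path(sdir1, ".snapshot/s1/foo");
        System.out.println(foo_s1); // prints /dir1/.snapshot/s1/foo
    }
}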

Example 4 with QuotaCounts

Use of org.apache.hadoop.hdfs.server.namenode.QuotaCounts in the Apache Hadoop project.

The method testRenameExceedQuota of the class TestRenameWithSnapshots:

/**
   * Test the rename undo when quota of dst tree is exceeded after rename.
   */
@Test
public void testRenameExceedQuota() throws Exception {
    final Path test = new Path("/test");
    final Path dir1 = new Path(test, "dir1");
    final Path dir2 = new Path(test, "dir2");
    final Path sub_dir2 = new Path(dir2, "subdir");
    final Path subfile_dir2 = new Path(sub_dir2, "subfile");
    hdfs.mkdirs(dir1);
    DFSTestUtil.createFile(hdfs, subfile_dir2, BLOCKSIZE, REPL, SEED);
    final Path foo = new Path(dir1, "foo");
    DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
    SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
    SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
    // set ns quota of dir2 to 5, so the current remaining is 1 (it already
    // has dir2, sub_dir2, subfile_dir2, and s2)
    hdfs.setQuota(dir2, 5, Long.MAX_VALUE - 1);
    // rename /test/dir1/foo to /test/dir2/sub_dir2/subfile_dir2.
    // FSDirectory#verifyQuota4Rename will pass since foo is only counted
    // as 1 against the NS quota. The rename operation will succeed, while the
    // real usage of dir2 becomes 7 (dir2, s2 in dir2, sub_dir2, s2 in
    // sub_dir2, subfile_dir2 in the deleted list, the new subfile, and s1 in
    // the new subfile).
    hdfs.rename(foo, subfile_dir2, Rename.OVERWRITE);
    // check dir2
    INode dir2Node = fsdir.getINode4Write(dir2.toString());
    assertTrue(dir2Node.asDirectory().isSnapshottable());
    QuotaCounts counts = dir2Node.computeQuotaUsage(fsdir.getBlockStoragePolicySuite());
    assertEquals(4, counts.getNameSpace());
    assertEquals(BLOCKSIZE * REPL * 2, counts.getStorageSpace());
}
Also used: Path (org.apache.hadoop.fs.Path), INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath), INode (org.apache.hadoop.hdfs.server.namenode.INode), QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts), Test (org.junit.Test)
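
The test inspects quota internals through FSDirectory, but the same numbers are visible from the client side. A hedged sketch using the public getQuotaUsage API on DistributedFileSystem (available in Hadoop 2.8+; hdfs and dir2 stand in for the test fixture's handles, which need a running cluster):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class QuotaUsageSketch {
    // hdfs must point at a running cluster, as in the test fixture above
    static void printQuota(DistributedFileSystem hdfs, Path dir2) throws Exception {
        QuotaUsage qu = hdfs.getQuotaUsage(dir2);
        System.out.println(qu.getQuota());                 // 5, the ns quota set above
        System.out.println(qu.getFileAndDirectoryCount()); // current namespace usage
        System.out.println(qu.getSpaceConsumed());         // current storage usage
    }
}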

Example 5 with QuotaCounts

Use of org.apache.hadoop.hdfs.server.namenode.QuotaCounts in the Apache Hadoop project.

The method testRenameDirAndDeleteSnapshot_2 of the class TestRenameWithSnapshots:

/**
   * After rename, delete the snapshot in src
   */
@Test
public void testRenameDirAndDeleteSnapshot_2() throws Exception {
    final Path sdir1 = new Path("/dir1");
    final Path sdir2 = new Path("/dir2");
    hdfs.mkdirs(sdir1);
    hdfs.mkdirs(sdir2);
    final Path foo = new Path(sdir2, "foo");
    final Path bar = new Path(foo, "bar");
    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
    SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
    SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
    SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s3");
    final Path newfoo = new Path(sdir1, "foo");
    hdfs.rename(foo, newfoo);
    // restart the cluster and check fsimage
    restartClusterAndCheckImage(true);
    final Path bar2 = new Path(newfoo, "bar2");
    DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
    hdfs.createSnapshot(sdir1, "s4");
    hdfs.delete(newfoo, true);
    final Path bar2_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4", "foo/bar2");
    assertTrue(hdfs.exists(bar2_s4));
    final Path bar_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4", "foo/bar");
    assertTrue(hdfs.exists(bar_s4));
    // delete snapshot s4. The diff of s4 should be combined into s3
    hdfs.deleteSnapshot(sdir1, "s4");
    // restart the cluster and check fsimage
    restartClusterAndCheckImage(true);
    Path bar_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo/bar");
    assertFalse(hdfs.exists(bar_s3));
    bar_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo/bar");
    assertTrue(hdfs.exists(bar_s3));
    Path bar2_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo/bar2");
    assertFalse(hdfs.exists(bar2_s3));
    bar2_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo/bar2");
    assertFalse(hdfs.exists(bar2_s3));
    // delete snapshot s3
    hdfs.deleteSnapshot(sdir2, "s3");
    final Path bar_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo/bar");
    assertTrue(hdfs.exists(bar_s2));
    // check internal details
    INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
    Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
    final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo");
    INodeReference fooRef = fsdir.getINode(foo_s2.toString()).asReference();
    assertTrue(fooRef instanceof INodeReference.WithName);
    INodeReference.WithCount fooWC = (WithCount) fooRef.getReferredINode();
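    // only the WithName reference kept alive by sdir2's snapshot s2 remains;
    // the DstReference went away when newfoo was deleted above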
    assertEquals(1, fooWC.getReferenceCount());
    INodeDirectory fooDir = fooWC.getReferredINode().asDirectory();
    List<DirectoryDiff> diffs = fooDir.getDiffs().asList();
    assertEquals(1, diffs.size());
    assertEquals(s2.getId(), diffs.get(0).getSnapshotId());
    // restart the cluster and check fsimage
    restartClusterAndCheckImage(true);
    // delete snapshot s2.
    hdfs.deleteSnapshot(sdir2, "s2");
    assertFalse(hdfs.exists(bar_s2));
    restartClusterAndCheckImage(true);
    // make sure the whole referred subtree has been destroyed
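    // expect 3 = the root directory "/" itself + /dir1 + /dir2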
    QuotaCounts q = fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();
    assertEquals(3, q.getNameSpace());
    assertEquals(0, q.getStorageSpace());
    hdfs.deleteSnapshot(sdir1, "s1");
    restartClusterAndCheckImage(true);
    q = fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();
    assertEquals(3, q.getNameSpace());
    assertEquals(0, q.getStorageSpace());
}
Also used: Path (org.apache.hadoop.fs.Path), INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath), INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory), DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff), WithCount (org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount), QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts), INodeReference (org.apache.hadoop.hdfs.server.namenode.INodeReference), Test (org.junit.Test)

Aggregations

QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts) 12
INode (org.apache.hadoop.hdfs.server.namenode.INode) 8
Test (org.junit.Test) 7
Path (org.apache.hadoop.fs.Path) 6
INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory) 6
INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath) 6
DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff) 5
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) 3
INodeReference (org.apache.hadoop.hdfs.server.namenode.INodeReference) 3
WithCount (org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount) 3
NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException) 2
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) 2
FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory) 2
INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile) 2
ChildrenDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff) 2
IOException (java.io.IOException) 1
ArrayList (java.util.ArrayList) 1
StorageType (org.apache.hadoop.fs.StorageType) 1
Block (org.apache.hadoop.hdfs.protocol.Block) 1
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) 1