Search in sources :

Example 6 with QuotaCounts

use of org.apache.hadoop.hdfs.server.namenode.QuotaCounts in project hadoop by apache.

From the class DirectoryWithSnapshotFeature, method cleanDirectory.

/**
 * Clean the snapshot-related state of {@code currentINode} when the snapshot
 * with id {@code snapshot} is deleted (or when the directory itself is
 * deleted), and record the resulting change in quota usage for the directory.
 *
 * @param reclaimContext accumulates the quota delta, collected blocks and
 *                       removed inodes produced by the deletion
 * @param currentINode   the directory this snapshot feature belongs to
 * @param snapshot       id of the snapshot being deleted, or
 *                       Snapshot.CURRENT_STATE_ID when the current (live)
 *                       directory is being deleted
 * @param prior          id of the snapshot preceding the one being deleted
 *                       (recomputed below via updatePrior), or NO_SNAPSHOT_ID
 */
public void cleanDirectory(INode.ReclaimContext reclaimContext, final INodeDirectory currentINode, final int snapshot, int prior) {
    Map<INode, INode> priorCreated = null;
    Map<INode, INode> priorDeleted = null;
    // Remember the quota counts before cleaning so the net delta contributed
    // by this directory can be computed at the end of the method.
    QuotaCounts old = reclaimContext.quotaDelta().getCountsCopy();
    if (snapshot == Snapshot.CURRENT_STATE_ID) {
        // Deleting the current directory (not a snapshot): record the
        // modification against the prior snapshot first.
        currentINode.recordModification(prior);
        // Destroy everything in the created list of the last diff, then
        // recursively clean the subtree.
        DirectoryDiff lastDiff = diffs.getLast();
        if (lastDiff != null) {
            lastDiff.diff.destroyCreatedList(reclaimContext, currentINode);
        }
        currentINode.cleanSubtreeRecursively(reclaimContext, snapshot, prior, null);
    } else {
        // Deleting a specific snapshot: recompute prior for this snapshot.
        prior = getDiffs().updatePrior(snapshot, prior);
        // Clone prior's original created and deleted lists BEFORE the diff
        // deletion below may merge diffs and mutate those lists; the clones
        // are consulted after the deletion to decide what to clean.
        if (prior != NO_SNAPSHOT_ID) {
            DirectoryDiff priorDiff = this.getDiffs().getDiffById(prior);
            if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
                List<INode> cList = priorDiff.diff.getList(ListType.CREATED);
                List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
                priorCreated = cloneDiffList(cList);
                priorDeleted = cloneDiffList(dList);
            }
        }
        getDiffs().deleteSnapshotDiff(reclaimContext, snapshot, prior, currentINode);
        currentINode.cleanSubtreeRecursively(reclaimContext, snapshot, prior, priorDeleted);
        // Look up the prior diff again: it may have been created or changed
        // during the diff deletion or the cleanSubtreeRecursively call above.
        if (prior != NO_SNAPSHOT_ID) {
            DirectoryDiff priorDiff = this.getDiffs().getDiffById(prior);
            if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
                if (priorCreated != null) {
                    // we only clean nodes that were originally in prior's
                    // created list (per the clone taken above)
                    for (INode cNode : priorDiff.getChildrenDiff().getList(ListType.CREATED)) {
                        if (priorCreated.containsKey(cNode)) {
                            cNode.cleanSubtree(reclaimContext, snapshot, NO_SNAPSHOT_ID);
                        }
                    }
                }
                // Clean deleted nodes that were NOT in prior's original
                // deleted list (i.e. added to it during the deletion above).
                for (INode dNode : priorDiff.getChildrenDiff().getList(ListType.DELETED)) {
                    if (priorDeleted == null || !priorDeleted.containsKey(dNode)) {
                        cleanDeletedINode(reclaimContext, dNode, snapshot, prior);
                    }
                }
            }
        }
    }
    // Net quota change produced by this clean-up = current delta - old delta;
    // attribute it to this directory if it has a quota set.
    QuotaCounts current = reclaimContext.quotaDelta().getCountsCopy();
    current.subtract(old);
    if (currentINode.isQuotaSet()) {
        reclaimContext.quotaDelta().addQuotaDirUpdate(currentINode, current);
    }
}
Also used : INode(org.apache.hadoop.hdfs.server.namenode.INode) QuotaCounts(org.apache.hadoop.hdfs.server.namenode.QuotaCounts)

Example 7 with QuotaCounts

use of org.apache.hadoop.hdfs.server.namenode.QuotaCounts in project hadoop by apache.

From the class FileWithSnapshotFeature, method updateQuotaAndCollectBlocks.

/**
 * Update the quota delta in {@code reclaimContext} and collect block updates
 * when the file diff {@code removed} is removed from this file's diff list.
 *
 * @param reclaimContext accumulates the quota delta, blocks to delete and
 *                       replication-factor updates
 * @param file           the file the removed diff belongs to
 * @param removed        the file diff being removed
 */
public void updateQuotaAndCollectBlocks(INode.ReclaimContext reclaimContext, INodeFile file, FileDiff removed) {
    byte storagePolicyID = file.getStoragePolicyID();
    BlockStoragePolicy bsp = null;
    // Resolve the storage policy only when one is explicitly set; bsp == null
    // means no per-storage-type accounting below.
    if (storagePolicyID != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
        bsp = reclaimContext.storagePolicySuite().getPolicy(file.getStoragePolicyID());
    }
    QuotaCounts oldCounts;
    if (removed.snapshotINode != null) {
        // The removed diff carries a snapshot copy of the inode: rebuild the
        // old space usage from the current block list.
        oldCounts = new QuotaCounts.Builder().build();
        BlockInfo[] blocks = file.getBlocks() == null ? new BlockInfo[0] : file.getBlocks();
        for (BlockInfo b : blocks) {
            short replication = b.getReplication();
            // Incomplete blocks are charged at the preferred block size.
            long blockSize = b.isComplete() ? b.getNumBytes() : file.getPreferredBlockSize();
            oldCounts.addStorageSpace(blockSize * replication);
            if (bsp != null) {
                // Charge each quota-enabled storage type the policy chooses
                // for this replication factor.
                List<StorageType> oldTypeChosen = bsp.chooseStorageTypes(replication);
                for (StorageType t : oldTypeChosen) {
                    if (t.supportTypeQuota()) {
                        oldCounts.addTypeSpace(t, blockSize);
                    }
                }
            }
        }
        // Release the snapshot copy's reference to its ACL feature, if any.
        AclFeature aclFeature = removed.getSnapshotINode().getAclFeature();
        if (aclFeature != null) {
            AclStorage.removeAclFeature(aclFeature);
        }
    } else {
        // No snapshot copy: old usage is simply the file's current usage
        // (computed without a storage policy).
        oldCounts = file.storagespaceConsumed(null);
    }
    getDiffs().combineAndCollectSnapshotBlocks(reclaimContext, file, removed);
    if (file.getBlocks() != null) {
        // With the diff gone, the effective replication may change: it is the
        // max of the file's preferred replication and the max replication
        // still recorded in the remaining diffs. Schedule updates for blocks
        // whose stored replication differs.
        short replInDiff = getMaxBlockRepInDiffs(removed);
        short repl = (short) Math.max(file.getPreferredBlockReplication(), replInDiff);
        for (BlockInfo b : file.getBlocks()) {
            if (repl != b.getReplication()) {
                reclaimContext.collectedBlocks().addUpdateReplicationFactor(b, repl);
            }
        }
    }
    // Quota delta = old - current, i.e. the space freed by removing the diff.
    // NOTE(review): this relies on QuotaCounts.subtract mutating oldCounts
    // and returning it -- confirm against the QuotaCounts class.
    QuotaCounts current = file.storagespaceConsumed(bsp);
    reclaimContext.quotaDelta().add(oldCounts.subtract(current));
}
Also used : StorageType(org.apache.hadoop.fs.StorageType) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) QuotaCounts(org.apache.hadoop.hdfs.server.namenode.QuotaCounts) AclFeature(org.apache.hadoop.hdfs.server.namenode.AclFeature)

Example 8 with QuotaCounts

use of org.apache.hadoop.hdfs.server.namenode.QuotaCounts in project hadoop by apache.

From the class TestFileWithSnapshotFeature, method testUpdateQuotaAndCollectBlocks.

/**
 * Tests that {@code FileWithSnapshotFeature#updateQuotaAndCollectBlocks}
 * computes the correct storage-space and storage-type quota deltas: first
 * when the removed diff has no snapshot inode (delta must be zero), then
 * when the inode exists only in the snapshot with a higher replication and
 * a different storage type.
 *
 * Fix: removed the unused local {@code BlockManager bm} mock, which was
 * created but never referenced.
 */
@Test
public void testUpdateQuotaAndCollectBlocks() {
    FileDiffList diffs = new FileDiffList();
    FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffs);
    FileDiff diff = mock(FileDiff.class);
    BlockStoragePolicySuite bsps = mock(BlockStoragePolicySuite.class);
    BlockStoragePolicy bsp = mock(BlockStoragePolicy.class);
    BlockInfo[] blocks = new BlockInfo[] { new BlockInfoContiguous(new Block(1, BLOCK_SIZE, 1), REPL_1) };
    // Case 1: no snapshot inode in the removed diff -- old and current space
    // usage are identical, so the accumulated quota delta must be zero.
    INodeFile file = mock(INodeFile.class);
    when(file.getFileWithSnapshotFeature()).thenReturn(sf);
    when(file.getBlocks()).thenReturn(blocks);
    when(file.getStoragePolicyID()).thenReturn((byte) 1);
    // Encode replication REPL_1 into the inode header (replication bits).
    Whitebox.setInternalState(file, "header", (long) REPL_1 << 48);
    when(file.getPreferredBlockReplication()).thenReturn(REPL_1);
    when(bsps.getPolicy(anyByte())).thenReturn(bsp);
    INode.BlocksMapUpdateInfo collectedBlocks = mock(INode.BlocksMapUpdateInfo.class);
    ArrayList<INode> removedINodes = new ArrayList<>();
    INode.ReclaimContext ctx = new INode.ReclaimContext(bsps, collectedBlocks, removedINodes, null);
    sf.updateQuotaAndCollectBlocks(ctx, file, diff);
    QuotaCounts counts = ctx.quotaDelta().getCountsCopy();
    Assert.assertEquals(0, counts.getStorageSpace());
    Assert.assertTrue(counts.getTypeSpaces().allLessOrEqual(0));
    // Case 2: the inode only exists in the snapshot, with replication REPL_3
    // stored on DISK, while the current file uses REPL_1 on SSD.
    INodeFile snapshotINode = mock(INodeFile.class);
    Whitebox.setInternalState(snapshotINode, "header", (long) REPL_3 << 48);
    Whitebox.setInternalState(diff, "snapshotINode", snapshotINode);
    when(diff.getSnapshotINode()).thenReturn(snapshotINode);
    when(bsp.chooseStorageTypes(REPL_1)).thenReturn(Lists.newArrayList(SSD));
    when(bsp.chooseStorageTypes(REPL_3)).thenReturn(Lists.newArrayList(DISK));
    blocks[0].setReplication(REPL_3);
    sf.updateQuotaAndCollectBlocks(ctx, file, diff);
    counts = ctx.quotaDelta().getCountsCopy();
    // Freed space: (REPL_3 - REPL_1) block replicas; DISK usage goes down by
    // one block (positive delta), SSD usage goes up (negative delta).
    Assert.assertEquals((REPL_3 - REPL_1) * BLOCK_SIZE, counts.getStorageSpace());
    Assert.assertEquals(BLOCK_SIZE, counts.getTypeSpaces().get(DISK));
    Assert.assertEquals(-BLOCK_SIZE, counts.getTypeSpaces().get(SSD));
}
Also used : BlockStoragePolicySuite(org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite) BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) INode(org.apache.hadoop.hdfs.server.namenode.INode) ArrayList(java.util.ArrayList) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) Block(org.apache.hadoop.hdfs.protocol.Block) QuotaCounts(org.apache.hadoop.hdfs.server.namenode.QuotaCounts) Test(org.junit.Test)

Example 9 with QuotaCounts

use of org.apache.hadoop.hdfs.server.namenode.QuotaCounts in project hadoop by apache.

From the class TestRenameWithSnapshots, method testRenameDirAndDeleteSnapshot_3.

/**
 * Verifies that after the sequence: rename a dir -&gt; create snapshot s on the
 * destination tree -&gt; delete the renamed dir -&gt; delete snapshot s, everything
 * created under the renamed dir after the rename is destroyed, and the renamed
 * dir remains reachable only through the source tree's earlier snapshot.
 */
@Test
public void testRenameDirAndDeleteSnapshot_3() throws Exception {
    final Path srcRoot = new Path("/dir1");
    final Path dstRoot = new Path("/dir2");
    final Path fooSrc = new Path(srcRoot, "foo");
    final Path barFile = new Path(fooSrc, "bar");
    DFSTestUtil.createFile(hdfs, barFile, BLOCKSIZE, REPL, SEED);
    hdfs.mkdirs(dstRoot);
    SnapshotTestHelper.createSnapshot(hdfs, srcRoot, "s1");
    SnapshotTestHelper.createSnapshot(hdfs, dstRoot, "s2");
    // move foo from dir1 into dir2
    final Path fooDst = new Path(dstRoot, "foo");
    hdfs.rename(fooSrc, fooDst);
    // add two brand-new files under the renamed directory
    final Path newBar2 = new Path(fooDst, "bar2");
    DFSTestUtil.createFile(hdfs, newBar2, BLOCKSIZE, REPL, SEED);
    final Path newBar3 = new Path(fooDst, "bar3");
    DFSTestUtil.createFile(hdfs, newBar3, BLOCKSIZE, REPL, SEED);
    // snapshot the destination tree, then remove the renamed dir and the
    // snapshot that captured it
    hdfs.createSnapshot(dstRoot, "s3");
    hdfs.delete(fooDst, true);
    hdfs.deleteSnapshot(dstRoot, "s3");
    // verify namespace usage on both trees
    final INodeDirectory srcRootNode = fsdir.getINode4Write(srcRoot.toString()).asDirectory();
    QuotaCounts srcQuota = srcRootNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
    assertEquals(3, srcQuota.getNameSpace());
    final INodeDirectory dstRootNode = fsdir.getINode4Write(dstRoot.toString()).asDirectory();
    QuotaCounts dstQuota = dstRootNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
    assertEquals(1, dstQuota.getNameSpace());
    // foo must survive only via snapshot s1 on the source tree, as a
    // WithName reference with a single remaining referrer
    final Path fooInS1 = SnapshotTestHelper.getSnapshotPath(srcRoot, "s1", fooSrc.getName());
    INode fooSnapshotRef = fsdir.getINode(fooInS1.toString());
    assertTrue(fooSnapshotRef instanceof INodeReference.WithName);
    INodeReference.WithCount refCount = (WithCount) fooSnapshotRef.asReference().getReferredINode();
    assertEquals(1, refCount.getReferenceCount());
    // bar should be the only child left; bar2/bar3 must be gone
    INodeDirectory referredFoo = refCount.getReferredINode().asDirectory();
    ReadOnlyList<INode> fooChildren = referredFoo.getChildrenList(Snapshot.CURRENT_STATE_ID);
    assertEquals(1, fooChildren.size());
    assertEquals(barFile.getName(), fooChildren.get(0).getLocalName());
    // exactly one diff remains, associated with snapshot s1 and empty
    List<DirectoryDiff> fooDiffs = referredFoo.getDiffs().asList();
    assertEquals(1, fooDiffs.size());
    Snapshot snapshotS1 = srcRootNode.getSnapshot(DFSUtil.string2Bytes("s1"));
    assertEquals(snapshotS1.getId(), fooDiffs.get(0).getSnapshotId());
    ChildrenDiff childrenDiff = fooDiffs.get(0).getChildrenDiff();
    assertEquals(0, childrenDiff.getList(ListType.CREATED).size());
    assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
    restartClusterAndCheckImage(true);
}
Also used : Path(org.apache.hadoop.fs.Path) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath) INode(org.apache.hadoop.hdfs.server.namenode.INode) WithCount(org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount) ChildrenDiff(org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff) INodeReference(org.apache.hadoop.hdfs.server.namenode.INodeReference) INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) DirectoryDiff(org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff) WithCount(org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount) QuotaCounts(org.apache.hadoop.hdfs.server.namenode.QuotaCounts) Test(org.junit.Test)

Example 10 with QuotaCounts

use of org.apache.hadoop.hdfs.server.namenode.QuotaCounts in project hadoop by apache.

From the class TestRenameWithSnapshots, method testRenameUndo_6.

/**
 * Tests that a rename is correctly undone when removing the destination node
 * fails partway through: the source subtree is restored and the destination
 * tree is left untouched.
 */
@Test
public void testRenameUndo_6() throws Exception {
    final Path testRoot = new Path("/test");
    final Path srcDir = new Path(testRoot, "dir1");
    final Path dstDir = new Path(testRoot, "dir2");
    final Path dstSub = new Path(dstDir, "subdir");
    final Path dstSubSub = new Path(dstSub, "subdir");
    hdfs.mkdirs(srcDir);
    hdfs.mkdirs(dstSubSub);
    final Path fooDir = new Path(srcDir, "foo");
    hdfs.mkdirs(fooDir);
    SnapshotTestHelper.createSnapshot(hdfs, srcDir, "s1");
    SnapshotTestHelper.createSnapshot(hdfs, dstDir, "s2");
    // dir2 already holds itself, subdir and subdir/subdir, so an ns quota of
    // 4 leaves room for exactly one more inode
    hdfs.setQuota(dstDir, 4, Long.MAX_VALUE - 1);
    // make removal of the last inode on the destination path blow up
    FSDirectory spiedDir = Mockito.spy(fsdir);
    Mockito.doThrow(new RuntimeException("fake exception")).when(spiedDir).removeLastINode((INodesInPath) Mockito.anyObject());
    Whitebox.setInternalState(fsn, "dir", spiedDir);
    // the overwrite rename of foo onto dstSubSub must fail with the
    // injected exception
    try {
        hdfs.rename(fooDir, dstSubSub, Rename.OVERWRITE);
        fail("Expect QuotaExceedException");
    } catch (Exception e) {
        GenericTestUtils.assertExceptionContains("fake exception", e);
    }
    // the source side must be fully restored
    assertTrue(hdfs.exists(fooDir));
    INodeDirectory srcDirNode = spiedDir.getINode4Write(srcDir.toString()).asDirectory();
    List<INode> srcChildren = ReadOnlyList.Util.asList(srcDirNode.getChildrenList(Snapshot.CURRENT_STATE_ID));
    assertEquals(1, srcChildren.size());
    INode restoredFoo = srcChildren.get(0);
    assertTrue(restoredFoo.asDirectory().isWithSnapshot());
    assertSame(srcDirNode, restoredFoo.getParent());
    // dir1's single diff must record no created/deleted children
    List<DirectoryDiff> srcDiffs = srcDirNode.getDiffs().asList();
    assertEquals(1, srcDiffs.size());
    DirectoryDiff srcDiff = srcDiffs.get(0);
    assertTrue(srcDiff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
    assertTrue(srcDiff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
    // the destination side must be untouched
    INodeDirectory dstDirNode = spiedDir.getINode4Write(dstDir.toString()).asDirectory();
    assertTrue(dstDirNode.isSnapshottable());
    QuotaCounts dstUsage = dstDirNode.computeQuotaUsage(fsdir.getBlockStoragePolicySuite());
    assertEquals(3, dstUsage.getNameSpace());
    assertEquals(0, dstUsage.getStorageSpace());
    List<INode> dstChildren = ReadOnlyList.Util.asList(dstDirNode.asDirectory().getChildrenList(Snapshot.CURRENT_STATE_ID));
    assertEquals(1, dstChildren.size());
    INode subNode = dstChildren.get(0);
    assertSame(dstDirNode, subNode.getParent());
    assertSame(subNode, spiedDir.getINode4Write(dstSub.toString()));
    // subdir/subdir stayed a plain directory under subdir
    INode subSubNode = spiedDir.getINode4Write(dstSubSub.toString());
    assertTrue(subSubNode.getClass() == INodeDirectory.class);
    assertSame(subNode, subSubNode.getParent());
    // dir2's single diff must likewise be empty
    List<DirectoryDiff> dstDiffs = dstDirNode.getDiffs().asList();
    assertEquals(1, dstDiffs.size());
    DirectoryDiff dstDiff = dstDiffs.get(0);
    assertTrue(dstDiff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
    assertTrue(dstDiff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
Also used : Path(org.apache.hadoop.fs.Path) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath) INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) INode(org.apache.hadoop.hdfs.server.namenode.INode) DirectoryDiff(org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff) QuotaCounts(org.apache.hadoop.hdfs.server.namenode.QuotaCounts) FSDirectory(org.apache.hadoop.hdfs.server.namenode.FSDirectory) NSQuotaExceededException(org.apache.hadoop.hdfs.protocol.NSQuotaExceededException) IOException(java.io.IOException) Test(org.junit.Test)

Aggregations

QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts)12 INode (org.apache.hadoop.hdfs.server.namenode.INode)8 Test (org.junit.Test)7 Path (org.apache.hadoop.fs.Path)6 INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory)6 INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath)6 DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff)5 BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy)3 INodeReference (org.apache.hadoop.hdfs.server.namenode.INodeReference)3 WithCount (org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount)3 NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException)2 BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)2 FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory)2 INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile)2 ChildrenDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff)2 IOException (java.io.IOException)1 ArrayList (java.util.ArrayList)1 StorageType (org.apache.hadoop.fs.StorageType)1 Block (org.apache.hadoop.hdfs.protocol.Block)1 BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous)1