Example 11 with BlockStoragePolicySuite

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite in project hadoop by apache.

From the class TestStoragePolicySummary, method testSortInDescendingOrder.

@Test
public void testSortInDescendingOrder() {
    BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
    StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
    BlockStoragePolicy hot = bsps.getPolicy("HOT");
    BlockStoragePolicy warm = bsps.getPolicy("WARM");
    BlockStoragePolicy cold = bsps.getPolicy("COLD");
    //DISK:3
    sts.add(new StorageType[] { StorageType.DISK, StorageType.DISK, StorageType.DISK }, hot);
    sts.add(new StorageType[] { StorageType.DISK, StorageType.DISK, StorageType.DISK }, hot);
    //DISK:1,ARCHIVE:2
    sts.add(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE, StorageType.ARCHIVE }, warm);
    sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.DISK, StorageType.ARCHIVE }, warm);
    sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.DISK }, warm);
    //ARCHIVE:3
    sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE }, cold);
    sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE }, cold);
    sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE }, cold);
    sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE }, cold);
    Map<String, Long> actualOutput = convertToStringMap(sts);
    Assert.assertEquals(3, actualOutput.size());
    Map<String, Long> expectedOutput = new LinkedHashMap<>();
    expectedOutput.put("COLD|ARCHIVE:3(COLD)", 4l);
    expectedOutput.put("WARM|DISK:1,ARCHIVE:2(WARM)", 3l);
    expectedOutput.put("HOT|DISK:3(HOT)", 2l);
    Assert.assertEquals(expectedOutput.toString(), actualOutput.toString());
}
Also used : BlockStoragePolicySuite(org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) LinkedHashMap(java.util.LinkedHashMap) Test(org.junit.Test)
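
A note on the policies resolved above: HOT, WARM and COLD are three of the built-in policies in the default suite, and their preferred replica layouts are exactly the DISK:3, DISK:1,ARCHIVE:2 and ARCHIVE:3 combinations the test feeds into the summary. As orientation, here is a minimal sketch (using only public methods of these classes; running it requires the HDFS server jars on the classpath, since BlockStoragePolicySuite is server-side code) that prints every default policy with the storage types it places replicas on.

import java.util.Arrays;

import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

// Sketch: list the default policies and their preferred storage types.
public class PrintDefaultPolicies {
    public static void main(String[] args) {
        BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
        for (BlockStoragePolicy policy : suite.getAllPolicies()) {
            System.out.println(policy.getId() + " " + policy.getName() + " -> "
                + Arrays.toString(policy.getStorageTypes()));
        }
    }
}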

Example 12 with BlockStoragePolicySuite

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite in project hadoop by apache.

From the class TestStoragePolicySummary, method testDifferentSpecifiedPolicies.

@Test
public void testDifferentSpecifiedPolicies() {
    BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
    StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
    BlockStoragePolicy hot = bsps.getPolicy("HOT");
    BlockStoragePolicy warm = bsps.getPolicy("WARM");
    BlockStoragePolicy cold = bsps.getPolicy("COLD");
    //DISK:3
    sts.add(new StorageType[] { StorageType.DISK, StorageType.DISK, StorageType.DISK }, hot);
    sts.add(new StorageType[] { StorageType.DISK, StorageType.DISK, StorageType.DISK }, hot);
    sts.add(new StorageType[] { StorageType.DISK, StorageType.DISK, StorageType.DISK }, warm);
    sts.add(new StorageType[] { StorageType.DISK, StorageType.DISK, StorageType.DISK }, cold);
    //DISK:1,ARCHIVE:2
    sts.add(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE, StorageType.ARCHIVE }, hot);
    sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.DISK, StorageType.ARCHIVE }, warm);
    sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.DISK }, cold);
    sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.DISK }, cold);
    //ARCHIVE:3
    sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE }, hot);
    sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE }, hot);
    sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE }, warm);
    sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE }, cold);
    Map<String, Long> actualOutput = convertToStringMap(sts);
    Assert.assertEquals(9, actualOutput.size());
    Map<String, Long> expectedOutput = new HashMap<>();
    expectedOutput.put("HOT|DISK:3(HOT)", 2l);
    expectedOutput.put("COLD|DISK:1,ARCHIVE:2(WARM)", 2l);
    expectedOutput.put("HOT|ARCHIVE:3(COLD)", 2l);
    expectedOutput.put("WARM|DISK:3(HOT)", 1l);
    expectedOutput.put("COLD|DISK:3(HOT)", 1l);
    expectedOutput.put("WARM|ARCHIVE:3(COLD)", 1l);
    expectedOutput.put("WARM|DISK:1,ARCHIVE:2(WARM)", 1l);
    expectedOutput.put("COLD|ARCHIVE:3(COLD)", 1l);
    expectedOutput.put("HOT|DISK:1,ARCHIVE:2(WARM)", 1l);
    Assert.assertEquals(expectedOutput, actualOutput);
}
Also used : BlockStoragePolicySuite(org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) Test(org.junit.Test)
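
Each expected key above pairs the policy a file was tagged with against the replica layout actually observed, so "COLD|DISK:1,ARCHIVE:2(WARM)" reads as: files tagged COLD whose replicas sit in the layout WARM would have produced. The layout a policy produces for a given replication factor can be checked directly; a minimal sketch (chooseStorageTypes is a real method on BlockStoragePolicy, while the exact printed ordering is an assumption):

import java.util.List;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

// Sketch: which storage types would each policy pick for a 3-replica file?
// Expected for the default suite: HOT -> [DISK, DISK, DISK],
// WARM -> [DISK, ARCHIVE, ARCHIVE], COLD -> [ARCHIVE, ARCHIVE, ARCHIVE].
public class ChooseStorageTypesSketch {
    public static void main(String[] args) {
        BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
        for (String name : new String[] { "HOT", "WARM", "COLD" }) {
            List<StorageType> chosen = suite.getPolicy(name).chooseStorageTypes((short) 3);
            System.out.println(name + " -> " + chosen);
        }
    }
}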

Example 13 with BlockStoragePolicySuite

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite in project hadoop by apache.

From the class TestFileWithSnapshotFeature, method testUpdateQuotaAndCollectBlocks.

@Test
public void testUpdateQuotaAndCollectBlocks() {
    FileDiffList diffs = new FileDiffList();
    FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffs);
    FileDiff diff = mock(FileDiff.class);
    BlockStoragePolicySuite bsps = mock(BlockStoragePolicySuite.class);
    BlockStoragePolicy bsp = mock(BlockStoragePolicy.class);
    BlockInfo[] blocks = new BlockInfo[] { new BlockInfoContiguous(new Block(1, BLOCK_SIZE, 1), REPL_1) };
    BlockManager bm = mock(BlockManager.class);
    // No snapshot
    INodeFile file = mock(INodeFile.class);
    when(file.getFileWithSnapshotFeature()).thenReturn(sf);
    when(file.getBlocks()).thenReturn(blocks);
    when(file.getStoragePolicyID()).thenReturn((byte) 1);
    Whitebox.setInternalState(file, "header", (long) REPL_1 << 48);
    when(file.getPreferredBlockReplication()).thenReturn(REPL_1);
    when(bsps.getPolicy(anyByte())).thenReturn(bsp);
    INode.BlocksMapUpdateInfo collectedBlocks = mock(INode.BlocksMapUpdateInfo.class);
    ArrayList<INode> removedINodes = new ArrayList<>();
    INode.ReclaimContext ctx = new INode.ReclaimContext(bsps, collectedBlocks, removedINodes, null);
    sf.updateQuotaAndCollectBlocks(ctx, file, diff);
    QuotaCounts counts = ctx.quotaDelta().getCountsCopy();
    Assert.assertEquals(0, counts.getStorageSpace());
    Assert.assertTrue(counts.getTypeSpaces().allLessOrEqual(0));
    // INode only exists in the snapshot
    INodeFile snapshotINode = mock(INodeFile.class);
    Whitebox.setInternalState(snapshotINode, "header", (long) REPL_3 << 48);
    Whitebox.setInternalState(diff, "snapshotINode", snapshotINode);
    when(diff.getSnapshotINode()).thenReturn(snapshotINode);
    when(bsp.chooseStorageTypes(REPL_1)).thenReturn(Lists.newArrayList(SSD));
    when(bsp.chooseStorageTypes(REPL_3)).thenReturn(Lists.newArrayList(DISK));
    blocks[0].setReplication(REPL_3);
    sf.updateQuotaAndCollectBlocks(ctx, file, diff);
    counts = ctx.quotaDelta().getCountsCopy();
    Assert.assertEquals((REPL_3 - REPL_1) * BLOCK_SIZE, counts.getStorageSpace());
    Assert.assertEquals(BLOCK_SIZE, counts.getTypeSpaces().get(DISK));
    Assert.assertEquals(-BLOCK_SIZE, counts.getTypeSpaces().get(SSD));
}
Also used : BlockStoragePolicySuite(org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite) BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) INode(org.apache.hadoop.hdfs.server.namenode.INode) ArrayList(java.util.ArrayList) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) Block(org.apache.hadoop.hdfs.protocol.Block) QuotaCounts(org.apache.hadoop.hdfs.server.namenode.QuotaCounts) Test(org.junit.Test)
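
The closing assertions encode the quota arithmetic: the block's replication rises from REPL_1 to REPL_3, so raw storage space grows by (REPL_3 - REPL_1) * BLOCK_SIZE, and because the mocked policy chooses SSD at REPL_1 but DISK at REPL_3, one block's worth of usage moves between the per-type counters. A worked sketch of that arithmetic (REPL_1 = 1, REPL_3 = 3 and BLOCK_SIZE = 1024 are illustrative assumptions here; the real constants are defined elsewhere in TestFileWithSnapshotFeature):

// Sketch of the expected quota deltas, with assumed constants.
public class QuotaDeltaSketch {
    public static void main(String[] args) {
        final int REPL_1 = 1, REPL_3 = 3; // assumed replication factors
        final long BLOCK_SIZE = 1024;     // assumed block size

        // Replication change: (3 - 1) * 1024 = 2048 bytes of extra raw space.
        long storageSpaceDelta = (long) (REPL_3 - REPL_1) * BLOCK_SIZE;
        // Storage-type change: one block is now charged to DISK instead of SSD.
        long diskDelta = BLOCK_SIZE;  // matches counts.getTypeSpaces().get(DISK)
        long ssdDelta = -BLOCK_SIZE;  // matches counts.getTypeSpaces().get(SSD)

        System.out.printf("space=+%d disk=+%d ssd=%d%n",
            storageSpaceDelta, diskDelta, ssdDelta);
    }
}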

Example 14 with BlockStoragePolicySuite

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite in project hadoop by apache.

From the class FSDirRenameOp, method unprotectedRenameTo.

/**
   * Rename src to dst.
   * See {@link DistributedFileSystem#rename(Path, Path, Options.Rename...)}
   * for details related to rename semantics and exceptions.
   *
   * @param fsd             FSDirectory
   * @param srcIIP          resolved source path (INodesInPath)
   * @param dstIIP          resolved destination path (INodesInPath)
   * @param timestamp       modification time
   * @param collectedBlocks blocks to be removed
   * @param options         Rename options
   * @return whether a file/directory gets overwritten in the dst path
   */
static RenameResult unprotectedRenameTo(FSDirectory fsd, final INodesInPath srcIIP, final INodesInPath dstIIP, long timestamp, BlocksMapUpdateInfo collectedBlocks, Options.Rename... options) throws IOException {
    assert fsd.hasWriteLock();
    boolean overwrite = options != null && Arrays.asList(options).contains(Options.Rename.OVERWRITE);
    final String src = srcIIP.getPath();
    final String dst = dstIIP.getPath();
    final String error;
    final INode srcInode = srcIIP.getLastINode();
    validateRenameSource(fsd, srcIIP);
    // validate the destination
    if (dst.equals(src)) {
        throw new FileAlreadyExistsException("The source " + src + " and destination " + dst + " are the same");
    }
    validateDestination(src, dst, srcInode);
    if (dstIIP.length() == 1) {
        error = "rename destination cannot be the root";
        NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + error);
        throw new IOException(error);
    }
    BlockStoragePolicySuite bsps = fsd.getBlockStoragePolicySuite();
    fsd.ezManager.checkMoveValidity(srcIIP, dstIIP);
    final INode dstInode = dstIIP.getLastINode();
    List<INodeDirectory> snapshottableDirs = new ArrayList<>();
    if (dstInode != null) {
        // Destination exists
        validateOverwrite(src, dst, overwrite, srcInode, dstInode);
        FSDirSnapshotOp.checkSnapshot(fsd, dstIIP, snapshottableDirs);
    }
    INode dstParent = dstIIP.getINode(-2);
    if (dstParent == null) {
        error = "rename destination parent " + dst + " not found.";
        NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + error);
        throw new FileNotFoundException(error);
    }
    if (!dstParent.isDirectory()) {
        error = "rename destination parent " + dst + " is a file.";
        NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + error);
        throw new ParentNotDirectoryException(error);
    }
    // Ensure dst has quota to accommodate rename
    verifyFsLimitsForRename(fsd, srcIIP, dstIIP);
    verifyQuotaForRename(fsd, srcIIP, dstIIP);
    RenameOperation tx = new RenameOperation(fsd, srcIIP, dstIIP);
    boolean undoRemoveSrc = true;
    tx.removeSrc();
    boolean undoRemoveDst = false;
    long removedNum = 0;
    try {
        if (dstInode != null) {
            // dst exists, remove it
            removedNum = tx.removeDst();
            if (removedNum != -1) {
                undoRemoveDst = true;
            }
        }
        // add src as dst to complete rename
        INodesInPath renamedIIP = tx.addSourceToDestination();
        if (renamedIIP != null) {
            undoRemoveSrc = false;
            if (NameNode.stateChangeLog.isDebugEnabled()) {
                NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedRenameTo: " + src + " is renamed to " + dst);
            }
            tx.updateMtimeAndLease(timestamp);
            // Collect the blocks and remove the lease for previous dst
            boolean filesDeleted = false;
            if (undoRemoveDst) {
                undoRemoveDst = false;
                if (removedNum > 0) {
                    filesDeleted = tx.cleanDst(bsps, collectedBlocks);
                }
            }
            if (snapshottableDirs.size() > 0) {
                // There are snapshottable directories (without snapshots) to be
                // deleted. Need to update the SnapshotManager.
                fsd.getFSNamesystem().removeSnapshottableDirs(snapshottableDirs);
            }
            tx.updateQuotasInSourceTree(bsps);
            return createRenameResult(fsd, renamedIIP, filesDeleted, collectedBlocks);
        }
    } finally {
        if (undoRemoveSrc) {
            tx.restoreSource();
        }
        if (undoRemoveDst) {
            // Rename failed - restore dst
            tx.restoreDst(bsps);
        }
    }
    NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + "failed to rename " + src + " to " + dst);
    throw new IOException("rename from " + src + " to " + dst + " failed.");
}
Also used : BlockStoragePolicySuite(org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite) FileAlreadyExistsException(org.apache.hadoop.fs.FileAlreadyExistsException) ChunkedArrayList(org.apache.hadoop.util.ChunkedArrayList) ArrayList(java.util.ArrayList) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) ParentNotDirectoryException(org.apache.hadoop.fs.ParentNotDirectoryException)
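
unprotectedRenameTo runs inside the NameNode under the FSDirectory write lock; client code reaches it through the rename-with-options call referenced in the Javadoc. A minimal client-side sketch using the public FileContext API (the paths and the configured fs.defaultFS are illustrative): renaming onto an existing destination with OVERWRITE exercises the tx.removeDst()/tx.cleanDst() branch above.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;

// Sketch: client-side rename with OVERWRITE semantics. On HDFS this lands in
// FSDirRenameOp.unprotectedRenameTo; an existing /dst is removed and its
// blocks collected for deletion instead of the rename failing.
public class RenameOverwriteSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration(); // assumes fs.defaultFS points at an HDFS cluster
        FileContext fc = FileContext.getFileContext(conf);
        fc.rename(new Path("/src"), new Path("/dst"), Options.Rename.OVERWRITE);
    }
}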

Example 15 with BlockStoragePolicySuite

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite in project hadoop by apache.

From the class TestApplyingStoragePolicy, method testSetAndGetStoragePolicy.

@Test
public void testSetAndGetStoragePolicy() throws IOException {
    final Path foo = new Path("/foo");
    final Path bar = new Path(foo, "bar");
    final Path fooz = new Path("/fooz");
    DFSTestUtil.createFile(fs, bar, SIZE, REPL, 0);
    final BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
    final BlockStoragePolicy warm = suite.getPolicy("WARM");
    final BlockStoragePolicy cold = suite.getPolicy("COLD");
    final BlockStoragePolicy hot = suite.getPolicy("HOT");
    assertEquals(fs.getStoragePolicy(foo), hot);
    assertEquals(fs.getStoragePolicy(bar), hot);
    try {
        fs.getStoragePolicy(fooz);
    } catch (Exception e) {
        assertTrue(e instanceof FileNotFoundException);
    }
    /*
     * test: set storage policy
     */
    fs.setStoragePolicy(foo, warm.getName());
    fs.setStoragePolicy(bar, cold.getName());
    try {
        fs.setStoragePolicy(fooz, warm.getName());
    } catch (Exception e) {
        assertTrue(e instanceof FileNotFoundException);
    }
    /*
     * test: get storage policy after set
     */
    assertEquals(fs.getStoragePolicy(foo), warm);
    assertEquals(fs.getStoragePolicy(bar), cold);
    try {
        fs.getStoragePolicy(fooz);
    } catch (Exception e) {
        assertTrue(e instanceof FileNotFoundException);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) BlockStoragePolicySuite(org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite) FileNotFoundException(java.io.FileNotFoundException) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) IOException(java.io.IOException) FileNotFoundException(java.io.FileNotFoundException) Test(org.junit.Test)
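
The same policies are also reachable through the generic FileSystem API, without touching the server-side suite at all. A short sketch (getAllStoragePolicies, setStoragePolicy, getStoragePolicy and unsetStoragePolicy are FileSystem methods on recent Hadoop releases; their availability on older 2.x versions is an assumption worth checking):

import java.io.IOException;

import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch: round-trip a storage policy through the client-facing API.
public class PolicyRoundTripSketch {
    static void roundTrip(FileSystem fs, Path path) throws IOException {
        for (BlockStoragePolicySpi policy : fs.getAllStoragePolicies()) {
            System.out.println("available: " + policy.getName());
        }
        fs.setStoragePolicy(path, "COLD");
        System.out.println("now: " + fs.getStoragePolicy(path).getName());
        fs.unsetStoragePolicy(path); // revert to the policy inherited from the parent
    }
}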

Aggregations

BlockStoragePolicySuite (org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite): 16
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 14
Test (org.junit.Test): 13
Path (org.apache.hadoop.fs.Path): 7
FileNotFoundException (java.io.FileNotFoundException): 5
IOException (java.io.IOException): 5
LinkedHashMap (java.util.LinkedHashMap): 5
HashMap (java.util.HashMap): 4
ArrayList (java.util.ArrayList): 2
Field (java.lang.reflect.Field): 1
HashSet (java.util.HashSet): 1
BlockStoragePolicySpi (org.apache.hadoop.fs.BlockStoragePolicySpi): 1
FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException): 1
FileSystem (org.apache.hadoop.fs.FileSystem): 1
ParentNotDirectoryException (org.apache.hadoop.fs.ParentNotDirectoryException): 1
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 1
HdfsAdmin (org.apache.hadoop.hdfs.client.HdfsAdmin): 1
Block (org.apache.hadoop.hdfs.protocol.Block): 1
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 1
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 1