
Example 31 with BlockStoragePolicy

Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in the Apache Hadoop project.

From the class TestFileWithSnapshotFeature, method testUpdateQuotaAndCollectBlocks.

@Test
public void testUpdateQuotaAndCollectBlocks() {
    FileDiffList diffs = new FileDiffList();
    FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffs);
    FileDiff diff = mock(FileDiff.class);
    BlockStoragePolicySuite bsps = mock(BlockStoragePolicySuite.class);
    BlockStoragePolicy bsp = mock(BlockStoragePolicy.class);
    BlockInfo[] blocks = new BlockInfo[] { new BlockInfoContiguous(new Block(1, BLOCK_SIZE, 1), REPL_1) };
    BlockManager bm = mock(BlockManager.class);
    // No snapshot
    INodeFile file = mock(INodeFile.class);
    when(file.getFileWithSnapshotFeature()).thenReturn(sf);
    when(file.getBlocks()).thenReturn(blocks);
    when(file.getStoragePolicyID()).thenReturn((byte) 1);
    Whitebox.setInternalState(file, "header", (long) REPL_1 << 48);
    when(file.getPreferredBlockReplication()).thenReturn(REPL_1);
    when(bsps.getPolicy(anyByte())).thenReturn(bsp);
    INode.BlocksMapUpdateInfo collectedBlocks = mock(INode.BlocksMapUpdateInfo.class);
    ArrayList<INode> removedINodes = new ArrayList<>();
    INode.ReclaimContext ctx = new INode.ReclaimContext(bsps, collectedBlocks, removedINodes, null);
    sf.updateQuotaAndCollectBlocks(ctx, file, diff);
    QuotaCounts counts = ctx.quotaDelta().getCountsCopy();
    Assert.assertEquals(0, counts.getStorageSpace());
    Assert.assertTrue(counts.getTypeSpaces().allLessOrEqual(0));
    // INode only exists in the snapshot
    INodeFile snapshotINode = mock(INodeFile.class);
    Whitebox.setInternalState(snapshotINode, "header", (long) REPL_3 << 48);
    Whitebox.setInternalState(diff, "snapshotINode", snapshotINode);
    when(diff.getSnapshotINode()).thenReturn(snapshotINode);
    when(bsp.chooseStorageTypes(REPL_1)).thenReturn(Lists.newArrayList(SSD));
    when(bsp.chooseStorageTypes(REPL_3)).thenReturn(Lists.newArrayList(DISK));
    blocks[0].setReplication(REPL_3);
    sf.updateQuotaAndCollectBlocks(ctx, file, diff);
    counts = ctx.quotaDelta().getCountsCopy();
    Assert.assertEquals((REPL_3 - REPL_1) * BLOCK_SIZE, counts.getStorageSpace());
    Assert.assertEquals(BLOCK_SIZE, counts.getTypeSpaces().get(DISK));
    Assert.assertEquals(-BLOCK_SIZE, counts.getTypeSpaces().get(SSD));
}
Also used : BlockStoragePolicySuite(org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite) BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) INode(org.apache.hadoop.hdfs.server.namenode.INode) ArrayList(java.util.ArrayList) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) Block(org.apache.hadoop.hdfs.protocol.Block) QuotaCounts(org.apache.hadoop.hdfs.server.namenode.QuotaCounts) Test(org.junit.Test)
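
For reference, a minimal sketch of the arithmetic behind the second set of assertions, assuming the test constants are REPL_1 = 1, REPL_3 = 3 and BLOCK_SIZE = 1024 (their actual values are defined elsewhere in TestFileWithSnapshotFeature and are not shown in this excerpt):

// Standalone sketch; REPL_1, REPL_3 and BLOCK_SIZE are assumed values.
public class QuotaDeltaSketch {
    static final long BLOCK_SIZE = 1024;  // assumed
    static final short REPL_1 = 1;        // assumed
    static final short REPL_3 = 3;        // assumed

    public static void main(String[] args) {
        // Raising the block's replication from REPL_1 to REPL_3 costs
        // (REPL_3 - REPL_1) * BLOCK_SIZE extra bytes of storage space.
        long storageSpaceDelta = (long) (REPL_3 - REPL_1) * BLOCK_SIZE;
        // With chooseStorageTypes(REPL_1) -> SSD and chooseStorageTypes(REPL_3) -> DISK,
        // the recomputed usage charges one block of DISK quota and releases one block of SSD quota.
        long diskDelta = BLOCK_SIZE;
        long ssdDelta = -BLOCK_SIZE;
        System.out.printf("storageSpace=%d disk=%d ssd=%d%n", storageSpaceDelta, diskDelta, ssdDelta);
    }
}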

Example 32 with BlockStoragePolicy

Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in the Apache Hadoop project.

From the class JsonUtilClient, method getStoragePolicies.

public static Collection<BlockStoragePolicy> getStoragePolicies(Map<?, ?> json) {
    Map<?, ?> policiesJson = (Map<?, ?>) json.get("BlockStoragePolicies");
    if (policiesJson != null) {
        List<?> objs = (List<?>) policiesJson.get(BlockStoragePolicy.class.getSimpleName());
        if (objs != null) {
            BlockStoragePolicy[] storagePolicies = new BlockStoragePolicy[objs.size()];
            for (int i = 0; i < objs.size(); i++) {
                final Map<?, ?> m = (Map<?, ?>) objs.get(i);
                BlockStoragePolicy blockStoragePolicy = toBlockStoragePolicy(m);
                storagePolicies[i] = blockStoragePolicy;
            }
            return Arrays.asList(storagePolicies);
        }
    }
    return new ArrayList<BlockStoragePolicy>(0);
}
Also used : ArrayList(java.util.ArrayList) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) ArrayList(java.util.ArrayList) List(java.util.List) Map(java.util.Map)
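
As a rough illustration, the nested-map shape this parser expects (mirroring a WebHDFS-style response) is sketched below with plain java.util maps; the fields inside each policy entry ("id", "name") are illustrative assumptions about what toBlockStoragePolicy reads, not confirmed by this excerpt.

// Self-contained sketch of the two-level unwrapping performed above, using plain maps.
import java.util.List;
import java.util.Map;

public class PolicyJsonShapeSketch {
    public static void main(String[] args) {
        Map<String, Object> json = Map.of(
            "BlockStoragePolicies", Map.of(
                "BlockStoragePolicy", List.of(
                    Map.of("id", 7, "name", "HOT"))));  // hypothetical policy entry
        Map<?, ?> wrapper = (Map<?, ?>) json.get("BlockStoragePolicies");
        List<?> entries = (List<?>) wrapper.get("BlockStoragePolicy");
        for (Object entry : entries) {
            // each entry map would be handed to toBlockStoragePolicy(...)
            System.out.println(entry);
        }
    }
}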

Example 33 with BlockStoragePolicy

Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in the Apache Hadoop project.

From the class ClientNamenodeProtocolServerSideTranslatorPB, method getStoragePolicies.

@Override
public GetStoragePoliciesResponseProto getStoragePolicies(RpcController controller, GetStoragePoliciesRequestProto request) throws ServiceException {
    try {
        BlockStoragePolicy[] policies = server.getStoragePolicies();
        GetStoragePoliciesResponseProto.Builder builder = GetStoragePoliciesResponseProto.newBuilder();
        if (policies == null) {
            return builder.build();
        }
        for (BlockStoragePolicy policy : policies) {
            builder.addPolicies(PBHelperClient.convert(policy));
        }
        return builder.build();
    } catch (IOException e) {
        throw new ServiceException(e);
    }
}
Also used : GetStoragePoliciesResponseProto(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto) ServiceException(com.google.protobuf.ServiceException) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) IOException(java.io.IOException)
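
The notable design choice above is that a null policy array results in an empty response rather than a null or an error. A simplified, self-contained sketch of that pattern, using a plain List<String> as a stand-in for the protobuf response:

// Simplified stand-in for the proto builder pattern above; not the real protobuf classes.
import java.util.ArrayList;
import java.util.List;

public class EmptyResponseSketch {
    static List<String> buildResponse(String[] policies) {
        List<String> response = new ArrayList<>();
        if (policies == null) {
            return response;          // empty, but never null on the wire
        }
        for (String policy : policies) {
            response.add(policy);     // mirrors builder.addPolicies(PBHelperClient.convert(policy))
        }
        return response;
    }

    public static void main(String[] args) {
        System.out.println(buildResponse(null).size());                    // 0
        System.out.println(buildResponse(new String[] { "HOT" }).size());  // 1
    }
}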

Example 34 with BlockStoragePolicy

Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in the Apache Hadoop project.

From the class BlockStoragePolicySuite, method createDefaultSuite.

@VisibleForTesting
public static BlockStoragePolicySuite createDefaultSuite() {
    final BlockStoragePolicy[] policies = new BlockStoragePolicy[1 << ID_BIT_LENGTH];
    final byte lazyPersistId = HdfsConstants.MEMORY_STORAGE_POLICY_ID;
    policies[lazyPersistId] = new BlockStoragePolicy(lazyPersistId, HdfsConstants.MEMORY_STORAGE_POLICY_NAME, new StorageType[] { StorageType.RAM_DISK, StorageType.DISK }, new StorageType[] { StorageType.DISK }, new StorageType[] { StorageType.DISK }, /* Cannot be changed on regular files, but inherited. */ true);
    final byte allssdId = HdfsConstants.ALLSSD_STORAGE_POLICY_ID;
    policies[allssdId] = new BlockStoragePolicy(allssdId, HdfsConstants.ALLSSD_STORAGE_POLICY_NAME, new StorageType[] { StorageType.SSD }, new StorageType[] { StorageType.DISK }, new StorageType[] { StorageType.DISK });
    final byte onessdId = HdfsConstants.ONESSD_STORAGE_POLICY_ID;
    policies[onessdId] = new BlockStoragePolicy(onessdId, HdfsConstants.ONESSD_STORAGE_POLICY_NAME, new StorageType[] { StorageType.SSD, StorageType.DISK }, new StorageType[] { StorageType.SSD, StorageType.DISK }, new StorageType[] { StorageType.SSD, StorageType.DISK });
    final byte hotId = HdfsConstants.HOT_STORAGE_POLICY_ID;
    policies[hotId] = new BlockStoragePolicy(hotId, HdfsConstants.HOT_STORAGE_POLICY_NAME, new StorageType[] { StorageType.DISK }, StorageType.EMPTY_ARRAY, new StorageType[] { StorageType.ARCHIVE });
    final byte warmId = HdfsConstants.WARM_STORAGE_POLICY_ID;
    policies[warmId] = new BlockStoragePolicy(warmId, HdfsConstants.WARM_STORAGE_POLICY_NAME, new StorageType[] { StorageType.DISK, StorageType.ARCHIVE }, new StorageType[] { StorageType.DISK, StorageType.ARCHIVE }, new StorageType[] { StorageType.DISK, StorageType.ARCHIVE });
    final byte coldId = HdfsConstants.COLD_STORAGE_POLICY_ID;
    policies[coldId] = new BlockStoragePolicy(coldId, HdfsConstants.COLD_STORAGE_POLICY_NAME, new StorageType[] { StorageType.ARCHIVE }, StorageType.EMPTY_ARRAY, StorageType.EMPTY_ARRAY);
    return new BlockStoragePolicySuite(hotId, policies);
}
Also used : StorageType(org.apache.hadoop.fs.StorageType) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
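
A small usage sketch: getPolicy(byte) is exercised elsewhere in these examples, while getPolicy(String) and the getName() accessor are assumptions based on their names, not shown in this excerpt.

// Usage sketch only; getPolicy(String) and getName() are assumed APIs, and the
// imports are assumed to match the ones used by the excerpt above.
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

public class StoragePolicySuiteSketch {
    public static void main(String[] args) {
        BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
        // hotId is passed as the default policy id in createDefaultSuite() above.
        BlockStoragePolicy hot = suite.getPolicy(HdfsConstants.HOT_STORAGE_POLICY_ID);
        BlockStoragePolicy warm = suite.getPolicy(HdfsConstants.WARM_STORAGE_POLICY_NAME);
        System.out.println(hot.getName() + " / " + warm.getName());
    }
}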

Example 35 with BlockStoragePolicy

Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in the Apache Hadoop project.

From the class BlockManager, method chooseExcessRedundancyStriped.

/**
   * We want the block group to contain every internal block exactly once, but
   * redundant internal blocks (blocks with the same index) may exist. This
   * method deletes redundant internal blocks until only one is left for each
   * index.
   *
   * The block placement policy makes sure that the remaining internal blocks
   * are spread across racks, and it tries hard to pick, for deletion, the
   * replica on the node with the least free space.
   */
private void chooseExcessRedundancyStriped(BlockCollection bc, final Collection<DatanodeStorageInfo> nonExcess, BlockInfo storedBlock, DatanodeDescriptor delNodeHint) {
    assert storedBlock instanceof BlockInfoStriped;
    BlockInfoStriped sblk = (BlockInfoStriped) storedBlock;
    short groupSize = sblk.getTotalBlockNum();
    // find all duplicated indices
    // indices found
    BitSet found = new BitSet(groupSize);
    // indices found more than once
    BitSet duplicated = new BitSet(groupSize);
    HashMap<DatanodeStorageInfo, Integer> storage2index = new HashMap<>();
    for (DatanodeStorageInfo storage : nonExcess) {
        int index = sblk.getStorageBlockIndex(storage);
        assert index >= 0;
        if (found.get(index)) {
            duplicated.set(index);
        }
        found.set(index);
        storage2index.put(storage, index);
    }
    // use delHint only if delHint is duplicated
    final DatanodeStorageInfo delStorageHint = DatanodeStorageInfo.getDatanodeStorageInfo(nonExcess, delNodeHint);
    if (delStorageHint != null) {
        Integer index = storage2index.get(delStorageHint);
        if (index != null && duplicated.get(index)) {
            processChosenExcessRedundancy(nonExcess, delStorageHint, storedBlock);
        }
    }
    // cardinality of found indicates the expected number of internal blocks
    final int numOfTarget = found.cardinality();
    final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(bc.getStoragePolicyID());
    final List<StorageType> excessTypes = storagePolicy.chooseExcess((short) numOfTarget, DatanodeStorageInfo.toStorageTypes(nonExcess));
    if (excessTypes.isEmpty()) {
        LOG.warn("excess types chosen for block {} among storages {} is empty", storedBlock, nonExcess);
        return;
    }
    BlockPlacementPolicy placementPolicy = placementPolicies.getPolicy(STRIPED);
    // for each duplicated index, delete some replicas until only one left
    for (int targetIndex = duplicated.nextSetBit(0); targetIndex >= 0; targetIndex = duplicated.nextSetBit(targetIndex + 1)) {
        List<DatanodeStorageInfo> candidates = new ArrayList<>();
        for (DatanodeStorageInfo storage : nonExcess) {
            int index = storage2index.get(storage);
            if (index == targetIndex) {
                candidates.add(storage);
            }
        }
        if (candidates.size() > 1) {
            List<DatanodeStorageInfo> replicasToDelete = placementPolicy.chooseReplicasToDelete(nonExcess, candidates, (short) 1, excessTypes, null, null);
            for (DatanodeStorageInfo chosen : replicasToDelete) {
                processChosenExcessRedundancy(nonExcess, chosen, storedBlock);
                candidates.remove(chosen);
            }
        }
        duplicated.clear(targetIndex);
    }
}
Also used : StorageType(org.apache.hadoop.fs.StorageType) HashMap(java.util.HashMap) BitSet(java.util.BitSet) ArrayList(java.util.ArrayList) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy)
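
The duplicate-index bookkeeping above can be illustrated in isolation. A self-contained sketch with made-up internal-block indices (the storage-to-index mapping is simulated rather than taken from real DatanodeStorageInfo objects):

// Standalone illustration of the found/duplicated BitSet bookkeeping; index values are made up.
import java.util.BitSet;

public class DuplicateIndexSketch {
    public static void main(String[] args) {
        int groupSize = 9;                                   // e.g. RS(6,3): 9 internal blocks
        int[] reportedIndices = { 0, 1, 2, 2, 3, 5, 5, 5 };  // hypothetical reports from storages

        BitSet found = new BitSet(groupSize);       // indices seen at least once
        BitSet duplicated = new BitSet(groupSize);  // indices seen more than once
        for (int index : reportedIndices) {
            if (found.get(index)) {
                duplicated.set(index);
            }
            found.set(index);
        }
        // found.cardinality() is the number of distinct internal blocks present;
        // each set bit in 'duplicated' marks an index with excess replicas to trim.
        System.out.println("distinct indices = " + found.cardinality());  // 5
        System.out.println("duplicated indices = " + duplicated);         // {2, 5}
    }
}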

Aggregations

BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 48
Test (org.junit.Test): 19
BlockStoragePolicySuite (org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite): 15
Path (org.apache.hadoop.fs.Path): 12
StorageType (org.apache.hadoop.fs.StorageType): 12
IOException (java.io.IOException): 8
FileNotFoundException (java.io.FileNotFoundException): 6
ArrayList (java.util.ArrayList): 6
HashMap (java.util.HashMap): 5
LinkedHashMap (java.util.LinkedHashMap): 5
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 5
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 4
HadoopIllegalArgumentException (org.apache.hadoop.HadoopIllegalArgumentException): 3
BlockStoragePolicySpi (org.apache.hadoop.fs.BlockStoragePolicySpi): 3
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 3
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 3
QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts): 3
Field (java.lang.reflect.Field): 2
List (java.util.List): 2
FileSystem (org.apache.hadoop.fs.FileSystem): 2