Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.
From the class TestFileWithSnapshotFeature, the method testUpdateQuotaAndCollectBlocks:
@Test
public void testUpdateQuotaAndCollectBlocks() {
  FileDiffList diffs = new FileDiffList();
  FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffs);
  FileDiff diff = mock(FileDiff.class);
  BlockStoragePolicySuite bsps = mock(BlockStoragePolicySuite.class);
  BlockStoragePolicy bsp = mock(BlockStoragePolicy.class);
  BlockInfo[] blocks = new BlockInfo[] {
      new BlockInfoContiguous(new Block(1, BLOCK_SIZE, 1), REPL_1)
  };
  BlockManager bm = mock(BlockManager.class);

  // No snapshot
  INodeFile file = mock(INodeFile.class);
  when(file.getFileWithSnapshotFeature()).thenReturn(sf);
  when(file.getBlocks()).thenReturn(blocks);
  when(file.getStoragePolicyID()).thenReturn((byte) 1);
  Whitebox.setInternalState(file, "header", (long) REPL_1 << 48);
  when(file.getPreferredBlockReplication()).thenReturn(REPL_1);
  when(bsps.getPolicy(anyByte())).thenReturn(bsp);
  INode.BlocksMapUpdateInfo collectedBlocks =
      mock(INode.BlocksMapUpdateInfo.class);
  ArrayList<INode> removedINodes = new ArrayList<>();
  INode.ReclaimContext ctx =
      new INode.ReclaimContext(bsps, collectedBlocks, removedINodes, null);
  sf.updateQuotaAndCollectBlocks(ctx, file, diff);
  QuotaCounts counts = ctx.quotaDelta().getCountsCopy();
  Assert.assertEquals(0, counts.getStorageSpace());
  Assert.assertTrue(counts.getTypeSpaces().allLessOrEqual(0));

  // INode only exists in the snapshot
  INodeFile snapshotINode = mock(INodeFile.class);
  Whitebox.setInternalState(snapshotINode, "header", (long) REPL_3 << 48);
  Whitebox.setInternalState(diff, "snapshotINode", snapshotINode);
  when(diff.getSnapshotINode()).thenReturn(snapshotINode);
  when(bsp.chooseStorageTypes(REPL_1)).thenReturn(Lists.newArrayList(SSD));
  when(bsp.chooseStorageTypes(REPL_3)).thenReturn(Lists.newArrayList(DISK));
  blocks[0].setReplication(REPL_3);
  sf.updateQuotaAndCollectBlocks(ctx, file, diff);
  counts = ctx.quotaDelta().getCountsCopy();
  Assert.assertEquals((REPL_3 - REPL_1) * BLOCK_SIZE,
      counts.getStorageSpace());
  Assert.assertEquals(BLOCK_SIZE, counts.getTypeSpaces().get(DISK));
  Assert.assertEquals(-BLOCK_SIZE, counts.getTypeSpaces().get(SSD));
}
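The assertions encode simple quota arithmetic: with no snapshot copy the delta is zero, but once the snapshot records replication REPL_3 while the policy now maps the file's block to DISK instead of SSD, the storage-space delta is (REPL_3 - REPL_1) * BLOCK_SIZE and one block's worth of typed space moves from SSD to DISK. A standalone sketch of that arithmetic follows; the constant values are assumptions, since the test's REPL_1, REPL_3, and BLOCK_SIZE definitions are not part of the snippet above.

// Standalone sketch of the quota-delta arithmetic the test asserts.
// REPL_1, REPL_3, and BLOCK_SIZE are assumed stand-in values; they are
// not taken from the Hadoop test class.
public class QuotaDeltaSketch {
  static final short REPL_1 = 1;
  static final short REPL_3 = 3;
  static final long BLOCK_SIZE = 1024;

  public static void main(String[] args) {
    // Storage space charged = replication * block size, so raising the
    // recorded replication from REPL_1 to REPL_3 costs the difference:
    long storageSpaceDelta = (long) (REPL_3 - REPL_1) * BLOCK_SIZE;
    // The policy moved the one block from SSD (old) to DISK (new), so one
    // block's worth of space shifts between the two per-type counters.
    long diskDelta = BLOCK_SIZE;   // one more block expected on DISK
    long ssdDelta = -BLOCK_SIZE;   // one fewer block expected on SSD
    System.out.printf("storage=%d disk=%d ssd=%d%n",
        storageSpaceDelta, diskDelta, ssdDelta);
  }
}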
Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.
From the class JsonUtilClient, the method getStoragePolicies:
public static Collection<BlockStoragePolicy> getStoragePolicies(Map<?, ?> json) {
  Map<?, ?> policiesJson = (Map<?, ?>) json.get("BlockStoragePolicies");
  if (policiesJson != null) {
    List<?> objs = (List<?>) policiesJson.get(
        BlockStoragePolicy.class.getSimpleName());
    if (objs != null) {
      BlockStoragePolicy[] storagePolicies = new BlockStoragePolicy[objs.size()];
      for (int i = 0; i < objs.size(); i++) {
        final Map<?, ?> m = (Map<?, ?>) objs.get(i);
        storagePolicies[i] = toBlockStoragePolicy(m);
      }
      return Arrays.asList(storagePolicies);
    }
  }
  return new ArrayList<BlockStoragePolicy>(0);
}
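The method is deliberately tolerant: a missing "BlockStoragePolicies" object or a missing policy array yields an empty collection rather than an exception. The sketch below rebuilds the JSON shape the method expects using plain java.util maps; the "id" and "name" fields inside each policy object are hypothetical, since the body of toBlockStoragePolicy is not shown on this page.

import java.util.*;

// Minimal sketch of the JSON nesting getStoragePolicies walks: an outer
// "BlockStoragePolicies" object wrapping a "BlockStoragePolicy" array.
// The per-policy fields ("id", "name") are illustrative assumptions.
public class StoragePoliciesJsonShape {
  public static void main(String[] args) {
    Map<String, Object> policy = new HashMap<>();
    policy.put("id", 7);
    policy.put("name", "HOT");

    Map<String, Object> wrapper = new HashMap<>();
    // The inner key is BlockStoragePolicy.class.getSimpleName() in the
    // real code, i.e. the literal string "BlockStoragePolicy".
    wrapper.put("BlockStoragePolicy", Collections.singletonList(policy));

    Map<String, Object> json = new HashMap<>();
    json.put("BlockStoragePolicies", wrapper);

    // Walk the structure the same way the method above does.
    Map<?, ?> policiesJson = (Map<?, ?>) json.get("BlockStoragePolicies");
    List<?> objs = (List<?>) policiesJson.get("BlockStoragePolicy");
    for (Object o : objs) {
      System.out.println("policy object: " + o);
    }
  }
}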
Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.
From the class ClientNamenodeProtocolServerSideTranslatorPB, the method getStoragePolicies:
@Override
public GetStoragePoliciesResponseProto getStoragePolicies(
    RpcController controller, GetStoragePoliciesRequestProto request)
    throws ServiceException {
  try {
    BlockStoragePolicy[] policies = server.getStoragePolicies();
    GetStoragePoliciesResponseProto.Builder builder =
        GetStoragePoliciesResponseProto.newBuilder();
    if (policies == null) {
      return builder.build();
    }
    for (BlockStoragePolicy policy : policies) {
      builder.addPolicies(PBHelperClient.convert(policy));
    }
    return builder.build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
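This is the standard server-side translator pattern: delegate to the ClientProtocol implementation, convert each result to its wire format with PBHelperClient.convert, and wrap any IOException in a ServiceException so it can cross the RPC boundary. For context, here is a hedged sketch of the mirror-image client-side direction; the helper names (rpcProxy, PBHelperClient.convertStoragePolicies, ProtobufHelper.getRemoteException) follow the usual Hadoop translator conventions but should be treated as assumptions rather than verified signatures.

// Hedged sketch of the client-side counterpart: build the request proto,
// issue the RPC, convert the response protos back into BlockStoragePolicy
// objects, and unwrap ServiceException into the IOException callers expect.
// Helper names here are assumptions based on common Hadoop PB conventions.
@Override
public BlockStoragePolicy[] getStoragePolicies() throws IOException {
  GetStoragePoliciesRequestProto req =
      GetStoragePoliciesRequestProto.newBuilder().build();
  try {
    GetStoragePoliciesResponseProto resp =
        rpcProxy.getStoragePolicies(null, req);
    return PBHelperClient.convertStoragePolicies(resp.getPoliciesList());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}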
Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.
From the class BlockStoragePolicySuite, the method createDefaultSuite:
@VisibleForTesting
public static BlockStoragePolicySuite createDefaultSuite() {
  final BlockStoragePolicy[] policies =
      new BlockStoragePolicy[1 << ID_BIT_LENGTH];
  final byte lazyPersistId = HdfsConstants.MEMORY_STORAGE_POLICY_ID;
  policies[lazyPersistId] = new BlockStoragePolicy(lazyPersistId,
      HdfsConstants.MEMORY_STORAGE_POLICY_NAME,
      new StorageType[] { StorageType.RAM_DISK, StorageType.DISK },
      new StorageType[] { StorageType.DISK },
      new StorageType[] { StorageType.DISK },
      true);  // Cannot be changed on regular files, but inherited.
  final byte allssdId = HdfsConstants.ALLSSD_STORAGE_POLICY_ID;
  policies[allssdId] = new BlockStoragePolicy(allssdId,
      HdfsConstants.ALLSSD_STORAGE_POLICY_NAME,
      new StorageType[] { StorageType.SSD },
      new StorageType[] { StorageType.DISK },
      new StorageType[] { StorageType.DISK });
  final byte onessdId = HdfsConstants.ONESSD_STORAGE_POLICY_ID;
  policies[onessdId] = new BlockStoragePolicy(onessdId,
      HdfsConstants.ONESSD_STORAGE_POLICY_NAME,
      new StorageType[] { StorageType.SSD, StorageType.DISK },
      new StorageType[] { StorageType.SSD, StorageType.DISK },
      new StorageType[] { StorageType.SSD, StorageType.DISK });
  final byte hotId = HdfsConstants.HOT_STORAGE_POLICY_ID;
  policies[hotId] = new BlockStoragePolicy(hotId,
      HdfsConstants.HOT_STORAGE_POLICY_NAME,
      new StorageType[] { StorageType.DISK },
      StorageType.EMPTY_ARRAY,
      new StorageType[] { StorageType.ARCHIVE });
  final byte warmId = HdfsConstants.WARM_STORAGE_POLICY_ID;
  policies[warmId] = new BlockStoragePolicy(warmId,
      HdfsConstants.WARM_STORAGE_POLICY_NAME,
      new StorageType[] { StorageType.DISK, StorageType.ARCHIVE },
      new StorageType[] { StorageType.DISK, StorageType.ARCHIVE },
      new StorageType[] { StorageType.DISK, StorageType.ARCHIVE });
  final byte coldId = HdfsConstants.COLD_STORAGE_POLICY_ID;
  policies[coldId] = new BlockStoragePolicy(coldId,
      HdfsConstants.COLD_STORAGE_POLICY_NAME,
      new StorageType[] { StorageType.ARCHIVE },
      StorageType.EMPTY_ARRAY,
      StorageType.EMPTY_ARRAY);
  return new BlockStoragePolicySuite(hotId, policies);
}
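A short usage sketch follows: resolve one of the default policies and ask it which storage types a block's replicas should occupy. getPolicy(String) and chooseStorageTypes(short) are taken to be the suite's and policy's public accessors, consistent with their use elsewhere on this page; treat the exact signatures as assumptions.

// Hedged usage sketch for the default suite built above. A HOT file keeps
// every replica on DISK, so replication 3 should yield three DISK entries.
BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
BlockStoragePolicy hot =
    suite.getPolicy(HdfsConstants.HOT_STORAGE_POLICY_NAME);
List<StorageType> types = hot.chooseStorageTypes((short) 3);
System.out.println(types); // expected: [DISK, DISK, DISK]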
Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.
From the class BlockManager, the method chooseExcessRedundancyStriped:
/**
 * We want the block group to have every internal block present, but there
 * may be redundant internal blocks (i.e., multiple blocks with the same
 * index). This method deletes the redundant internal blocks until only one
 * is left for each index.
 *
 * The block placement policy makes sure that the remaining internal blocks
 * stay spread across racks and, when choosing which copy to delete, tries
 * hard to pick the one on the node with the least free space.
 */
private void chooseExcessRedundancyStriped(BlockCollection bc,
    final Collection<DatanodeStorageInfo> nonExcess, BlockInfo storedBlock,
    DatanodeDescriptor delNodeHint) {
  assert storedBlock instanceof BlockInfoStriped;
  BlockInfoStriped sblk = (BlockInfoStriped) storedBlock;
  short groupSize = sblk.getTotalBlockNum();

  // Find all duplicated indices.
  BitSet found = new BitSet(groupSize);      // indices found
  BitSet duplicated = new BitSet(groupSize); // indices found more than once
  HashMap<DatanodeStorageInfo, Integer> storage2index = new HashMap<>();
  for (DatanodeStorageInfo storage : nonExcess) {
    int index = sblk.getStorageBlockIndex(storage);
    assert index >= 0;
    if (found.get(index)) {
      duplicated.set(index);
    }
    found.set(index);
    storage2index.put(storage, index);
  }

  // Use delHint only if delHint is duplicated.
  final DatanodeStorageInfo delStorageHint =
      DatanodeStorageInfo.getDatanodeStorageInfo(nonExcess, delNodeHint);
  if (delStorageHint != null) {
    Integer index = storage2index.get(delStorageHint);
    if (index != null && duplicated.get(index)) {
      processChosenExcessRedundancy(nonExcess, delStorageHint, storedBlock);
    }
  }

  // The cardinality of found is the expected number of internal blocks.
  final int numOfTarget = found.cardinality();
  final BlockStoragePolicy storagePolicy =
      storagePolicySuite.getPolicy(bc.getStoragePolicyID());
  final List<StorageType> excessTypes = storagePolicy.chooseExcess(
      (short) numOfTarget, DatanodeStorageInfo.toStorageTypes(nonExcess));
  if (excessTypes.isEmpty()) {
    LOG.warn("excess types chosen for block {} among storages {} is empty",
        storedBlock, nonExcess);
    return;
  }

  BlockPlacementPolicy placementPolicy = placementPolicies.getPolicy(STRIPED);
  // For each duplicated index, delete replicas until only one is left.
  for (int targetIndex = duplicated.nextSetBit(0); targetIndex >= 0;
       targetIndex = duplicated.nextSetBit(targetIndex + 1)) {
    List<DatanodeStorageInfo> candidates = new ArrayList<>();
    for (DatanodeStorageInfo storage : nonExcess) {
      int index = storage2index.get(storage);
      if (index == targetIndex) {
        candidates.add(storage);
      }
    }
    if (candidates.size() > 1) {
      List<DatanodeStorageInfo> replicasToDelete =
          placementPolicy.chooseReplicasToDelete(nonExcess, candidates,
              (short) 1, excessTypes, null, null);
      for (DatanodeStorageInfo chosen : replicasToDelete) {
        processChosenExcessRedundancy(nonExcess, chosen, storedBlock);
        candidates.remove(chosen);
      }
    }
    duplicated.clear(targetIndex);
  }
}
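The found/duplicated pair of BitSets is the core bookkeeping trick: the first sighting of an index only sets found, while any repeat also sets duplicated, so iterating duplicated.nextSetBit(...) visits exactly the over-replicated indices. A self-contained sketch with made-up sample indices:

import java.util.BitSet;

// Standalone sketch of the duplicate-index detection used above. The
// sample indices are invented for illustration; in the real method they
// come from sblk.getStorageBlockIndex(storage) for each reported storage.
public class DuplicateIndexSketch {
  public static void main(String[] args) {
    int groupSize = 9; // e.g. RS(6,3): 9 internal blocks per block group
    int[] reportedIndices = {0, 1, 2, 2, 3, 5, 5, 5, 8};
    BitSet found = new BitSet(groupSize);
    BitSet duplicated = new BitSet(groupSize);
    for (int index : reportedIndices) {
      if (found.get(index)) {
        duplicated.set(index); // seen before: this index has excess copies
      }
      found.set(index);
    }
    // found.cardinality() = number of distinct internal blocks present.
    System.out.println("distinct indices: " + found.cardinality());
    for (int i = duplicated.nextSetBit(0); i >= 0;
         i = duplicated.nextSetBit(i + 1)) {
      System.out.println("index with excess copies: " + i);
    }
  }
}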