Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
The class FileWithSnapshotFeature, method updateQuotaAndCollectBlocks: recomputes the quota charged to a file when a snapshot diff is removed, including per-StorageType space, and records the delta in the ReclaimContext.
public void updateQuotaAndCollectBlocks(INode.ReclaimContext reclaimContext,
    INodeFile file, FileDiff removed) {
  byte storagePolicyID = file.getStoragePolicyID();
  BlockStoragePolicy bsp = null;
  if (storagePolicyID != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
    bsp = reclaimContext.storagePolicySuite().getPolicy(file.getStoragePolicyID());
  }

  QuotaCounts oldCounts;
  if (removed.snapshotINode != null) {
    // The removed diff carries its own snapshot copy of the INode, so the
    // old usage is recomputed block by block, including per-type quota.
    oldCounts = new QuotaCounts.Builder().build();
    BlockInfo[] blocks = file.getBlocks() == null ? new BlockInfo[0]
        : file.getBlocks();
    for (BlockInfo b : blocks) {
      short replication = b.getReplication();
      // An under-construction block is charged at the preferred block size.
      long blockSize = b.isComplete() ? b.getNumBytes()
          : file.getPreferredBlockSize();
      oldCounts.addStorageSpace(blockSize * replication);
      if (bsp != null) {
        List<StorageType> oldTypeChosen = bsp.chooseStorageTypes(replication);
        for (StorageType t : oldTypeChosen) {
          if (t.supportTypeQuota()) {
            oldCounts.addTypeSpace(t, blockSize);
          }
        }
      }
    }
    AclFeature aclFeature = removed.getSnapshotINode().getAclFeature();
    if (aclFeature != null) {
      AclStorage.removeAclFeature(aclFeature);
    }
  } else {
    oldCounts = file.storagespaceConsumed(null);
  }

  getDiffs().combineAndCollectSnapshotBlocks(reclaimContext, file, removed);
  if (file.getBlocks() != null) {
    // The effective replication is the max of the file's preferred
    // replication and the max replication across the remaining diffs.
    short replInDiff = getMaxBlockRepInDiffs(removed);
    short repl = (short) Math.max(file.getPreferredBlockReplication(),
        replInDiff);
    for (BlockInfo b : file.getBlocks()) {
      if (repl != b.getReplication()) {
        reclaimContext.collectedBlocks().addUpdateReplicationFactor(b, repl);
      }
    }
  }

  QuotaCounts current = file.storagespaceConsumed(bsp);
  reclaimContext.quotaDelta().add(oldCounts.subtract(current));
}
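The per-type charge above rests on two calls: BlockStoragePolicy#chooseStorageTypes(short), which expands a replication factor into one StorageType per replica, and StorageType#supportTypeQuota(), which excludes transient storage such as RAM_DISK from quota. A minimal standalone sketch of that expansion, assuming only a stock hadoop-hdfs classpath (the class name TypeSpaceSketch is ours, not Hadoop's):

import java.util.EnumMap;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

public class TypeSpaceSketch {
  public static void main(String[] args) {
    BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
    BlockStoragePolicy hot = suite.getDefaultPolicy(); // HOT: every replica on DISK
    short replication = 3;
    long blockSize = 128L * 1024 * 1024; // one complete 128 MB block
    EnumMap<StorageType, Long> typeSpace = new EnumMap<>(StorageType.class);
    for (StorageType t : hot.chooseStorageTypes(replication)) {
      if (t.supportTypeQuota()) { // skips transient types such as RAM_DISK
        typeSpace.merge(t, blockSize, Long::sum);
      }
    }
    System.out.println(typeSpace); // {DISK=402653184}: 3 x 128 MB charged to DISK
  }
}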
Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
The class TestBlockStoragePolicy, method testChooseSsdOverDisk: verifies that block placement prefers SSD over DISK when the policy lists SSD first.
@Test
public void testChooseSsdOverDisk() throws Exception {
  BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1",
      new StorageType[] { StorageType.SSD, StorageType.DISK, StorageType.ARCHIVE },
      new StorageType[] {}, new StorageType[] {});
  final String[] racks = { "/d1/r1", "/d1/r1", "/d1/r1" };
  final String[] hosts = { "host1", "host2", "host3" };
  final StorageType[] disks = { StorageType.DISK, StorageType.DISK, StorageType.DISK };
  final DatanodeStorageInfo[] diskStorages =
      DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, disks);
  final DatanodeDescriptor[] dataNodes =
      DFSTestUtil.toDatanodeDescriptor(diskStorages);
  // Attach an additional SSD storage to every DISK-only datanode.
  for (int i = 0; i < dataNodes.length; i++) {
    BlockManagerTestUtil.updateStorage(dataNodes[i],
        new DatanodeStorage("ssd" + i, DatanodeStorage.State.NORMAL, StorageType.SSD));
  }
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(conf);
  NameNode namenode = new NameNode(conf);
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  for (DatanodeDescriptor datanode : dataNodes) {
    cluster.add(datanode);
  }
  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
      dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy, null);
  System.out.println(policy.getName() + ": " + Arrays.asList(targets));
  // Only SSD and DISK storages exist, so the ARCHIVE slot goes unfilled:
  // two targets come back, with SSD preferred over DISK.
  Assert.assertEquals(2, targets.length);
  Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
  Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
}
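Why SSD comes back first follows from the policy's type list: chooseStorageTypes assigns the i-th replica the i-th listed type and repeats the last type once the list runs out. A small sketch with the same ad-hoc TEST1 policy (the expected outputs in comments are our reading of that contract):

BlockStoragePolicy p = new BlockStoragePolicy((byte) 9, "TEST1",
    new StorageType[] { StorageType.SSD, StorageType.DISK, StorageType.ARCHIVE },
    new StorageType[] {}, new StorageType[] {});
// The i-th replica is assigned the i-th listed type; once the list is
// exhausted, the last type repeats for the remaining replicas.
System.out.println(p.chooseStorageTypes((short) 3)); // [SSD, DISK, ARCHIVE]
System.out.println(p.chooseStorageTypes((short) 5)); // [SSD, DISK, ARCHIVE, ARCHIVE, ARCHIVE]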
Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
The class TestBlockStoragePolicy, method checkLocatedBlocks: asserts that a file's located blocks report the expected number of blocks, replicas per block, and storage types.
private void checkLocatedBlocks(HdfsLocatedFileStatus status, int blockNum,
    int replicaNum, StorageType... types) {
  List<StorageType> typeList = Lists.newArrayList();
  Collections.addAll(typeList, types);
  LocatedBlocks lbs = status.getBlockLocations();
  Assert.assertEquals(blockNum, lbs.getLocatedBlocks().size());
  for (LocatedBlock lb : lbs.getLocatedBlocks()) {
    Assert.assertEquals(replicaNum, lb.getStorageTypes().length);
    for (StorageType type : lb.getStorageTypes()) {
      // Remove each reported type from the expected multiset; a failed
      // remove means an unexpected replica placement.
      Assert.assertTrue(typeList.remove(type));
    }
  }
  // Every expected type must have been matched exactly once.
  Assert.assertTrue(typeList.isEmpty());
}
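Because the varargs are consumed as one multiset across all blocks, a caller supplies blockNum * replicaNum types in total. A hypothetical invocation (status and the surrounding setup are assumed, not from the test): a two-block file at replication 3 under the built-in WARM policy, which keeps one replica on DISK and the rest on ARCHIVE:

checkLocatedBlocks(status, 2, 3,
    StorageType.DISK, StorageType.ARCHIVE, StorageType.ARCHIVE,
    StorageType.DISK, StorageType.ARCHIVE, StorageType.ARCHIVE);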
Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
The class TestBlockStoragePolicy, method assertStorageType: asserts that a computed storage-type list matches the expected answers, with the last answer covering any remaining replicas.
static void assertStorageType(List<StorageType> computed, short replication,
    StorageType... answers) {
  Assert.assertEquals(replication, computed.size());
  // If fewer answers than replicas are given, the last answer applies
  // to every remaining replica.
  final StorageType last = answers[answers.length - 1];
  for (int i = 0; i < computed.size(); i++) {
    final StorageType expected = i < answers.length ? answers[i] : last;
    Assert.assertEquals(expected, computed.get(i));
  }
}
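Since the last answer is reused for any remaining replicas, a uniform policy needs only one expected type. A sketch of two calls, assuming hot and warm were fetched from BlockStoragePolicySuite.createDefaultSuite() via getPolicy("HOT") and getPolicy("WARM"):

// HOT: all replicas on DISK; a single DISK answer covers all three slots.
assertStorageType(hot.chooseStorageTypes((short) 3), (short) 3,
    StorageType.DISK);
// WARM: first replica on DISK; the trailing ARCHIVE answer covers the rest.
assertStorageType(warm.chooseStorageTypes((short) 3), (short) 3,
    StorageType.DISK, StorageType.ARCHIVE);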
Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
The class TestBlockStoragePolicy, method testGetAllStoragePoliciesFromFs: checks that FileSystem#getAllStoragePolicies returns the same policy set as the default BlockStoragePolicySuite.
/**
 * Verify that {@link FileSystem#getAllStoragePolicies} returns all
 * known storage policies for DFS.
 *
 * @throws IOException
 */
@Test
public void testGetAllStoragePoliciesFromFs() throws IOException {
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(REPLICATION)
      .storageTypes(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE })
      .build();
  try {
    cluster.waitActive();

    // Get policies via {@link FileSystem#getAllStoragePolicies}.
    Set<String> policyNamesSet1 = new HashSet<>();
    for (BlockStoragePolicySpi policy : cluster.getFileSystem().getAllStoragePolicies()) {
      policyNamesSet1.add(policy.getName());
    }

    // Get policies from the default BlockStoragePolicySuite.
    BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
    Set<String> policyNamesSet2 = new HashSet<>();
    for (BlockStoragePolicy policy : suite.getAllPolicies()) {
      policyNamesSet2.add(policy.getName());
    }

    // Ensure that we got the same set of policies in both cases.
    Assert.assertTrue(Sets.difference(policyNamesSet1, policyNamesSet2).isEmpty());
    Assert.assertTrue(Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty());
  } finally {
    cluster.shutdown();
  }
}
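getAllStoragePolicies is the discovery half of the client-side policy API; FileSystem also carries setStoragePolicy and getStoragePolicy. A short companion sketch against the same kind of MiniDFSCluster (the path /cold-data is hypothetical):

FileSystem fs = cluster.getFileSystem();
Path dir = new Path("/cold-data"); // hypothetical path for illustration
fs.mkdirs(dir);
fs.setStoragePolicy(dir, "COLD");  // pin the subtree's replicas to ARCHIVE
BlockStoragePolicySpi assigned = fs.getStoragePolicy(dir);
System.out.println(assigned.getName()); // COLD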