Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.
From the class TestApplyingStoragePolicy, the method testSetAndGetStoragePolicy:
@Test
public void testSetAndGetStoragePolicy() throws IOException {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  final Path fooz = new Path("/fooz");
  DFSTestUtil.createFile(fs, bar, SIZE, REPL, 0);
  final BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
  final BlockStoragePolicy warm = suite.getPolicy("WARM");
  final BlockStoragePolicy cold = suite.getPolicy("COLD");
  final BlockStoragePolicy hot = suite.getPolicy("HOT");
  // Paths start out with the default policy, HOT; querying a nonexistent
  // path should raise FileNotFoundException.
  assertEquals(hot, fs.getStoragePolicy(foo));
  assertEquals(hot, fs.getStoragePolicy(bar));
  try {
    fs.getStoragePolicy(fooz);
  } catch (Exception e) {
    assertTrue(e instanceof FileNotFoundException);
  }
  /*
   * test: set storage policy
   */
  fs.setStoragePolicy(foo, warm.getName());
  fs.setStoragePolicy(bar, cold.getName());
  try {
    fs.setStoragePolicy(fooz, warm.getName());
  } catch (Exception e) {
    assertTrue(e instanceof FileNotFoundException);
  }
  /*
   * test: get storage policy after set
   */
  assertEquals(warm, fs.getStoragePolicy(foo));
  assertEquals(cold, fs.getStoragePolicy(bar));
  try {
    fs.getStoragePolicy(fooz);
  } catch (Exception e) {
    assertTrue(e instanceof FileNotFoundException);
  }
}
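The WARM, COLD, and HOT policies requested from the suite above are entries in HDFS's built-in policy table. As a quick illustration of that table, here is a minimal standalone sketch (the class name ListDefaultPolicies is made up for this example; it needs only the hadoop-hdfs jars on the classpath and no running cluster) that prints the id, name, and storage types of each policy the test uses:

import java.util.Arrays;

import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

public class ListDefaultPolicies {
  public static void main(String[] args) {
    BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
    for (String name : new String[] { "HOT", "WARM", "COLD" }) {
      BlockStoragePolicy policy = suite.getPolicy(name);
      // Prints e.g. "7 HOT -> [DISK]", "5 WARM -> [DISK, ARCHIVE]",
      // "2 COLD -> [ARCHIVE]".
      System.out.println(policy.getId() + " " + policy.getName() + " -> "
          + Arrays.toString(policy.getStorageTypes()));
    }
  }
}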
Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.
From the class TestReplicationPolicyWithUpgradeDomain, the method testChooseReplicasToDelete:
/**
 * Verify that the correct replica is chosen to satisfy both the rack and
 * upgrade domain policies.
 * @throws Exception
 */
@Test
public void testChooseReplicasToDelete() throws Exception {
  Collection<DatanodeStorageInfo> nonExcess = new ArrayList<>();
  nonExcess.add(storages[0]);
  nonExcess.add(storages[1]);
  nonExcess.add(storages[2]);
  nonExcess.add(storages[3]);
  List<DatanodeStorageInfo> excessReplicas;
  BlockStoragePolicySuite policySuite = BlockStoragePolicySuite.createDefaultSuite();
  BlockStoragePolicy storagePolicy = policySuite.getDefaultPolicy();
  // delete hint accepted.
  DatanodeDescriptor delHintNode = storages[0].getDatanodeDescriptor();
  List<StorageType> excessTypes = storagePolicy.chooseExcess((short) 3,
      DatanodeStorageInfo.toStorageTypes(nonExcess));
  excessReplicas = replicator.chooseReplicasToDelete(nonExcess, nonExcess, 3,
      excessTypes, storages[3].getDatanodeDescriptor(), delHintNode);
  assertEquals(1, excessReplicas.size());
  assertTrue(excessReplicas.contains(storages[0]));
  // delete hint rejected: deleting storages[1] would leave only two
  // upgrade domains.
  delHintNode = storages[1].getDatanodeDescriptor();
  excessTypes = storagePolicy.chooseExcess((short) 3,
      DatanodeStorageInfo.toStorageTypes(nonExcess));
  excessReplicas = replicator.chooseReplicasToDelete(nonExcess, nonExcess, 3,
      excessTypes, storages[3].getDatanodeDescriptor(), delHintNode);
  assertEquals(1, excessReplicas.size());
  assertTrue(excessReplicas.contains(storages[0]));
  // no delete hint, case 1
  nonExcess.clear();
  nonExcess.add(storages[0]);
  nonExcess.add(storages[1]);
  nonExcess.add(storages[4]);
  nonExcess.add(storages[8]);
  excessTypes = storagePolicy.chooseExcess((short) 3,
      DatanodeStorageInfo.toStorageTypes(nonExcess));
  excessReplicas = replicator.chooseReplicasToDelete(nonExcess, nonExcess, 3,
      excessTypes, storages[8].getDatanodeDescriptor(), null);
  assertEquals(1, excessReplicas.size());
  assertTrue(excessReplicas.contains(storages[1]));
  // no delete hint, case 2
  nonExcess.clear();
  nonExcess.add(storages[0]);
  nonExcess.add(storages[1]);
  nonExcess.add(storages[4]);
  nonExcess.add(storages[5]);
  excessTypes = storagePolicy.chooseExcess((short) 3,
      DatanodeStorageInfo.toStorageTypes(nonExcess));
  excessReplicas = replicator.chooseReplicasToDelete(nonExcess, nonExcess, 3,
      excessTypes, storages[8].getDatanodeDescriptor(), null);
  assertEquals(1, excessReplicas.size());
  assertTrue(excessReplicas.contains(storages[1])
      || excessReplicas.contains(storages[4]));
  // No delete hint, different excess type deletion.
  nonExcess.clear();
  nonExcess.add(storages[0]);
  nonExcess.add(storages[1]);
  nonExcess.add(storages[2]);
  nonExcess.add(storages[3]);
  DatanodeStorageInfo excessStorage = DFSTestUtil.createDatanodeStorageInfo(
      "Storage-excess-ID", "localhost", delHintNode.getNetworkLocation(),
      "foo.com", StorageType.ARCHIVE, delHintNode.getUpgradeDomain());
  nonExcess.add(excessStorage);
  excessTypes = storagePolicy.chooseExcess((short) 3,
      DatanodeStorageInfo.toStorageTypes(nonExcess));
  excessReplicas = replicator.chooseReplicasToDelete(nonExcess, nonExcess, 3,
      excessTypes, storages[3].getDatanodeDescriptor(), null);
  assertEquals(2, excessReplicas.size());
  assertTrue(excessReplicas.contains(storages[0]));
  assertTrue(excessReplicas.contains(excessStorage));
  // Test SSD-related deletion. Uses different rack settings but is otherwise
  // similar to {@link TestReplicationPolicy#testChooseReplicasToDelete}.
  // The block was initially created on excessSSD (rack r1, UD 4),
  // storages[7] (rack r3, UD 2) and storages[8] (rack r3, UD 3) with the
  // ONESSD_STORAGE_POLICY_NAME storage policy. Replication factor = 3.
  // Right after the balancer moves the block from storages[7] to
  // storages[3] (rack r2, UD 1), the application changes the storage policy
  // from ONESSD_STORAGE_POLICY_NAME to HOT_STORAGE_POLICY_NAME. In this
  // case, we should be able to delete excessSSD since the remaining
  // storages ({storages[3]}, {storages[7], storages[8]})
  // are on different racks (r2, r3) and different UDs (1, 2, 3).
  DatanodeStorageInfo excessSSD = DFSTestUtil.createDatanodeStorageInfo(
      "Storage-excess-SSD-ID", "localhost",
      storages[0].getDatanodeDescriptor().getNetworkLocation(), "foo.com",
      StorageType.SSD, null);
  DatanodeStorageInfo[] ssds = { excessSSD };
  DatanodeDescriptor[] ssdNodes = DFSTestUtil.toDatanodeDescriptor(ssds);
  ssdNodes[0].setUpgradeDomain(Integer.toString(4));
  nonExcess.clear();
  nonExcess.add(excessSSD);
  nonExcess.add(storages[3]);
  nonExcess.add(storages[7]);
  nonExcess.add(storages[8]);
  excessTypes = storagePolicy.chooseExcess((short) 3,
      DatanodeStorageInfo.toStorageTypes(nonExcess));
  excessReplicas = replicator.chooseReplicasToDelete(nonExcess, nonExcess, 3,
      excessTypes, storages[3].getDatanodeDescriptor(),
      storages[7].getDatanodeDescriptor());
  assertEquals(1, excessReplicas.size());
  assertTrue(excessReplicas.contains(excessSSD));
}
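Each chooseExcess call above is the step that decides which storage types are surplus for the given replication factor; chooseReplicasToDelete then picks concrete replicas of those types while honoring rack and upgrade domain constraints. Here is a minimal sketch of the chooseExcess step in isolation (the class name ChooseExcessDemo is made up for this example; no cluster is required):

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

public class ChooseExcessDemo {
  public static void main(String[] args) {
    // The default policy is HOT, which wants every replica on DISK.
    BlockStoragePolicy hot = BlockStoragePolicySuite.createDefaultSuite().getDefaultPolicy();
    // Four replicas exist but the replication factor is 3: three replicas
    // are on DISK and one is on SSD.
    List<StorageType> chosen = Arrays.asList(
        StorageType.DISK, StorageType.DISK, StorageType.DISK, StorageType.SSD);
    // chooseExcess subtracts the types HOT wants for 3 replicas
    // ([DISK, DISK, DISK]) from the types in use; the leftover SSD is
    // reported as the excess type to delete from.
    System.out.println(hot.chooseExcess((short) 3, chosen)); // prints [SSD]
  }
}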
Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hbase by apache.
From the class HFileSystem, the method getStoragePolicyForOldHDFSVersion:
/**
 * Before Hadoop 2.8.0, the FileSystem interface had no getStoragePolicy
 * method, so we need to stay compatible with those older versions. See
 * HADOOP-12161 for more details.
 * @param path Path to get the storage policy for
 * @return the storage policy name, or null if it cannot be determined
 */
private String getStoragePolicyForOldHDFSVersion(Path path) {
  try {
    if (this.fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) this.fs;
      HdfsFileStatus status = dfs.getClient().getFileInfo(path.toUri().getPath());
      if (null != status) {
        if (unspecifiedStoragePolicyId < 0) {
          // Read the unspecified-id field through reflection to avoid a
          // compilation error: in later versions,
          // BlockStoragePolicySuite#ID_UNSPECIFIED moved to
          // HdfsConstants#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED.
          Field idUnspecified = BlockStoragePolicySuite.class.getField("ID_UNSPECIFIED");
          unspecifiedStoragePolicyId = idUnspecified.getByte(BlockStoragePolicySuite.class);
        }
        byte storagePolicyId = status.getStoragePolicy();
        if (storagePolicyId != unspecifiedStoragePolicyId) {
          BlockStoragePolicy[] policies = dfs.getStoragePolicies();
          for (BlockStoragePolicy policy : policies) {
            if (policy.getId() == storagePolicyId) {
              return policy.getName();
            }
          }
        }
      }
    }
  } catch (Throwable e) {
    LOG.warn("failed to get block storage policy of [" + path + "]", e);
  }
  return null;
}
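On Hadoop 2.8.0 and later this reflection dance is unnecessary: HADOOP-12161 added getStoragePolicy to the FileSystem interface itself, returning a BlockStoragePolicySpi. A minimal sketch of the modern path (the class and method names here are made up for this example):

import java.io.IOException;

import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class StoragePolicyHelper {
  /** Returns the storage policy name for the path, or null if unavailable. */
  static String getStoragePolicyName(FileSystem fs, Path path) {
    try {
      BlockStoragePolicySpi policy = fs.getStoragePolicy(path);
      return policy == null ? null : policy.getName();
    } catch (IOException | UnsupportedOperationException e) {
      // The base FileSystem implementation throws
      // UnsupportedOperationException when the underlying scheme does not
      // support storage policies.
      return null;
    }
  }
}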