Use of org.apache.hadoop.fs.BlockStoragePolicySpi in project hadoop by apache.
From the class FSOperations, method storagePoliciesToJSON.
@SuppressWarnings("unchecked")
private static JSONObject storagePoliciesToJSON(
    Collection<? extends BlockStoragePolicySpi> storagePolicies) {
  JSONObject json = new JSONObject();
  JSONArray jsonArray = new JSONArray();
  JSONObject policies = new JSONObject();
  if (storagePolicies != null) {
    // Serialize each policy into its JSON map form and collect them.
    for (BlockStoragePolicySpi policy : storagePolicies) {
      JSONObject policyMap = storagePolicyToJSON(policy);
      jsonArray.add(policyMap);
    }
  }
  // Nest the array under the inner and outer keys defined by HttpFSFileSystem.
  policies.put(HttpFSFileSystem.STORAGE_POLICY_JSON, jsonArray);
  json.put(HttpFSFileSystem.STORAGE_POLICIES_JSON, policies);
  return json;
}
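For reference, the result is a two-level wrapper around the policy array. Below is a minimal sketch of that shape, built with json-simple directly; the literal key strings ("BlockStoragePolicies", "BlockStoragePolicy") and the sample id/name fields are illustrative assumptions standing in for the HttpFSFileSystem constants and for whatever storagePolicyToJSON emits:

import org.json.simple.JSONArray;
import org.json.simple.JSONObject;

// Sketch only: the key strings and per-policy fields are assumed, not
// taken from HttpFSFileSystem or storagePolicyToJSON.
@SuppressWarnings("unchecked")
public class StoragePolicyJsonShapeDemo {
  public static void main(String[] args) {
    JSONObject policyMap = new JSONObject();
    policyMap.put("id", 7L);
    policyMap.put("name", "HOT");
    JSONArray jsonArray = new JSONArray();
    jsonArray.add(policyMap);
    JSONObject policies = new JSONObject();
    policies.put("BlockStoragePolicy", jsonArray);
    JSONObject json = new JSONObject();
    json.put("BlockStoragePolicies", policies);
    // Prints, modulo key order:
    // {"BlockStoragePolicies":{"BlockStoragePolicy":[{"id":7,"name":"HOT"}]}}
    System.out.println(json.toJSONString());
  }
}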
Use of org.apache.hadoop.fs.BlockStoragePolicySpi in project hadoop by apache.
From the class TestBlockStoragePolicy, method testGetAllStoragePoliciesFromFs.
/**
 * Verify that {@link FileSystem#getAllStoragePolicies} returns all
 * known storage policies for DFS.
 *
 * @throws IOException
 */
@Test
public void testGetAllStoragePoliciesFromFs() throws IOException {
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(REPLICATION)
      .storageTypes(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE })
      .build();
  try {
    cluster.waitActive();
    // Get policies via {@link FileSystem#getAllStoragePolicies}
    Set<String> policyNamesSet1 = new HashSet<>();
    for (BlockStoragePolicySpi policy : cluster.getFileSystem().getAllStoragePolicies()) {
      policyNamesSet1.add(policy.getName());
    }
    // Get policies from the default BlockStoragePolicySuite.
    BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
    Set<String> policyNamesSet2 = new HashSet<>();
    for (BlockStoragePolicy policy : suite.getAllPolicies()) {
      policyNamesSet2.add(policy.getName());
    }
    // Ensure that we got the same set of policies in both cases.
    Assert.assertTrue(Sets.difference(policyNamesSet1, policyNamesSet2).isEmpty());
    Assert.assertTrue(Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty());
  } finally {
    cluster.shutdown();
  }
}
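Note that the two one-directional Sets.difference checks together assert plain set equality; Assert.assertEquals(policyNamesSet2, policyNamesSet1) would express the same condition in one call. To explore the same default policies without spinning up a MiniDFSCluster, here is a minimal standalone sketch, assuming only hadoop-hdfs on the classpath:

import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

// Enumerates the default suite the test compares against, printing each
// policy's id and name.
public class ListDefaultPolicies {
  public static void main(String[] args) {
    BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
    for (BlockStoragePolicy policy : suite.getAllPolicies()) {
      System.out.println(policy.getId() + "\t" + policy.getName());
    }
  }
}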