Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.
The class BlockManager, method chooseTarget4AdditionalDatanode.
/** Choose target for getting additional datanodes for an existing pipeline. */
public DatanodeStorageInfo[] chooseTarget4AdditionalDatanode(String src,
    int numAdditionalNodes, Node clientnode, List<DatanodeStorageInfo> chosen,
    Set<Node> excludes, long blocksize, byte storagePolicyID,
    BlockType blockType) {
  final BlockStoragePolicy storagePolicy =
      storagePolicySuite.getPolicy(storagePolicyID);
  final BlockPlacementPolicy blockplacement =
      placementPolicies.getPolicy(blockType);
  return blockplacement.chooseTarget(src, numAdditionalNodes, clientnode,
      chosen, true, excludes, blocksize, storagePolicy, null);
}
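Outside the NameNode, the same ID-to-policy resolution can be reproduced against the hard-coded default suite. A minimal sketch, assuming the server-internal factory BlockStoragePolicySuite.createDefaultSuite(); the class name PolicyLookup is illustrative:

import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

public class PolicyLookup {
  public static void main(String[] args) {
    // Resolve a policy the way BlockManager does above, but from the
    // default suite rather than a live NameNode's configuration.
    BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
    BlockStoragePolicy hot = suite.getPolicy("HOT");
    System.out.println(hot.getId() + " -> " + hot.getName());
  }
}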
Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.
The class HttpFSFileSystem, method createStoragePolicies.
private Collection<BlockStoragePolicy> createStoragePolicies(JSONObject map)
    throws IOException {
  JSONArray jsonArray = (JSONArray) map.get(STORAGE_POLICY_JSON);
  BlockStoragePolicy[] policies = new BlockStoragePolicy[jsonArray.size()];
  for (int i = 0; i < jsonArray.size(); i++) {
    policies[i] = createStoragePolicy((JSONObject) jsonArray.get(i));
  }
  return Arrays.asList(policies);
}
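For context, a hedged sketch of the payload shape this method consumes, built with the same json-simple types HttpFS uses. The "BlockStoragePolicy" key stands in for the STORAGE_POLICY_JSON constant and the field values are assumed, not taken from a real response:

import org.json.simple.JSONArray;
import org.json.simple.JSONObject;

@SuppressWarnings("unchecked")
static JSONObject samplePoliciesJson() {
  JSONObject hot = new JSONObject();
  hot.put("id", 7L);      // assumed id of the built-in HOT policy
  hot.put("name", "HOT");
  JSONArray array = new JSONArray();
  array.add(hot);
  JSONObject map = new JSONObject();
  map.put("BlockStoragePolicy", array);  // assumed value of STORAGE_POLICY_JSON
  return map;
}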
Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.
The class FSOperations, method storagePolicyToJSON.
@SuppressWarnings({ "unchecked" })
private static JSONObject storagePolicyToJSON(BlockStoragePolicySpi policy) {
  BlockStoragePolicy p = (BlockStoragePolicy) policy;
  JSONObject policyJson = new JSONObject();
  policyJson.put("id", p.getId());
  policyJson.put("name", p.getName());
  policyJson.put("storageTypes", toJsonArray(p.getStorageTypes()));
  policyJson.put("creationFallbacks", toJsonArray(p.getCreationFallbacks()));
  policyJson.put("replicationFallbacks", toJsonArray(p.getReplicationFallbacks()));
  policyJson.put("copyOnCreateFile", p.isCopyOnCreateFile());
  return policyJson;
}
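To make the output concrete, a fragment printing the same six fields for the built-in HOT policy; the expected values in the comments assume the stock default suite:

import java.util.Arrays;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

BlockStoragePolicy p = BlockStoragePolicySuite.createDefaultSuite().getPolicy("HOT");
System.out.println(p.getId());                                     // 7
System.out.println(p.getName());                                   // HOT
System.out.println(Arrays.toString(p.getStorageTypes()));          // [DISK]
System.out.println(Arrays.toString(p.getCreationFallbacks()));     // []
System.out.println(Arrays.toString(p.getReplicationFallbacks()));  // [ARCHIVE]
System.out.println(p.isCopyOnCreateFile());                        // false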
Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.
The class TestStorageMover, method testMoveSpecificPaths.
/**
 * Run Mover with arguments specifying files and directories.
 */
@Test
public void testMoveSpecificPaths() throws Exception {
  LOG.info("testMoveSpecificPaths");
  final Path foo = new Path("/foo");
  final Path barFile = new Path(foo, "bar");
  final Path foo2 = new Path("/foo2");
  final Path bar2File = new Path(foo2, "bar2");
  Map<Path, BlockStoragePolicy> policyMap = Maps.newHashMap();
  policyMap.put(foo, COLD);
  policyMap.put(foo2, WARM);
  NamespaceScheme nsScheme = new NamespaceScheme(Arrays.asList(foo, foo2),
      Arrays.asList(barFile, bar2File), BLOCK_SIZE, null, policyMap);
  ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF,
      NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
  MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
  test.setupCluster();
  try {
    test.prepareNamespace();
    test.setStoragePolicy();
    Map<URI, List<Path>> map = Mover.Cli.getNameNodePathsToMove(test.conf,
        "-p", "/foo/bar", "/foo2");
    int result = Mover.run(map, test.conf);
    Assert.assertEquals(ExitStatus.SUCCESS.getExitCode(), result);
    // Give the datanodes time to complete the scheduled block moves
    // before verifying replica placement.
    Thread.sleep(5000);
    test.verify(true);
  } finally {
    test.shutdownCluster();
  }
}
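The "-p" arguments here mirror the standalone CLI form hdfs mover -p /foo/bar /foo2. To see why blocks move at all, a hedged sketch of the storage types the COLD and WARM policies request from the default suite for a replication factor of 3 (expected values in comments):

import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
System.out.println(suite.getPolicy("COLD").chooseStorageTypes((short) 3));
// [ARCHIVE, ARCHIVE, ARCHIVE]
System.out.println(suite.getPolicy("WARM").chooseStorageTypes((short) 3));
// [DISK, ARCHIVE, ARCHIVE]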
Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.
The class TestStorageMover, method testMigrateFileToArchival.
/**
 * A normal case for Mover: move a file into archival storage.
 */
@Test
public void testMigrateFileToArchival() throws Exception {
  LOG.info("testMigrateFileToArchival");
  final Path foo = new Path("/foo");
  Map<Path, BlockStoragePolicy> policyMap = Maps.newHashMap();
  policyMap.put(foo, COLD);
  NamespaceScheme nsScheme = new NamespaceScheme(null, Arrays.asList(foo),
      2 * BLOCK_SIZE, null, policyMap);
  ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF,
      NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
  new MigrationTest(clusterScheme, nsScheme).runBasicTest(true);
}
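On a live cluster the same flow is two client-visible steps: tag the path with a policy, then run the tool. A hedged sketch; the policy name "COLD" and the path /foo are taken from the test above, the rest is standard client boilerplate:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

Configuration conf = new Configuration();
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
// Mark /foo COLD, then migrate its replicas to ARCHIVE volumes with
// the standalone tool: hdfs mover -p /foo
dfs.setStoragePolicy(new Path("/foo"), "COLD");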