Use of org.apache.hadoop.fs.BlockLocation in project hadoop by apache.
The class TestReplication, method checkFile.
/* Check that at least two replicas of a block are on the same rack
 * and at least two are on different racks. */
private void checkFile(FileSystem fileSys, Path name, int repl) throws IOException {
  Configuration conf = fileSys.getConf();
  ClientProtocol namenode = NameNodeProxies.createProxy(conf, fileSys.getUri(),
      ClientProtocol.class).getProxy();
  waitForBlockReplication(name.toString(), namenode, Math.min(numDatanodes, repl), -1);
  LocatedBlocks locations = namenode.getBlockLocations(name.toString(), 0, Long.MAX_VALUE);
  FileStatus stat = fileSys.getFileStatus(name);
  BlockLocation[] blockLocations = fileSys.getFileBlockLocations(stat, 0L, Long.MAX_VALUE);
  // verify that rack locations match
  assertTrue(blockLocations.length == locations.locatedBlockCount());
  for (int i = 0; i < blockLocations.length; i++) {
    LocatedBlock blk = locations.get(i);
    DatanodeInfo[] datanodes = blk.getLocations();
    String[] topologyPaths = blockLocations[i].getTopologyPaths();
    assertTrue(topologyPaths.length == datanodes.length);
    for (int j = 0; j < topologyPaths.length; j++) {
      boolean found = false;
      for (int k = 0; k < racks.length; k++) {
        if (topologyPaths[j].startsWith(racks[k])) {
          found = true;
          break;
        }
      }
      assertTrue(found);
    }
  }
  boolean isOnSameRack = true, isNotOnSameRack = true;
  for (LocatedBlock blk : locations.getLocatedBlocks()) {
    DatanodeInfo[] datanodes = blk.getLocations();
    if (datanodes.length <= 1)
      break;
    if (datanodes.length == 2) {
      isNotOnSameRack = !(datanodes[0].getNetworkLocation().equals(datanodes[1].getNetworkLocation()));
      break;
    }
    isOnSameRack = false;
    isNotOnSameRack = false;
    for (int i = 0; i < datanodes.length - 1; i++) {
      LOG.info("datanode " + i + ": " + datanodes[i]);
      boolean onRack = false;
      for (int j = i + 1; j < datanodes.length; j++) {
        if (datanodes[i].getNetworkLocation().equals(datanodes[j].getNetworkLocation())) {
          onRack = true;
        }
      }
      if (onRack) {
        isOnSameRack = true;
      }
      if (!onRack) {
        isNotOnSameRack = true;
      }
      if (isOnSameRack && isNotOnSameRack)
        break;
    }
    if (!isOnSameRack || !isNotOnSameRack)
      break;
  }
  assertTrue(isOnSameRack);
  assertTrue(isNotOnSameRack);
}
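Outside a test, the same BlockLocation metadata can be printed directly against a live cluster. The following is a minimal sketch, not part of the Hadoop test suite: the class name ShowTopology and the path /user/demo/data.txt are hypothetical, and it assumes an HDFS configuration is resolvable on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ShowTopology {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical path; substitute any existing file.
    Path path = new Path("/user/demo/data.txt");
    FileStatus stat = fs.getFileStatus(path);
    // getFileBlockLocations returns one BlockLocation per block in the range.
    for (BlockLocation loc : fs.getFileBlockLocations(stat, 0L, stat.getLen())) {
      // Topology paths have the form /rack/host, one entry per replica,
      // which is why the test above can match them against its rack prefixes.
      for (String topologyPath : loc.getTopologyPaths()) {
        System.out.println("block@" + loc.getOffset() + " -> " + topologyPath);
      }
    }
    fs.close();
  }
}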
Use of org.apache.hadoop.fs.BlockLocation in project hadoop by apache.
The class TestSetrepIncreasing, method setrep.
static void setrep(int fromREP, int toREP, boolean simulatedStorage) throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "" + fromREP);
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(10).build();
  FileSystem fs = cluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(), fs instanceof DistributedFileSystem);
  try {
    Path root = TestDFSShell.mkdir(fs, new Path("/test/setrep" + fromREP + "-" + toREP));
    Path f = TestDFSShell.writeFile(fs, new Path(root, "foo"));
    // Verify that setrep changes the replication factor
    {
      String[] args = { "-setrep", "-w", "" + toREP, "" + f };
      FsShell shell = new FsShell();
      shell.setConf(conf);
      try {
        assertEquals(0, shell.run(args));
      } catch (Exception e) {
        assertTrue("-setrep " + e, false);
      }
    }
    // get fs again since the old one may be closed
    fs = cluster.getFileSystem();
    FileStatus file = fs.getFileStatus(f);
    long len = file.getLen();
    for (BlockLocation locations : fs.getFileBlockLocations(file, 0, len)) {
      assertTrue(locations.getHosts().length == toREP);
    }
    TestDFSShell.show("done setrep waiting: " + root);
  } finally {
    try {
      fs.close();
    } catch (Exception e) {
      // ignore failures on close
    }
    cluster.shutdown();
  }
}
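The FsShell invocation above is the programmatic equivalent of the setrep shell command; a hedged usage example (the path is hypothetical):

  hdfs dfs -setrep -w 3 /user/demo/data.txt

The -w flag blocks until every block reaches the target replication, which is why the test can assert locations.getHosts().length == toREP immediately after shell.run(args) returns 0.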
Use of org.apache.hadoop.fs.BlockLocation in project hadoop by apache.
The class TestSmallBlock, method checkFile.
private void checkFile(DistributedFileSystem fileSys, Path name) throws IOException {
  BlockLocation[] locations = fileSys.getFileBlockLocations(fileSys.getFileStatus(name), 0, fileSize);
  assertEquals("Number of blocks", fileSize, locations.length);
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    LocatedBlocks lbs = fileSys.getClient().getLocatedBlocks(name.toString(), 0, fileSize);
    DFSTestUtil.fillExpectedBuf(lbs, expected);
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // Sanity check: read the file back and compare against the expected bytes
  byte[] actual = new byte[fileSize];
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  stm.close();
}
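The assertion that the number of BlockLocations equals fileSize only holds because TestSmallBlock writes the file with a one-byte block size, so each byte becomes its own block. A sketch of that setup, assumed from the surrounding test class rather than shown above:

  Configuration conf = new HdfsConfiguration();
  // One byte per checksum chunk and per block: a fileSize-byte file then
  // occupies fileSize blocks, yielding one BlockLocation per byte.
  conf.set(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, "1");
  conf.set(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, "1");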
Use of org.apache.hadoop.fs.BlockLocation in project hadoop by apache.
The class TestWebHDFS, method testWebHdfsGetBlockLocationsWithStorageType.
@Test
public void testWebHdfsGetBlockLocationsWithStorageType() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  final int OFFSET = 42;
  final int LENGTH = 512;
  final Path PATH = new Path("/foo");
  byte[] CONTENTS = new byte[1024];
  RANDOM.nextBytes(CONTENTS);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    final WebHdfsFileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
    try (OutputStream os = fs.create(PATH)) {
      os.write(CONTENTS);
    }
    BlockLocation[] locations = fs.getFileBlockLocations(PATH, OFFSET, LENGTH);
    for (BlockLocation location : locations) {
      StorageType[] storageTypes = location.getStorageTypes();
      Assert.assertTrue(storageTypes != null && storageTypes.length > 0 && storageTypes[0] == StorageType.DISK);
    }
    // Query the WebHDFS REST API to get block locations
    InetSocketAddress addr = cluster.getNameNode().getHttpAddress();
    // Case 1: URL without length or offset parameters
    URL url1 = new URL("http", addr.getHostString(), addr.getPort(),
        WebHdfsFileSystem.PATH_PREFIX + "/foo?op=GETFILEBLOCKLOCATIONS");
    LOG.info("Sending GETFILEBLOCKLOCATIONS request " + url1);
    String response1 = getResponse(url1, "GET");
    LOG.info("The output of GETFILEBLOCKLOCATIONS request: " + response1);
    // Parse the BlockLocation array from the JSON output using an object mapper
    BlockLocation[] locationArray1 = toBlockLocationArray(response1);
    // Verify that the REST result matches the file system API
    verifyEquals(locations, locationArray1);
    // Case 2: URL contains both length and offset parameters
    URL url2 = new URL("http", addr.getHostString(), addr.getPort(),
        WebHdfsFileSystem.PATH_PREFIX + "/foo?op=GETFILEBLOCKLOCATIONS" + "&length=" + LENGTH + "&offset=" + OFFSET);
    LOG.info("Sending GETFILEBLOCKLOCATIONS request " + url2);
    String response2 = getResponse(url2, "GET");
    LOG.info("The output of GETFILEBLOCKLOCATIONS request: " + response2);
    BlockLocation[] locationArray2 = toBlockLocationArray(response2);
    verifyEquals(locations, locationArray2);
    // Case 3: URL contains a length parameter but no offset parameter
    URL url3 = new URL("http", addr.getHostString(), addr.getPort(),
        WebHdfsFileSystem.PATH_PREFIX + "/foo?op=GETFILEBLOCKLOCATIONS" + "&length=" + LENGTH);
    LOG.info("Sending GETFILEBLOCKLOCATIONS request " + url3);
    String response3 = getResponse(url3, "GET");
    LOG.info("The output of GETFILEBLOCKLOCATIONS request: " + response3);
    BlockLocation[] locationArray3 = toBlockLocationArray(response3);
    verifyEquals(locations, locationArray3);
    // Case 4: URL contains an offset parameter but no length parameter
    URL url4 = new URL("http", addr.getHostString(), addr.getPort(),
        WebHdfsFileSystem.PATH_PREFIX + "/foo?op=GETFILEBLOCKLOCATIONS" + "&offset=" + OFFSET);
    LOG.info("Sending GETFILEBLOCKLOCATIONS request " + url4);
    String response4 = getResponse(url4, "GET");
    LOG.info("The output of GETFILEBLOCKLOCATIONS request: " + response4);
    BlockLocation[] locationArray4 = toBlockLocationArray(response4);
    verifyEquals(locations, locationArray4);
    // Case 5: URL specifies an offset beyond the file length
    URL url5 = new URL("http", addr.getHostString(), addr.getPort(),
        WebHdfsFileSystem.PATH_PREFIX + "/foo?op=GETFILEBLOCKLOCATIONS" + "&offset=1200");
    LOG.info("Sending GETFILEBLOCKLOCATIONS request " + url5);
    String response5 = getResponse(url5, "GET");
    LOG.info("The output of GETFILEBLOCKLOCATIONS request: " + response5);
    BlockLocation[] locationArray5 = toBlockLocationArray(response5);
    // Expect an empty array of BlockLocation
    verifyEquals(new BlockLocation[] {}, locationArray5);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
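The same endpoint can be exercised with any HTTP client. A hedged example, assuming the NameNode HTTP address is localhost:9870 (the Hadoop 3.x default; WebHdfsFileSystem.PATH_PREFIX resolves to /webhdfs/v1):

  curl -i "http://localhost:9870/webhdfs/v1/foo?op=GETFILEBLOCKLOCATIONS&offset=42&length=512"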
Use of org.apache.hadoop.fs.BlockLocation in project hadoop by apache.
The class TestWebHDFS, method toBlockLocationArray.
private BlockLocation[] toBlockLocationArray(String json) throws IOException {
  ObjectMapper mapper = new ObjectMapper();
  MapType subType = mapper.getTypeFactory().constructMapType(Map.class, String.class, BlockLocation[].class);
  MapType rootType = mapper.getTypeFactory().constructMapType(Map.class,
      mapper.constructType(String.class), mapper.constructType(subType));
  Map<String, Map<String, BlockLocation[]>> jsonMap = mapper.readValue(json, rootType);
  Map<String, BlockLocation[]> locationMap = jsonMap.get("BlockLocations");
  BlockLocation[] locationArray = locationMap.get(BlockLocation.class.getSimpleName());
  return locationArray;
}
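The two nested MapTypes mirror the JSON layout of the response. Based on the keys looked up above ("BlockLocations" and the simple class name "BlockLocation"), the body presumably has the following shape, with the per-replica fields elided:

  {
    "BlockLocations": {
      "BlockLocation": [
        { "offset": 0, "length": 1024, "hosts": [...], "topologyPaths": [...] }
      ]
    }
  }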