Use of org.apache.hadoop.fs.BlockLocation in project hadoop by apache.
The class TestDecommissionWithStriped, method getDecommissionDatanode.
private List<DatanodeInfo> getDecommissionDatanode(DistributedFileSystem dfs, Path ecFile, int writeBytes, int decomNodeCount) throws IOException {
  ArrayList<DatanodeInfo> decommissionedNodes = new ArrayList<>();
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
  BlockLocation[] fileBlockLocations = dfs.getFileBlockLocations(ecFile, 0, writeBytes);
  for (String dnName : fileBlockLocations[0].getNames()) {
    for (DatanodeInfo dn : info) {
      if (dnName.equals(dn.getXferAddr())) {
        decommissionedNodes.add(dn);
      }
      if (decommissionedNodes.size() >= decomNodeCount) {
        return decommissionedNodes;
      }
    }
  }
  return decommissionedNodes;
}
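The method above matches each replica's transfer address from BlockLocation.getNames() (host:port) against DatanodeInfo.getXferAddr() from a live-datanode report. The following is a minimal standalone sketch of the BlockLocation side of that lookup; the path is a placeholder and the snippet assumes an already-configured FileSystem, so treat it as an illustration rather than the test's own code.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLocationNamesSketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical path; in the test this would be the erasure-coded file under test.
    Path file = new Path("/tmp/example-file");
    FileSystem fs = FileSystem.get(new Configuration());
    FileStatus status = fs.getFileStatus(file);
    // Ask for the locations of every block covering the whole file.
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, status.getLen());
    for (BlockLocation loc : locations) {
      // getNames() returns host:port transfer addresses; getHosts() returns just the hostnames.
      System.out.println("offset=" + loc.getOffset()
          + " names=" + String.join(",", loc.getNames())
          + " hosts=" + String.join(",", loc.getHosts()));
    }
  }
}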
Use of org.apache.hadoop.fs.BlockLocation in project hadoop by apache.
The class TestSaslDataTransfer, method doTest.
/**
* Tests DataTransferProtocol with the given client configuration.
*
* @param conf client configuration
* @throws IOException if there is an I/O error
*/
private void doTest(HdfsConfiguration conf) throws IOException {
  fs = FileSystem.get(cluster.getURI(), conf);
  FileSystemTestHelper.createFile(fs, PATH, NUM_BLOCKS, BLOCK_SIZE);
  assertArrayEquals(FileSystemTestHelper.getFileData(NUM_BLOCKS, BLOCK_SIZE), DFSTestUtil.readFile(fs, PATH).getBytes("UTF-8"));
  BlockLocation[] blockLocations = fs.getFileBlockLocations(PATH, 0, Long.MAX_VALUE);
  assertNotNull(blockLocations);
  assertEquals(NUM_BLOCKS, blockLocations.length);
  for (BlockLocation blockLocation : blockLocations) {
    assertNotNull(blockLocation.getHosts());
    assertEquals(3, blockLocation.getHosts().length);
  }
}
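The loop at the end expects each block to report three hosts, one per replica. A small helper in the same spirit, offered only as a sketch (the class name and the minHosts parameter are invented for illustration), could factor that check out for reuse:

import java.io.IOException;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class BlockReplicationCheck {
  private BlockReplicationCheck() {}

  /** Returns true if every block of the file reports at least minHosts datanode hosts. */
  public static boolean everyBlockHasHosts(FileSystem fs, Path path, int minHosts)
      throws IOException {
    FileStatus status = fs.getFileStatus(path);
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, status.getLen());
    for (BlockLocation location : locations) {
      if (location.getHosts().length < minHosts) {
        return false;
      }
    }
    // An empty location array means no blocks were found, which also fails the check.
    return locations.length > 0;
  }
}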
Use of org.apache.hadoop.fs.BlockLocation in project hadoop by apache.
The class TestHostsFiles, method testHostsExcludeInUI.
@Test
public void testHostsExcludeInUI() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  final Path filePath = new Path("/testFile");
  HostsFileWriter hostsFileWriter = new HostsFileWriter();
  hostsFileWriter.initialize(conf, "temp/decommission");
  // Two blocks and four racks
  String[] racks = { "/rack1", "/rack1", "/rack2", "/rack2" };
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();
  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    // Decommission one of the hosts with the block, this should cause
    // the block to get replicated to another host on the same rack,
    // otherwise the rack policy is violated.
    BlockLocation[] locs = fs.getFileBlockLocations(fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    String name = locs[0].getNames()[0];
    LOG.info("adding '" + name + "' to decommission");
    hostsFileWriter.initExcludeHost(name);
    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
    DFSTestUtil.waitForDecommission(fs, name);
    // Check the block still has sufficient # replicas across racks
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    String nodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
    assertTrue("Live nodes should contain the decommissioned node", nodes.contains("Decommissioned"));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    hostsFileWriter.cleanup();
  }
}
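The final assertion reads the NameNode's NameNodeInfo MBean through the platform MBeanServer. A minimal sketch of that JMX lookup, usable from any JVM that hosts a NameNode (the bean only exists while a NameNode is running, hence the isRegistered guard; the class name is illustrative):

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class NameNodeJmxSketch {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    if (!mbs.isRegistered(mxbeanName)) {
      System.out.println("No in-process NameNode is registered; nothing to report.");
      return;
    }
    // LiveNodes is a JSON string describing the live datanodes; the test above only
    // checks it for the substring "Decommissioned" rather than parsing it.
    String liveNodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
    System.out.println(liveNodes);
  }
}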
Use of org.apache.hadoop.fs.BlockLocation in project lucene-solr by apache.
The class HdfsLocalityReporter, method initializeMetrics.
/**
* Provide statistics on HDFS block locality, both in terms of bytes and block counts.
*/
@Override
public void initializeMetrics(SolrMetricManager manager, String registryName, String scope) {
  registry = manager.registry(registryName);
  MetricsMap metricsMap = new MetricsMap((detailed, map) -> {
    long totalBytes = 0;
    long localBytes = 0;
    int totalCount = 0;
    int localCount = 0;
    for (Iterator<HdfsDirectory> iterator = cache.keySet().iterator(); iterator.hasNext(); ) {
      HdfsDirectory hdfsDirectory = iterator.next();
      if (hdfsDirectory.isClosed()) {
        iterator.remove();
      } else {
        try {
          refreshDirectory(hdfsDirectory);
          Map<FileStatus, BlockLocation[]> blockMap = cache.get(hdfsDirectory);
          // For every block in every file in this directory, count it
          for (BlockLocation[] locations : blockMap.values()) {
            for (BlockLocation bl : locations) {
              totalBytes += bl.getLength();
              totalCount++;
              if (Arrays.asList(bl.getHosts()).contains(hostname)) {
                localBytes += bl.getLength();
                localCount++;
              }
            }
          }
        } catch (IOException e) {
          logger.warn("Could not retrieve locality information for {} due to exception: {}", hdfsDirectory.getHdfsDirPath(), e);
        }
      }
    }
    map.put(LOCALITY_BYTES_TOTAL, totalBytes);
    map.put(LOCALITY_BYTES_LOCAL, localBytes);
    if (localBytes == 0) {
      map.put(LOCALITY_BYTES_RATIO, 0);
    } else {
      map.put(LOCALITY_BYTES_RATIO, localBytes / (double) totalBytes);
    }
    map.put(LOCALITY_BLOCKS_TOTAL, totalCount);
    map.put(LOCALITY_BLOCKS_LOCAL, localCount);
    if (localCount == 0) {
      map.put(LOCALITY_BLOCKS_RATIO, 0);
    } else {
      map.put(LOCALITY_BLOCKS_RATIO, localCount / (double) totalCount);
    }
  });
  manager.registerGauge(this, registryName, metricsMap, true, "hdfsLocality", getCategory().toString(), scope);
}
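The gauge boils down to two running sums: total bytes/blocks across all cached BlockLocation arrays, and the subset whose host list contains the local hostname. A condensed standalone sketch of the bytes calculation (the class and method names are illustrative, not Solr's API):

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.fs.BlockLocation;

public final class LocalitySketch {
  private LocalitySketch() {}

  /** Fraction of bytes in the given blocks that have a replica on localHostname. */
  public static double localBytesRatio(BlockLocation[] blocks, String localHostname)
      throws IOException {
    long totalBytes = 0;
    long localBytes = 0;
    for (BlockLocation block : blocks) {
      totalBytes += block.getLength();
      if (Arrays.asList(block.getHosts()).contains(localHostname)) {
        localBytes += block.getLength();
      }
    }
    // Guard the 0/0 case the same way the reporter does: no local bytes means ratio 0.
    return localBytes == 0 ? 0.0 : localBytes / (double) totalBytes;
  }
}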
Use of org.apache.hadoop.fs.BlockLocation in project lucene-solr by apache.
The class HdfsLocalityReporter, method refreshDirectory.
/**
* Update the cached block locations for the given directory. This includes deleting any files that no longer exist in
* the file system and adding any new files that have shown up.
*
* @param dir
* The directory to refresh
* @throws IOException
* If there is a problem getting info from HDFS
*/
private void refreshDirectory(HdfsDirectory dir) throws IOException {
  Map<FileStatus, BlockLocation[]> directoryCache = cache.get(dir);
  Set<FileStatus> cachedStatuses = directoryCache.keySet();
  FileSystem fs = dir.getFileSystem();
  FileStatus[] statuses = fs.listStatus(dir.getHdfsDirPath());
  List<FileStatus> statusList = Arrays.asList(statuses);
  logger.debug("Updating locality information for: {}", statusList);
  // Keep only the files that still exist
  cachedStatuses.retainAll(statusList);
  // Fill in missing entries in the cache
  for (FileStatus status : statusList) {
    if (!status.isDirectory() && !directoryCache.containsKey(status)) {
      BlockLocation[] locations = fs.getFileBlockLocations(status, 0, status.getLen());
      directoryCache.put(status, locations);
    }
  }
}
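refreshDirectory maintains a per-directory map from FileStatus to its BlockLocation[]. The same refresh can be written against the plain FileSystem API without the HdfsDirectory wrapper; the sketch below takes the directory Path and the cache map directly (the class and method names are assumptions for illustration):

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class DirectoryLocalityCache {
  private DirectoryLocalityCache() {}

  /** Drop vanished files from the cache and fetch block locations for new ones. */
  public static void refresh(FileSystem fs, Path dir,
      Map<FileStatus, BlockLocation[]> cache) throws IOException {
    List<FileStatus> current = Arrays.asList(fs.listStatus(dir));
    // Keep only entries whose files still exist in the directory listing.
    cache.keySet().retainAll(current);
    // Add locations for files not yet cached; directories carry no block data.
    for (FileStatus status : current) {
      if (!status.isDirectory() && !cache.containsKey(status)) {
        cache.put(status, fs.getFileBlockLocations(status, 0, status.getLen()));
      }
    }
  }
}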