use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager in project hadoop by apache.
the class TestBlocksScheduledCounter method testScheduledBlocksCounterShouldDecrementOnAbandonBlock.
/**
 * Abandoning a block should decrement the scheduledBlocks count for the datanode.
 */
@Test
public void testScheduledBlocksCounterShouldDecrementOnAbandonBlock()
    throws Exception {
  cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(2)
      .build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  DatanodeManager datanodeManager =
      cluster.getNamesystem().getBlockManager().getDatanodeManager();
  ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
  datanodeManager.fetchDatanodes(dnList, dnList, false);
  for (DatanodeDescriptor descriptor : dnList) {
    assertEquals("Blocks scheduled should be 0 for " + descriptor.getName(), 0,
        descriptor.getBlocksScheduled());
  }
  cluster.getDataNodes().get(0).shutdown();
  // open a file and write a few bytes:
  FSDataOutputStream out =
      fs.create(new Path("/testBlockScheduledCounter"), (short) 2);
  for (int i = 0; i < 1024; i++) {
    out.write(i);
  }
  // flush to make sure a block is allocated.
  out.hflush();
  DatanodeDescriptor abandonedDn = datanodeManager
      .getDatanode(cluster.getDataNodes().get(0).getDatanodeId());
  assertEquals("for the abandoned dn scheduled counts should be 0", 0,
      abandonedDn.getBlocksScheduled());
  for (DatanodeDescriptor descriptor : dnList) {
    if (descriptor.equals(abandonedDn)) {
      continue;
    }
    assertEquals("Blocks scheduled should be 1 for " + descriptor.getName(), 1,
        descriptor.getBlocksScheduled());
  }
  // close the file and the counter should go to zero.
  out.close();
  for (DatanodeDescriptor descriptor : dnList) {
    assertEquals("Blocks scheduled should be 0 for " + descriptor.getName(), 0,
        descriptor.getBlocksScheduled());
  }
}
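Note that this test (and the next one) assigns to cluster and fs as fields of the test class rather than as locals. A minimal sketch of the surrounding fixture, assuming JUnit 4 and a standard MiniDFSCluster teardown (the field and method names here mirror the snippets; the actual fixture in TestBlocksScheduledCounter may differ slightly):

public class TestBlocksScheduledCounter {
  // Shared by the test methods; assigned inside each test, released in tearDown().
  MiniDFSCluster cluster = null;
  FileSystem fs = null;

  @After
  public void tearDown() throws IOException {
    // Close the client first, then shut the mini cluster down.
    if (fs != null) {
      fs.close();
      fs = null;
    }
    if (cluster != null) {
      cluster.shutdown();
      cluster = null;
    }
  }
}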
use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager in project hadoop by apache.
the class TestBlocksScheduledCounter method testBlocksScheduledCounter.
@Test
public void testBlocksScheduledCounter() throws IOException {
  cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  // open a file and write a few bytes:
  FSDataOutputStream out = fs.create(new Path("/testBlockScheduledCounter"));
  for (int i = 0; i < 1024; i++) {
    out.write(i);
  }
  // flush to make sure a block is allocated.
  out.hflush();
  ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
  final DatanodeManager dm = cluster.getNamesystem().getBlockManager()
      .getDatanodeManager();
  dm.fetchDatanodes(dnList, dnList, false);
  DatanodeDescriptor dn = dnList.get(0);
  assertEquals(1, dn.getBlocksScheduled());
  // close the file and the counter should go to zero.
  out.close();
  assertEquals(0, dn.getBlocksScheduled());
}
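Both tests pass the same list for the live and dead arguments of fetchDatanodes, so every registered datanode lands in dnList. When the two sets matter they are usually kept separate; a small illustrative sketch, assuming the same DatanodeManager handle dm as above (the live/dead variable names are hypothetical):

// Split registered datanodes into live and dead lists; the boolean flag
// controls whether decommissioned nodes are filtered out of the results.
ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
dm.fetchDatanodes(live, dead, false);
for (DatanodeDescriptor d : live) {
  System.out.println(d.getName() + " scheduled=" + d.getBlocksScheduled());
}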
use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager in project hadoop by apache.
the class FSNamesystem method datanodeReport.
DatanodeInfo[] datanodeReport(final DatanodeReportType type)
    throws AccessControlException, StandbyException {
  checkSuperuserPrivilege();
  checkOperation(OperationCategory.UNCHECKED);
  readLock();
  try {
    checkOperation(OperationCategory.UNCHECKED);
    final DatanodeManager dm = getBlockManager().getDatanodeManager();
    final List<DatanodeDescriptor> results = dm.getDatanodeListForReport(type);
    DatanodeInfo[] arr = new DatanodeInfo[results.size()];
    for (int i = 0; i < arr.length; i++) {
      arr[i] = new DatanodeInfoBuilder().setFrom(results.get(i)).build();
    }
    return arr;
  } finally {
    readUnlock("datanodeReport");
  }
}
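This is the server-side half of the report call: the DatanodeManager supplies the descriptor list and FSNamesystem converts it to DatanodeInfo objects for the RPC response. From a client, the same information is typically reached through DistributedFileSystem; a short sketch, assuming an already-opened DistributedFileSystem handle named dfs:

// Fetch the live datanode report through the client API and print basic stats.
DatanodeInfo[] liveNodes = dfs.getDataNodeStats(HdfsConstants.DatanodeReportType.LIVE);
for (DatanodeInfo node : liveNodes) {
  System.out.println(node.getHostName() + " capacity=" + node.getCapacity()
      + " remaining=" + node.getRemaining());
}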
use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager in project hadoop by apache.
the class FSNamesystem method getAdditionalDatanode.
/** @see ClientProtocol#getAdditionalDatanode */
LocatedBlock getAdditionalDatanode(String src, long fileId,
    final ExtendedBlock blk, final DatanodeInfo[] existings,
    final String[] storageIDs, final Set<Node> excludes,
    final int numAdditionalNodes, final String clientName) throws IOException {
  //check if the feature is enabled
  dtpReplaceDatanodeOnFailure.checkEnabled();
  Node clientnode = null;
  String clientMachine;
  final long preferredblocksize;
  final byte storagePolicyID;
  final List<DatanodeStorageInfo> chosen;
  final BlockType blockType;
  checkOperation(OperationCategory.READ);
  FSPermissionChecker pc = getPermissionChecker();
  readLock();
  try {
    checkOperation(OperationCategory.READ);
    //check safe mode
    checkNameNodeSafeMode("Cannot add datanode; src=" + src + ", blk=" + blk);
    final INodesInPath iip = dir.resolvePath(pc, src, fileId);
    src = iip.getPath();
    //check lease
    final INodeFile file = checkLease(iip, clientName, fileId);
    clientMachine = file.getFileUnderConstructionFeature().getClientMachine();
    clientnode = blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
    preferredblocksize = file.getPreferredBlockSize();
    storagePolicyID = file.getStoragePolicyID();
    blockType = file.getBlockType();
    //find datanode storages
    final DatanodeManager dm = blockManager.getDatanodeManager();
    chosen = Arrays.asList(dm.getDatanodeStorageInfos(existings, storageIDs,
        "src=%s, fileId=%d, blk=%s, clientName=%s, clientMachine=%s",
        src, fileId, blk, clientName, clientMachine));
  } finally {
    readUnlock("getAdditionalDatanode");
  }
  if (clientnode == null) {
    clientnode = FSDirWriteFileOp.getClientNode(blockManager, clientMachine);
  }
  // choose new datanodes.
  final DatanodeStorageInfo[] targets = blockManager.chooseTarget4AdditionalDatanode(
      src, numAdditionalNodes, clientnode, chosen, excludes,
      preferredblocksize, storagePolicyID, blockType);
  final LocatedBlock lb = BlockManager.newLocatedBlock(blk, targets, -1, false);
  blockManager.setBlockToken(lb, BlockTokenIdentifier.AccessMode.COPY);
  return lb;
}
use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager in project hadoop by apache.
the class TestFsck method testFsckFileNotFound.
/** Test fsck with FileNotFound. */
@Test
public void testFsckFileNotFound() throws Exception {
  // Number of replicas to actually start
  final short numReplicas = 1;
  NameNode namenode = mock(NameNode.class);
  NetworkTopology nettop = mock(NetworkTopology.class);
  Map<String, String[]> pmap = new HashMap<>();
  Writer result = new StringWriter();
  PrintWriter out = new PrintWriter(result, true);
  InetAddress remoteAddress = InetAddress.getLocalHost();
  FSNamesystem fsName = mock(FSNamesystem.class);
  FSDirectory fsd = mock(FSDirectory.class);
  BlockManager blockManager = mock(BlockManager.class);
  DatanodeManager dnManager = mock(DatanodeManager.class);
  INodesInPath iip = mock(INodesInPath.class);
  when(namenode.getNamesystem()).thenReturn(fsName);
  when(fsName.getBlockManager()).thenReturn(blockManager);
  when(fsName.getFSDirectory()).thenReturn(fsd);
  when(fsd.getFSNamesystem()).thenReturn(fsName);
  when(fsd.resolvePath(anyObject(), anyString(), any(DirOp.class))).thenReturn(iip);
  when(blockManager.getDatanodeManager()).thenReturn(dnManager);
  NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
      numReplicas, remoteAddress);
  String pathString = "/tmp/testFile";
  long length = 123L;
  boolean isDir = false;
  int blockReplication = 1;
  long blockSize = 128 * 1024L;
  long modTime = 123123123L;
  long accessTime = 123123120L;
  FsPermission perms = FsPermission.getDefault();
  String owner = "foo";
  String group = "bar";
  byte[] symlink = null;
  byte[] path = DFSUtil.string2Bytes(pathString);
  long fileId = 312321L;
  int numChildren = 1;
  byte storagePolicy = 0;
  HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
      blockSize, modTime, accessTime, perms, owner, group, symlink, path,
      fileId, numChildren, null, storagePolicy, null);
  Result replRes = new ReplicationResult(conf);
  Result ecRes = new ErasureCodingResult(conf);
  try {
    fsck.check(pathString, file, replRes, ecRes);
  } catch (Exception e) {
    fail("Unexpected exception " + e.getMessage());
  }
  assertTrue(replRes.isHealthy());
}
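This test drives NamenodeFsck.check() directly against mocks. End to end, fsck is normally run through the DFSck tool (org.apache.hadoop.hdfs.tools.DFSck) via ToolRunner; a small sketch, assuming conf points at a running cluster:

// Run fsck against a path and capture its textual report.
ByteArrayOutputStream bout = new ByteArrayOutputStream();
PrintStream out = new PrintStream(bout, true);
int errCode = ToolRunner.run(new DFSck(conf, out),
    new String[] {"/", "-files", "-blocks"});
System.out.println("fsck exit code: " + errCode);
System.out.println(bout.toString());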