Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
The class TestPendingReconstruction, method testBlockReceived.
/**
 * Test if DatanodeProtocol#blockReceivedAndDeleted can correctly update the
 * pending reconstruction. Also make sure the blockReceivedAndDeleted call is
 * idempotent to the pending reconstruction.
 */
@Test
public void testBlockReceived() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(DATANODE_COUNT).build();
    cluster.waitActive();
    DistributedFileSystem hdfs = cluster.getFileSystem();
    FSNamesystem fsn = cluster.getNamesystem();
    BlockManager blkManager = fsn.getBlockManager();

    final String file = "/tmp.txt";
    final Path filePath = new Path(file);
    short replFactor = 1;
    DFSTestUtil.createFile(hdfs, filePath, 1024L, replFactor, 0);

    // temporarily stop the heartbeat
    ArrayList<DataNode> datanodes = cluster.getDataNodes();
    for (int i = 0; i < DATANODE_COUNT; i++) {
      DataNodeTestUtils.setHeartbeatsDisabledForTests(datanodes.get(i), true);
    }

    hdfs.setReplication(filePath, (short) DATANODE_COUNT);
    BlockManagerTestUtil.computeAllPendingWork(blkManager);
    assertEquals(1, blkManager.pendingReconstruction.size());
    INodeFile fileNode = fsn.getFSDirectory().getINode4Write(file).asFile();
    BlockInfo[] blocks = fileNode.getBlocks();
    assertEquals(DATANODE_COUNT - 1,
        blkManager.pendingReconstruction.getNumReplicas(blocks[0]));

    LocatedBlock locatedBlock = hdfs.getClient().getLocatedBlocks(file, 0).get(0);
    DatanodeInfo existingDn = (locatedBlock.getLocations())[0];
    int reportDnNum = 0;
    String poolId = cluster.getNamesystem().getBlockPoolId();
    // report to NN
    for (int i = 0; i < DATANODE_COUNT && reportDnNum < 2; i++) {
      if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
        DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(poolId);
        StorageReceivedDeletedBlocks[] report = {
            new StorageReceivedDeletedBlocks(
                new DatanodeStorage("Fake-storage-ID-Ignored"),
                new ReceivedDeletedBlockInfo[] {
                    new ReceivedDeletedBlockInfo(
                        blocks[0], BlockStatus.RECEIVED_BLOCK, "") }) };
        cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
        reportDnNum++;
      }
    }
    // IBRs are async, make sure the NN processes all of them.
    cluster.getNamesystem().getBlockManager().flushBlockOps();
    assertEquals(DATANODE_COUNT - 3,
        blkManager.pendingReconstruction.getNumReplicas(blocks[0]));

    // let the same datanodes report again
    for (int i = 0; i < DATANODE_COUNT && reportDnNum < 2; i++) {
      if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
        DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(poolId);
        StorageReceivedDeletedBlocks[] report = {
            new StorageReceivedDeletedBlocks(
                new DatanodeStorage("Fake-storage-ID-Ignored"),
                new ReceivedDeletedBlockInfo[] {
                    new ReceivedDeletedBlockInfo(
                        blocks[0], BlockStatus.RECEIVED_BLOCK, "") }) };
        cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
        reportDnNum++;
      }
    }
    cluster.getNamesystem().getBlockManager().flushBlockOps();
    assertEquals(DATANODE_COUNT - 3,
        blkManager.pendingReconstruction.getNumReplicas(blocks[0]));

    // re-enable heartbeat for the datanode that has data
    for (int i = 0; i < DATANODE_COUNT; i++) {
      DataNodeTestUtils.setHeartbeatsDisabledForTests(datanodes.get(i), false);
      DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
    }
    Thread.sleep(5000);
    assertEquals(0, blkManager.pendingReconstruction.size());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
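The StorageReceivedDeletedBlocks report is built with identical code in both report loops above. A helper like the one below could factor that construction out; this is only a sketch, not part of the original test, the method name is hypothetical, and it assumes it sits in the same test class so it can reuse the imports already present there.

// Sketch only: mirrors the report construction used twice in the test body above.
private static StorageReceivedDeletedBlocks[] newReceivedReport(Block block) {
  return new StorageReceivedDeletedBlocks[] {
      new StorageReceivedDeletedBlocks(
          new DatanodeStorage("Fake-storage-ID-Ignored"),
          new ReceivedDeletedBlockInfo[] {
              new ReceivedDeletedBlockInfo(block, BlockStatus.RECEIVED_BLOCK, "") }) };
}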
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
The class PBHelperClient, method convertLocatedBlock.
public static LocatedBlockProto convertLocatedBlock(LocatedBlock b) {
  if (b == null)
    return null;
  Builder builder = LocatedBlockProto.newBuilder();
  DatanodeInfo[] locs = b.getLocations();
  List<DatanodeInfo> cachedLocs =
      Lists.newLinkedList(Arrays.asList(b.getCachedLocations()));
  for (int i = 0; i < locs.length; i++) {
    DatanodeInfo loc = locs[i];
    builder.addLocs(i, PBHelperClient.convert(loc));
    boolean locIsCached = cachedLocs.contains(loc);
    builder.addIsCached(locIsCached);
    if (locIsCached) {
      cachedLocs.remove(loc);
    }
  }
  Preconditions.checkArgument(cachedLocs.size() == 0,
      "Found additional cached replica locations that are not in the set of"
          + " storage-backed locations!");
  StorageType[] storageTypes = b.getStorageTypes();
  if (storageTypes != null) {
    for (StorageType storageType : storageTypes) {
      builder.addStorageTypes(convertStorageType(storageType));
    }
  }
  final String[] storageIDs = b.getStorageIDs();
  if (storageIDs != null) {
    builder.addAllStorageIDs(Arrays.asList(storageIDs));
  }
  if (b instanceof LocatedStripedBlock) {
    LocatedStripedBlock sb = (LocatedStripedBlock) b;
    byte[] indices = sb.getBlockIndices();
    builder.setBlockIndices(PBHelperClient.getByteString(indices));
    Token<BlockTokenIdentifier>[] blockTokens = sb.getBlockTokens();
    builder.addAllBlockTokens(convert(blockTokens));
  }
  return builder.setB(PBHelperClient.convert(b.getBlock()))
      .setBlockToken(PBHelperClient.convert(b.getBlockToken()))
      .setCorrupt(b.isCorrupt())
      .setOffset(b.getStartOffset())
      .build();
}
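The location loop above keeps a parallel isCached flag per location and removes each matched entry from cachedLocs, so the final Preconditions check catches any cached replica that is not also a storage-backed location. Below is a minimal standalone sketch of that bookkeeping pattern in plain Java; all names are hypothetical and unrelated to the Hadoop types.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;

public class CachedFlagSketch {
  // For each location, record whether it is also cached; every cached entry
  // must match some location, otherwise fail fast.
  static List<Boolean> markCached(List<String> locations, List<String> cached) {
    List<String> remaining = new LinkedList<>(cached);
    List<Boolean> isCached = new ArrayList<>();
    for (String loc : locations) {
      // remove() returns true only when the element was present.
      isCached.add(remaining.remove(loc));
    }
    if (!remaining.isEmpty()) {
      throw new IllegalArgumentException(
          "Cached locations not backed by storage: " + remaining);
    }
    return isCached;
  }

  public static void main(String[] args) {
    System.out.println(markCached(
        Arrays.asList("dn1", "dn2", "dn3"), Arrays.asList("dn2")));
    // prints [false, true, false]
  }
}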
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
The class BlockPlacementPolicyDefault, method verifyBlockPlacement.
@Override
public BlockPlacementStatus verifyBlockPlacement(DatanodeInfo[] locs,
    int numberOfReplicas) {
  if (locs == null)
    locs = DatanodeDescriptor.EMPTY_ARRAY;
  if (!clusterMap.hasClusterEverBeenMultiRack()) {
    // only one rack
    return new BlockPlacementStatusDefault(1, 1, 1);
  }
  int minRacks = 2;
  minRacks = Math.min(minRacks, numberOfReplicas);
  // 1. Check that all locations are different.
  // 2. Count locations on different racks.
  Set<String> racks = new TreeSet<>();
  for (DatanodeInfo dn : locs)
    racks.add(dn.getNetworkLocation());
  return new BlockPlacementStatusDefault(racks.size(), minRacks,
      clusterMap.getNumOfRacks());
}
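In effect the default policy asks a single question here: do the given replica locations span at least min(2, numberOfReplicas) distinct racks? Below is a minimal standalone sketch of that check in plain Java; the names are hypothetical and it does not touch the Hadoop network-topology classes.

import java.util.Arrays;
import java.util.Set;
import java.util.TreeSet;

public class RackCheckSketch {
  // True when the replicas span at least min(2, numberOfReplicas) racks,
  // mirroring the verification step of the default placement policy.
  static boolean isRackCountSatisfied(String[] rackPerReplica, int numberOfReplicas) {
    int minRacks = Math.min(2, numberOfReplicas);
    Set<String> racks = new TreeSet<>(Arrays.asList(rackPerReplica));
    return racks.size() >= minRacks;
  }

  public static void main(String[] args) {
    System.out.println(isRackCountSatisfied(new String[] {"/rack1", "/rack1"}, 2)); // false
    System.out.println(isRackCountSatisfied(new String[] {"/rack1", "/rack2"}, 2)); // true
  }
}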
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
The class BlockManager, method computeInvalidateWork.
/**
 * Schedule blocks for deletion at datanodes.
 * @param nodesToProcess number of datanodes to schedule deletion work for
 * @return total number of blocks scheduled for deletion
 */
int computeInvalidateWork(int nodesToProcess) {
  final List<DatanodeInfo> nodes = invalidateBlocks.getDatanodes();
  Collections.shuffle(nodes);
  nodesToProcess = Math.min(nodes.size(), nodesToProcess);
  int blockCnt = 0;
  for (DatanodeInfo dnInfo : nodes) {
    int blocks = invalidateWorkForOneNode(dnInfo);
    if (blocks > 0) {
      blockCnt += blocks;
      if (--nodesToProcess == 0) {
        break;
      }
    }
  }
  return blockCnt;
}
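The pattern is: shuffle the candidate datanodes so invalidation work is spread evenly across nodes over time, then stop once nodesToProcess nodes have actually contributed blocks. Below is a minimal standalone sketch of that shuffle-and-limit pattern in plain Java; all names are hypothetical.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.function.ToIntFunction;

public class ShuffleAndLimitSketch {
  // Visit candidates in random order and stop after 'limit' nodes have
  // actually produced work; returns the total amount of work scheduled.
  static int scheduleWork(List<String> nodes, int limit, ToIntFunction<String> workFor) {
    Collections.shuffle(nodes);
    limit = Math.min(nodes.size(), limit);
    int total = 0;
    for (String node : nodes) {
      int produced = workFor.applyAsInt(node);
      if (produced > 0) {
        total += produced;
        if (--limit == 0) {
          break;
        }
      }
    }
    return total;
  }

  public static void main(String[] args) {
    List<String> nodes = new ArrayList<>(Arrays.asList("dn1", "dn2", "dn3", "dn4"));
    // Pretend each node has 5 blocks to invalidate; schedule work on at most 2 nodes.
    System.out.println(scheduleWork(nodes, 2, n -> 5)); // prints 10
  }
}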
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
The class BlockManager, method isPlacementPolicySatisfied.
boolean isPlacementPolicySatisfied(BlockInfo storedBlock) {
  List<DatanodeDescriptor> liveNodes = new ArrayList<>();
  Collection<DatanodeDescriptor> corruptNodes =
      corruptReplicas.getNodes(storedBlock);
  for (DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock)) {
    final DatanodeDescriptor cur = storage.getDatanodeDescriptor();
    // Skip decommissioning, decommissioned, and corrupt replicas; everything
    // else counts as a valid replica from the rack policy point of view.
    if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()
        && ((corruptNodes == null) || !corruptNodes.contains(cur))) {
      liveNodes.add(cur);
    }
  }
  DatanodeInfo[] locs = liveNodes.toArray(new DatanodeInfo[liveNodes.size()]);
  BlockType blockType = storedBlock.getBlockType();
  BlockPlacementPolicy placementPolicy = placementPolicies.getPolicy(blockType);
  int numReplicas = blockType == STRIPED
      ? ((BlockInfoStriped) storedBlock).getRealTotalBlockNum()
      : storedBlock.getReplication();
  return placementPolicy.verifyBlockPlacement(locs, numReplicas)
      .isPlacementPolicySatisfied();
}
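For DatanodeInfo, the interesting step is the filter: only replicas on nodes that are not decommissioning, not decommissioned, and not known to be corrupt are handed to the placement policy. Below is a minimal standalone sketch of that filter in plain Java with a hypothetical Replica type; it is not a Hadoop class.

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

public class LiveReplicaFilterSketch {
  /** Hypothetical stand-in for a replica location; not a Hadoop type. */
  static class Replica {
    final String node;
    final boolean decommissioning;
    final boolean decommissioned;
    Replica(String node, boolean decommissioning, boolean decommissioned) {
      this.node = node;
      this.decommissioning = decommissioning;
      this.decommissioned = decommissioned;
    }
  }

  /** Keep only the replicas the placement check should count as live. */
  static List<Replica> liveReplicas(List<Replica> all, Set<String> corruptNodes) {
    List<Replica> live = new ArrayList<>();
    for (Replica r : all) {
      if (!r.decommissioning && !r.decommissioned && !corruptNodes.contains(r.node)) {
        live.add(r);
      }
    }
    return live;
  }
}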