Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.
The class TestBlockUnderConstruction, method testGetBlockLocations.
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final BlockManager blockManager = cluster.getNamesystem().getBlockManager();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);
  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);
  for (int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertFalse(blockManager.getStoredBlock(b).isComplete());
    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  // close file
  out.close();
}
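All of these snippets lean on the same MiniDFSCluster test fixture for the cluster, hdfs and BlockManager handles. A minimal sketch of that setup follows; the class and field names here are illustrative, not copied from the actual test scaffolding:

import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.junit.After;
import org.junit.Before;

// Hypothetical fixture sketch: how cluster, hdfs and the BlockManager
// used in these tests are typically obtained.
public class BlockManagerFixtureSketch {
  private MiniDFSCluster cluster;
  private DistributedFileSystem hdfs;

  @Before
  public void setUp() throws Exception {
    // three DataNodes is enough for the replicated-file tests above
    cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
        .numDataNodes(3).build();
    cluster.waitActive();
    hdfs = cluster.getFileSystem();
  }

  @After
  public void tearDown() {
    if (cluster != null) {
      cluster.shutdown();
    }
  }

  // The BlockManager is reached through the cluster's FSNamesystem:
  private BlockManager blockManager() {
    return cluster.getNamesystem().getBlockManager();
  }
}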
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.
The class TestAddOverReplicatedStripedBlocks, method testProcessOverReplicatedAndCorruptStripedBlock.
@Test
public void testProcessOverReplicatedAndCorruptStripedBlock() throws Exception {
  long fileLen = dataBlocks * blockSize;
  DFSTestUtil.createStripedFile(cluster, filePath, null, 1, stripesPerBlock, false);
  LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
  LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
  long gs = bg.getBlock().getGenerationStamp();
  String bpid = bg.getBlock().getBlockPoolId();
  long groupId = bg.getBlock().getBlockId();
  Block blk = new Block(groupId, blockSize, gs);
  BlockInfoStriped blockInfo = new BlockInfoStriped(blk, StripedFileTestUtil.getDefaultECPolicy());
  for (int i = 0; i < groupSize; i++) {
    blk.setBlockId(groupId + i);
    cluster.injectBlocks(i, Arrays.asList(blk), bpid);
  }
  cluster.triggerBlockReports();
  // let an internal block be corrupted
  BlockManager bm = cluster.getNamesystem().getBlockManager();
  List<DatanodeInfo> infos = Arrays.asList(bg.getLocations());
  List<String> storages = Arrays.asList(bg.getStorageIDs());
  cluster.getNamesystem().writeLock();
  try {
    bm.findAndMarkBlockAsCorrupt(lbs.getLastLocatedBlock().getBlock(), infos.get(0), storages.get(0), "TEST");
  } finally {
    cluster.getNamesystem().writeUnlock();
  }
  assertEquals(1, bm.countNodes(bm.getStoredBlock(blockInfo)).corruptReplicas());
  // let an internal block be over-replicated with 2 redundant blocks
  blk.setBlockId(groupId + 2);
  cluster.injectBlocks(numDNs - 3, Arrays.asList(blk), bpid);
  cluster.injectBlocks(numDNs - 2, Arrays.asList(blk), bpid);
  // update blocksMap
  cluster.triggerBlockReports();
  // verify that all internal blocks exist except b0.
  // the redundant internal blocks will not be deleted before the corrupted
  // block gets reconstructed, but since we set
  // DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY to 0, the reconstruction will
  // not happen
  lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
  bg = (LocatedStripedBlock) (lbs.get(0));
  assertEquals(groupSize + 1, bg.getBlockIndices().length);
  assertEquals(groupSize + 1, bg.getLocations().length);
  BitSet set = new BitSet(groupSize);
  for (byte index : bg.getBlockIndices()) {
    set.set(index);
  }
  Assert.assertFalse(set.get(0));
  for (int i = 1; i < groupSize; i++) {
    assertTrue(set.get(i));
  }
}
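The detail worth lifting out of this test is the locking idiom: findAndMarkBlockAsCorrupt mutates BlockManager state and must run under the namesystem write lock. A condensed sketch of that idiom, reusing the variable names from the test above:

// bm, cluster, lbs, infos, storages and blockInfo are as in the test above.
cluster.getNamesystem().writeLock();
try {
  // mark the first reported replica of the last block as corrupt; "TEST"
  // is the free-form reason string recorded with the corruption
  bm.findAndMarkBlockAsCorrupt(lbs.getLastLocatedBlock().getBlock(),
      infos.get(0), storages.get(0), "TEST");
} finally {
  cluster.getNamesystem().writeUnlock();
}
// the corrupt replica is visible through countNodes() afterwards
assertEquals(1, bm.countNodes(bm.getStoredBlock(blockInfo)).corruptReplicas());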
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.
The class TestAddStripedBlockInFBR, method testAddBlockInFullBlockReport.
@Test
public void testAddBlockInFullBlockReport() throws Exception {
  BlockManager spy = Mockito.spy(cluster.getNamesystem().getBlockManager());
  // let the NN ignore one DataNode's IBR (incremental block report)
  final DataNode dn = cluster.getDataNodes().get(0);
  final DatanodeID datanodeID = dn.getDatanodeId();
  Mockito.doNothing().when(spy).processIncrementalBlockReport(Mockito.eq(datanodeID), Mockito.any());
  Whitebox.setInternalState(cluster.getNamesystem(), "blockManager", spy);
  final Path ecDir = new Path("/ec");
  final Path repDir = new Path("/rep");
  dfs.mkdirs(ecDir);
  dfs.mkdirs(repDir);
  dfs.getClient().setErasureCodingPolicy(ecDir.toString(), StripedFileTestUtil.getDefaultECPolicy().getName());
  // create several non-EC files and one EC file
  final Path[] repFiles = new Path[groupSize];
  for (int i = 0; i < groupSize; i++) {
    repFiles[i] = new Path(repDir, "f" + i);
    DFSTestUtil.createFile(dfs, repFiles[i], 1L, (short) 3, 0L);
  }
  final Path ecFile = new Path(ecDir, "f");
  DFSTestUtil.createFile(dfs, ecFile, cellSize * dataBlocks, (short) 1, 0L);
  GenericTestUtils.waitFor(new Supplier<Boolean>() {

    @Override
    public Boolean get() {
      try {
        // trigger the DN's FBR (full block report), which adds the block-dn mapping
        cluster.triggerBlockReports();
        // make sure the NN has the correct block-dn mapping
        BlockInfoStriped blockInfo = (BlockInfoStriped) cluster.getNamesystem().getFSDirectory().getINode(ecFile.toString()).asFile().getLastBlock();
        NumberReplicas nr = spy.countNodes(blockInfo);
        return nr.excessReplicas() == 0 && nr.liveReplicas() == groupSize;
      } catch (Exception ignored) {
        // ignore transient exceptions while waiting
      }
      return false;
    }
  }, 3000, 60000);
}
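The interception pattern in this test is reusable on its own: spy the live BlockManager, stub out the single call you want suppressed, and swap the spy into the namesystem so every other code path still hits the real implementation. Condensed, with the same names as above:

// Suppress incremental block reports from one DataNode only; full block
// reports and everything else still go through the real BlockManager.
BlockManager spy = Mockito.spy(cluster.getNamesystem().getBlockManager());
Mockito.doNothing().when(spy)
    .processIncrementalBlockReport(Mockito.eq(datanodeID), Mockito.any());
// Whitebox replaces the private "blockManager" field on the FSNamesystem:
Whitebox.setInternalState(cluster.getNamesystem(), "blockManager", spy);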
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.
The class TestAddStripedBlocks, method testCheckStripedReplicaCorrupt.
@Test
public void testCheckStripedReplicaCorrupt() throws Exception {
  final int numBlocks = 4;
  final int numStripes = 4;
  final Path filePath = new Path("/corrupt");
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();
  final BlockManager bm = ns.getBlockManager();
  DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks, numStripes, false);
  INodeFile fileNode = ns.getFSDirectory().getINode(filePath.toString()).asFile();
  Assert.assertTrue(fileNode.isStriped());
  BlockInfo stored = fileNode.getBlocks()[0];
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  Assert.assertEquals(0, ns.getCorruptReplicaBlocks());
  // Now send a block report with the correct size
  DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
  final Block reported = new Block(stored);
  reported.setNumBytes(numStripes * cellSize);
  StorageReceivedDeletedBlocks[] reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
  ns.processIncrementalBlockReport(cluster.getDataNodes().get(0).getDatanodeId(), reports[0]);
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  Assert.assertEquals(0, ns.getCorruptReplicaBlocks());
  // Now send a block report with the wrong size
  reported.setBlockId(stored.getBlockId() + 1);
  reported.setNumBytes(numStripes * cellSize - 1);
  reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
  ns.processIncrementalBlockReport(cluster.getDataNodes().get(1).getDatanodeId(), reports[0]);
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
  // Now send a parity block report with the correct size
  reported.setBlockId(stored.getBlockId() + dataBlocks);
  reported.setNumBytes(numStripes * cellSize);
  reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
  ns.processIncrementalBlockReport(cluster.getDataNodes().get(2).getDatanodeId(), reports[0]);
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
  // Now send a parity block report with the wrong size
  reported.setBlockId(stored.getBlockId() + dataBlocks);
  reported.setNumBytes(numStripes * cellSize + 1);
  reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
  ns.processIncrementalBlockReport(cluster.getDataNodes().get(3).getDatanodeId(), reports[0]);
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  // the total number of corrupt blocks is still 1
  Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
  // but 2 internal blocks are corrupted
  Assert.assertEquals(2, bm.getCorruptReplicas(stored).size());
  // Now change the size of the stored block and test verification of the
  // last block's size
  stored.setNumBytes(stored.getNumBytes() + 10);
  reported.setBlockId(stored.getBlockId() + dataBlocks + 2);
  reported.setNumBytes(numStripes * cellSize);
  reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
  ns.processIncrementalBlockReport(cluster.getDataNodes().get(4).getDatanodeId(), reports[0]);
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
  Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
  // Now send a parity block report with the correct size based on the
  // adjusted size of the stored block.
  // The stored block now has numStripes full stripes + a cell + 10 bytes.
  stored.setNumBytes(stored.getNumBytes() + cellSize);
  reported.setBlockId(stored.getBlockId());
  reported.setNumBytes((numStripes + 1) * cellSize);
  reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
  ns.processIncrementalBlockReport(cluster.getDataNodes().get(0).getDatanodeId(), reports[0]);
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
  Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
  reported.setBlockId(stored.getBlockId() + 1);
  reported.setNumBytes(numStripes * cellSize + 10);
  reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
  ns.processIncrementalBlockReport(cluster.getDataNodes().get(0).getDatanodeId(), reports[0]);
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
  Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
  reported.setBlockId(stored.getBlockId() + dataBlocks);
  reported.setNumBytes((numStripes + 1) * cellSize);
  reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
  ns.processIncrementalBlockReport(cluster.getDataNodes().get(2).getDatanodeId(), reports[0]);
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
  Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
}
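Every corruption case above is one round of the same three-step pattern: describe the replica a DataNode claims to hold, wrap it in a report, and feed it to the namesystem. One round in isolation (reported, stored, storage and ns as in the test; the DataNode index is arbitrary):

// 1) describe the replica, here with a deliberately wrong length
reported.setBlockId(stored.getBlockId() + 1);
reported.setNumBytes(numStripes * cellSize - 1);
// 2) wrap it as a received-block report against one storage
StorageReceivedDeletedBlocks[] reports = DFSTestUtil.makeReportForReceivedBlock(
    reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
// 3) deliver it as an incremental block report and refresh the counters
ns.processIncrementalBlockReport(cluster.getDataNodes().get(1).getDatanodeId(),
    reports[0]);
BlockManagerTestUtil.updateState(ns.getBlockManager());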
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.
The class FSDirAttrOp, method unprotectedSetReplication.
static BlockInfo[] unprotectedSetReplication(FSDirectory fsd, INodesInPath iip, short replication) throws QuotaExceededException, UnresolvedLinkException, SnapshotAccessControlException, UnsupportedActionException {
  assert fsd.hasWriteLock();
  final BlockManager bm = fsd.getBlockManager();
  final INode inode = iip.getLastINode();
  if (inode == null || !inode.isFile() || inode.asFile().isStriped()) {
    // TODO we do not support replication on striped layout files yet
    return null;
  }
  INodeFile file = inode.asFile();
  // Make sure the directory has sufficient quota
  short oldBR = file.getPreferredBlockReplication();
  long size = file.computeFileSize(true, true);
  // check the quota before increasing the replication
  if (oldBR < replication) {
    fsd.updateCount(iip, 0L, size, oldBR, replication, true);
  }
  file.setFileReplication(replication, iip.getLatestSnapshotId());
  short targetReplication = (short) Math.max(replication, file.getPreferredBlockReplication());
  if (oldBR > replication) {
    fsd.updateCount(iip, 0L, size, oldBR, targetReplication, true);
  }
  for (BlockInfo b : file.getBlocks()) {
    bm.setReplication(oldBR, targetReplication, b);
  }
  if (oldBR != -1) {
    if (oldBR > targetReplication) {
      FSDirectory.LOG.info("Decreasing replication from {} to {} for {}", oldBR, targetReplication, iip.getPath());
    } else {
      FSDirectory.LOG.info("Increasing replication from {} to {} for {}", oldBR, targetReplication, iip.getPath());
    }
  }
  return file.getBlocks();
}
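For context, this helper sits at the bottom of the ordinary client call chain; a hedged sketch of how it is reached (the path name is illustrative):

// FileSystem.setReplication() travels through the NameNode RPC layer and
// eventually lands in unprotectedSetReplication. For a striped
// (erasure-coded) file the helper returns null via the early return above,
// so the change does not take effect.
DistributedFileSystem dfs = cluster.getFileSystem();
boolean changed = dfs.setReplication(new Path("/rep/f0"), (short) 2);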