Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.
The class NameNodeRpcServer, method blockReport.
// DatanodeProtocol
@Override
public DatanodeCommand blockReport(final DatanodeRegistration nodeReg,
    String poolId, final StorageBlockReport[] reports,
    final BlockReportContext context) throws IOException {
  checkNNStartup();
  verifyRequest(nodeReg);
  if (blockStateChangeLog.isDebugEnabled()) {
    blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: from " + nodeReg
        + ", reports.length=" + reports.length);
  }
  final BlockManager bm = namesystem.getBlockManager();
  boolean noStaleStorages = false;
  for (int r = 0; r < reports.length; r++) {
    final BlockListAsLongs blocks = reports[r].getBlocks();
    //
    // BlockManager.processReport accumulates information of prior calls
    // for the same node and storage, so the value returned by the last
    // call of this loop is the final updated value for noStaleStorages.
    //
    final int index = r;
    noStaleStorages = bm.runBlockOp(new Callable<Boolean>() {

      @Override
      public Boolean call() throws IOException {
        return bm.processReport(nodeReg, reports[index].getStorage(),
            blocks, context);
      }
    });
    metrics.incrStorageBlockReportOps();
  }
  bm.removeBRLeaseIfNeeded(nodeReg, context);
  BlockManagerFaultInjector.getInstance().incomingBlockReportRpc(nodeReg, context);
  if (nn.getFSImage().isUpgradeFinalized() && !namesystem.isRollingUpgrade()
      && !nn.isStandbyState() && noStaleStorages) {
    return new FinalizeCommand(poolId);
  }
  return null;
}
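BlockListAsLongs is the compact wire form of one storage's replica list, and it is iterable as BlockReportReplica entries. A minimal sketch (ours, not part of NameNodeRpcServer) of decoding the per-storage reports the way this method receives them:

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;

// Sketch: print a summary of what each storage reported.
static void dumpReports(StorageBlockReport[] reports) {
  for (StorageBlockReport report : reports) {
    BlockListAsLongs blocks = report.getBlocks();
    System.out.println(report.getStorage().getStorageID()
        + " reported " + blocks.getNumberOfBlocks() + " blocks");
    for (BlockReportReplica replica : blocks) {
      // Each entry decodes to block id, length, generation stamp and state.
      System.out.println("  id=" + replica.getBlockId()
          + " len=" + replica.getNumBytes()
          + " gs=" + replica.getGenerationStamp()
          + " state=" + replica.getState());
    }
  }
}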
Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.
The class TestFileCorruption, method testFileCorruption.
/** check if DFS can handle corrupted blocks properly */
@Test
public void testFileCorruption() throws Exception {
  MiniDFSCluster cluster = null;
  DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFileCorruption")
      .setNumFiles(20).build();
  try {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    FileSystem fs = cluster.getFileSystem();
    util.createFiles(fs, "/srcdat");
    // Now deliberately remove the blocks
    String bpid = cluster.getNamesystem().getBlockPoolId();
    DataNode dn = cluster.getDataNodes().get(2);
    Map<DatanodeStorage, BlockListAsLongs> blockReports =
        dn.getFSDataset().getBlockReports(bpid);
    assertTrue("Blocks do not exist on data-dir", !blockReports.isEmpty());
    for (BlockListAsLongs report : blockReports.values()) {
      for (BlockReportReplica brr : report) {
        LOG.info("Deliberately removing block {}", brr.getBlockName());
        cluster.getFsDatasetTestUtils(2)
            .getMaterializedReplica(new ExtendedBlock(bpid, brr))
            .deleteData();
      }
    }
    assertTrue("Corrupted replicas not handled properly.",
        util.checkFiles(fs, "/srcdat"));
    util.cleanup(fs, "/srcdat");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
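The map returned by getBlockReports keys each BlockListAsLongs by its DatanodeStorage, so per-DataNode totals fall out of a simple loop. A small helper sketch (the name countReportedReplicas is ours, not from the test):

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

// Hypothetical helper: total replicas a DataNode reports across all of
// its storages for one block pool.
static int countReportedReplicas(DataNode dn, String bpid) {
  int total = 0;
  for (BlockListAsLongs report : dn.getFSDataset().getBlockReports(bpid).values()) {
    total += report.getNumberOfBlocks();
  }
  return total;
}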
Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.
The class TestDataNodeHotSwapVolumes, method getNumBlocksReport.
private List<List<Integer>> getNumBlocksReport(int namesystemIdx) {
  List<List<Integer>> results = new ArrayList<List<Integer>>();
  final String bpid = cluster.getNamesystem(namesystemIdx).getBlockPoolId();
  List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
      cluster.getAllBlockReports(bpid);
  for (Map<DatanodeStorage, BlockListAsLongs> datanodeReport : blockReports) {
    List<Integer> numBlocksPerDN = new ArrayList<Integer>();
    for (BlockListAsLongs blocks : datanodeReport.values()) {
      numBlocksPerDN.add(blocks.getNumberOfBlocks());
    }
    results.add(numBlocksPerDN);
  }
  return results;
}
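A usage sketch (ours, assuming namesystem index 0): the nested lists flatten into a cluster-wide block count, which is handy for asserting that a volume swap did not lose replicas.

// Sketch: sum the per-volume counts from getNumBlocksReport into one total.
int clusterTotal = 0;
for (List<Integer> perDataNode : getNumBlocksReport(0)) {
  for (int perVolume : perDataNode) {
    clusterTotal += perVolume;
  }
}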
Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.
The class TestDataNodeHotSwapVolumes, method testAddVolumesDuringWrite.
@Test(timeout = 60000)
public void testAddVolumesDuringWrite() throws IOException,
    InterruptedException, TimeoutException, ReconfigurationException {
  startDFSCluster(1, 1);
  int numVolumes = cluster.getStoragesPerDatanode();
  String bpid = cluster.getNamesystem().getBlockPoolId();
  Path testFile = new Path("/test");
  // Each volume has 2 blocks
  int initialBlockCount = numVolumes * 2;
  createFile(testFile, initialBlockCount);
  int newVolumeCount = 5;
  addVolumes(newVolumeCount);
  numVolumes += newVolumeCount;
  int additionalBlockCount = 9;
  int totalBlockCount = initialBlockCount + additionalBlockCount;
  // Continue to write the same file, thus the new volumes will have blocks.
  DFSTestUtil.appendFile(cluster.getFileSystem(), testFile,
      BLOCK_SIZE * additionalBlockCount);
  verifyFileLength(cluster.getFileSystem(), testFile, totalBlockCount);
  // After the append, round-robin placement spreads the 9 new blocks over
  // all 7 volumes: each of the 5 new volumes ends with 1 block, and each
  // of the 2 original volumes with 4 (2 initial + 2 appended).
  List<Integer> expectedNumBlocks = Arrays.asList(1, 1, 1, 1, 1, 4, 4);
  List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
      cluster.getAllBlockReports(bpid);
  // 1 DataNode
  assertEquals(1, blockReports.size());
  // 7 volumes
  assertEquals(numVolumes, blockReports.get(0).size());
  Map<DatanodeStorage, BlockListAsLongs> dnReport = blockReports.get(0);
  List<Integer> actualNumBlocks = new ArrayList<Integer>();
  for (BlockListAsLongs blockList : dnReport.values()) {
    actualNumBlocks.add(blockList.getNumberOfBlocks());
  }
  Collections.sort(actualNumBlocks);
  assertEquals(expectedNumBlocks, actualNumBlocks);
}
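A complementary check (a fragment of ours that could follow the assertions above, not in the original test): whatever the per-volume spread, the counts must sum to totalBlockCount.

// Sketch: the sorted per-volume counts should add up to every block written.
int sum = 0;
for (int n : actualNumBlocks) {
  sum += n;
}
assertEquals(totalBlockCount, sum);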
Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.
The class TestDataNodeHotSwapVolumes, method testAddOneNewVolume.
/**
 * Test adding one volume on a running MiniDFSCluster with only one NameNode.
 */
@Test(timeout = 60000)
public void testAddOneNewVolume() throws IOException,
    ReconfigurationException, InterruptedException, TimeoutException {
  startDFSCluster(1, 1);
  String bpid = cluster.getNamesystem().getBlockPoolId();
  final int numBlocks = 10;
  addVolumes(1);
  Path testFile = new Path("/test");
  createFile(testFile, numBlocks);
  List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
      cluster.getAllBlockReports(bpid);
  // 1 DataNode
  assertEquals(1, blockReports.size());
  // 3 volumes
  assertEquals(3, blockReports.get(0).size());
  // FsVolumeList uses a round-robin block chooser by default, so the new
  // blocks should be spread evenly across all volumes.
  int minNumBlocks = Integer.MAX_VALUE;
  int maxNumBlocks = Integer.MIN_VALUE;
  for (BlockListAsLongs blockList : blockReports.get(0).values()) {
    minNumBlocks = Math.min(minNumBlocks, blockList.getNumberOfBlocks());
    maxNumBlocks = Math.max(maxNumBlocks, blockList.getNumberOfBlocks());
  }
  assertTrue(Math.abs(maxNumBlocks - minNumBlocks) <= 1);
  verifyFileLength(cluster.getFileSystem(), testFile, numBlocks);
}
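The same spread check can be phrased with a stream over the report map (a sketch of ours, equivalent to the loop above; uses java.util.IntSummaryStatistics):

// Sketch: min/max block counts per volume via summaryStatistics().
IntSummaryStatistics stats = blockReports.get(0).values().stream()
    .mapToInt(BlockListAsLongs::getNumberOfBlocks)
    .summaryStatistics();
assertTrue(stats.getMax() - stats.getMin() <= 1);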