
Example 16 with BlockListAsLongs

Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.

From class NameNodeRpcServer, method blockReport.

// DatanodeProtocol
@Override
public DatanodeCommand blockReport(final DatanodeRegistration nodeReg, String poolId, final StorageBlockReport[] reports, final BlockReportContext context) throws IOException {
    checkNNStartup();
    verifyRequest(nodeReg);
    if (blockStateChangeLog.isDebugEnabled()) {
        blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: " + "from " + nodeReg + ", reports.length=" + reports.length);
    }
    final BlockManager bm = namesystem.getBlockManager();
    boolean noStaleStorages = false;
    for (int r = 0; r < reports.length; r++) {
        final BlockListAsLongs blocks = reports[r].getBlocks();
        //
        // BlockManager.processReport accumulates information of prior calls
        // for the same node and storage, so the value returned by the last
        // call of this loop is the final updated value for noStaleStorages.
        //
        final int index = r;
        noStaleStorages = bm.runBlockOp(new Callable<Boolean>() {

            @Override
            public Boolean call() throws IOException {
                return bm.processReport(nodeReg, reports[index].getStorage(), blocks, context);
            }
        });
        metrics.incrStorageBlockReportOps();
    }
    bm.removeBRLeaseIfNeeded(nodeReg, context);
    BlockManagerFaultInjector.getInstance().incomingBlockReportRpc(nodeReg, context);
    if (nn.getFSImage().isUpgradeFinalized() && !namesystem.isRollingUpgrade() && !nn.isStandbyState() && noStaleStorages) {
        return new FinalizeCommand(poolId);
    }
    return null;
}
Also used: BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), FinalizeCommand (org.apache.hadoop.hdfs.server.protocol.FinalizeCommand), Callable (java.util.concurrent.Callable)
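For context on the reports argument above: a DataNode sends one StorageBlockReport per storage directory, each wrapping a BlockListAsLongs. Below is a minimal sketch (not the actual BPServiceActor code) of assembling that array from a per-storage map such as the one returned by getBlockReports in the next example; it assumes the StorageBlockReport(DatanodeStorage, BlockListAsLongs) constructor implied by the getStorage()/getBlocks() accessors used above.

import java.util.Map;

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;

class StorageBlockReportSketch {

    // Build one StorageBlockReport per storage; the NameNode loop above then
    // calls BlockManager.processReport once per entry.
    static StorageBlockReport[] toStorageReports(Map<DatanodeStorage, BlockListAsLongs> perStorage) {
        StorageBlockReport[] reports = new StorageBlockReport[perStorage.size()];
        int i = 0;
        for (Map.Entry<DatanodeStorage, BlockListAsLongs> entry : perStorage.entrySet()) {
            reports[i++] = new StorageBlockReport(entry.getKey(), entry.getValue());
        }
        return reports;
    }
}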

Example 17 with BlockListAsLongs

Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.

From class TestFileCorruption, method testFileCorruption.

/** Check that DFS handles corrupted blocks properly. */
@Test
public void testFileCorruption() throws Exception {
    MiniDFSCluster cluster = null;
    DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFileCorruption").setNumFiles(20).build();
    try {
        Configuration conf = new HdfsConfiguration();
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        FileSystem fs = cluster.getFileSystem();
        util.createFiles(fs, "/srcdat");
        // Now deliberately remove the blocks
        String bpid = cluster.getNamesystem().getBlockPoolId();
        DataNode dn = cluster.getDataNodes().get(2);
        Map<DatanodeStorage, BlockListAsLongs> blockReports = dn.getFSDataset().getBlockReports(bpid);
        assertTrue("Blocks do not exist on data-dir", !blockReports.isEmpty());
        for (BlockListAsLongs report : blockReports.values()) {
            for (BlockReportReplica brr : report) {
                LOG.info("Deliberately removing block {}", brr.getBlockName());
                cluster.getFsDatasetTestUtils(2).getMaterializedReplica(new ExtendedBlock(bpid, brr)).deleteData();
            }
        }
        assertTrue("Corrupted replicas not handled properly.", util.checkFiles(fs, "/srcdat"));
        util.cleanup(fs, "/srcdat");
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), BlockReportReplica (org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FileSystem (org.apache.hadoop.fs.FileSystem), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), Test (org.junit.Test)
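As the loop above shows, a BlockListAsLongs can be iterated directly as BlockReportReplica entries. A small sketch, using only the accessors BlockReportReplica inherits from Block, for dumping the contents of one report:

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;

class BlockReportWalker {

    // Print one line per replica; BlockReportReplica extends Block, so the
    // standard block accessors are available.
    static void dump(BlockListAsLongs report) {
        for (BlockReportReplica replica : report) {
            System.out.printf("block=%s id=%d bytes=%d genStamp=%d%n",
                    replica.getBlockName(), replica.getBlockId(),
                    replica.getNumBytes(), replica.getGenerationStamp());
        }
    }
}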

Example 18 with BlockListAsLongs

Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.

From class TestDataNodeHotSwapVolumes, method getNumBlocksReport.

private List<List<Integer>> getNumBlocksReport(int namesystemIdx) {
    List<List<Integer>> results = new ArrayList<List<Integer>>();
    final String bpid = cluster.getNamesystem(namesystemIdx).getBlockPoolId();
    List<Map<DatanodeStorage, BlockListAsLongs>> blockReports = cluster.getAllBlockReports(bpid);
    for (Map<DatanodeStorage, BlockListAsLongs> datanodeReport : blockReports) {
        List<Integer> numBlocksPerDN = new ArrayList<Integer>();
        for (BlockListAsLongs blocks : datanodeReport.values()) {
            numBlocksPerDN.add(blocks.getNumberOfBlocks());
        }
        results.add(numBlocksPerDN);
    }
    return results;
}
Also used: DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), ArrayList (java.util.ArrayList), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), List (java.util.List), CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString), Matchers.anyString (org.mockito.Matchers.anyString), Map (java.util.Map)
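A hypothetical usage of the helper above (the class and method names below are illustrative, not part of the test): collapsing the per-volume counts into one total per DataNode, for example to compare totals before and after a volume change.

import java.util.ArrayList;
import java.util.List;

class BlockCountTotals {

    // Sum each DataNode's per-volume counts from getNumBlocksReport().
    static List<Integer> totalsPerDataNode(List<List<Integer>> perVolumeCounts) {
        List<Integer> totals = new ArrayList<>();
        for (List<Integer> volumeCounts : perVolumeCounts) {
            int sum = 0;
            for (int count : volumeCounts) {
                sum += count;
            }
            totals.add(sum);
        }
        return totals;
    }
}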

Example 19 with BlockListAsLongs

Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.

From class TestDataNodeHotSwapVolumes, method testAddVolumesDuringWrite.

@Test(timeout = 60000)
public void testAddVolumesDuringWrite() throws IOException, InterruptedException, TimeoutException, ReconfigurationException {
    startDFSCluster(1, 1);
    int numVolumes = cluster.getStoragesPerDatanode();
    String bpid = cluster.getNamesystem().getBlockPoolId();
    Path testFile = new Path("/test");
    // Each volume has 2 blocks
    int initialBlockCount = numVolumes * 2;
    createFile(testFile, initialBlockCount);
    int newVolumeCount = 5;
    addVolumes(newVolumeCount);
    numVolumes += newVolumeCount;
    int additionalBlockCount = 9;
    int totalBlockCount = initialBlockCount + additionalBlockCount;
    // Continue to write the same file, thus the new volumes will have blocks.
    DFSTestUtil.appendFile(cluster.getFileSystem(), testFile, BLOCK_SIZE * additionalBlockCount);
    verifyFileLength(cluster.getFileSystem(), testFile, totalBlockCount);
    // After appending data, each of the 5 newly added volumes should hold
    // 1 block, and each of the 2 original volumes 4 blocks (2 initial + 2 appended).
    List<Integer> expectedNumBlocks = Arrays.asList(1, 1, 1, 1, 1, 4, 4);
    List<Map<DatanodeStorage, BlockListAsLongs>> blockReports = cluster.getAllBlockReports(bpid);
    // 1 DataNode
    assertEquals(1, blockReports.size());
    // 7 volumes
    assertEquals(numVolumes, blockReports.get(0).size());
    Map<DatanodeStorage, BlockListAsLongs> dnReport = blockReports.get(0);
    List<Integer> actualNumBlocks = new ArrayList<Integer>();
    for (BlockListAsLongs blockList : dnReport.values()) {
        actualNumBlocks.add(blockList.getNumberOfBlocks());
    }
    Collections.sort(actualNumBlocks);
    assertEquals(expectedNumBlocks, actualNumBlocks);
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), ArrayList (java.util.ArrayList), CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString), Matchers.anyString (org.mockito.Matchers.anyString), Map (java.util.Map), Test (org.junit.Test)
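A possible additional cross-check, sketched against the same Map<DatanodeStorage, BlockListAsLongs> report shape used above (the helper name is illustrative): the per-storage counts should sum to totalBlockCount, i.e. the initial plus the appended blocks.

import java.util.Map;

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;

class ReportTotals {

    // Total block count across all storages of a single DataNode's report.
    static int totalBlocks(Map<DatanodeStorage, BlockListAsLongs> dnReport) {
        int total = 0;
        for (BlockListAsLongs blocks : dnReport.values()) {
            total += blocks.getNumberOfBlocks();
        }
        return total;
    }
}

In the test above this would read assertEquals(totalBlockCount, ReportTotals.totalBlocks(dnReport)).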

Example 20 with BlockListAsLongs

Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.

From class TestDataNodeHotSwapVolumes, method testAddOneNewVolume.

/**
   * Test adding one volume on a running MiniDFSCluster with only one NameNode.
   */
@Test(timeout = 60000)
public void testAddOneNewVolume() throws IOException, ReconfigurationException, InterruptedException, TimeoutException {
    startDFSCluster(1, 1);
    String bpid = cluster.getNamesystem().getBlockPoolId();
    final int numBlocks = 10;
    addVolumes(1);
    Path testFile = new Path("/test");
    createFile(testFile, numBlocks);
    List<Map<DatanodeStorage, BlockListAsLongs>> blockReports = cluster.getAllBlockReports(bpid);
    // 1 DataNode
    assertEquals(1, blockReports.size());
    // 3 volumes
    assertEquals(3, blockReports.get(0).size());
    // The default RoundRobinVolumeChoosingPolicy picks volumes in turn, so
    // the new blocks should be spread evenly across all volumes.
    int minNumBlocks = Integer.MAX_VALUE;
    int maxNumBlocks = Integer.MIN_VALUE;
    for (BlockListAsLongs blockList : blockReports.get(0).values()) {
        minNumBlocks = Math.min(minNumBlocks, blockList.getNumberOfBlocks());
        maxNumBlocks = Math.max(maxNumBlocks, blockList.getNumberOfBlocks());
    }
    assertTrue(Math.abs(maxNumBlocks - minNumBlocks) <= 1);
    verifyFileLength(cluster.getFileSystem(), testFile, numBlocks);
}
Also used: Path (org.apache.hadoop.fs.Path), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString), Matchers.anyString (org.mockito.Matchers.anyString), Map (java.util.Map), Test (org.junit.Test)
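The even-distribution assertion above compares the most and least loaded volumes. A small sketch of that check factored into a reusable helper (the name VolumeSpread is illustrative):

import java.util.Collection;

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;

class VolumeSpread {

    // Difference between the most and the least loaded volume; round-robin
    // volume choosing should keep this spread at most 1.
    static int spread(Collection<BlockListAsLongs> reports) {
        int min = Integer.MAX_VALUE;
        int max = Integer.MIN_VALUE;
        for (BlockListAsLongs blocks : reports) {
            min = Math.min(min, blocks.getNumberOfBlocks());
            max = Math.max(max, blocks.getNumberOfBlocks());
        }
        return max - min;
    }
}

With this helper the assertion becomes assertTrue(VolumeSpread.spread(blockReports.get(0).values()) <= 1).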

Aggregations

BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs): 23 usages
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 12 usages
Test (org.junit.Test): 11 usages
ArrayList (java.util.ArrayList): 8 usages
Map (java.util.Map): 8 usages
StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport): 8 usages
Path (org.apache.hadoop.fs.Path): 7 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 7 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 5 usages
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString): 4 usages
Matchers.anyString (org.mockito.Matchers.anyString): 4 usages
IOException (java.io.IOException): 3 usages
BlockReportReplica (org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica): 3 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 3 usages
BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext): 3 usages
AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 3 usages
ServiceException (com.google.protobuf.ServiceException): 2 usages
HashMap (java.util.HashMap): 2 usages
HashSet (java.util.HashSet): 2 usages
Configuration (org.apache.hadoop.conf.Configuration): 2 usages