Example 1 with BlockListAsLongs

Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.

From the class BlockReportTestBase, method getBlockReports:

// Generate a block report, optionally corrupting the generation
// stamp and/or length of one block.
private static StorageBlockReport[] getBlockReports(DataNode dn, String bpid, boolean corruptOneBlockGs, boolean corruptOneBlockLen) {
    Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists = dn.getFSDataset().getBlockReports(bpid);
    // Send block report
    StorageBlockReport[] reports = new StorageBlockReport[perVolumeBlockLists.size()];
    boolean corruptedGs = false;
    boolean corruptedLen = false;
    int reportIndex = 0;
    for (Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
        DatanodeStorage dnStorage = kvPair.getKey();
        BlockListAsLongs blockList = kvPair.getValue();
        // Walk the list of blocks until we find one each to corrupt the
        // generation stamp and length, if so requested.
        BlockListAsLongs.Builder builder = BlockListAsLongs.builder();
        for (BlockReportReplica block : blockList) {
            if (corruptOneBlockGs && !corruptedGs) {
                long gsOld = block.getGenerationStamp();
                long gsNew;
                do {
                    gsNew = rand.nextInt();
                } while (gsNew == gsOld);
                block.setGenerationStamp(gsNew);
                LOG.info("Corrupted the GS for block ID " + block);
                corruptedGs = true;
            } else if (corruptOneBlockLen && !corruptedLen) {
                long lenOld = block.getNumBytes();
                long lenNew;
                do {
                    lenNew = rand.nextInt((int) lenOld - 1);
                } while (lenNew == lenOld);
                block.setNumBytes(lenNew);
                LOG.info("Corrupted the length for block ID " + block);
                corruptedLen = true;
            }
            builder.add(new BlockReportReplica(block));
        }
        reports[reportIndex++] = new StorageBlockReport(dnStorage, builder.build());
    }
    return reports;
}
Also used: DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), BlockReportReplica (org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica), Map (java.util.Map)
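
For readers new to this API, here is a minimal, self-contained sketch (not taken from the Hadoop sources above) of assembling a BlockListAsLongs with its Builder and iterating the resulting BlockReportReplica entries; the class name and all block IDs, lengths, and generation stamps are made-up values for illustration.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;

public class BlockListAsLongsSketch {
    public static void main(String[] args) {
        // Assemble a report from two replicas (arbitrary id, length, generation stamp).
        BlockListAsLongs.Builder builder = BlockListAsLongs.builder();
        builder.add(new BlockReportReplica(new Block(1L, 1024L, 1001L)));
        builder.add(new BlockReportReplica(new Block(2L, 2048L, 1002L)));
        BlockListAsLongs report = builder.build();

        // BlockListAsLongs is iterable, which is what the corruption loop above relies on.
        System.out.println("blocks in report: " + report.getNumberOfBlocks());
        for (BlockReportReplica replica : report) {
            System.out.println(replica.getBlockId()
                + " len=" + replica.getNumBytes()
                + " gs=" + replica.getGenerationStamp());
        }
    }
}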

Example 2 with BlockListAsLongs

Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.

From the class BPServiceActor, method blockReport:

/**
   * Report the list of blocks to the NameNode.
   * @return DatanodeCommands returned by the NN. May be null.
   * @throws IOException
   */
List<DatanodeCommand> blockReport(long fullBrLeaseId) throws IOException {
    final ArrayList<DatanodeCommand> cmds = new ArrayList<DatanodeCommand>();
    // Flush any block information that precedes the block report. Otherwise
    // we have a chance that we will miss the delHint information
    // or we will report an RBW replica after the BlockReport already reports
    // a FINALIZED one.
    ibrManager.sendIBRs(bpNamenode, bpRegistration, bpos.getBlockPoolId(), dn.getMetrics());
    long brCreateStartTime = monotonicNow();
    Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists = dn.getFSDataset().getBlockReports(bpos.getBlockPoolId());
    // Convert the reports to the format expected by the NN.
    int i = 0;
    int totalBlockCount = 0;
    StorageBlockReport[] reports = new StorageBlockReport[perVolumeBlockLists.size()];
    for (Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
        BlockListAsLongs blockList = kvPair.getValue();
        reports[i++] = new StorageBlockReport(kvPair.getKey(), blockList);
        totalBlockCount += blockList.getNumberOfBlocks();
    }
    // Send the reports to the NN.
    int numReportsSent = 0;
    int numRPCs = 0;
    boolean success = false;
    long brSendStartTime = monotonicNow();
    long reportId = generateUniqueBlockReportId();
    boolean useBlocksBuffer = bpRegistration.getNamespaceInfo().isCapabilitySupported(NamespaceInfo.Capability.STORAGE_BLOCK_REPORT_BUFFERS);
    blockReportSizes.clear();
    try {
        if (totalBlockCount < dnConf.blockReportSplitThreshold) {
            // Below split threshold, send all reports in a single message.
            DatanodeCommand cmd = bpNamenode.blockReport(bpRegistration, bpos.getBlockPoolId(), reports, new BlockReportContext(1, 0, reportId, fullBrLeaseId, true));
            blockReportSizes.add(calculateBlockReportPBSize(useBlocksBuffer, reports));
            numRPCs = 1;
            numReportsSent = reports.length;
            if (cmd != null) {
                cmds.add(cmd);
            }
        } else {
            // Send one block report per message.
            for (int r = 0; r < reports.length; r++) {
                StorageBlockReport[] singleReport = { reports[r] };
                DatanodeCommand cmd = bpNamenode.blockReport(bpRegistration, bpos.getBlockPoolId(), singleReport, new BlockReportContext(reports.length, r, reportId, fullBrLeaseId, true));
                blockReportSizes.add(calculateBlockReportPBSize(useBlocksBuffer, singleReport));
                numReportsSent++;
                numRPCs++;
                if (cmd != null) {
                    cmds.add(cmd);
                }
            }
        }
        success = true;
    } finally {
        // Log the block report processing stats from Datanode perspective
        long brSendCost = monotonicNow() - brSendStartTime;
        long brCreateCost = brSendStartTime - brCreateStartTime;
        dn.getMetrics().addBlockReport(brSendCost);
        final int nCmds = cmds.size();
        LOG.info((success ? "S" : "Uns") + "uccessfully sent block report 0x" + Long.toHexString(reportId) + ",  containing " + reports.length + " storage report(s), of which we sent " + numReportsSent + "." + " The reports had " + totalBlockCount + " total blocks and used " + numRPCs + " RPC(s). This took " + brCreateCost + " msec to generate and " + brSendCost + " msecs for RPC and NN processing." + " Got back " + ((nCmds == 0) ? "no commands" : ((nCmds == 1) ? "one command: " + cmds.get(0) : (nCmds + " commands: " + Joiner.on("; ").join(cmds)))) + ".");
    }
    scheduler.updateLastBlockReportTime(monotonicNow());
    scheduler.scheduleNextBlockReport();
    return cmds.size() == 0 ? null : cmds;
}
Also used: ArrayList (java.util.ArrayList), StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), DatanodeCommand (org.apache.hadoop.hdfs.server.protocol.DatanodeCommand), BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), Map (java.util.Map), HashMap (java.util.HashMap)
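
To make the send path easier to follow, here is a hedged, standalone sketch of just the split decision in blockReport() above: below dfs.blockreport.split.threshold everything goes out in one RPC, otherwise one RPC per storage report. The sendRpc helper, the class name, and all numbers are hypothetical stand-ins, not Hadoop APIs.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class BlockReportSplitSketch {
    // Hypothetical stand-in for the blockReport() RPC to the NameNode.
    static void sendRpc(List<String> storageReports) {
        System.out.println("RPC carrying " + storageReports.size() + " storage report(s)");
    }

    public static void main(String[] args) {
        long splitThreshold = 1_000_000L;   // plays the role of dnConf.blockReportSplitThreshold
        long totalBlockCount = 2_500_000L;  // sum of blocks over all storages (made up)
        List<String> perStorageReports = Arrays.asList("storage-1", "storage-2", "storage-3");

        if (totalBlockCount < splitThreshold) {
            // Below the threshold: all per-storage reports go in a single message.
            sendRpc(new ArrayList<>(perStorageReports));
        } else {
            // At or above the threshold: one message per storage report.
            for (String report : perStorageReports) {
                sendRpc(Arrays.asList(report));
            }
        }
    }
}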

Example 3 with BlockListAsLongs

Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.

From the class TestDFSShell, method getMaterializedReplicas:

private static List<MaterializedReplica> getMaterializedReplicas(MiniDFSCluster cluster) throws IOException {
    List<MaterializedReplica> replicas = new ArrayList<>();
    String poolId = cluster.getNamesystem().getBlockPoolId();
    List<Map<DatanodeStorage, BlockListAsLongs>> blocks = cluster.getAllBlockReports(poolId);
    for (int i = 0; i < blocks.size(); i++) {
        Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
        for (Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
            for (Block b : e.getValue()) {
                replicas.add(cluster.getMaterializedReplica(i, new ExtendedBlock(poolId, b)));
            }
        }
    }
    return replicas;
}
Also used: MaterializedReplica (org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica), ArrayList (java.util.ArrayList), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), StringContains.containsString (org.hamcrest.core.StringContains.containsString), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), Block (org.apache.hadoop.hdfs.protocol.Block), Map (java.util.Map)
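
The loop above pairs each reported Block with the cluster's block-pool id to form an ExtendedBlock before looking up the on-disk replica. A minimal sketch of just that pairing; the class name, pool id, and block values are made up for illustration:

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class ExtendedBlockSketch {
    public static void main(String[] args) {
        String poolId = "BP-123456-127.0.0.1-1000000000000"; // made-up block pool id
        Block b = new Block(42L, 1024L, 1001L);              // made-up id, length, gen stamp
        // An ExtendedBlock is essentially a (block pool id, Block) pair.
        ExtendedBlock eb = new ExtendedBlock(poolId, b);
        System.out.println(eb.getBlockPoolId() + " blk_" + eb.getBlockId()
            + " len=" + eb.getNumBytes());
    }
}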

Example 4 with BlockListAsLongs

Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.

From the class TestDFSOutputStream, method testNoLocalWriteFlag:

@Test
public void testNoLocalWriteFlag() throws IOException {
    DistributedFileSystem fs = cluster.getFileSystem();
    EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.NO_LOCAL_WRITE, CreateFlag.CREATE);
    BlockManager bm = cluster.getNameNode().getNamesystem().getBlockManager();
    DatanodeManager dm = bm.getDatanodeManager();
    try (FSDataOutputStream os = fs.create(new Path("/test-no-local"), FsPermission.getDefault(), flags, 512, (short) 2, 512, null)) {
        // Inject a DatanodeManager that returns one DataNode as local node for
        // the client.
        DatanodeManager spyDm = spy(dm);
        DatanodeDescriptor dn1 = dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.LIVE).get(0);
        doReturn(dn1).when(spyDm).getDatanodeByHost("127.0.0.1");
        Whitebox.setInternalState(bm, "datanodeManager", spyDm);
        byte[] buf = new byte[512 * 16];
        new Random().nextBytes(buf);
        os.write(buf);
    } finally {
        Whitebox.setInternalState(bm, "datanodeManager", dm);
    }
    cluster.triggerBlockReports();
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    // Total number of DataNodes is 3.
    assertEquals(3, cluster.getAllBlockReports(bpid).size());
    int numDataNodesWithData = 0;
    for (Map<DatanodeStorage, BlockListAsLongs> dnBlocks : cluster.getAllBlockReports(bpid)) {
        for (BlockListAsLongs blocks : dnBlocks.values()) {
            if (blocks.getNumberOfBlocks() > 0) {
                numDataNodesWithData++;
                break;
            }
        }
    }
    // Verify that only one DN has no data.
    assertEquals(1, 3 - numDataNodesWithData);
}
Also used: CreateFlag (org.apache.hadoop.fs.CreateFlag), Path (org.apache.hadoop.fs.Path), DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor), DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager), Random (java.util.Random), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)

Example 5 with BlockListAsLongs

Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.

From the class TestDataNodeHotSwapVolumes, method testRemoveOneVolume:

@Test(timeout = 60000)
public void testRemoveOneVolume() throws ReconfigurationException, InterruptedException, TimeoutException, IOException {
    startDFSCluster(1, 1);
    final short replFactor = 1;
    Path testFile = new Path("/test");
    createFile(testFile, 10, replFactor);
    DataNode dn = cluster.getDataNodes().get(0);
    Collection<String> oldDirs = getDataDirs(dn);
    // Keep the first volume.
    String newDirs = oldDirs.iterator().next();
    assertThat("DN did not update its own config", dn.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs), is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
    assertFileLocksReleased(new ArrayList<String>(oldDirs).subList(1, oldDirs.size()));
    dn.scheduleAllBlockReport(0);
    try {
        DFSTestUtil.readFile(cluster.getFileSystem(), testFile);
        fail("Expect to throw BlockMissingException.");
    } catch (BlockMissingException e) {
        GenericTestUtils.assertExceptionContains("Could not obtain block", e);
    }
    Path newFile = new Path("/newFile");
    createFile(newFile, 6);
    String bpid = cluster.getNamesystem().getBlockPoolId();
    List<Map<DatanodeStorage, BlockListAsLongs>> blockReports = cluster.getAllBlockReports(bpid);
    assertEquals((int) replFactor, blockReports.size());
    BlockListAsLongs blocksForVolume1 = blockReports.get(0).values().iterator().next();
    // The remaining (first) volume holds half of testFile's 10 blocks and all 6 blocks of newFile.
    assertEquals(10 / 2 + 6, blocksForVolume1.getNumberOfBlocks());
}
Also used: Path (org.apache.hadoop.fs.Path), ArrayList (java.util.ArrayList), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), BlockMissingException (org.apache.hadoop.hdfs.BlockMissingException), CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString), Matchers.anyString (org.mockito.Matchers.anyString), Map (java.util.Map), Test (org.junit.Test)

Aggregations

BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs): 23 usages
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 12 usages
Test (org.junit.Test): 11 usages
ArrayList (java.util.ArrayList): 8 usages
Map (java.util.Map): 8 usages
StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport): 8 usages
Path (org.apache.hadoop.fs.Path): 7 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 7 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 5 usages
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString): 4 usages
Matchers.anyString (org.mockito.Matchers.anyString): 4 usages
IOException (java.io.IOException): 3 usages
BlockReportReplica (org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica): 3 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 3 usages
BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext): 3 usages
AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 3 usages
ServiceException (com.google.protobuf.ServiceException): 2 usages
HashMap (java.util.HashMap): 2 usages
HashSet (java.util.HashSet): 2 usages
Configuration (org.apache.hadoop.conf.Configuration): 2 usages