
Example 11 with StorageBlockReport

use of org.apache.hadoop.hdfs.server.protocol.StorageBlockReport in project hadoop by apache.

From the class BlockReportTestBase, method testInterleavedBlockReports.

// See HDFS-10301
@Test(timeout = 300000)
public void testInterleavedBlockReports() throws IOException, ExecutionException, InterruptedException {
    int numConcurrentBlockReports = 3;
    DataNode dn = cluster.getDataNodes().get(DN_N0);
    final String poolId = cluster.getNamesystem().getBlockPoolId();
    LOG.info("Block pool id: " + poolId);
    final DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
    final StorageBlockReport[] reports = getBlockReports(dn, poolId, true, true);
    // Get the list of storage ids associated with the datanode
    // before the test
    BlockManager bm = cluster.getNameNode().getNamesystem().getBlockManager();
    final DatanodeDescriptor dnDescriptor = bm.getDatanodeManager().getDatanode(dn.getDatanodeId());
    DatanodeStorageInfo[] storageInfos = dnDescriptor.getStorageInfos();
    // Send the block report concurrently using
    // numThreads=numConcurrentBlockReports
    ExecutorService executorService = Executors.newFixedThreadPool(numConcurrentBlockReports);
    List<Future<Void>> futureList = new ArrayList<>(numConcurrentBlockReports);
    for (int i = 0; i < numConcurrentBlockReports; i++) {
        futureList.add(executorService.submit(new Callable<Void>() {

            @Override
            public Void call() throws IOException {
                sendBlockReports(dnR, poolId, reports);
                return null;
            }
        }));
    }
    for (Future<Void> future : futureList) {
        future.get();
    }
    executorService.shutdown();
    // Verify that the storages match before and after the test
    Assert.assertArrayEquals(storageInfos, dnDescriptor.getStorageInfos());
}
Also used: StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), ArrayList (java.util.ArrayList), Callable (java.util.concurrent.Callable), DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), ExecutorService (java.util.concurrent.ExecutorService), Future (java.util.concurrent.Future), Test (org.junit.Test)
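The concurrency scaffolding in this test is an ordinary fan-out/join: submit the same Callable several times to a fixed-size pool, then drain the futures so any IOException thrown by a worker propagates into the test thread as an ExecutionException. A minimal, self-contained sketch of just that pattern, with the task body as a hypothetical stand-in for sendBlockReports():

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class FanOutJoinSketch {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        final int numConcurrent = 3;
        ExecutorService pool = Executors.newFixedThreadPool(numConcurrent);
        List<Future<Void>> futures = new ArrayList<>(numConcurrent);
        for (int i = 0; i < numConcurrent; i++) {
            futures.add(pool.submit(new Callable<Void>() {
                @Override
                public Void call() {
                    // Stand-in for sendBlockReports(dnR, poolId, reports).
                    System.out.println(Thread.currentThread().getName() + ": report sent");
                    return null;
                }
            }));
        }
        // get() blocks until each task finishes and rethrows any task
        // exception as an ExecutionException, failing the caller.
        for (Future<Void> f : futures) {
            f.get();
        }
        pool.shutdown();
    }
}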

Example 12 with StorageBlockReport

use of org.apache.hadoop.hdfs.server.protocol.StorageBlockReport in project hadoop by apache.

From the class BPServiceActor, method blockReport.

/**
   * Report the list of blocks to the NameNode.
   * @return DatanodeCommands returned by the NN; may be null.
   * @throws IOException
   */
List<DatanodeCommand> blockReport(long fullBrLeaseId) throws IOException {
    final ArrayList<DatanodeCommand> cmds = new ArrayList<DatanodeCommand>();
    // Flush any block information that precedes the block report. Otherwise
    // we have a chance that we will miss the delHint information
    // or we will report an RBW replica after the BlockReport already reports
    // a FINALIZED one.
    ibrManager.sendIBRs(bpNamenode, bpRegistration, bpos.getBlockPoolId(), dn.getMetrics());
    long brCreateStartTime = monotonicNow();
    Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists = dn.getFSDataset().getBlockReports(bpos.getBlockPoolId());
    // Convert the reports to the format expected by the NN.
    int i = 0;
    int totalBlockCount = 0;
    StorageBlockReport[] reports = new StorageBlockReport[perVolumeBlockLists.size()];
    for (Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
        BlockListAsLongs blockList = kvPair.getValue();
        reports[i++] = new StorageBlockReport(kvPair.getKey(), blockList);
        totalBlockCount += blockList.getNumberOfBlocks();
    }
    // Send the reports to the NN.
    int numReportsSent = 0;
    int numRPCs = 0;
    boolean success = false;
    long brSendStartTime = monotonicNow();
    long reportId = generateUniqueBlockReportId();
    boolean useBlocksBuffer = bpRegistration.getNamespaceInfo().isCapabilitySupported(NamespaceInfo.Capability.STORAGE_BLOCK_REPORT_BUFFERS);
    blockReportSizes.clear();
    try {
        if (totalBlockCount < dnConf.blockReportSplitThreshold) {
            // Below split threshold, send all reports in a single message.
            DatanodeCommand cmd = bpNamenode.blockReport(bpRegistration, bpos.getBlockPoolId(), reports, new BlockReportContext(1, 0, reportId, fullBrLeaseId, true));
            blockReportSizes.add(calculateBlockReportPBSize(useBlocksBuffer, reports));
            numRPCs = 1;
            numReportsSent = reports.length;
            if (cmd != null) {
                cmds.add(cmd);
            }
        } else {
            // Send one block report per message.
            for (int r = 0; r < reports.length; r++) {
                StorageBlockReport[] singleReport = { reports[r] };
                DatanodeCommand cmd = bpNamenode.blockReport(bpRegistration, bpos.getBlockPoolId(), singleReport, new BlockReportContext(reports.length, r, reportId, fullBrLeaseId, true));
                blockReportSizes.add(calculateBlockReportPBSize(useBlocksBuffer, singleReport));
                numReportsSent++;
                numRPCs++;
                if (cmd != null) {
                    cmds.add(cmd);
                }
            }
        }
        success = true;
    } finally {
        // Log the block report processing stats from Datanode perspective
        long brSendCost = monotonicNow() - brSendStartTime;
        long brCreateCost = brSendStartTime - brCreateStartTime;
        dn.getMetrics().addBlockReport(brSendCost);
        final int nCmds = cmds.size();
        LOG.info((success ? "S" : "Uns") + "uccessfully sent block report 0x" + Long.toHexString(reportId) + ",  containing " + reports.length + " storage report(s), of which we sent " + numReportsSent + "." + " The reports had " + totalBlockCount + " total blocks and used " + numRPCs + " RPC(s). This took " + brCreateCost + " msec to generate and " + brSendCost + " msecs for RPC and NN processing." + " Got back " + ((nCmds == 0) ? "no commands" : ((nCmds == 1) ? "one command: " + cmds.get(0) : (nCmds + " commands: " + Joiner.on("; ").join(cmds)))) + ".");
    }
    scheduler.updateLastBlockReportTime(monotonicNow());
    scheduler.scheduleNextBlockReport();
    return cmds.size() == 0 ? null : cmds;
}
Also used: ArrayList (java.util.ArrayList), StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), DatanodeCommand (org.apache.hadoop.hdfs.server.protocol.DatanodeCommand), BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), Map (java.util.Map), HashMap (java.util.HashMap)
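Two details are worth isolating. Incremental block reports are flushed first so the NameNode never processes stale incremental state after the full report, and the split decision keeps RPC payloads bounded: below dnConf.blockReportSplitThreshold every storage's report shares one RPC, otherwise each storage gets its own, with BlockReportContext's (totalRpcs, curRpc) pair letting the NameNode reassemble the sequence. A sketch of the split decision alone, using a hypothetical Report type and send() method in place of StorageBlockReport and bpNamenode.blockReport():

public class SplitThresholdDemo {
    static class Report { final String storageId; Report(String s) { storageId = s; } }

    // Hypothetical stand-in for the NameNode RPC.
    static void send(Report[] batch, int totalRpcs, int curRpc) {
        System.out.println("RPC " + curRpc + "/" + totalRpcs + " carrying " + batch.length + " report(s)");
    }

    static void reportAll(Report[] reports, long totalBlocks, long splitThreshold) {
        if (totalBlocks < splitThreshold) {
            // Below the threshold: all storages share a single RPC,
            // so the context says "1 RPC total, index 0".
            send(reports, 1, 0);
        } else {
            // Above the threshold: one storage per RPC; (totalRpcs, curRpc)
            // tells the receiver when the last piece has arrived.
            for (int r = 0; r < reports.length; r++) {
                send(new Report[] { reports[r] }, reports.length, r);
            }
        }
    }

    public static void main(String[] args) {
        Report[] reports = { new Report("s1"), new Report("s2"), new Report("s3") };
        reportAll(reports, 500_000, 1_000_000);   // below threshold: 1 RPC
        reportAll(reports, 2_000_000, 1_000_000); // above threshold: 3 RPCs
    }
}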

Example 13 with StorageBlockReport

use of org.apache.hadoop.hdfs.server.protocol.StorageBlockReport in project hadoop by apache.

From the class TestAddStripedBlocks, method testAddUCReplica.

/**
   * Test BlockInfoStripedUnderConstruction#addReplicaIfNotPresent in different
   * scenarios.
   */
@Test
public void testAddUCReplica() throws Exception {
    final Path file = new Path("/file1");
    final List<String> storageIDs = new ArrayList<>();
    // create an empty file
    FSDataOutputStream out = null;
    try {
        out = dfs.create(file, (short) 1);
        // 1. create the UC striped block
        FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
        INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
        cluster.getNamesystem().getAdditionalBlock(file.toString(), fileNode.getId(), dfs.getClient().getClientName(), null, null, null, null);
        BlockInfo lastBlock = fileNode.getLastBlock();
        DatanodeStorageInfo[] locs = lastBlock.getUnderConstructionFeature().getExpectedStorageLocations();
        byte[] indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
        Assert.assertEquals(groupSize, locs.length);
        Assert.assertEquals(groupSize, indices.length);
        // 2. mimic incremental block reports and make sure the uc-replica list in
        // the BlockInfoUCStriped is correct
        int i = 0;
        for (DataNode dn : cluster.getDataNodes()) {
            final Block block = new Block(lastBlock.getBlockId() + i++, 0, lastBlock.getGenerationStamp());
            DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
            storageIDs.add(storage.getStorageID());
            StorageReceivedDeletedBlocks[] reports = DFSTestUtil.makeReportForReceivedBlock(block, BlockStatus.RECEIVING_BLOCK, storage);
            for (StorageReceivedDeletedBlocks report : reports) {
                cluster.getNamesystem().processIncrementalBlockReport(dn.getDatanodeId(), report);
            }
        }
        // make sure lastBlock is correct and the storages have been updated
        locs = lastBlock.getUnderConstructionFeature().getExpectedStorageLocations();
        indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
        Assert.assertEquals(groupSize, locs.length);
        Assert.assertEquals(groupSize, indices.length);
        for (DatanodeStorageInfo newstorage : locs) {
            Assert.assertTrue(storageIDs.contains(newstorage.getStorageID()));
        }
    } finally {
        IOUtils.cleanup(null, out);
    }
    // 3. restart the namenode. mimic the full block reports and check the
    // uc-replica list again
    cluster.restartNameNode(true);
    final String bpId = cluster.getNamesystem().getBlockPoolId();
    INodeFile fileNode = cluster.getNamesystem().getFSDirectory().getINode4Write(file.toString()).asFile();
    BlockInfo lastBlock = fileNode.getLastBlock();
    int i = groupSize - 1;
    for (DataNode dn : cluster.getDataNodes()) {
        String storageID = storageIDs.get(i);
        final Block block = new Block(lastBlock.getBlockId() + i--, 0, lastBlock.getGenerationStamp());
        DatanodeStorage storage = new DatanodeStorage(storageID);
        List<ReplicaBeingWritten> blocks = new ArrayList<>();
        ReplicaBeingWritten replica = new ReplicaBeingWritten(block, null, null, null);
        blocks.add(replica);
        BlockListAsLongs bll = BlockListAsLongs.encode(blocks);
        StorageBlockReport[] reports = { new StorageBlockReport(storage, bll) };
        cluster.getNameNodeRpc().blockReport(dn.getDNRegistrationForBP(bpId), bpId, reports, new BlockReportContext(1, 0, System.nanoTime(), 0, true));
    }
    DatanodeStorageInfo[] locs = lastBlock.getUnderConstructionFeature().getExpectedStorageLocations();
    byte[] indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
    Assert.assertEquals(groupSize, locs.length);
    Assert.assertEquals(groupSize, indices.length);
    for (i = 0; i < groupSize; i++) {
        Assert.assertEquals(storageIDs.get(i), locs[groupSize - 1 - i].getStorageID());
        Assert.assertEquals(groupSize - i - 1, indices[i]);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), ReplicaBeingWritten (org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten), ArrayList (java.util.ArrayList), StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Block (org.apache.hadoop.hdfs.protocol.Block), LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock), StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
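The last loop doubles as a recipe for hand-building a full block report: encode the replica list with BlockListAsLongs.encode(), pair the result with its DatanodeStorage in a StorageBlockReport, and send a one-element array through the NameNode RPC. The same steps, extracted into a hypothetical helper (not part of Hadoop; it assumes the registration, storage, and block are obtained as in the test above):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;

public class SingleReplicaReportHelper {
    // Hypothetical helper: report one RBW replica on one storage as a
    // full block report and return whatever command the NameNode sends back.
    static DatanodeCommand reportSingleReplica(DatanodeProtocol nn,
            DatanodeRegistration reg, String bpId,
            DatanodeStorage storage, Block block) throws IOException {
        List<ReplicaBeingWritten> replicas = new ArrayList<>();
        replicas.add(new ReplicaBeingWritten(block, null, null, null));
        BlockListAsLongs encoded = BlockListAsLongs.encode(replicas);
        StorageBlockReport[] reports = { new StorageBlockReport(storage, encoded) };
        // Context: a single RPC (total=1, index=0), nanoTime as the report
        // id, no full-BR lease, blocks sorted.
        return nn.blockReport(reg, bpId, reports,
                new BlockReportContext(1, 0, System.nanoTime(), 0, true));
    }
}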

Example 14 with StorageBlockReport

use of org.apache.hadoop.hdfs.server.protocol.StorageBlockReport in project hadoop by apache.

From the class DatanodeProtocolClientSideTranslatorPB, method blockReport.

@Override
public DatanodeCommand blockReport(DatanodeRegistration registration, String poolId, StorageBlockReport[] reports, BlockReportContext context) throws IOException {
    BlockReportRequestProto.Builder builder = BlockReportRequestProto.newBuilder().setRegistration(PBHelper.convert(registration)).setBlockPoolId(poolId);
    boolean useBlocksBuffer = registration.getNamespaceInfo().isCapabilitySupported(Capability.STORAGE_BLOCK_REPORT_BUFFERS);
    for (StorageBlockReport r : reports) {
        StorageBlockReportProto.Builder reportBuilder = StorageBlockReportProto.newBuilder().setStorage(PBHelperClient.convert(r.getStorage()));
        BlockListAsLongs blocks = r.getBlocks();
        if (useBlocksBuffer) {
            reportBuilder.setNumberOfBlocks(blocks.getNumberOfBlocks());
            reportBuilder.addAllBlocksBuffers(blocks.getBlocksBuffers());
        } else {
            for (long value : blocks.getBlockListAsLongs()) {
                reportBuilder.addBlocks(value);
            }
        }
        builder.addReports(reportBuilder.build());
    }
    builder.setContext(PBHelper.convert(context));
    BlockReportResponseProto resp;
    try {
        resp = rpcProxy.blockReport(NULL_CONTROLLER, builder.build());
    } catch (ServiceException se) {
        throw ProtobufHelper.getRemoteException(se);
    }
    return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
}
Also used: BlockReportRequestProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto), ServiceException (com.google.protobuf.ServiceException), StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), BlockReportResponseProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto), StorageBlockReportProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto)
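The useBlocksBuffer branch is a capability negotiation: a NameNode advertising STORAGE_BLOCK_REPORT_BUFFERS receives the already-serialized block buffers plus a count, while an older NameNode gets the same data re-expanded into repeated longs. A sketch of that negotiation's shape, with a hypothetical ReportBuilder standing in for StorageBlockReportProto.Builder (byte[] stands in for its ByteStrings):

public class CapabilityGatedEncoding {
    interface ReportBuilder {
        void setNumberOfBlocks(long n);
        void addBlocksBuffer(byte[] buffer);
        void addBlock(long encodedField);
    }

    static void encodeBlocks(ReportBuilder builder, boolean peerSupportsBuffers,
            long numberOfBlocks, byte[] rawBuffer, long[] blocksAsLongs) {
        if (peerSupportsBuffers) {
            // New format: ship the serialized buffer as-is; the count lets
            // the receiver size its decoder without scanning the buffer.
            builder.setNumberOfBlocks(numberOfBlocks);
            builder.addBlocksBuffer(rawBuffer);
        } else {
            // Legacy format: one repeated long per encoded field.
            for (long value : blocksAsLongs) {
                builder.addBlock(value);
            }
        }
    }
}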

Example 15 with StorageBlockReport

use of org.apache.hadoop.hdfs.server.protocol.StorageBlockReport in project hadoop by apache.

From the class DatanodeProtocolServerSideTranslatorPB, method blockReport.

@Override
public BlockReportResponseProto blockReport(RpcController controller, BlockReportRequestProto request) throws ServiceException {
    DatanodeCommand cmd = null;
    StorageBlockReport[] report = new StorageBlockReport[request.getReportsCount()];
    int index = 0;
    for (StorageBlockReportProto s : request.getReportsList()) {
        final BlockListAsLongs blocks;
        if (s.hasNumberOfBlocks()) {
            // new style buffer based reports
            int num = (int) s.getNumberOfBlocks();
            Preconditions.checkState(s.getBlocksCount() == 0, "cannot send both blocks list and buffers");
            blocks = BlockListAsLongs.decodeBuffers(num, s.getBlocksBuffersList(), maxDataLength);
        } else {
            blocks = BlockListAsLongs.decodeLongs(s.getBlocksList(), maxDataLength);
        }
        report[index++] = new StorageBlockReport(PBHelperClient.convert(s.getStorage()), blocks);
    }
    try {
        cmd = impl.blockReport(PBHelper.convert(request.getRegistration()), request.getBlockPoolId(), report, request.hasContext() ? PBHelper.convert(request.getContext()) : null);
    } catch (IOException e) {
        throw new ServiceException(e);
    }
    BlockReportResponseProto.Builder builder = BlockReportResponseProto.newBuilder();
    if (cmd != null) {
        builder.setCmd(PBHelper.convert(cmd));
    }
    return builder.build();
}
Also used: DatanodeCommand (org.apache.hadoop.hdfs.server.protocol.DatanodeCommand), ServiceException (com.google.protobuf.ServiceException), StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), IOException (java.io.IOException), BlockReportResponseProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto), StorageBlockReportProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto)
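On the receiving side the two wire formats are distinguished by field presence, and the translator rejects messages that set both before decoding. The same validate-then-decode shape, sketched with a hypothetical WireReport interface in place of StorageBlockReportProto (decodeBuffers here is a placeholder for BlockListAsLongs.decodeBuffers):

import java.util.List;

public class DualFormatDecode {
    interface WireReport {
        boolean hasNumberOfBlocks();
        long getNumberOfBlocks();
        int getBlocksCount();
        List<Long> getBlocksList();
        List<byte[]> getBlocksBuffersList();
    }

    static long[] decode(WireReport s) {
        if (s.hasNumberOfBlocks()) {
            // Buffer-based report: the repeated-longs field must be empty,
            // since the two encodings are mutually exclusive on the wire.
            if (s.getBlocksCount() != 0) {
                throw new IllegalStateException("cannot send both blocks list and buffers");
            }
            return decodeBuffers((int) s.getNumberOfBlocks(), s.getBlocksBuffersList());
        }
        // Legacy report: copy the repeated longs straight out.
        long[] out = new long[s.getBlocksList().size()];
        for (int i = 0; i < out.length; i++) {
            out[i] = s.getBlocksList().get(i);
        }
        return out;
    }

    static long[] decodeBuffers(int numBlocks, List<byte[]> buffers) {
        // Placeholder: the real decoder walks the buffers as a varint stream.
        return new long[0];
    }
}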

Aggregations

StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport) 28
Test (org.junit.Test) 21
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) 15
Path (org.apache.hadoop.fs.Path) 11
BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs) 8
BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext) 8
Block (org.apache.hadoop.hdfs.protocol.Block) 7
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock) 7
ArrayList (java.util.ArrayList) 6
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock) 6
DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB) 6
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) 6
NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode) 4
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream) 3
BlockReportResponseProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto) 3
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode) 3
DatanodeCommand (org.apache.hadoop.hdfs.server.protocol.DatanodeCommand) 3
InvocationOnMock (org.mockito.invocation.InvocationOnMock) 3
ServiceException (com.google.protobuf.ServiceException) 2
IOException (java.io.IOException) 2