
Example 11 with StorageReceivedDeletedBlocks

Use of org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks in project hadoop by apache.

In the class DatanodeProtocolServerSideTranslatorPB, the method blockReceivedAndDeleted:

@Override
public BlockReceivedAndDeletedResponseProto blockReceivedAndDeleted(
        RpcController controller, BlockReceivedAndDeletedRequestProto request)
        throws ServiceException {
    // Unpack each per-storage report from the protobuf request.
    List<StorageReceivedDeletedBlocksProto> sBlocks = request.getBlocksList();
    StorageReceivedDeletedBlocks[] info = new StorageReceivedDeletedBlocks[sBlocks.size()];
    for (int i = 0; i < sBlocks.size(); i++) {
        StorageReceivedDeletedBlocksProto sBlock = sBlocks.get(i);
        List<ReceivedDeletedBlockInfoProto> list = sBlock.getBlocksList();
        ReceivedDeletedBlockInfo[] rdBlocks = new ReceivedDeletedBlockInfo[list.size()];
        for (int j = 0; j < list.size(); j++) {
            rdBlocks[j] = PBHelper.convert(list.get(j));
        }
        // Prefer the full DatanodeStorage when the request carries it;
        // otherwise fall back to the bare storage UUID.
        if (sBlock.hasStorage()) {
            info[i] = new StorageReceivedDeletedBlocks(PBHelperClient.convert(sBlock.getStorage()), rdBlocks);
        } else {
            info[i] = new StorageReceivedDeletedBlocks(new DatanodeStorage(sBlock.getStorageUuid()), rdBlocks);
        }
    }
    try {
        impl.blockReceivedAndDeleted(PBHelper.convert(request.getRegistration()), request.getBlockPoolId(), info);
    } catch (IOException e) {
        throw new ServiceException(e);
    }
    return VOID_BLOCK_RECEIVED_AND_DELETE_RESPONSE;
}
Also used:
ReceivedDeletedBlockInfoProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto)
ServiceException (com.google.protobuf.ServiceException)
StorageReceivedDeletedBlocksProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto)
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage)
StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks)
IOException (java.io.IOException)
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo)
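
For reference, here is a minimal sketch of assembling the in-memory report that this translator reconstructs on the server side. It uses only constructors that appear elsewhere in these examples; the block values, the storage UUID string, and the null deletion hint are placeholder assumptions, not values from the Hadoop source.

// Sketch only: build one per-storage report by hand.
DatanodeStorage storage = new DatanodeStorage("DS-example-uuid");
ReceivedDeletedBlockInfo[] rdbi = new ReceivedDeletedBlockInfo[] {
    // Block(blockId, numBytes, generationStamp); the null deletion hint is an assumption.
    new ReceivedDeletedBlockInfo(new Block(1000L, 128L, 1L),
        ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null)
};
StorageReceivedDeletedBlocks report = new StorageReceivedDeletedBlocks(storage, rdbi);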

Example 12 with StorageReceivedDeletedBlocks

Use of org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks in project hadoop by apache.

In the class IncrementalBlockReportManager, the method generateIBRs:

private synchronized StorageReceivedDeletedBlocks[] generateIBRs() {
    final List<StorageReceivedDeletedBlocks> reports = new ArrayList<>(pendingIBRs.size());
    for (Map.Entry<DatanodeStorage, PerStorageIBR> entry : pendingIBRs.entrySet()) {
        final PerStorageIBR perStorage = entry.getValue();
        // Send newly received and deleted block IDs to the namenode.
        // removeAll() returns null when nothing is pending for this storage.
        final ReceivedDeletedBlockInfo[] rdbi = perStorage.removeAll();
        if (rdbi != null) {
            reports.add(new StorageReceivedDeletedBlocks(entry.getKey(), rdbi));
        }
    }
    // Everything pending has now been drained into the reports list.
    readyToSend = false;
    return reports.toArray(new StorageReceivedDeletedBlocks[reports.size()]);
}
Also used:
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage)
ArrayList (java.util.ArrayList)
StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks)
Map (java.util.Map)
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo)
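
The array returned by generateIBRs feeds the IBR send path. A hedged sketch of that call, matching the server-side parameter order visible in Example 11; here namenode, bpRegistration, and bpid stand in for state the real class obtains elsewhere.

// Sketch only: `namenode` is assumed to be a DatanodeProtocol proxy,
// `bpRegistration` this datanode's registration, `bpid` the block pool ID.
StorageReceivedDeletedBlocks[] reports = generateIBRs();
if (reports.length > 0) {
    // Same parameter order as the server side in Example 11:
    // (registration, blockPoolId, per-storage reports)
    namenode.blockReceivedAndDeleted(bpRegistration, bpid, reports);
}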

Example 13 with StorageReceivedDeletedBlocks

Use of org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks in project hadoop by apache.

In the class TestAddStripedBlocks, the test method testCheckStripedReplicaCorrupt:

@Test
public void testCheckStripedReplicaCorrupt() throws Exception {
    final int numBlocks = 4;
    final int numStripes = 4;
    final Path filePath = new Path("/corrupt");
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();
    final BlockManager bm = ns.getBlockManager();
    DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks, numStripes, false);
    INodeFile fileNode = ns.getFSDirectory().getINode(filePath.toString()).asFile();
    Assert.assertTrue(fileNode.isStriped());
    BlockInfo stored = fileNode.getBlocks()[0];
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(0, ns.getCorruptReplicaBlocks());
    // Now send a block report with correct size
    DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
    final Block reported = new Block(stored);
    reported.setNumBytes(numStripes * cellSize);
    StorageReceivedDeletedBlocks[] reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(0).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(0, ns.getCorruptReplicaBlocks());
    // Now send a block report with a wrong size for another internal block
    reported.setBlockId(stored.getBlockId() + 1);
    reported.setNumBytes(numStripes * cellSize - 1);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(1).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    // Now send a parity block report with correct size
    reported.setBlockId(stored.getBlockId() + dataBlocks);
    reported.setNumBytes(numStripes * cellSize);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(2).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    // Now send a parity block report with wrong size
    reported.setBlockId(stored.getBlockId() + dataBlocks);
    reported.setNumBytes(numStripes * cellSize + 1);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(3).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    // the number of blocks with corrupt replicas is still 1
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    // 2 internal blocks corrupted
    Assert.assertEquals(2, bm.getCorruptReplicas(stored).size());
    // Now change the size of stored block, and test verifying the last
    // block size
    stored.setNumBytes(stored.getNumBytes() + 10);
    reported.setBlockId(stored.getBlockId() + dataBlocks + 2);
    reported.setNumBytes(numStripes * cellSize);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(4).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
    // Now send a parity block report with correct size based on adjusted
    // size of stored block.
    // After the next line, the stored block has numStripes full stripes
    // plus a cell plus 10 bytes.
    stored.setNumBytes(stored.getNumBytes() + cellSize);
    reported.setBlockId(stored.getBlockId());
    reported.setNumBytes((numStripes + 1) * cellSize);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(0).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
    reported.setBlockId(stored.getBlockId() + 1);
    reported.setNumBytes(numStripes * cellSize + 10);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(0).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
    reported.setBlockId(stored.getBlockId() + dataBlocks);
    reported.setNumBytes((numStripes + 1) * cellSize);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(2).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
}
Also used:
Path (org.apache.hadoop.fs.Path)
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager)
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo)
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage)
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)
Block (org.apache.hadoop.hdfs.protocol.Block)
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock)
StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks)
Test (org.junit.Test)
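
DFSTestUtil.makeReportForReceivedBlock does the packaging in this test. Its body is not shown on this page, but since the test always consumes reports[0], it plausibly wraps the single block in a one-element report array, roughly like this sketch; the null deletion hint is an assumption.

// A plausible sketch of the helper, inferred from how it is called above.
public static StorageReceivedDeletedBlocks[] makeReportForReceivedBlock(
        Block block, ReceivedDeletedBlockInfo.BlockStatus blockStatus,
        DatanodeStorage storage) {
    // One block, one storage: wrap both in single-element arrays.
    ReceivedDeletedBlockInfo[] receivedBlocks = new ReceivedDeletedBlockInfo[] {
        new ReceivedDeletedBlockInfo(block, blockStatus, null) // null delHints: assumption
    };
    return new StorageReceivedDeletedBlocks[] {
        new StorageReceivedDeletedBlocks(storage, receivedBlocks)
    };
}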

Aggregations

StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks): 13
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo): 10
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 8
Path (org.apache.hadoop.fs.Path): 6
Test (org.junit.Test): 6
Block (org.apache.hadoop.hdfs.protocol.Block): 5
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 5
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 5
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 4
IOException (java.io.IOException): 3
ArrayList (java.util.ArrayList): 3
Configuration (org.apache.hadoop.conf.Configuration): 3
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 3
BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext): 3
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 3
ServiceException (com.google.protobuf.ServiceException): 2
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 2
BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs): 2
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 2
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 2