Example 1 with ReceivedDeletedBlockInfo

Use of org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo in project hadoop by apache.
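
For context: a ReceivedDeletedBlockInfo pairs a Block with a BlockStatus (RECEIVING_BLOCK, RECEIVED_BLOCK, or DELETED_BLOCK) plus an optional hint string (delHints), and DataNodes batch these into StorageReceivedDeletedBlocks arrays sent to the NameNode through DatanodeProtocol#blockReceivedAndDeleted, the incremental block report (IBR) path. A minimal construction sketch (block id, length, and generation stamp are arbitrary values):

// Arbitrary block: id=42, numBytes=512, generation stamp=1000.
Block block = new Block(42, 512, 1000);
// A replica still being written to this DataNode.
ReceivedDeletedBlockInfo receiving =
    new ReceivedDeletedBlockInfo(block, BlockStatus.RECEIVING_BLOCK, null);
// A finalized replica; the third argument is the optional deletion hint.
ReceivedDeletedBlockInfo received =
    new ReceivedDeletedBlockInfo(block, BlockStatus.RECEIVED_BLOCK, null);
// A replica deleted from this DataNode.
ReceivedDeletedBlockInfo deleted =
    new ReceivedDeletedBlockInfo(block, BlockStatus.DELETED_BLOCK, null);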

In class TestTriggerBlockReport, method testTriggerBlockReport:

private void testTriggerBlockReport(boolean incremental) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Set a really long value for dfs.blockreport.intervalMsec and
    // dfs.heartbeat.interval, so that incremental block reports and heartbeats
    // won't be sent during this test unless they're triggered
    // manually.
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10800000L);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1080L);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    DatanodeProtocolClientSideTranslatorPB spy = InternalDataNodeTestUtils.spyOnBposToNN(cluster.getDataNodes().get(0), cluster.getNameNode());
    DFSTestUtil.createFile(fs, new Path("/abc"), 16, (short) 1, 1L);
    // We should get 1 incremental block report.
    Mockito.verify(spy, timeout(60000).times(1)).blockReceivedAndDeleted(any(DatanodeRegistration.class), anyString(), any(StorageReceivedDeletedBlocks[].class));
    // We should not get any further incremental or full block reports,
    // since the interval we configured is so long.
    for (int i = 0; i < 3; i++) {
        Thread.sleep(10);
        Mockito.verify(spy, times(0)).blockReport(any(DatanodeRegistration.class), anyString(), any(StorageBlockReport[].class), Mockito.<BlockReportContext>anyObject());
        Mockito.verify(spy, times(1)).blockReceivedAndDeleted(any(DatanodeRegistration.class), anyString(), any(StorageReceivedDeletedBlocks[].class));
    }
    // Create a fake block deletion notification on the DataNode.
    // This will be sent with the next incremental block report.
    ReceivedDeletedBlockInfo rdbi = new ReceivedDeletedBlockInfo(new Block(5678, 512, 1000), BlockStatus.DELETED_BLOCK, null);
    DataNode datanode = cluster.getDataNodes().get(0);
    BPServiceActor actor = datanode.getAllBpOs().get(0).getBPServiceActors().get(0);
    final FsDatasetSpi<?> dataset = datanode.getFSDataset();
    final DatanodeStorage storage;
    try (FsDatasetSpi.FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
        storage = dataset.getStorage(volumes.get(0).getStorageID());
    }
    actor.getIbrManager().addRDBI(rdbi, storage);
    // Manually trigger a block report.
    datanode.triggerBlockReport(new BlockReportOptions.Factory().setIncremental(incremental).build());
    // triggerBlockReport returns before the block report is actually sent.
    // Wait for it to be sent here.
    if (incremental) {
        Mockito.verify(spy, timeout(60000).times(2)).blockReceivedAndDeleted(any(DatanodeRegistration.class), anyString(), any(StorageReceivedDeletedBlocks[].class));
    } else {
        Mockito.verify(spy, timeout(60000)).blockReport(any(DatanodeRegistration.class), anyString(), any(StorageBlockReport[].class), Mockito.<BlockReportContext>anyObject());
    }
    cluster.shutdown();
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) DatanodeProtocolClientSideTranslatorPB(org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) FileSystem(org.apache.hadoop.fs.FileSystem) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) Block(org.apache.hadoop.hdfs.protocol.Block) BlockReportOptions(org.apache.hadoop.hdfs.client.BlockReportOptions) ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo)
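
A note on the pattern above: DataNode#triggerBlockReport only schedules the report (as the comment in the test says, it returns before the report is actually sent), which is why the verifications use Mockito's timeout(). The same options object selects the report kind; a small sketch, reusing the test's datanode variable:

// Flush any pending incremental block report immediately.
datanode.triggerBlockReport(
    new BlockReportOptions.Factory().setIncremental(true).build());
// Or force a full block report instead.
datanode.triggerBlockReport(
    new BlockReportOptions.Factory().setIncremental(false).build());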

Example 2 with ReceivedDeletedBlockInfo

Use of org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo in project hadoop by apache.

In class DFSTestUtil, method makeReportForReceivedBlock:

public static StorageReceivedDeletedBlocks[] makeReportForReceivedBlock(Block block, BlockStatus blockStatus, DatanodeStorage storage) {
    ReceivedDeletedBlockInfo[] receivedBlocks = new ReceivedDeletedBlockInfo[1];
    receivedBlocks[0] = new ReceivedDeletedBlockInfo(block, blockStatus, null);
    StorageReceivedDeletedBlocks[] reports = new StorageReceivedDeletedBlocks[1];
    reports[0] = new StorageReceivedDeletedBlocks(storage, receivedBlocks);
    return reports;
}
Also used : StorageReceivedDeletedBlocks(org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks) ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo)
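
A hedged usage sketch for this helper, assuming a running MiniDFSCluster cluster, a DataNode registration dnReg, the block pool id poolId, and some Block block (these names are placeholders, not from the source above):

StorageReceivedDeletedBlocks[] report = DFSTestUtil.makeReportForReceivedBlock(
    block, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
    new DatanodeStorage("storage-uuid"));
cluster.getNameNodeRpc().blockReceivedAndDeleted(dnReg, poolId, report);

Because both arrays are sized one, each call reports a single block for a single storage; tests loop over this helper when they need multiple reports.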

Example 3 with ReceivedDeletedBlockInfo

Use of org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo in project hadoop by apache.

In class TestBlockManager, method testSafeModeIBRBeforeFirstFullBR:

/**
   * Test that when the NN starts in safe mode and receives an incremental
   * block report first, it then correctly processes the first full block report.
   */
@Test
public void testSafeModeIBRBeforeFirstFullBR() throws Exception {
    // pretend to be in safemode
    doReturn(true).when(fsn).isInStartupSafeMode();
    DatanodeDescriptor node = nodes.get(0);
    DatanodeStorageInfo ds = node.getStorageInfos()[0];
    node.setAlive(true);
    DatanodeRegistration nodeReg = new DatanodeRegistration(node, null, null, "");
    // register new node
    bm.getDatanodeManager().registerDatanode(nodeReg);
    bm.getDatanodeManager().addDatanode(node);
    assertEquals(node, bm.getDatanodeManager().getDatanode(node));
    assertEquals(0, ds.getBlockReportCount());
    // Build an incremental report
    List<ReceivedDeletedBlockInfo> rdbiList = new ArrayList<>();
    // Build a full report
    BlockListAsLongs.Builder builder = BlockListAsLongs.builder();
    // blk_42 is finalized; the block id is arbitrary.
    long receivedBlockId = 42;
    BlockInfo receivedBlock = addBlockToBM(receivedBlockId);
    rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivedBlock), ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
    builder.add(new FinalizedReplica(receivedBlock, null, null));
    // blk_43 is under construction.
    long receivingBlockId = 43;
    BlockInfo receivingBlock = addUcBlockToBM(receivingBlockId);
    rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingBlock), ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null));
    builder.add(new ReplicaBeingWritten(receivingBlock, null, null, null));
    // blk_44 has 2 records in IBR. It's finalized. So full BR has 1 record.
    long receivingReceivedBlockId = 44;
    BlockInfo receivingReceivedBlock = addBlockToBM(receivingReceivedBlockId);
    rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingReceivedBlock), ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null));
    rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingReceivedBlock), ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
    builder.add(new FinalizedReplica(receivingReceivedBlock, null, null));
    // blk_45 is not in full BR, because it's deleted.
    long receivedDeletedBlockId = 45;
    rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivedDeletedBlockId), ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
    rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivedDeletedBlockId), ReceivedDeletedBlockInfo.BlockStatus.DELETED_BLOCK, null));
    // blk_46 exists in DN for a long time, so it's in full BR, but not in IBR.
    long existedBlockId = 46;
    BlockInfo existedBlock = addBlockToBM(existedBlockId);
    builder.add(new FinalizedReplica(existedBlock, null, null));
    // process IBR and full BR
    StorageReceivedDeletedBlocks srdb = new StorageReceivedDeletedBlocks(new DatanodeStorage(ds.getStorageID()), rdbiList.toArray(new ReceivedDeletedBlockInfo[rdbiList.size()]));
    bm.processIncrementalBlockReport(node, srdb);
    // Make sure it's the first full report
    assertEquals(0, ds.getBlockReportCount());
    bm.processReport(node, new DatanodeStorage(ds.getStorageID()), builder.build(), new BlockReportContext(1, 0, System.nanoTime(), 0, true));
    assertEquals(1, ds.getBlockReportCount());
    // verify the storage info is correct
    assertTrue(bm.getStoredBlock(new Block(receivedBlockId)).findStorageInfo(ds) >= 0);
    assertTrue(bm.getStoredBlock(new Block(receivingBlockId)).getUnderConstructionFeature().getNumExpectedLocations() > 0);
    assertTrue(bm.getStoredBlock(new Block(receivingReceivedBlockId)).findStorageInfo(ds) >= 0);
    assertNull(bm.getStoredBlock(new Block(receivedDeletedBlockId)));
    assertTrue(bm.getStoredBlock(new Block(existedBlock)).findStorageInfo(ds) >= 0);
}
Also used : ReplicaBeingWritten(org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten) ArrayList(java.util.ArrayList) FinalizedReplica(org.apache.hadoop.hdfs.server.datanode.FinalizedReplica) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo) BlockReportContext(org.apache.hadoop.hdfs.server.protocol.BlockReportContext) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) BlockListAsLongs(org.apache.hadoop.hdfs.protocol.BlockListAsLongs) Block(org.apache.hadoop.hdfs.protocol.Block) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) StorageReceivedDeletedBlocks(org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks) Test(org.junit.Test)
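
The wrap-and-deliver step at the end of the test is a recurring idiom; pulled out as a helper it would look roughly like this (a sketch, with bm, node, and ds as in the test):

// Wrap a list of IBR entries for one storage and feed it to the BlockManager.
private static void deliverIbr(BlockManager bm, DatanodeDescriptor node,
    DatanodeStorageInfo ds, List<ReceivedDeletedBlockInfo> rdbiList)
    throws IOException {
  StorageReceivedDeletedBlocks srdb = new StorageReceivedDeletedBlocks(
      new DatanodeStorage(ds.getStorageID()),
      rdbiList.toArray(new ReceivedDeletedBlockInfo[0]));
  bm.processIncrementalBlockReport(node, srdb);
}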

Example 4 with ReceivedDeletedBlockInfo

Use of org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo in project hadoop by apache.

In class TestDeadDatanode, method testDeadDatanode:

/**
   * Test to ensure namenode rejects request from dead datanode
   * - Start a cluster
   * - Shutdown the datanode and wait for it to be marked dead at the namenode
   * - Send datanode requests to Namenode and make sure it is rejected 
   *   appropriately.
   */
@Test
public void testDeadDatanode() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    String poolId = cluster.getNamesystem().getBlockPoolId();
    // wait for datanode to be marked live
    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeRegistration reg = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
    DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), true, 20000);
    // Shutdown and wait for datanode to be marked dead
    dn.shutdown();
    DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), false, 20000);
    DatanodeProtocol dnp = cluster.getNameNodeRpc();
    ReceivedDeletedBlockInfo[] blocks = { new ReceivedDeletedBlockInfo(new Block(0), ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null) };
    StorageReceivedDeletedBlocks[] storageBlocks = { new StorageReceivedDeletedBlocks(new DatanodeStorage(reg.getDatanodeUuid()), blocks) };
    // Ensure blockReceived call from dead datanode is not rejected with
    // IOException, since it's async, but the node remains unregistered.
    dnp.blockReceivedAndDeleted(reg, poolId, storageBlocks);
    BlockManager bm = cluster.getNamesystem().getBlockManager();
    // IBRs are async, make sure the NN processes all of them.
    bm.flushBlockOps();
    assertFalse(bm.getDatanodeManager().getDatanode(reg).isRegistered());
    // Ensure blockReport from dead datanode is rejected with IOException
    StorageBlockReport[] report = { new StorageBlockReport(new DatanodeStorage(reg.getDatanodeUuid()), BlockListAsLongs.EMPTY) };
    try {
        dnp.blockReport(reg, poolId, report, new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
        fail("Expected IOException is not thrown");
    } catch (IOException ex) {
    // Expected
    }
    // Ensure heartbeat from dead datanode is rejected with a command
    // that asks datanode to register again
    StorageReport[] rep = { new StorageReport(new DatanodeStorage(reg.getDatanodeUuid()), false, 0, 0, 0, 0, 0) };
    DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null, true, SlowPeerReports.EMPTY_REPORT).getCommands();
    assertEquals(1, cmd.length);
    assertEquals(RegisterCommand.REGISTER.getAction(), cmd[0].getAction());
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) StorageBlockReport(org.apache.hadoop.hdfs.server.protocol.StorageBlockReport) StorageReport(org.apache.hadoop.hdfs.server.protocol.StorageReport) IOException(java.io.IOException) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DatanodeCommand(org.apache.hadoop.hdfs.server.protocol.DatanodeCommand) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) BlockReportContext(org.apache.hadoop.hdfs.server.protocol.BlockReportContext) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) Block(org.apache.hadoop.hdfs.protocol.Block) StorageReceivedDeletedBlocks(org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks) DatanodeProtocol(org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol) ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo) Test(org.junit.Test)
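
The try/fail/catch block above is the classic expected-exception idiom. Where org.apache.hadoop.test.LambdaTestUtils is available, the same check could be written more compactly (a sketch, not what this test actually does):

// Assert that blockReport from an unregistered (dead) node throws IOException.
LambdaTestUtils.intercept(IOException.class, () ->
    dnp.blockReport(reg, poolId, report,
        new BlockReportContext(1, 0, System.nanoTime(), 0L, true)));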

Example 5 with ReceivedDeletedBlockInfo

Use of org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo in project hadoop by apache.

In class TestPendingReconstruction, method testBlockReceived:

/**
   * Test if DatanodeProtocol#blockReceivedAndDeleted can correctly update the
   * pending reconstruction. Also make sure the blockReceivedAndDeleted call is
   * idempotent to the pending reconstruction.
   */
@Test
public void testBlockReceived() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_COUNT).build();
        cluster.waitActive();
        DistributedFileSystem hdfs = cluster.getFileSystem();
        FSNamesystem fsn = cluster.getNamesystem();
        BlockManager blkManager = fsn.getBlockManager();
        final String file = "/tmp.txt";
        final Path filePath = new Path(file);
        short replFactor = 1;
        DFSTestUtil.createFile(hdfs, filePath, 1024L, replFactor, 0);
        // temporarily stop the heartbeat
        ArrayList<DataNode> datanodes = cluster.getDataNodes();
        for (int i = 0; i < DATANODE_COUNT; i++) {
            DataNodeTestUtils.setHeartbeatsDisabledForTests(datanodes.get(i), true);
        }
        hdfs.setReplication(filePath, (short) DATANODE_COUNT);
        BlockManagerTestUtil.computeAllPendingWork(blkManager);
        assertEquals(1, blkManager.pendingReconstruction.size());
        INodeFile fileNode = fsn.getFSDirectory().getINode4Write(file).asFile();
        BlockInfo[] blocks = fileNode.getBlocks();
        assertEquals(DATANODE_COUNT - 1, blkManager.pendingReconstruction.getNumReplicas(blocks[0]));
        LocatedBlock locatedBlock = hdfs.getClient().getLocatedBlocks(file, 0).get(0);
        DatanodeInfo existingDn = (locatedBlock.getLocations())[0];
        int reportDnNum = 0;
        String poolId = cluster.getNamesystem().getBlockPoolId();
        // let two datanodes (other than the one that already has the data) report to NN
        for (int i = 0; i < DATANODE_COUNT && reportDnNum < 2; i++) {
            if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
                DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(poolId);
                StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(new DatanodeStorage("Fake-storage-ID-Ignored"), new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(blocks[0], BlockStatus.RECEIVED_BLOCK, "") }) };
                cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
                reportDnNum++;
            }
        }
        // IBRs are async, make sure the NN processes all of them.
        cluster.getNamesystem().getBlockManager().flushBlockOps();
        assertEquals(DATANODE_COUNT - 3, blkManager.pendingReconstruction.getNumReplicas(blocks[0]));
        // let the same datanodes report again; reset the counter so the
        // second round of identical IBRs actually runs
        reportDnNum = 0;
        for (int i = 0; i < DATANODE_COUNT && reportDnNum < 2; i++) {
            if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
                DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(poolId);
                StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(new DatanodeStorage("Fake-storage-ID-Ignored"), new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(blocks[0], BlockStatus.RECEIVED_BLOCK, "") }) };
                cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
                reportDnNum++;
            }
        }
        cluster.getNamesystem().getBlockManager().flushBlockOps();
        assertEquals(DATANODE_COUNT - 3, blkManager.pendingReconstruction.getNumReplicas(blocks[0]));
        // re-enable heartbeats for all datanodes and trigger one immediately
        for (int i = 0; i < DATANODE_COUNT; i++) {
            DataNodeTestUtils.setHeartbeatsDisabledForTests(datanodes.get(i), false);
            DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
        }
        Thread.sleep(5000);
        assertEquals(0, blkManager.pendingReconstruction.size());
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) StorageReceivedDeletedBlocks(org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)
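
Worth spelling out the arithmetic above: with a replication target of DATANODE_COUNT and one existing replica, pendingReconstruction starts at DATANODE_COUNT - 1; each of the two acknowledged replicas decrements it, giving DATANODE_COUNT - 3, and the repeated identical IBRs leave it unchanged, which is the idempotency claim in the javadoc. The flushBlockOps() call before each assertion matters because blockReceivedAndDeleted is processed asynchronously on the NameNode; a condensed sketch of the verify step, reusing the test's names:

cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
// Drain the NN's async IBR queue before asserting on pending counts.
cluster.getNamesystem().getBlockManager().flushBlockOps();
assertEquals(DATANODE_COUNT - 3,
    blkManager.pendingReconstruction.getNumReplicas(blocks[0]));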

Aggregations

ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo) 11
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) 7
StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks) 7
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) 4
Configuration (org.apache.hadoop.conf.Configuration) 3
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 3
Block (org.apache.hadoop.hdfs.protocol.Block) 3
Test (org.junit.Test) 3
ServiceException (com.google.protobuf.ServiceException) 2
IOException (java.io.IOException) 2
ArrayList (java.util.ArrayList) 2
Path (org.apache.hadoop.fs.Path) 2
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 2
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock) 2
StorageReceivedDeletedBlocksProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto) 2
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode) 2
BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext) 2
Map (java.util.Map) 1
FileSystem (org.apache.hadoop.fs.FileSystem) 1
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) 1