Example 16 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class TestDeadDatanode method testDeadDatanode.

/**
   * Test to ensure the namenode rejects requests from a dead datanode:
   * - Start a cluster
   * - Shut down the datanode and wait for it to be marked dead at the namenode
   * - Send datanode requests to the namenode and make sure they are rejected
   *   appropriately.
   */
@Test
public void testDeadDatanode() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    String poolId = cluster.getNamesystem().getBlockPoolId();
    // wait for datanode to be marked live
    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeRegistration reg = InternalDataNodeTestUtils.getDNRegistrationForBP(dn, poolId);
    DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), true, 20000);
    // Shutdown and wait for datanode to be marked dead
    dn.shutdown();
    DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), false, 20000);
    DatanodeProtocol dnp = cluster.getNameNodeRpc();
    ReceivedDeletedBlockInfo[] blocks = { new ReceivedDeletedBlockInfo(new Block(0), ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null) };
    StorageReceivedDeletedBlocks[] storageBlocks = { new StorageReceivedDeletedBlocks(new DatanodeStorage(reg.getDatanodeUuid()), blocks) };
    // The blockReceivedAndDeleted call from the dead datanode is not rejected
    // with an IOException, because IBRs are processed asynchronously, but the
    // node must remain unregistered.
    dnp.blockReceivedAndDeleted(reg, poolId, storageBlocks);
    BlockManager bm = cluster.getNamesystem().getBlockManager();
    // IBRs are async, make sure the NN processes all of them.
    bm.flushBlockOps();
    assertFalse(bm.getDatanodeManager().getDatanode(reg).isRegistered());
    // Ensure blockReport from dead datanode is rejected with IOException
    StorageBlockReport[] report = { new StorageBlockReport(new DatanodeStorage(reg.getDatanodeUuid()), BlockListAsLongs.EMPTY) };
    try {
        dnp.blockReport(reg, poolId, report, new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
        fail("Expected IOException is not thrown");
    } catch (IOException ex) {
    // Expected
    }
    // Ensure heartbeat from dead datanode is rejected with a command
    // that asks datanode to register again
    StorageReport[] rep = { new StorageReport(new DatanodeStorage(reg.getDatanodeUuid()), false, 0, 0, 0, 0, 0) };
    DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null, true, SlowPeerReports.EMPTY_REPORT).getCommands();
    assertEquals(1, cmd.length);
    assertEquals(RegisterCommand.REGISTER.getAction(), cmd[0].getAction());
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) StorageBlockReport(org.apache.hadoop.hdfs.server.protocol.StorageBlockReport) StorageReport(org.apache.hadoop.hdfs.server.protocol.StorageReport) IOException(java.io.IOException) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DatanodeCommand(org.apache.hadoop.hdfs.server.protocol.DatanodeCommand) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) BlockReportContext(org.apache.hadoop.hdfs.server.protocol.BlockReportContext) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) Block(org.apache.hadoop.hdfs.protocol.Block) StorageReceivedDeletedBlocks(org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks) DatanodeProtocol(org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol) ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo) Test(org.junit.Test)
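
The final assertions above check that the heartbeat reply carries a RegisterCommand. As a minimal sketch of how a caller might express that check generically, here is a hypothetical helper (the class and method are invented for illustration; DatanodeCommand, DatanodeProtocol, and the DNA_REGISTER action code are from the Hadoop sources):

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

class HeartbeatReplies {

    // Hypothetical helper: returns true if any command in a heartbeat
    // reply is a re-registration request, i.e. the NameNode no longer
    // recognizes the node and asks it to register again.
    static boolean mustReregister(DatanodeCommand[] cmds) {
        for (DatanodeCommand cmd : cmds) {
            if (cmd.getAction() == DatanodeProtocol.DNA_REGISTER) {
                return true;
            }
        }
        return false;
    }
}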

Example 17 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class TestDeadDatanode method testDeadNodeAsBlockTarget.

@Test
public void testDeadNodeAsBlockTarget() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    String poolId = cluster.getNamesystem().getBlockPoolId();
    // wait for datanode to be marked live
    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeRegistration reg = InternalDataNodeTestUtils.getDNRegistrationForBP(dn, poolId);
    // Get the updated datanode descriptor
    BlockManager bm = cluster.getNamesystem().getBlockManager();
    DatanodeManager dm = bm.getDatanodeManager();
    Node clientNode = dm.getDatanode(reg);
    DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), true, 20000);
    // Shutdown and wait for datanode to be marked dead
    dn.shutdown();
    DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), false, 20000);
    // Choose targets for a new block; the dead local node must not be
    // selected, since it is no longer part of the cluster.
    DatanodeStorageInfo[] results = bm.chooseTarget4NewBlock("/hello", 3, clientNode, new HashSet<Node>(), 256 * 1024 * 1024L, null, (byte) 7, BlockType.CONTIGUOUS, null);
    for (DatanodeStorageInfo datanodeStorageInfo : results) {
        assertFalse("Dead node should not be choosen", datanodeStorageInfo.getDatanodeDescriptor().equals(clientNode));
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) Node(org.apache.hadoop.net.Node) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) Test(org.junit.Test)
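
chooseTarget4NewBlock also takes an explicit exclusion set (the empty HashSet in the call above). A hedged variant of that call, assuming the same surrounding test state, that rules the client node out up front rather than relying on its dead state:

// Illustrative variant, not part of the Hadoop test: pre-populate the
// excludedNodes set so the placement policy never considers clientNode.
Set<Node> excluded = new HashSet<>();
excluded.add(clientNode);
DatanodeStorageInfo[] targets = bm.chooseTarget4NewBlock("/hello", 3, clientNode, excluded, 256 * 1024 * 1024L, null, (byte) 7, BlockType.CONTIGUOUS, null);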

Example 18 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class TestPBHelper method testConvertDatanodeRegistration.

@Test
public void testConvertDatanodeRegistration() {
    DatanodeID dnId = DFSTestUtil.getLocalDatanodeID();
    BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
    ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10, getBlockKey(1), keys);
    DatanodeRegistration reg = new DatanodeRegistration(dnId, new StorageInfo(NodeType.DATA_NODE), expKeys, "3.0.0");
    DatanodeRegistrationProto proto = PBHelper.convert(reg);
    DatanodeRegistration reg2 = PBHelper.convert(proto);
    compare(reg.getStorageInfo(), reg2.getStorageInfo());
    compare(reg.getExportedKeys(), reg2.getExportedKeys());
    compare(reg, reg2);
    assertEquals(reg.getSoftwareVersion(), reg2.getSoftwareVersion());
}
Also used : DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DatanodeRegistrationProto(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto) StorageInfo(org.apache.hadoop.hdfs.server.common.StorageInfo) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) ExportedBlockKeys(org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys) BlockKey(org.apache.hadoop.hdfs.security.token.block.BlockKey) Test(org.junit.Test)

Example 19 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class TestBlockListAsLongs method testDatanodeDetect.

@Test
public void testDatanodeDetect() throws ServiceException, IOException {
    final AtomicReference<BlockReportRequestProto> request = new AtomicReference<>();
    // just capture the outgoing PB
    DatanodeProtocolPB mockProxy = mock(DatanodeProtocolPB.class);
    doAnswer(new Answer<BlockReportResponseProto>() {

        public BlockReportResponseProto answer(InvocationOnMock invocation) {
            Object[] args = invocation.getArguments();
            request.set((BlockReportRequestProto) args[1]);
            return BlockReportResponseProto.newBuilder().build();
        }
    }).when(mockProxy).blockReport(any(RpcController.class), any(BlockReportRequestProto.class));
    @SuppressWarnings("resource") DatanodeProtocolClientSideTranslatorPB nn = new DatanodeProtocolClientSideTranslatorPB(mockProxy);
    DatanodeRegistration reg = DFSTestUtil.getLocalDatanodeRegistration();
    NamespaceInfo nsInfo = new NamespaceInfo(1, "cluster", "bp", 1);
    reg.setNamespaceInfo(nsInfo);
    Replica r = new FinalizedReplica(new Block(1, 2, 3), null, null);
    BlockListAsLongs bbl = BlockListAsLongs.encode(Collections.singleton(r));
    DatanodeStorage storage = new DatanodeStorage("s1");
    StorageBlockReport[] sbr = { new StorageBlockReport(storage, bbl) };
    // check DN sends new-style BR
    request.set(null);
    nsInfo.setCapabilities(Capability.STORAGE_BLOCK_REPORT_BUFFERS.getMask());
    nn.blockReport(reg, "pool", sbr, new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
    BlockReportRequestProto proto = request.get();
    assertNotNull(proto);
    assertTrue(proto.getReports(0).getBlocksList().isEmpty());
    assertFalse(proto.getReports(0).getBlocksBuffersList().isEmpty());
    // back up to prior version and check DN sends old-style BR
    request.set(null);
    nsInfo.setCapabilities(Capability.UNKNOWN.getMask());
    BlockListAsLongs blockList = getBlockList(r);
    StorageBlockReport[] obp = new StorageBlockReport[] { new StorageBlockReport(new DatanodeStorage("s1"), blockList) };
    nn.blockReport(reg, "pool", obp, new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
    proto = request.get();
    assertNotNull(proto);
    assertFalse(proto.getReports(0).getBlocksList().isEmpty());
    assertTrue(proto.getReports(0).getBlocksBuffersList().isEmpty());
}
Also used : StorageBlockReport(org.apache.hadoop.hdfs.server.protocol.StorageBlockReport) AtomicReference(java.util.concurrent.atomic.AtomicReference) FinalizedReplica(org.apache.hadoop.hdfs.server.datanode.FinalizedReplica) BlockReportReplica(org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica) Replica(org.apache.hadoop.hdfs.server.datanode.Replica) FinalizedReplica(org.apache.hadoop.hdfs.server.datanode.FinalizedReplica) RpcController(com.google.protobuf.RpcController) DatanodeProtocolClientSideTranslatorPB(org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB) DatanodeProtocolPB(org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolPB) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) BlockReportRequestProto(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto) InvocationOnMock(org.mockito.invocation.InvocationOnMock) BlockReportContext(org.apache.hadoop.hdfs.server.protocol.BlockReportContext) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) BlockReportResponseProto(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto) NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo) Test(org.junit.Test)
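
The two halves of this test pivot on a single capability bit in the NamespaceInfo attached to the registration. A short sketch of that decision, assuming the reg and nsInfo set up above (isCapabilitySupported and the Capability enum exist in the Hadoop sources; the fragment itself is illustrative, not the translator's verbatim code):

// Illustrative fragment: the client-side translator picks the report
// encoding based on the NameNode's advertised capabilities.
boolean useBuffers = reg.getNamespaceInfo()
    .isCapabilitySupported(NamespaceInfo.Capability.STORAGE_BLOCK_REPORT_BUFFERS);
// true  -> report sent as packed buffers (blocksBuffersList non-empty)
// false -> legacy repeated-longs encoding (blocksList non-empty)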

Example 20 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class DataNode method createBPRegistration.

/**
   * Create a DatanodeRegistration for a specific block pool.
   * @param nsInfo the namespace info from the first part of the NN handshake
   */
DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
    StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
    if (storageInfo == null) {
        // it's null in the case of SimulatedDataSet
        storageInfo = new StorageInfo(DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION, nsInfo.getNamespaceID(), nsInfo.clusterID, nsInfo.getCTime(), NodeType.DATA_NODE);
    }
    DatanodeID dnId = new DatanodeID(streamingAddr.getAddress().getHostAddress(), hostName, storage.getDatanodeUuid(), getXferPort(), getInfoPort(), infoSecurePort, getIpcPort());
    return new DatanodeRegistration(dnId, storageInfo, new ExportedBlockKeys(), VersionInfo.getVersion());
}
Also used : DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) StorageInfo(org.apache.hadoop.hdfs.server.common.StorageInfo) ExportedBlockKeys(org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys)
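
A registration built this way is then offered to the NameNode during the handshake. A minimal sketch of that next step, assuming a DatanodeProtocol proxy named namenode (the wrapper method is hypothetical; registerDatanode is part of DatanodeProtocol, and the real logic lives in BPServiceActor):

import java.io.IOException;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;

// Hypothetical wrapper, for illustration only:
DatanodeRegistration handshake(DatanodeProtocol namenode, NamespaceInfo nsInfo)
        throws IOException {
    DatanodeRegistration reg = createBPRegistration(nsInfo);
    // The NameNode may hand back an updated registration, so the
    // DataNode adopts the returned object rather than its own copy.
    return namenode.registerDatanode(reg);
}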

Aggregations

DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 48 usages
Test (org.junit.Test): 36 usages
Configuration (org.apache.hadoop.conf.Configuration): 19 usages
Path (org.apache.hadoop.fs.Path): 16 usages
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 12 usages
StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport): 12 usages
ArrayList (java.util.ArrayList): 10 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 10 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 10 usages
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 10 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 9 usages
IOException (java.io.IOException): 8 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 8 usages
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 8 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 8 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 8 usages
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 7 usages
StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo): 7 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 6 usages
BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext): 5 usages