Example 36 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class TestDatanodeManager method testRejectUnresolvedDatanodes.

@Test(timeout = 100000)
public void testRejectUnresolvedDatanodes() throws IOException {
    //Create the DatanodeManager which will be tested
    FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
    Mockito.when(fsn.hasWriteLock()).thenReturn(true);
    Configuration conf = new Configuration();
    //Set configuration property for rejecting unresolved topology mapping
    conf.setBoolean(DFSConfigKeys.DFS_REJECT_UNRESOLVED_DN_TOPOLOGY_MAPPING_KEY, true);
    //set TestDatanodeManager.MyResolver to be used for topology resolving
    conf.setClass(CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, TestDatanodeManager.MyResolver.class, DNSToSwitchMapping.class);
    //create DatanodeManager
    DatanodeManager dm = mockDatanodeManager(fsn, conf);
    //storageID to register.
    String storageID = "someStorageID-123";
    DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
    Mockito.when(dr.getDatanodeUuid()).thenReturn(storageID);
    try {
        //Register this node
        dm.registerDatanode(dr);
        Assert.fail("Expected an UnresolvedTopologyException");
    } catch (UnresolvedTopologyException ute) {
        LOG.info("Expected - topology is not resolved and " + "registration is rejected.");
    } catch (Exception e) {
        Assert.fail("Expected an UnresolvedTopologyException");
    }
}
Also used : DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) Configuration(org.apache.hadoop.conf.Configuration) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) URISyntaxException(java.net.URISyntaxException) IOException(java.io.IOException) Test(org.junit.Test)
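
The test relies on TestDatanodeManager.MyResolver (not shown here) to leave the datanode's topology mapping unresolved. Purely as an illustration, a resolver that never resolves a rack might look like the sketch below; UnresolvingMapping is a hypothetical name, and the real MyResolver in TestDatanodeManager may behave differently.

import java.util.List;
import org.apache.hadoop.net.DNSToSwitchMapping;

// Hypothetical resolver: returning null (or an empty list) leaves the mapping
// unresolved, so registration is expected to fail with
// UnresolvedTopologyException when the reject-unresolved key is enabled.
public class UnresolvingMapping implements DNSToSwitchMapping {
    @Override
    public List<String> resolve(List<String> names) {
        return null;
    }

    @Override
    public void reloadCachedMappings() {
        // nothing cached in this sketch
    }

    @Override
    public void reloadCachedMappings(List<String> names) {
        // nothing cached in this sketch
    }
}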

Example 37 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class TestHeartbeatHandling method testHeartbeat.

/**
   * Test if
   * {@link FSNamesystem#handleHeartbeat}
   * can pick up replication and/or invalidation requests and observe the max
   * limit.
   */
@Test
public void testHeartbeat() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        final FSNamesystem namesystem = cluster.getNamesystem();
        final HeartbeatManager hm = namesystem.getBlockManager().getDatanodeManager().getHeartbeatManager();
        final String poolId = namesystem.getBlockPoolId();
        final DatanodeRegistration nodeReg = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
        final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem, nodeReg);
        final String storageID = DatanodeStorage.generateUuid();
        dd.updateStorage(new DatanodeStorage(storageID));
        final int REMAINING_BLOCKS = 1;
        final int MAX_REPLICATE_LIMIT = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 2);
        final int MAX_INVALIDATE_LIMIT = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT;
        final int MAX_INVALIDATE_BLOCKS = 2 * MAX_INVALIDATE_LIMIT + REMAINING_BLOCKS;
        final int MAX_REPLICATE_BLOCKS = 2 * MAX_REPLICATE_LIMIT + REMAINING_BLOCKS;
        final DatanodeStorageInfo[] ONE_TARGET = { dd.getStorageInfo(storageID) };
        try {
            namesystem.writeLock();
            synchronized (hm) {
                for (int i = 0; i < MAX_REPLICATE_BLOCKS; i++) {
                    dd.addBlockToBeReplicated(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP), ONE_TARGET);
                }
                DatanodeCommand[] cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem).getCommands();
                assertEquals(1, cmds.length);
                assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
                assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand) cmds[0]).getBlocks().length);
                ArrayList<Block> blockList = new ArrayList<Block>(MAX_INVALIDATE_BLOCKS);
                for (int i = 0; i < MAX_INVALIDATE_BLOCKS; i++) {
                    blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
                }
                dd.addBlocksToBeInvalidated(blockList);
                cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem).getCommands();
                assertEquals(2, cmds.length);
                assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
                assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand) cmds[0]).getBlocks().length);
                assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
                assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand) cmds[1]).getBlocks().length);
                cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem).getCommands();
                assertEquals(2, cmds.length);
                assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
                assertEquals(REMAINING_BLOCKS, ((BlockCommand) cmds[0]).getBlocks().length);
                assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
                assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand) cmds[1]).getBlocks().length);
                cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem).getCommands();
                assertEquals(1, cmds.length);
                assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction());
                assertEquals(REMAINING_BLOCKS, ((BlockCommand) cmds[0]).getBlocks().length);
                cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem).getCommands();
                assertEquals(0, cmds.length);
            }
        } finally {
            namesystem.writeUnlock();
        }
    } finally {
        cluster.shutdown();
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ArrayList(java.util.ArrayList) BlockCommand(org.apache.hadoop.hdfs.server.protocol.BlockCommand) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DatanodeCommand(org.apache.hadoop.hdfs.server.protocol.DatanodeCommand) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) Block(org.apache.hadoop.hdfs.protocol.Block) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)
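
The arithmetic behind the assertion sequence above: each heartbeat reply carries at most MAX_REPLICATE_LIMIT replication targets and MAX_INVALIDATE_LIMIT invalidation targets, so a queue of 2 * limit + REMAINING_BLOCKS entries drains over three heartbeats (limit, limit, then the remainder). A standalone sketch of that calculation, not part of the Hadoop test and using an assumed limit of 2:

// Ceiling division gives the number of heartbeats needed to drain a queue
// when at most `perHeartbeatLimit` blocks are handed out per heartbeat.
public class HeartbeatDrainMath {
    static int heartbeatsToDrain(int queued, int perHeartbeatLimit) {
        return (queued + perHeartbeatLimit - 1) / perHeartbeatLimit;
    }

    public static void main(String[] args) {
        int limit = 2;              // assumed default for max replication streams
        int queued = 2 * limit + 1; // mirrors MAX_REPLICATE_BLOCKS in the test
        System.out.println(heartbeatsToDrain(queued, limit)); // prints 3
    }
}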

Example 38 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class TestHeartbeatHandling method testHeartbeatBlockRecovery.

/**
   * Test if
   * {@link FSNamesystem#handleHeartbeat}
   * correctly selects data node targets for block recovery.
   */
@Test
public void testHeartbeatBlockRecovery() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
        cluster.waitActive();
        final FSNamesystem namesystem = cluster.getNamesystem();
        final HeartbeatManager hm = namesystem.getBlockManager().getDatanodeManager().getHeartbeatManager();
        final String poolId = namesystem.getBlockPoolId();
        final DatanodeRegistration nodeReg1 = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
        final DatanodeDescriptor dd1 = NameNodeAdapter.getDatanode(namesystem, nodeReg1);
        dd1.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
        final DatanodeRegistration nodeReg2 = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(1), poolId);
        final DatanodeDescriptor dd2 = NameNodeAdapter.getDatanode(namesystem, nodeReg2);
        dd2.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
        final DatanodeRegistration nodeReg3 = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(2), poolId);
        final DatanodeDescriptor dd3 = NameNodeAdapter.getDatanode(namesystem, nodeReg3);
        dd3.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
        try {
            namesystem.writeLock();
            synchronized (hm) {
                NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem);
                NameNodeAdapter.sendHeartBeat(nodeReg2, dd2, namesystem);
                NameNodeAdapter.sendHeartBeat(nodeReg3, dd3, namesystem);
                // Test with all alive nodes.
                DFSTestUtil.resetLastUpdatesWithOffset(dd1, 0);
                DFSTestUtil.resetLastUpdatesWithOffset(dd2, 0);
                DFSTestUtil.resetLastUpdatesWithOffset(dd3, 0);
                final DatanodeStorageInfo[] storages = { dd1.getStorageInfos()[0], dd2.getStorageInfos()[0], dd3.getStorageInfos()[0] };
                BlockInfo blockInfo = new BlockInfoContiguous(new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3);
                blockInfo.convertToBlockUnderConstruction(BlockUCState.UNDER_RECOVERY, storages);
                dd1.addBlockToBeRecovered(blockInfo);
                DatanodeCommand[] cmds = NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
                assertEquals(1, cmds.length);
                assertEquals(DatanodeProtocol.DNA_RECOVERBLOCK, cmds[0].getAction());
                BlockRecoveryCommand recoveryCommand = (BlockRecoveryCommand) cmds[0];
                assertEquals(1, recoveryCommand.getRecoveringBlocks().size());
                DatanodeInfo[] recoveringNodes = recoveryCommand.getRecoveringBlocks().toArray(new BlockRecoveryCommand.RecoveringBlock[0])[0].getLocations();
                assertEquals(3, recoveringNodes.length);
                assertEquals(recoveringNodes[0], dd1);
                assertEquals(recoveringNodes[1], dd2);
                assertEquals(recoveringNodes[2], dd3);
                // Test with one stale node.
                DFSTestUtil.resetLastUpdatesWithOffset(dd1, 0);
                // More than the default stale interval of 30 seconds.
                DFSTestUtil.resetLastUpdatesWithOffset(dd2, -40 * 1000);
                DFSTestUtil.resetLastUpdatesWithOffset(dd3, 0);
                blockInfo = new BlockInfoContiguous(new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3);
                blockInfo.convertToBlockUnderConstruction(BlockUCState.UNDER_RECOVERY, storages);
                dd1.addBlockToBeRecovered(blockInfo);
                cmds = NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
                assertEquals(1, cmds.length);
                assertEquals(DatanodeProtocol.DNA_RECOVERBLOCK, cmds[0].getAction());
                recoveryCommand = (BlockRecoveryCommand) cmds[0];
                assertEquals(1, recoveryCommand.getRecoveringBlocks().size());
                recoveringNodes = recoveryCommand.getRecoveringBlocks().toArray(new BlockRecoveryCommand.RecoveringBlock[0])[0].getLocations();
                assertEquals(2, recoveringNodes.length);
                // dd2 is skipped.
                assertEquals(recoveringNodes[0], dd1);
                assertEquals(recoveringNodes[1], dd3);
                // Test with all stale node.
                DFSTestUtil.resetLastUpdatesWithOffset(dd1, -60 * 1000);
                // More than the default stale interval of 30 seconds.
                DFSTestUtil.resetLastUpdatesWithOffset(dd2, -40 * 1000);
                DFSTestUtil.resetLastUpdatesWithOffset(dd3, -80 * 1000);
                blockInfo = new BlockInfoContiguous(new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3);
                blockInfo.convertToBlockUnderConstruction(BlockUCState.UNDER_RECOVERY, storages);
                dd1.addBlockToBeRecovered(blockInfo);
                cmds = NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
                assertEquals(1, cmds.length);
                assertEquals(DatanodeProtocol.DNA_RECOVERBLOCK, cmds[0].getAction());
                recoveryCommand = (BlockRecoveryCommand) cmds[0];
                assertEquals(1, recoveryCommand.getRecoveringBlocks().size());
                recoveringNodes = recoveryCommand.getRecoveringBlocks().toArray(new BlockRecoveryCommand.RecoveringBlock[0])[0].getLocations();
                // Only dd1 is included since it heartbeated and hence it's not stale
                // when the list of recovery blocks is constructed.
                assertEquals(3, recoveringNodes.length);
                assertEquals(recoveringNodes[0], dd1);
                assertEquals(recoveringNodes[1], dd2);
                assertEquals(recoveringNodes[2], dd3);
            }
        } finally {
            namesystem.writeUnlock();
        }
    } finally {
        cluster.shutdown();
    }
}
Also used : BlockRecoveryCommand(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DatanodeCommand(org.apache.hadoop.hdfs.server.protocol.DatanodeCommand) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) Block(org.apache.hadoop.hdfs.protocol.Block) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)
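
The -40, -60, and -80 second offsets above push nodes past the default stale interval of 30 seconds. As a rough sketch of that rule only (the real check lives in the namenode's datanode management code and the interval is configurable), a node can be treated as stale when its last heartbeat is older than the interval:

// Illustrative staleness rule assuming the 30-second default interval.
public final class StalenessCheck {
    static final long STALE_INTERVAL_MS = 30_000L;

    static boolean isStale(long lastUpdateMs, long nowMs) {
        return nowMs - lastUpdateMs > STALE_INTERVAL_MS;
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        System.out.println(isStale(now - 40_000L, now)); // true, like dd2 above
        System.out.println(isStale(now, now));           // false, freshly heartbeated
    }
}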

Example 39 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class BlockReportTestBase method blockReport_07.

/**
   * Similar to BlockReport_03() but works with two DNs.
   * The test writes a file and closes it.
   * A second datanode is started in the cluster.
   * As soon as the replication process is completed, the test finds a block on
   * the second DN and sets its GS to be less than the original one.
   * This is the markBlockAsCorrupt case 3, so we expect one pending deletion.
   * A block report is forced and the number of corrupt blocks is checked.
   * Another block is chosen and its length is set to less than the original.
   * A check for another corrupted block is performed after yet another
   * block report.
   *
   * @throws IOException in case of an error
   */
@Test(timeout = 300000)
public void blockReport_07() throws Exception {
    final String METHOD_NAME = GenericTestUtils.getMethodName();
    Path filePath = new Path("/" + METHOD_NAME + ".dat");
    final int DN_N1 = DN_N0 + 1;
    // write file and start second node to be "older" than the original
    writeFile(METHOD_NAME, FILE_SIZE, filePath);
    startDNandWait(filePath, true);
    // all blocks belong to the same file, hence same BP
    DataNode dn = cluster.getDataNodes().get(DN_N1);
    String poolId = cluster.getNamesystem().getBlockPoolId();
    DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
    StorageBlockReport[] reports = getBlockReports(dn, poolId, true, false);
    sendBlockReports(dnR, poolId, reports);
    printStats();
    assertThat("Wrong number of corrupt blocks", cluster.getNamesystem().getCorruptReplicaBlocks(), is(0L));
    assertThat("Wrong number of PendingDeletion blocks", cluster.getNamesystem().getPendingDeletionBlocks(), is(1L));
    assertThat("Wrong number of PendingReplication blocks", cluster.getNamesystem().getPendingReplicationBlocks(), is(0L));
    reports = getBlockReports(dn, poolId, false, true);
    sendBlockReports(dnR, poolId, reports);
    printStats();
    assertThat("Wrong number of corrupt blocks", cluster.getNamesystem().getCorruptReplicaBlocks(), is(1L));
    assertThat("Wrong number of PendingDeletion blocks", cluster.getNamesystem().getPendingDeletionBlocks(), is(1L));
    assertThat("Wrong number of PendingReplication blocks", cluster.getNamesystem().getPendingReplicationBlocks(), is(0L));
    printStats();
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) StorageBlockReport(org.apache.hadoop.hdfs.server.protocol.StorageBlockReport) Test(org.junit.Test)
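
The corruption described in the javadoc (a generation stamp lower than the original, then a shorter length) is produced inside getBlockReports via its two boolean flags. Purely as an illustration of what that tampering amounts to on a Block object, and not the helper's actual code:

import org.apache.hadoop.hdfs.protocol.Block;

public class CorruptBlockSketch {
    public static void main(String[] args) {
        // blockId, numBytes, generationStamp
        Block b = new Block(1L, 1024L, 1000L);
        b.setGenerationStamp(b.getGenerationStamp() - 1); // GS lower than the original
        b.setNumBytes(b.getNumBytes() - 1);               // length shorter than the original
        System.out.println(b);
    }
}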

Example 40 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class BlockReportTestBase method blockReport_08.

/**
   * The test sets the configuration parameters for a large block size and
   * restarts the initially started single-node cluster.
   * Then it writes a file larger than the block size and closes it.
   * A second datanode is started in the cluster.
   * As soon as the replication process is started and at least one TEMPORARY
   * replica is found, the test forces a block report and checks that the
   * TEMPORARY replica is not reported in it.
   * Finally, the configuration is restored to its original state.
   *
   * @throws IOException in case of an error
   */
@Test(timeout = 300000)
public void blockReport_08() throws IOException {
    final String METHOD_NAME = GenericTestUtils.getMethodName();
    Path filePath = new Path("/" + METHOD_NAME + ".dat");
    final int DN_N1 = DN_N0 + 1;
    final int bytesChkSum = 1024 * 1000;
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, bytesChkSum);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 6 * bytesChkSum);
    shutDownCluster();
    startUpCluster();
    try {
        ArrayList<Block> blocks = writeFile(METHOD_NAME, 12 * bytesChkSum, filePath);
        Block bl = findBlock(filePath, 12 * bytesChkSum);
        BlockChecker bc = new BlockChecker(filePath);
        bc.start();
        waitForTempReplica(bl, DN_N1);
        // all blocks belong to the same file, hence same BP
        DataNode dn = cluster.getDataNodes().get(DN_N1);
        String poolId = cluster.getNamesystem().getBlockPoolId();
        DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
        StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
        sendBlockReports(dnR, poolId, reports);
        printStats();
        assertEquals("Wrong number of PendingReplication blocks", blocks.size(), cluster.getNamesystem().getPendingReplicationBlocks());
        try {
            bc.join();
        } catch (InterruptedException e) {
        }
    } finally {
        // return the initial state of the configuration
        resetConfiguration();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) StorageBlockReport(org.apache.hadoop.hdfs.server.protocol.StorageBlockReport) Block(org.apache.hadoop.hdfs.protocol.Block) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Test(org.junit.Test)
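
For reference, the sizing arithmetic behind this test: the block size is set to 6 * bytesChkSum and the file is 12 * bytesChkSum bytes, so the write produces exactly two full blocks, and the second datanode holds TEMPORARY replicas while they are being transferred. A trivial standalone check of that arithmetic:

public class BlockCountMath {
    public static void main(String[] args) {
        long bytesChkSum = 1024 * 1000L;
        long blockSize = 6 * bytesChkSum;   // dfs.blocksize in the test
        long fileSize = 12 * bytesChkSum;   // size passed to writeFile
        System.out.println((fileSize + blockSize - 1) / blockSize); // prints 2
    }
}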

Aggregations

DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 48
Test (org.junit.Test): 36
Configuration (org.apache.hadoop.conf.Configuration): 19
Path (org.apache.hadoop.fs.Path): 16
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 12
StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport): 12
ArrayList (java.util.ArrayList): 10
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 10
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 10
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 10
Block (org.apache.hadoop.hdfs.protocol.Block): 9
IOException (java.io.IOException): 8
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 8
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 8
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 8
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 8
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 7
StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo): 7
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 6
BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext): 5