
Example 91 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class TestComputeInvalidateWork method testDatanodeReformat.

/**
   * A reformatted DataNode replaces the original UUID in the
   * {@link DatanodeManager#datanodeMap}. This test verifies that block
   * invalidation work queued for the original DataNode entry is skipped.
   */
@Test(timeout = 120000)
public void testDatanodeReformat() throws Exception {
    namesystem.writeLock();
    try {
        // Change the datanode UUID to emulate a reformat
        String poolId = cluster.getNamesystem().getBlockPoolId();
        DatanodeRegistration dnr = cluster.getDataNode(nodes[0].getIpcPort()).getDNRegistrationForBP(poolId);
        dnr = new DatanodeRegistration(UUID.randomUUID().toString(), dnr);
        cluster.stopDataNode(nodes[0].getXferAddr());
        Block block = new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP);
        bm.addToInvalidates(block, nodes[0]);
        bm.getDatanodeManager().registerDatanode(dnr);
        // Since UUID has changed, the invalidation work should be skipped
        assertEquals(0, bm.computeInvalidateWork(1));
        assertEquals(0, bm.getPendingDeletionBlocksCount());
    } finally {
        namesystem.writeUnlock();
    }
}
Also used : DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) Block(org.apache.hadoop.hdfs.protocol.Block) Test(org.junit.Test)
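
The three-argument constructor used here is Block(long blockId, long numBytes, long generationStamp), so the two zeros denote block ID 0 with a length of zero bytes. A minimal sketch of the constructor and its accessors, with illustrative variable names that do not come from the test:

// Minimal sketch of the Block constructor and getters; identifiers are illustrative.
Block b = new Block(0L, 0L, GenerationStamp.LAST_RESERVED_STAMP);
long id = b.getBlockId();                // 0
long length = b.getNumBytes();           // 0
long genStamp = b.getGenerationStamp();  // GenerationStamp.LAST_RESERVED_STAMP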

Example 92 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class TestHeartbeatHandling method testHeartbeat.

/**
   * Tests that
   * {@link FSNamesystem#handleHeartbeat}
   * picks up replication and/or invalidation requests and respects the
   * per-heartbeat maximum limits.
   */
@Test
public void testHeartbeat() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        final FSNamesystem namesystem = cluster.getNamesystem();
        final HeartbeatManager hm = namesystem.getBlockManager().getDatanodeManager().getHeartbeatManager();
        final String poolId = namesystem.getBlockPoolId();
        final DatanodeRegistration nodeReg = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
        final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem, nodeReg);
        final String storageID = DatanodeStorage.generateUuid();
        dd.updateStorage(new DatanodeStorage(storageID));
        final int REMAINING_BLOCKS = 1;
        final int MAX_REPLICATE_LIMIT = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 2);
        final int MAX_INVALIDATE_LIMIT = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT;
        final int MAX_INVALIDATE_BLOCKS = 2 * MAX_INVALIDATE_LIMIT + REMAINING_BLOCKS;
        final int MAX_REPLICATE_BLOCKS = 2 * MAX_REPLICATE_LIMIT + REMAINING_BLOCKS;
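        // With 2 * limit + 1 blocks queued, the per-heartbeat caps imply the queues
        // should drain over three heartbeats as limit, limit and then the single
        // remaining block, which is what the assertions below check.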
        final DatanodeStorageInfo[] ONE_TARGET = { dd.getStorageInfo(storageID) };
        try {
            namesystem.writeLock();
            synchronized (hm) {
                for (int i = 0; i < MAX_REPLICATE_BLOCKS; i++) {
                    dd.addBlockToBeReplicated(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP), ONE_TARGET);
                }
                DatanodeCommand[] cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem).getCommands();
                assertEquals(1, cmds.length);
                assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
                assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand) cmds[0]).getBlocks().length);
                ArrayList<Block> blockList = new ArrayList<Block>(MAX_INVALIDATE_BLOCKS);
                for (int i = 0; i < MAX_INVALIDATE_BLOCKS; i++) {
                    blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
                }
                dd.addBlocksToBeInvalidated(blockList);
                cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem).getCommands();
                assertEquals(2, cmds.length);
                assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
                assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand) cmds[0]).getBlocks().length);
                assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
                assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand) cmds[1]).getBlocks().length);
                cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem).getCommands();
                assertEquals(2, cmds.length);
                assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
                assertEquals(REMAINING_BLOCKS, ((BlockCommand) cmds[0]).getBlocks().length);
                assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
                assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand) cmds[1]).getBlocks().length);
                cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem).getCommands();
                assertEquals(1, cmds.length);
                assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction());
                assertEquals(REMAINING_BLOCKS, ((BlockCommand) cmds[0]).getBlocks().length);
                cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem).getCommands();
                assertEquals(0, cmds.length);
            }
        } finally {
            namesystem.writeUnlock();
        }
    } finally {
        cluster.shutdown();
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ArrayList(java.util.ArrayList) BlockCommand(org.apache.hadoop.hdfs.server.protocol.BlockCommand) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DatanodeCommand(org.apache.hadoop.hdfs.server.protocol.DatanodeCommand) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) Block(org.apache.hadoop.hdfs.protocol.Block) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)

Example 93 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class TestHeartbeatHandling method testHeartbeatBlockRecovery.

/**
   * Tests that
   * {@link FSNamesystem#handleHeartbeat}
   * correctly selects DataNode targets for block recovery.
   */
@Test
public void testHeartbeatBlockRecovery() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
        cluster.waitActive();
        final FSNamesystem namesystem = cluster.getNamesystem();
        final HeartbeatManager hm = namesystem.getBlockManager().getDatanodeManager().getHeartbeatManager();
        final String poolId = namesystem.getBlockPoolId();
        final DatanodeRegistration nodeReg1 = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
        final DatanodeDescriptor dd1 = NameNodeAdapter.getDatanode(namesystem, nodeReg1);
        dd1.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
        final DatanodeRegistration nodeReg2 = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(1), poolId);
        final DatanodeDescriptor dd2 = NameNodeAdapter.getDatanode(namesystem, nodeReg2);
        dd2.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
        final DatanodeRegistration nodeReg3 = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(2), poolId);
        final DatanodeDescriptor dd3 = NameNodeAdapter.getDatanode(namesystem, nodeReg3);
        dd3.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
        try {
            namesystem.writeLock();
            synchronized (hm) {
                NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem);
                NameNodeAdapter.sendHeartBeat(nodeReg2, dd2, namesystem);
                NameNodeAdapter.sendHeartBeat(nodeReg3, dd3, namesystem);
                // Test with all alive nodes.
                DFSTestUtil.resetLastUpdatesWithOffset(dd1, 0);
                DFSTestUtil.resetLastUpdatesWithOffset(dd2, 0);
                DFSTestUtil.resetLastUpdatesWithOffset(dd3, 0);
                final DatanodeStorageInfo[] storages = { dd1.getStorageInfos()[0], dd2.getStorageInfos()[0], dd3.getStorageInfos()[0] };
                BlockInfo blockInfo = new BlockInfoContiguous(new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3);
                blockInfo.convertToBlockUnderConstruction(BlockUCState.UNDER_RECOVERY, storages);
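                // Converting the block to UNDER_RECOVERY with these three storages
                // makes dd1, dd2 and dd3 the candidate locations in the recovery
                // command returned by the next heartbeat.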
                dd1.addBlockToBeRecovered(blockInfo);
                DatanodeCommand[] cmds = NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
                assertEquals(1, cmds.length);
                assertEquals(DatanodeProtocol.DNA_RECOVERBLOCK, cmds[0].getAction());
                BlockRecoveryCommand recoveryCommand = (BlockRecoveryCommand) cmds[0];
                assertEquals(1, recoveryCommand.getRecoveringBlocks().size());
                DatanodeInfo[] recoveringNodes = recoveryCommand.getRecoveringBlocks().toArray(new BlockRecoveryCommand.RecoveringBlock[0])[0].getLocations();
                assertEquals(3, recoveringNodes.length);
                assertEquals(recoveringNodes[0], dd1);
                assertEquals(recoveringNodes[1], dd2);
                assertEquals(recoveringNodes[2], dd3);
                // Test with one stale node.
                DFSTestUtil.resetLastUpdatesWithOffset(dd1, 0);
                // More than the default stale interval of 30 seconds.
                DFSTestUtil.resetLastUpdatesWithOffset(dd2, -40 * 1000);
                DFSTestUtil.resetLastUpdatesWithOffset(dd3, 0);
                blockInfo = new BlockInfoContiguous(new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3);
                blockInfo.convertToBlockUnderConstruction(BlockUCState.UNDER_RECOVERY, storages);
                dd1.addBlockToBeRecovered(blockInfo);
                cmds = NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
                assertEquals(1, cmds.length);
                assertEquals(DatanodeProtocol.DNA_RECOVERBLOCK, cmds[0].getAction());
                recoveryCommand = (BlockRecoveryCommand) cmds[0];
                assertEquals(1, recoveryCommand.getRecoveringBlocks().size());
                recoveringNodes = recoveryCommand.getRecoveringBlocks().toArray(new BlockRecoveryCommand.RecoveringBlock[0])[0].getLocations();
                assertEquals(2, recoveringNodes.length);
                // dd2 is skipped.
                assertEquals(recoveringNodes[0], dd1);
                assertEquals(recoveringNodes[1], dd3);
                // Test with all stale node.
                DFSTestUtil.resetLastUpdatesWithOffset(dd1, -60 * 1000);
                // More than the default stale interval of 30 seconds.
                DFSTestUtil.resetLastUpdatesWithOffset(dd2, -40 * 1000);
                DFSTestUtil.resetLastUpdatesWithOffset(dd3, -80 * 1000);
                blockInfo = new BlockInfoContiguous(new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3);
                blockInfo.convertToBlockUnderConstruction(BlockUCState.UNDER_RECOVERY, storages);
                dd1.addBlockToBeRecovered(blockInfo);
                cmds = NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
                assertEquals(1, cmds.length);
                assertEquals(DatanodeProtocol.DNA_RECOVERBLOCK, cmds[0].getAction());
                recoveryCommand = (BlockRecoveryCommand) cmds[0];
                assertEquals(1, recoveryCommand.getRecoveringBlocks().size());
                recoveringNodes = recoveryCommand.getRecoveringBlocks().toArray(new BlockRecoveryCommand.RecoveringBlock[0])[0].getLocations();
                // Only dd1 is non-stale (it has just heartbeated), so fewer than two
                // non-stale replicas remain and all three replicas are selected for
                // recovery.
                assertEquals(3, recoveringNodes.length);
                assertEquals(recoveringNodes[0], dd1);
                assertEquals(recoveringNodes[1], dd2);
                assertEquals(recoveringNodes[2], dd3);
            }
        } finally {
            namesystem.writeUnlock();
        }
    } finally {
        cluster.shutdown();
    }
}
Also used : BlockRecoveryCommand(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DatanodeCommand(org.apache.hadoop.hdfs.server.protocol.DatanodeCommand) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) Block(org.apache.hadoop.hdfs.protocol.Block) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)
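
The stale-node assertions above follow from how recovery locations are chosen: replicas on stale DataNodes are skipped, but if that filtering leaves fewer than two replicas, all replicas are used so the primary DataNode can still coordinate recovery. A rough sketch of that rule, relying on DatanodeInfo#isStale and not reproducing the actual DatanodeManager code:

// Rough paraphrase of the recovery-location selection rule; this is a sketch,
// not the actual DatanodeManager implementation.
static DatanodeDescriptor[] chooseRecoveryLocations(DatanodeDescriptor[] replicas, long staleIntervalMs) {
    List<DatanodeDescriptor> fresh = new ArrayList<>();
    for (DatanodeDescriptor node : replicas) {
        if (!node.isStale(staleIntervalMs)) {
            fresh.add(node);
        }
    }
    // With fewer than two non-stale replicas, fall back to all replicas.
    return fresh.size() > 1 ? fresh.toArray(new DatanodeDescriptor[0]) : replicas;
}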

Example 94 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class BlockReportTestBase method findBlock.

private Block findBlock(Path path, long size) throws IOException {
    Block ret;
    List<LocatedBlock> lbs = cluster.getNameNodeRpc().getBlockLocations(path.toString(), FILE_START, size).getLocatedBlocks();
    LocatedBlock lb = lbs.get(lbs.size() - 1);
    // Get block from the first DN
    ret = cluster.getDataNodes().get(DN_N0).data.getStoredBlock(lb.getBlock().getBlockPoolId(), lb.getBlock().getBlockId());
    return ret;
}
Also used : Block(org.apache.hadoop.hdfs.protocol.Block) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock)
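
Since findBlock resolves the file's last LocatedBlock and then reads the stored replica from the first DataNode's dataset, the returned Block reflects on-disk state rather than NameNode metadata. A hedged usage sketch, with an illustrative path and size:

// Hypothetical call site in a test of the same class; path and size are illustrative.
Path filePath = new Path("/blockReportTest.dat");
Block storedBlock = findBlock(filePath, 12 * 1024 * 1000);
assertNotNull("expected a stored replica on the first DataNode", storedBlock);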

Example 95 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class BlockReportTestBase method blockReport_08.

/**
   * The test sets the configuration parameters for a large block size and
   * restarts the previously started single-node cluster.
   * It then writes a file larger than the block size and closes it.
   * A second DataNode is started in the cluster.
   * As soon as the replication process starts and at least one TEMPORARY
   * replica is found, the test forces a block report and checks that the
   * TEMPORARY replica is not reported in it.
   * Finally, the configuration is restored to its original state.
   *
   * @throws IOException in case of an error
   */
@Test(timeout = 300000)
public void blockReport_08() throws IOException {
    final String METHOD_NAME = GenericTestUtils.getMethodName();
    Path filePath = new Path("/" + METHOD_NAME + ".dat");
    final int DN_N1 = DN_N0 + 1;
    final int bytesChkSum = 1024 * 1000;
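    // The block size below is six checksum chunks, so the 12-chunk file written
    // later spans exactly two full blocks.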
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, bytesChkSum);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 6 * bytesChkSum);
    shutDownCluster();
    startUpCluster();
    try {
        ArrayList<Block> blocks = writeFile(METHOD_NAME, 12 * bytesChkSum, filePath);
        Block bl = findBlock(filePath, 12 * bytesChkSum);
        BlockChecker bc = new BlockChecker(filePath);
        bc.start();
        waitForTempReplica(bl, DN_N1);
        // all blocks belong to the same file, hence same BP
        DataNode dn = cluster.getDataNodes().get(DN_N1);
        String poolId = cluster.getNamesystem().getBlockPoolId();
        DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
        StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
        sendBlockReports(dnR, poolId, reports);
        printStats();
        assertEquals("Wrong number of PendingReplication blocks", blocks.size(), cluster.getNamesystem().getPendingReplicationBlocks());
        try {
            bc.join();
        } catch (InterruptedException e) {
        }
    } finally {
        // return the initial state of the configuration
        resetConfiguration();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) StorageBlockReport(org.apache.hadoop.hdfs.server.protocol.StorageBlockReport) Block(org.apache.hadoop.hdfs.protocol.Block) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Test(org.junit.Test)

Aggregations

Block (org.apache.hadoop.hdfs.protocol.Block) 155
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock) 79
Test (org.junit.Test) 77
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock) 74
Path (org.apache.hadoop.fs.Path) 28
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) 26
IOException (java.io.IOException) 24
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) 22
Configuration (org.apache.hadoop.conf.Configuration) 20
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo) 18
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 17
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) 17
CachedBlock (org.apache.hadoop.hdfs.server.namenode.CachedBlock) 17
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped) 15
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 14
ArrayList (java.util.ArrayList) 12
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock) 11
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) 11
FsPermission (org.apache.hadoop.fs.permission.FsPermission) 10
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) 10