Example 26 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class TestDatanodeRegistration method testForcedRegistration.

// IBRs are async operations to free up IPC handlers.  This means the IBR
// response will not contain non-IPC level exceptions - which in practice
// should not occur other than dead/unregistered node which will trigger a
// re-registration.  If a non-IPC exception does occur, the safety net is
// a forced re-registration on the next heartbeat.
@Test
public void testForcedRegistration() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY, 4);
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, Integer.MAX_VALUE);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    cluster.getHttpUri(0);
    FSNamesystem fsn = cluster.getNamesystem();
    String bpId = fsn.getBlockPoolId();
    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeDescriptor dnd = NameNodeAdapter.getDatanode(fsn, dn.getDatanodeId());
    DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
    DatanodeStorageInfo storage = dnd.getStorageInfos()[0];
    // registration should not change after heartbeat.
    assertTrue(dnd.isRegistered());
    DatanodeRegistration lastReg = dn.getDNRegistrationForBP(bpId);
    waitForHeartbeat(dn, dnd);
    assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
    // force a re-registration on next heartbeat.
    dnd.setForceRegistration(true);
    assertFalse(dnd.isRegistered());
    waitForHeartbeat(dn, dnd);
    assertTrue(dnd.isRegistered());
    DatanodeRegistration newReg = dn.getDNRegistrationForBP(bpId);
    assertNotSame(lastReg, newReg);
    lastReg = newReg;
    // registration should not change on subsequent heartbeats.
    waitForHeartbeat(dn, dnd);
    assertTrue(dnd.isRegistered());
    assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
    assertTrue(waitForBlockReport(dn, dnd));
    assertTrue(dnd.isRegistered());
    assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
    // check that block report is not processed and registration didn't change.
    dnd.setForceRegistration(true);
    assertFalse(waitForBlockReport(dn, dnd));
    assertFalse(dnd.isRegistered());
    assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
    // heartbeat should trigger re-registration, and next block report should
    // not change registration.
    waitForHeartbeat(dn, dnd);
    assertTrue(dnd.isRegistered());
    newReg = dn.getDNRegistrationForBP(bpId);
    assertNotSame(lastReg, newReg);
    lastReg = newReg;
    assertTrue(waitForBlockReport(dn, dnd));
    assertTrue(dnd.isRegistered());
    assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
    // registration doesn't change.
    ExtendedBlock eb = new ExtendedBlock(bpId, 1234);
    dn.notifyNamenodeDeletedBlock(eb, storage.getStorageID());
    DataNodeTestUtils.triggerDeletionReport(dn);
    assertTrue(dnd.isRegistered());
    assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
    // a failed IBR will effectively unregister the node.
    boolean failed = false;
    try {
        // pass null to force a failure; there is no easy way to provoke a
        // real non-IPC failure because one shouldn't occur in practice.
        fsn.processIncrementalBlockReport(lastReg, null);
    } catch (NullPointerException npe) {
        failed = true;
    }
    assertTrue("didn't fail", failed);
    assertFalse(dnd.isRegistered());
    // should remain unregistered until next heartbeat.
    dn.notifyNamenodeDeletedBlock(eb, storage.getStorageID());
    DataNodeTestUtils.triggerDeletionReport(dn);
    assertFalse(dnd.isRegistered());
    assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
    waitForHeartbeat(dn, dnd);
    assertTrue(dnd.isRegistered());
    assertNotSame(lastReg, dn.getDNRegistrationForBP(bpId));
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)
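
The test above leans on two private helpers, waitForHeartbeat and waitForBlockReport, that are not part of this excerpt. A plausible sketch of the heartbeat helper, assuming GenericTestUtils.waitFor (with a Guava Supplier) and the monotonic last-update timestamp on DatanodeDescriptor; the actual helper may differ in detail:

// Sketch only: re-enable heartbeats, wait until the NameNode-side
// descriptor observes a fresh heartbeat, then disable them again so the
// test stays in control of heartbeat timing.
private void waitForHeartbeat(final DataNode dn, final DatanodeDescriptor dnd)
        throws Exception {
    final long lastUpdate = dnd.getLastUpdateMonotonic();
    DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
            return dnd.getLastUpdateMonotonic() != lastUpdate;
        }
    }, 100, 100000);
    DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
}

waitForBlockReport would follow the same pattern: trigger a full block report, poll DatanodeStorageInfo.getBlockReportCount() for an increase, and return false on timeout rather than throwing, which is why the test can assert both assertTrue and assertFalse on its result.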

Example 27 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class TestFileCorruption method testArrayOutOfBoundsException.

/** Test the case that a replica is reported corrupt while it is not
   * in blocksMap. Make sure that an ArrayIndexOutOfBoundsException is
   * not thrown. See HADOOP-4351.
   */
@Test
public void testArrayOutOfBoundsException() throws Exception {
    MiniDFSCluster cluster = null;
    try {
        Configuration conf = new HdfsConfiguration();
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        final Path FILE_PATH = new Path("/tmp.txt");
        final long FILE_LEN = 1L;
        DFSTestUtil.createFile(fs, FILE_PATH, FILE_LEN, (short) 2, 1L);
        // get the block
        final String bpid = cluster.getNamesystem().getBlockPoolId();
        ExtendedBlock blk = getFirstBlock(cluster.getDataNodes().get(0), bpid);
        assertFalse("Data directory does not contain any blocks or there was an " + "IO error", blk == null);
        // start a third datanode
        cluster.startDataNodes(conf, 1, true, null, null);
        ArrayList<DataNode> datanodes = cluster.getDataNodes();
        assertEquals(3, datanodes.size());
        DataNode dataNode = datanodes.get(2);
        // report corrupted block by the third datanode
        DatanodeRegistration dnR = InternalDataNodeTestUtils.getDNRegistrationForBP(dataNode, blk.getBlockPoolId());
        FSNamesystem ns = cluster.getNamesystem();
        ns.writeLock();
        try {
            cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(blk, new DatanodeInfoBuilder().setNodeID(dnR).build(), "TEST", "STORAGE_ID");
        } finally {
            ns.writeUnlock();
        }
        // open the file
        fs.open(FILE_PATH);
        //clean up
        fs.delete(FILE_PATH, false);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FileSystem(org.apache.hadoop.fs.FileSystem) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)
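
getFirstBlock is a private helper of TestFileCorruption that this excerpt elides. A reasonable sketch, assuming the FsDatasetSpi block-report API and java.util.Map; details may differ from the actual helper:

// Sketch only: pull the datanode's per-storage block reports for the pool
// and wrap the first replica found in an ExtendedBlock; null if none exist.
private static ExtendedBlock getFirstBlock(DataNode dn, String bpid) {
    Map<DatanodeStorage, BlockListAsLongs> reports =
        dn.getFSDataset().getBlockReports(bpid);
    for (BlockListAsLongs report : reports.values()) {
        for (BlockListAsLongs.BlockReportReplica replica : report) {
            return new ExtendedBlock(bpid, replica);
        }
    }
    return null;
}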

Example 28 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class TestBlockManager method testSafeModeIBRAfterIncremental.

@Test
public void testSafeModeIBRAfterIncremental() throws Exception {
    DatanodeDescriptor node = spy(nodes.get(0));
    DatanodeStorageInfo ds = node.getStorageInfos()[0];
    node.setAlive(true);
    DatanodeRegistration nodeReg = new DatanodeRegistration(node, null, null, "");
    // pretend to be in safemode
    doReturn(true).when(fsn).isInStartupSafeMode();
    // register new node
    bm.getDatanodeManager().registerDatanode(nodeReg);
    // swap in the spy
    bm.getDatanodeManager().addDatanode(node);
    assertEquals(node, bm.getDatanodeManager().getDatanode(node));
    assertEquals(0, ds.getBlockReportCount());
    // send block report while pretending to already have blocks
    reset(node);
    doReturn(1).when(node).numBlocks();
    bm.processReport(node, new DatanodeStorage(ds.getStorageID()), BlockListAsLongs.EMPTY, null);
    assertEquals(1, ds.getBlockReportCount());
}
Also used : DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) Test(org.junit.Test)
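
For contrast with the full report above, an incremental block report against the same bm/node/ds fixtures might look like the following. This is illustrative only, assuming the BlockManager#processIncrementalBlockReport entry point and the ReceivedDeletedBlockInfo/StorageReceivedDeletedBlocks wire classes:

// Sketch only: report one newly-received block on the same storage.
ReceivedDeletedBlockInfo received = new ReceivedDeletedBlockInfo(
    new Block(1000L), ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
    null /* no delete hint */);
StorageReceivedDeletedBlocks srdb = new StorageReceivedDeletedBlocks(
    new DatanodeStorage(ds.getStorageID()),
    new ReceivedDeletedBlockInfo[] { received });
bm.processIncrementalBlockReport(node, srdb);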

Example 29 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class TestBlockManager method testFullBR.

@Test
public void testFullBR() throws Exception {
    doReturn(true).when(fsn).isRunning();
    DatanodeDescriptor node = nodes.get(0);
    DatanodeStorageInfo ds = node.getStorageInfos()[0];
    node.setAlive(true);
    DatanodeRegistration nodeReg = new DatanodeRegistration(node, null, null, "");
    // register new node
    bm.getDatanodeManager().registerDatanode(nodeReg);
    bm.getDatanodeManager().addDatanode(node);
    assertEquals(node, bm.getDatanodeManager().getDatanode(node));
    assertEquals(0, ds.getBlockReportCount());
    ArrayList<BlockInfo> blocks = new ArrayList<>();
    for (int id = 24; id > 0; id--) {
        blocks.add(addBlockToBM(id));
    }
    // Make sure it's the first full report
    assertEquals(0, ds.getBlockReportCount());
    bm.processReport(node, new DatanodeStorage(ds.getStorageID()), generateReport(blocks), new BlockReportContext(1, 0, System.nanoTime(), 0, false));
    assertEquals(1, ds.getBlockReportCount());
    // verify the storage info is correct
    for (BlockInfo block : blocks) {
        assertTrue(bm.getStoredBlock(block).findStorageInfo(ds) >= 0);
    }
    // Send unsorted report
    bm.processReport(node, new DatanodeStorage(ds.getStorageID()), generateReport(blocks), new BlockReportContext(1, 0, System.nanoTime(), 0, false));
    assertEquals(2, ds.getBlockReportCount());
    // verify the storage info is correct
    for (BlockInfo block : blocks) {
        assertTrue(bm.getStoredBlock(block).findStorageInfo(ds) >= 0);
    }
    // Sort list and send a sorted report
    Collections.sort(blocks);
    bm.processReport(node, new DatanodeStorage(ds.getStorageID()), generateReport(blocks), new BlockReportContext(1, 0, System.nanoTime(), 0, true));
    assertEquals(3, ds.getBlockReportCount());
    // verify the storage info is correct
    for (BlockInfo block : blocks) {
        assertTrue(bm.getStoredBlock(block).findStorageInfo(ds) >= 0);
    }
}
Also used : DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo) BlockReportContext(org.apache.hadoop.hdfs.server.protocol.BlockReportContext) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) ArrayList(java.util.ArrayList) Test(org.junit.Test)
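
The helpers addBlockToBM and generateReport are also elided from this excerpt. generateReport plausibly packs the BlockInfo list into the BlockListAsLongs wire format that processReport consumes; a sketch under that assumption, using FinalizedReplica as the Replica implementation:

// Sketch only: encode each block as a finalized replica in the
// BlockListAsLongs format used for full block reports.
private static BlockListAsLongs generateReport(List<BlockInfo> blocks) {
    BlockListAsLongs.Builder builder = BlockListAsLongs.builder();
    for (BlockInfo block : blocks) {
        builder.add(new FinalizedReplica(block, null, null));
    }
    return builder.build();
}

addBlockToBM would, correspondingly, create a BlockInfo for the given id and insert it into the BlockManager's blocks map so that getStoredBlock can later resolve it.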

Example 30 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class TestBlockManager method testStorageWithRemainingCapacity.

/**
   * Tests that a namenode doesn't choose a datanode with full disks to 
   * store blocks.
   * @throws Exception
   */
@Test
public void testStorageWithRemainingCapacity() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = FileSystem.get(conf);
    Path file1 = null;
    try {
        cluster.waitActive();
        final FSNamesystem namesystem = cluster.getNamesystem();
        final String poolId = namesystem.getBlockPoolId();
        final DatanodeRegistration nodeReg = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
        final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem, nodeReg);
        // give each of the two storages a 64k capacity, all of it remaining
        for (DatanodeStorageInfo storage : dd.getStorageInfos()) {
            storage.setUtilizationForTesting(65536, 0, 65536, 0);
        }
        // sum of the remaining capacity of both storages
        dd.setRemaining(131072);
        // create a file of 100k, which neither storage can accommodate
        file1 = new Path("testRemainingStorage.dat");
        try {
            DFSTestUtil.createFile(fs, file1, 102400, 102400, 102400, (short) 1, 0x1BAD5EED);
        } catch (RemoteException re) {
            GenericTestUtils.assertExceptionContains("nodes instead of " + "minReplication", re);
        }
    } finally {
        // Clean up
        assertTrue(fs.exists(file1));
        fs.delete(file1, true);
        assertFalse(fs.exists(file1));
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) RemoteException(org.apache.hadoop.ipc.RemoteException) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)
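
Why the write fails: each storage reports only 64k remaining while the file is written with a 100k block size, so neither storage can hold even a single block. A minimal sketch of the arithmetic, with constants taken from the test above (the placement-policy details are summarized, not quoted from the source):

// Constants from the test above.
long perStorageRemaining = 65536L;   // 64k remaining on each storage
long blockSize = 102400L;            // 100k block size passed to createFile

// No single storage can hold one full block, so the datanode is rejected
// as a target and the write fails with the RemoteException the test
// asserts on ("... nodes instead of minReplication").
assert perStorageRemaining < blockSize;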

Aggregations

DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 48 usages
Test (org.junit.Test): 36 usages
Configuration (org.apache.hadoop.conf.Configuration): 19 usages
Path (org.apache.hadoop.fs.Path): 16 usages
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 12 usages
StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport): 12 usages
ArrayList (java.util.ArrayList): 10 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 10 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 10 usages
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 10 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 9 usages
IOException (java.io.IOException): 8 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 8 usages
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 8 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 8 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 8 usages
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 7 usages
StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo): 7 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 6 usages
BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext): 5 usages