Example 31 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class TestNameNodePrunesMissingStorages method runTest.

private static void runTest(final String testCaseName, final boolean createFiles, final int numInitialStorages, final int expectedStoragesAfterTest) throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).storagesPerDatanode(numInitialStorages).build();
        cluster.waitActive();
        final DataNode dn0 = cluster.getDataNodes().get(0);
        // Ensure NN knows about the storage.
        final DatanodeID dnId = dn0.getDatanodeId();
        final DatanodeDescriptor dnDescriptor = cluster.getNamesystem().getBlockManager().getDatanodeManager().getDatanode(dnId);
        assertThat(dnDescriptor.getStorageInfos().length, is(numInitialStorages));
        final String bpid = cluster.getNamesystem().getBlockPoolId();
        final DatanodeRegistration dnReg = dn0.getDNRegistrationForBP(bpid);
        DataNodeTestUtils.triggerBlockReport(dn0);
        if (createFiles) {
            final Path path = new Path("/", testCaseName);
            DFSTestUtil.createFile(cluster.getFileSystem(), path, 1024, (short) 1, 0x1BAD5EED);
            DataNodeTestUtils.triggerBlockReport(dn0);
        }
        // Generate a fake StorageReport that is missing one storage.
        final StorageReport[] reports = dn0.getFSDataset().getStorageReports(bpid);
        final StorageReport[] prunedReports = new StorageReport[numInitialStorages - 1];
        System.arraycopy(reports, 0, prunedReports, 0, prunedReports.length);
        // Stop the DataNode and send fake heartbeat with missing storage.
        cluster.stopDataNode(0);
        cluster.getNameNodeRpc().sendHeartbeat(dnReg, prunedReports, 0L, 0L, 0, 0, 0, null, true, SlowPeerReports.EMPTY_REPORT);
        // Check that the missing storage was pruned.
        assertThat(dnDescriptor.getStorageInfos().length, is(expectedStoragesAfterTest));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) StorageReport(org.apache.hadoop.hdfs.server.protocol.StorageReport)
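For orientation, the helper above is parameterized so that individual @Test methods can drive it. The following is a minimal sketch of what such callers might look like; the method names, timeout, and storage counts are assumptions for illustration, not taken from the listing (GenericTestUtils refers to org.apache.hadoop.test.GenericTestUtils).

// Hypothetical callers of runTest(); names and values are illustrative only.
@Test(timeout = 300000)
public void testUnusedStorageIsPruned() throws IOException {
    // A storage that reports no blocks should be pruned after the fake heartbeat.
    runTest(GenericTestUtils.getMethodName(), false, 3, 2);
}

@Test(timeout = 300000)
public void testStorageWithBlocksIsNotPruned() throws IOException {
    // A storage that may still hold blocks must survive the pruned heartbeat.
    runTest(GenericTestUtils.getMethodName(), true, 3, 3);
}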

Example 32 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class TestOverReplicatedBlocks method testChooseReplicaToDelete.

/**
   * The test verifies that the replica chosen for deletion lives on the node
   * with the oldest heartbeat, once the time since that heartbeat exceeds the
   * tolerable heartbeat interval.
   * It creates a file with several blocks and replication 4.
   * The last DN is configured to send heartbeats rarely.
   *
   * The test waits until the tolerable heartbeat interval expires, then reduces
   * the replication of the file. All replica deletions should be scheduled for
   * the last node. No replicas will actually be deleted, since the last DN
   * doesn't send heartbeats.
   */
@Test
public void testChooseReplicaToDelete() throws Exception {
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try {
        Configuration conf = new HdfsConfiguration();
        conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, SMALL_BLOCK_SIZE);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        fs = cluster.getFileSystem();
        final FSNamesystem namesystem = cluster.getNamesystem();
        final BlockManager bm = namesystem.getBlockManager();
        conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 300);
        cluster.startDataNodes(conf, 1, true, null, null, null);
        DataNode lastDN = cluster.getDataNodes().get(3);
        DatanodeRegistration dnReg = InternalDataNodeTestUtils.getDNRegistrationForBP(lastDN, namesystem.getBlockPoolId());
        String lastDNid = dnReg.getDatanodeUuid();
        final Path fileName = new Path("/foo2");
        DFSTestUtil.createFile(fs, fileName, SMALL_FILE_LENGTH, (short) 4, 0L);
        DFSTestUtil.waitReplication(fs, fileName, (short) 4);
        // Wait for tolerable number of heartbeats plus one
        DatanodeDescriptor nodeInfo = null;
        long lastHeartbeat = 0;
        long waitTime = DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000 * (DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT + 1);
        do {
            nodeInfo = bm.getDatanodeManager().getDatanode(dnReg);
            lastHeartbeat = nodeInfo.getLastUpdateMonotonic();
        } while (monotonicNow() - lastHeartbeat < waitTime);
        fs.setReplication(fileName, (short) 3);
        BlockLocation[] locs = fs.getFileBlockLocations(fs.getFileStatus(fileName), 0, Long.MAX_VALUE);
        // All replicas scheduled for deletion should be on lastDN, and none
        // should actually be deleted, because lastDN does not heartbeat.
        namesystem.readLock();
        final int dnBlocks = bm.getExcessSize4Testing(dnReg.getDatanodeUuid());
        assertEquals("Replicas on node " + lastDNid + " should have been scheduled for deletion", SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE, dnBlocks);
        namesystem.readUnlock();
        for (BlockLocation location : locs) assertEquals("Block should still have 4 replicas", 4, location.getNames().length);
    } finally {
        if (fs != null)
            fs.close();
        if (cluster != null)
            cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) BlockLocation(org.apache.hadoop.fs.BlockLocation) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FileSystem(org.apache.hadoop.fs.FileSystem) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)
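Note that SMALL_BLOCK_SIZE and SMALL_FILE_LENGTH are class-level constants of TestOverReplicatedBlocks that are not reproduced in this snippet. A plausible shape for them, shown purely for illustration (the actual values live in the test class), is:

// Illustrative only; the real constants are defined in TestOverReplicatedBlocks.
private static final long SMALL_BLOCK_SIZE = 1024;                  // bytes per block
private static final long SMALL_FILE_LENGTH = SMALL_BLOCK_SIZE * 4; // four blocks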

Example 33 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class TestComputeInvalidateWork method testDatanodeReformat.

/**
   * Reformatted DataNodes will replace the original UUID in the
   * {@link DatanodeManager#datanodeMap}. This tests if block
   * invalidation work on the original DataNode can be skipped.
   */
@Test(timeout = 120000)
public void testDatanodeReformat() throws Exception {
    namesystem.writeLock();
    try {
        // Change the datanode UUID to emulate a reformat
        String poolId = cluster.getNamesystem().getBlockPoolId();
        DatanodeRegistration dnr = cluster.getDataNode(nodes[0].getIpcPort()).getDNRegistrationForBP(poolId);
        dnr = new DatanodeRegistration(UUID.randomUUID().toString(), dnr);
        cluster.stopDataNode(nodes[0].getXferAddr());
        Block block = new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP);
        bm.addToInvalidates(block, nodes[0]);
        bm.getDatanodeManager().registerDatanode(dnr);
        // Since UUID has changed, the invalidation work should be skipped
        assertEquals(0, bm.computeInvalidateWork(1));
        assertEquals(0, bm.getPendingDeletionBlocksCount());
    } finally {
        namesystem.writeUnlock();
    }
}
Also used : DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) Block(org.apache.hadoop.hdfs.protocol.Block) Test(org.junit.Test)
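The key trick above is the copy-style DatanodeRegistration constructor, which swaps in a fresh UUID while keeping the rest of the registration intact, so the node looks reformatted to the NameNode. A minimal sketch of that pattern in isolation (the dataNode and blockPoolId variables are assumptions):

// Same network identity and storage info, but a brand-new datanode UUID,
// which is how a reformatted DataNode appears to the NameNode.
DatanodeRegistration original = dataNode.getDNRegistrationForBP(blockPoolId);
DatanodeRegistration reformatted =
    new DatanodeRegistration(UUID.randomUUID().toString(), original);
assert !original.getDatanodeUuid().equals(reformatted.getDatanodeUuid());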

Example 34 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class TestComputeInvalidateWork method testDatanodeReRegistration.

@Test(timeout = 12000)
public void testDatanodeReRegistration() throws Exception {
    // Create a test file
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final Path path = new Path("/testRR");
    // Create a file and shutdown the DNs, which populates InvalidateBlocks
    DFSTestUtil.createFile(dfs, path, dfs.getDefaultBlockSize(), (short) NUM_OF_DATANODES, 0xED0ED0);
    DFSTestUtil.waitForReplication(dfs, path, (short) NUM_OF_DATANODES, 12000);
    for (DataNode dn : cluster.getDataNodes()) {
        dn.shutdown();
    }
    dfs.delete(path, false);
    namesystem.writeLock();
    InvalidateBlocks invalidateBlocks;
    int expected = NUM_OF_DATANODES;
    try {
        invalidateBlocks = (InvalidateBlocks) Whitebox.getInternalState(cluster.getNamesystem().getBlockManager(), "invalidateBlocks");
        assertEquals("Expected invalidate blocks to be the number of DNs", (long) expected, invalidateBlocks.numBlocks());
    } finally {
        namesystem.writeUnlock();
    }
    // Re-register each DN and see that it wipes the invalidation work
    for (DataNode dn : cluster.getDataNodes()) {
        DatanodeID did = dn.getDatanodeId();
        DatanodeRegistration reg = new DatanodeRegistration(new DatanodeID(UUID.randomUUID().toString(), did), new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE), new ExportedBlockKeys(), VersionInfo.getVersion());
        namesystem.writeLock();
        try {
            bm.getDatanodeManager().registerDatanode(reg);
            expected--;
            assertEquals("Expected number of invalidate blocks to decrease", (long) expected, invalidateBlocks.numBlocks());
        } finally {
            namesystem.writeUnlock();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) StorageInfo(org.apache.hadoop.hdfs.server.common.StorageInfo) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ExportedBlockKeys(org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys) Test(org.junit.Test)
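The registration built inside the loop can be factored into a small helper; the following sketch mirrors the constructor arguments used above (the helper name is hypothetical):

// Hypothetical helper; constructor arguments mirror those in the loop above.
private static DatanodeRegistration reRegistrationFor(DatanodeID existing) {
    // New datanode UUID, same addresses and ports as the existing node.
    DatanodeID freshId = new DatanodeID(UUID.randomUUID().toString(), existing);
    return new DatanodeRegistration(freshId,
        new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE),
        new ExportedBlockKeys(),
        VersionInfo.getVersion());
}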

Example 35 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class TestDatanodeManager method testNumVersionsReportedCorrect.

/**
   * This test sends a random sequence of node registrations and node removals
   * to the DatanodeManager (of nodes with different IDs and versions), and
   * checks that the DatanodeManager keeps a correct count of different software
   * versions at all times.
   */
@Test
public void testNumVersionsReportedCorrect() throws IOException {
    //Create the DatanodeManager which will be tested
    FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
    Mockito.when(fsn.hasWriteLock()).thenReturn(true);
    DatanodeManager dm = mockDatanodeManager(fsn, new Configuration());
    //Seed the RNG with a known value so test failures are easier to reproduce
    Random rng = new Random();
    int seed = rng.nextInt();
    rng = new Random(seed);
    LOG.info("Using seed " + seed + " for testing");
    //A map of the Storage IDs to the DN registration it was registered with
    HashMap<String, DatanodeRegistration> sIdToDnReg = new HashMap<String, DatanodeRegistration>();
    for (int i = 0; i < NUM_ITERATIONS; ++i) {
        //With 50% probability on every 3rd iteration, remove a node (if any is registered)
        if (rng.nextBoolean() && i % 3 == 0 && sIdToDnReg.size() != 0) {
            //Pick a random node.
            int randomIndex = rng.nextInt() % sIdToDnReg.size();
            //Iterate to that random position 
            Iterator<Map.Entry<String, DatanodeRegistration>> it = sIdToDnReg.entrySet().iterator();
            for (int j = 0; j < randomIndex - 1; ++j) {
                it.next();
            }
            DatanodeRegistration toRemove = it.next().getValue();
            LOG.info("Removing node " + toRemove.getDatanodeUuid() + " ip " + toRemove.getXferAddr() + " version : " + toRemove.getSoftwareVersion());
            //Remove that random node
            dm.removeDatanode(toRemove);
            it.remove();
        } else {
            // Otherwise register a node; it may be a new one or a previously registered one.
            //Pick a random storageID to register.
            String storageID = "someStorageID" + rng.nextInt(5000);
            DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
            Mockito.when(dr.getDatanodeUuid()).thenReturn(storageID);
            //If this storageID had already been registered before
            if (sIdToDnReg.containsKey(storageID)) {
                dr = sIdToDnReg.get(storageID);
                //Half of the time, change the IP address
                if (rng.nextBoolean()) {
                    dr.setIpAddr(dr.getIpAddr() + "newIP");
                }
            } else {
                //This storageID has never been registered
                //Ensure IP address is unique to storageID
                String ip = "someIP" + storageID;
                Mockito.when(dr.getIpAddr()).thenReturn(ip);
                Mockito.when(dr.getXferAddr()).thenReturn(ip + ":9000");
                Mockito.when(dr.getXferPort()).thenReturn(9000);
            }
            //Pick a random version to register with
            Mockito.when(dr.getSoftwareVersion()).thenReturn("version" + rng.nextInt(5));
            LOG.info("Registering node storageID: " + dr.getDatanodeUuid() + ", version: " + dr.getSoftwareVersion() + ", IP address: " + dr.getXferAddr());
            //Register this random node
            dm.registerDatanode(dr);
            sIdToDnReg.put(storageID, dr);
        }
        //Verify DatanodeManager still has the right count
        Map<String, Integer> mapToCheck = dm.getDatanodesSoftwareVersions();
        //Decrement the count for each registered node's version; after all nodes are processed, mapToCheck should be empty
        for (Entry<String, DatanodeRegistration> it : sIdToDnReg.entrySet()) {
            String ver = it.getValue().getSoftwareVersion();
            if (!mapToCheck.containsKey(ver)) {
                throw new AssertionError("The correct number of datanodes of a " + "version was not found on iteration " + i);
            }
            mapToCheck.put(ver, mapToCheck.get(ver) - 1);
            if (mapToCheck.get(ver) == 0) {
                mapToCheck.remove(ver);
            }
        }
        for (Entry<String, Integer> entry : mapToCheck.entrySet()) {
            LOG.info("Still in map: " + entry.getKey() + " has " + entry.getValue());
        }
        assertEquals("The map of version counts returned by DatanodeManager was" + " not what it was expected to be on iteration " + i, 0, mapToCheck.size());
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HashMap(java.util.HashMap) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) Entry(java.util.Map.Entry) Random(java.util.Random) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)
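Stripped of the randomized loop, registering a single mocked DatanodeRegistration with the DatanodeManager follows this pattern (the storage ID, IP, and version strings are illustrative):

// Minimal sketch; values are arbitrary, dm is the DatanodeManager under test.
DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
Mockito.when(dr.getDatanodeUuid()).thenReturn("someStorageID42");
Mockito.when(dr.getIpAddr()).thenReturn("someIPsomeStorageID42");
Mockito.when(dr.getXferAddr()).thenReturn("someIPsomeStorageID42:9000");
Mockito.when(dr.getXferPort()).thenReturn(9000);
Mockito.when(dr.getSoftwareVersion()).thenReturn("version3");
dm.registerDatanode(dr);
// dm.getDatanodesSoftwareVersions() should now count one node for "version3".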

Aggregations

DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) 48
Test (org.junit.Test) 36
Configuration (org.apache.hadoop.conf.Configuration) 19
Path (org.apache.hadoop.fs.Path) 16
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem) 12
StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport) 12
ArrayList (java.util.ArrayList) 10
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 10
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock) 10
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) 10
Block (org.apache.hadoop.hdfs.protocol.Block) 9
IOException (java.io.IOException) 8
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 8
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID) 8
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock) 8
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode) 8
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) 7
StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo) 7
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo) 6
BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext) 5