Example 61 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache hadoop project.

From the class TestInterDatanodeProtocol, method testUpdateReplicaUnderRecovery.

/**
 * Test for
 * {@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock, long, long, long)}.
 */
@Test
public void testUpdateReplicaUnderRecovery() throws IOException {
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        //create a file
        DistributedFileSystem dfs = cluster.getFileSystem();
        String filestr = "/foo";
        Path filepath = new Path(filestr);
        DFSTestUtil.createFile(dfs, filepath, 1024L, (short) 3, 0L);
        //get block info
        final LocatedBlock locatedblock = getLastLocatedBlock(DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
        final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
        Assert.assertTrue(datanodeinfo.length > 0);
        //get DataNode and FSDataset objects
        final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
        Assert.assertNotNull(datanode);
        //initReplicaRecovery
        final ExtendedBlock b = locatedblock.getBlock();
        final long recoveryid = b.getGenerationStamp() + 1;
        final long newlength = b.getNumBytes() - 1;
        final FsDatasetSpi<?> fsdataset = DataNodeTestUtils.getFSDataset(datanode);
        final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(new RecoveringBlock(b, null, recoveryid));
        //check replica
        final Replica replica = cluster.getFsDatasetTestUtils(datanode).fetchReplica(b);
        Assert.assertEquals(ReplicaState.RUR, replica.getState());
        //check meta data before update
        cluster.getFsDatasetTestUtils(datanode).checkStoredReplica(replica);
        //case "THIS IS NOT SUPPOSED TO HAPPEN"
        //with (block length) != (stored replica's on disk length). 
        {
            //create a block with same id and gs but different length.
            final ExtendedBlock tmp = new ExtendedBlock(b.getBlockPoolId(), rri.getBlockId(), rri.getNumBytes() - 1, rri.getGenerationStamp());
            try {
                //update should fail
                fsdataset.updateReplicaUnderRecovery(tmp, recoveryid, tmp.getBlockId(), newlength);
                Assert.fail();
            } catch (IOException ioe) {
                System.out.println("GOOD: getting " + ioe);
            }
        }
        //update
        final Replica r = fsdataset.updateReplicaUnderRecovery(new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid, rri.getBlockId(), newlength);
        assertNotNull(r);
        assertNotNull(r.getStorageUuid());
    } finally {
        if (cluster != null)
            cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), FinalizedReplica (org.apache.hadoop.hdfs.server.datanode.FinalizedReplica), Replica (org.apache.hadoop.hdfs.server.datanode.Replica), ReplicaRecoveryInfo (org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock), Test (org.junit.Test)
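
Every example on this page shares the same lifecycle: build the cluster, wait for it to come up, exercise it, and shut it down in a finally block. Below is a minimal standalone sketch of that pattern, using only calls that appear in the test above; the class name MiniDfsClusterSketch is ours, not Hadoop's.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsClusterSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = null;
        try {
            // Start an in-process HDFS cluster with three DataNodes.
            cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
            // Block until the NameNode and all DataNodes report in.
            cluster.waitActive();
            // Get a client for the embedded cluster and write a 1 KB file
            // with replication factor 3, as the test above does.
            DistributedFileSystem dfs = cluster.getFileSystem();
            DFSTestUtil.createFile(dfs, new Path("/foo"), 1024L, (short) 3, 0L);
        } finally {
            // Tear the cluster down even if an earlier step threw.
            if (cluster != null) {
                cluster.shutdown();
            }
        }
    }
}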

Example 62 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache hadoop project.

From the class TestDiskBalancerCommand, method testDiskBalancerQueryWithoutSubmit.

/**
   * Making sure that we can query the node without having done a submit.
   * @throws Exception
   */
@Test
public void testDiskBalancerQueryWithoutSubmit() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    final int numDatanodes = 2;
    MiniDFSCluster miniDFSCluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    try {
        miniDFSCluster.waitActive();
        DataNode dataNode = miniDFSCluster.getDataNodes().get(0);
        final String queryArg = String.format("-query localhost:%d", dataNode.getIpcPort());
        final String cmdLine = String.format("hdfs diskbalancer %s", queryArg);
        runCommand(cmdLine);
    } finally {
        miniDFSCluster.shutdown();
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString), Test (org.junit.Test)
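
The two preconditions this test relies on are that dfs.disk.balancer.enabled is set before the DataNodes start and that the query targets the DataNode's IPC port. Here is a hedged sketch of just those two steps; runCommand is a private helper of TestDiskBalancerCommand and is out of scope, so the sketch only prints the command line it would run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class DiskBalancerQuerySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // The disk balancer must be enabled before the cluster starts,
        // otherwise the DataNode rejects balancer operations.
        conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
        try {
            cluster.waitActive();
            // -query talks to the DataNode's IPC port, not its HTTP port.
            DataNode dn = cluster.getDataNodes().get(0);
            String cmdLine = String.format("hdfs diskbalancer -query localhost:%d", dn.getIpcPort());
            // In TestDiskBalancerCommand, runCommand(cmdLine) hands this line
            // to the disk balancer CLI; here we only show its shape.
            System.out.println(cmdLine);
        } finally {
            cluster.shutdown();
        }
    }
}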

Example 63 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache hadoop project.

From the class TestNameNodeMXBean, method testNameNodeMXBeanInfo.

@SuppressWarnings({ "unchecked" })
@Test
public void testNameNodeMXBeanInfo() throws Exception {
    Configuration conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
        cluster.waitActive();
        // Set upgrade domain on the first DN.
        String upgradeDomain = "abcd";
        DatanodeManager dm = cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager();
        DatanodeDescriptor dd = dm.getDatanode(cluster.getDataNodes().get(0).getDatanodeId());
        dd.setUpgradeDomain(upgradeDomain);
        String dnXferAddrWithUpgradeDomainSet = dd.getXferAddr();
        // Put the second DN to maintenance state.
        DatanodeDescriptor maintenanceNode = dm.getDatanode(cluster.getDataNodes().get(1).getDatanodeId());
        maintenanceNode.setInMaintenance();
        String dnXferAddrInMaintenance = maintenanceNode.getXferAddr();
        FSNamesystem fsn = cluster.getNameNode().namesystem;
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        // get attribute "ClusterId"
        String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
        assertEquals(fsn.getClusterId(), clusterId);
        // get attribute "BlockPoolId"
        String blockpoolId = (String) mbs.getAttribute(mxbeanName, "BlockPoolId");
        assertEquals(fsn.getBlockPoolId(), blockpoolId);
        // get attribute "Version"
        String version = (String) mbs.getAttribute(mxbeanName, "Version");
        assertEquals(fsn.getVersion(), version);
        assertTrue(version.equals(VersionInfo.getVersion() + ", r" + VersionInfo.getRevision()));
        // get attribute "Used"
        Long used = (Long) mbs.getAttribute(mxbeanName, "Used");
        assertEquals(fsn.getUsed(), used.longValue());
        // get attribute "Total"
        Long total = (Long) mbs.getAttribute(mxbeanName, "Total");
        assertEquals(fsn.getTotal(), total.longValue());
        // get attribute "safemode"
        String safemode = (String) mbs.getAttribute(mxbeanName, "Safemode");
        assertEquals(fsn.getSafemode(), safemode);
        // get attribute nondfs
        Long nondfs = (Long) (mbs.getAttribute(mxbeanName, "NonDfsUsedSpace"));
        assertEquals(fsn.getNonDfsUsedSpace(), nondfs.longValue());
        // get attribute percentremaining
        Float percentremaining = (Float) (mbs.getAttribute(mxbeanName, "PercentRemaining"));
        assertEquals(fsn.getPercentRemaining(), percentremaining, DELTA);
        // get attribute Totalblocks
        Long totalblocks = (Long) (mbs.getAttribute(mxbeanName, "TotalBlocks"));
        assertEquals(fsn.getTotalBlocks(), totalblocks.longValue());
        // get attribute alivenodeinfo
        String alivenodeinfo = (String) (mbs.getAttribute(mxbeanName, "LiveNodes"));
        Map<String, Map<String, Object>> liveNodes = (Map<String, Map<String, Object>>) JSON.parse(alivenodeinfo);
        assertEquals(2, liveNodes.size());
        for (Map<String, Object> liveNode : liveNodes.values()) {
            assertTrue(liveNode.containsKey("nonDfsUsedSpace"));
            assertTrue(((Long) liveNode.get("nonDfsUsedSpace")) >= 0);
            assertTrue(liveNode.containsKey("capacity"));
            assertTrue(((Long) liveNode.get("capacity")) > 0);
            assertTrue(liveNode.containsKey("numBlocks"));
            assertTrue(((Long) liveNode.get("numBlocks")) == 0);
            assertTrue(liveNode.containsKey("lastBlockReport"));
            // a. By default the upgrade domain isn't defined on any DN.
            // b. If the upgrade domain is set on a DN, JMX should have the same
            // value.
            String xferAddr = (String) liveNode.get("xferaddr");
            if (!xferAddr.equals(dnXferAddrWithUpgradeDomainSet)) {
                assertFalse(liveNode.containsKey("upgradeDomain"));
            } else {
                assertTrue(liveNode.get("upgradeDomain").equals(upgradeDomain));
            }
            // "adminState" is set to maintenance only for the specific dn.
            boolean inMaintenance = liveNode.get("adminState").equals(DatanodeInfo.AdminStates.IN_MAINTENANCE.toString());
            assertFalse(xferAddr.equals(dnXferAddrInMaintenance) ^ inMaintenance);
        }
        assertEquals(fsn.getLiveNodes(), alivenodeinfo);
        // get attributes DeadNodes
        String deadNodeInfo = (String) (mbs.getAttribute(mxbeanName, "DeadNodes"));
        assertEquals(fsn.getDeadNodes(), deadNodeInfo);
        // get attribute NodeUsage
        String nodeUsage = (String) (mbs.getAttribute(mxbeanName, "NodeUsage"));
        assertEquals("Bad value for NodeUsage", fsn.getNodeUsage(), nodeUsage);
        // get attribute NameJournalStatus
        String nameJournalStatus = (String) (mbs.getAttribute(mxbeanName, "NameJournalStatus"));
        assertEquals("Bad value for NameJournalStatus", fsn.getNameJournalStatus(), nameJournalStatus);
        // get attribute JournalTransactionInfo
        String journalTxnInfo = (String) mbs.getAttribute(mxbeanName, "JournalTransactionInfo");
        assertEquals("Bad value for NameTxnIds", fsn.getJournalTransactionInfo(), journalTxnInfo);
        // get attribute "CompileInfo"
        String compileInfo = (String) mbs.getAttribute(mxbeanName, "CompileInfo");
        assertEquals("Bad value for CompileInfo", fsn.getCompileInfo(), compileInfo);
        // get attribute CorruptFiles
        String corruptFiles = (String) (mbs.getAttribute(mxbeanName, "CorruptFiles"));
        assertEquals("Bad value for CorruptFiles", fsn.getCorruptFiles(), corruptFiles);
        // get attribute NameDirStatuses
        String nameDirStatuses = (String) (mbs.getAttribute(mxbeanName, "NameDirStatuses"));
        assertEquals(fsn.getNameDirStatuses(), nameDirStatuses);
        Map<String, Map<String, String>> statusMap = (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
        Collection<URI> nameDirUris = cluster.getNameDirs(0);
        for (URI nameDirUri : nameDirUris) {
            File nameDir = new File(nameDirUri);
            System.out.println("Checking for the presence of " + nameDir + " in active name dirs.");
            assertTrue(statusMap.get("active").containsKey(nameDir.getAbsolutePath()));
        }
        assertEquals(2, statusMap.get("active").size());
        assertEquals(0, statusMap.get("failed").size());
        // This will cause the first dir to fail.
        File failedNameDir = new File(nameDirUris.iterator().next());
        assertEquals(0, FileUtil.chmod(new File(failedNameDir, "current").getAbsolutePath(), "000"));
        cluster.getNameNodeRpc().rollEditLog();
        nameDirStatuses = (String) (mbs.getAttribute(mxbeanName, "NameDirStatuses"));
        statusMap = (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
        for (URI nameDirUri : nameDirUris) {
            File nameDir = new File(nameDirUri);
            String expectedStatus = nameDir.equals(failedNameDir) ? "failed" : "active";
            System.out.println("Checking for the presence of " + nameDir + " in " + expectedStatus + " name dirs.");
            assertTrue(statusMap.get(expectedStatus).containsKey(nameDir.getAbsolutePath()));
        }
        assertEquals(1, statusMap.get("active").size());
        assertEquals(1, statusMap.get("failed").size());
        assertEquals(0L, mbs.getAttribute(mxbeanName, "CacheUsed"));
        assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() * cluster.getDataNodes().size(), mbs.getAttribute(mxbeanName, "CacheCapacity"));
        assertNull("RollingUpgradeInfo should be null when there is no rolling" + " upgrade", mbs.getAttribute(mxbeanName, "RollingUpgradeStatus"));
    } finally {
        if (cluster != null) {
            for (URI dir : cluster.getNameDirs(0)) {
                FileUtil.chmod(new File(new File(dir), "current").getAbsolutePath(), "755");
            }
            cluster.shutdown();
        }
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), URI (java.net.URI), ObjectName (javax.management.ObjectName), DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor), DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager), Map (java.util.Map), HashMap (java.util.HashMap), File (java.io.File), MBeanServer (javax.management.MBeanServer), Test (org.junit.Test)
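
The JMX plumbing in this long test reduces to three calls: get the platform MBeanServer, name the bean, read an attribute. Below is a minimal sketch of that pattern as a reusable helper; the class and method names are ours, and it assumes a NameNode is already running in-process, as it is inside a MiniDFSCluster test.

import java.lang.management.ManagementFactory;

import javax.management.MBeanServer;
import javax.management.ObjectName;

public class NameNodeMXBeanSketch {
    // Reads one attribute of the NameNodeInfo MXBean from the platform
    // MBeanServer, where a MiniDFSCluster NameNode registers its beans.
    static Object readNameNodeAttribute(String attribute) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        return mbs.getAttribute(mxbeanName, attribute);
    }

    public static void main(String[] args) throws Exception {
        // Attributes exercised by the test above; each is a flat value or a
        // JSON string that the corresponding FSNamesystem getter mirrors.
        System.out.println("ClusterId = " + readNameNodeAttribute("ClusterId"));
        System.out.println("LiveNodes = " + readNameNodeAttribute("LiveNodes"));
    }
}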

Example 64 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache hadoop project.

From the class TestNameNodeMXBean, method testTopUsers.

@Test(timeout = 120000)
@SuppressWarnings("unchecked")
public void testTopUsers() throws Exception {
    final Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        cluster.waitActive();
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanNameFsns = new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
        FileSystem fs = cluster.getFileSystem();
        final Path path = new Path("/");
        final int NUM_OPS = 10;
        for (int i = 0; i < NUM_OPS; i++) {
            fs.listStatus(path);
            fs.setTimes(path, 0, 1);
        }
        String topUsers = (String) (mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts"));
        ObjectMapper mapper = new ObjectMapper();
        Map<String, Object> map = mapper.readValue(topUsers, Map.class);
        assertTrue("Could not find map key timestamp", map.containsKey("timestamp"));
        assertTrue("Could not find map key windows", map.containsKey("windows"));
        List<Map<String, List<Map<String, Object>>>> windows = (List<Map<String, List<Map<String, Object>>>>) map.get("windows");
        assertEquals("Unexpected num windows", 3, windows.size());
        for (Map<String, List<Map<String, Object>>> window : windows) {
            final List<Map<String, Object>> ops = window.get("ops");
            assertEquals("Unexpected num ops", 3, ops.size());
            for (Map<String, Object> op : ops) {
                final long count = Long.parseLong(op.get("totalCount").toString());
                final String opType = op.get("opType").toString();
                final int expected;
                if (opType.equals(TopConf.ALL_CMDS)) {
                    expected = 2 * NUM_OPS;
                } else {
                    expected = NUM_OPS;
                }
                assertEquals("Unexpected total count", expected, count);
            }
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), ObjectName (javax.management.ObjectName), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), List (java.util.List), ArrayList (java.util.ArrayList), Map (java.util.Map), HashMap (java.util.HashMap), ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper), MBeanServer (javax.management.MBeanServer), Test (org.junit.Test)
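
The assertions above pin down the shape of the TopUserOpCounts JSON: a top-level timestamp plus a windows array, where each window carries an ops array whose entries hold opType and totalCount. A small sketch, assuming that shape, that walks the structure and prints the per-op counts; the sample input in main is illustrative only.

import java.util.List;
import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;

public class TopUserOpCountsSketch {
    @SuppressWarnings("unchecked")
    static void printOpCounts(String topUsersJson) throws Exception {
        // Deserialize the raw JSON string returned by the MXBean attribute.
        Map<String, Object> map = new ObjectMapper().readValue(topUsersJson, Map.class);
        System.out.println("timestamp = " + map.get("timestamp"));
        List<Map<String, List<Map<String, Object>>>> windows =
                (List<Map<String, List<Map<String, Object>>>>) map.get("windows");
        // One entry per reporting window; each holds a list of op counters.
        for (Map<String, List<Map<String, Object>>> window : windows) {
            for (Map<String, Object> op : window.get("ops")) {
                System.out.println(op.get("opType") + " -> " + op.get("totalCount"));
            }
        }
    }

    public static void main(String[] args) throws Exception {
        // Illustrative input only; the real JSON comes from reading the
        // "TopUserOpCounts" attribute, as in the test above.
        printOpCounts("{\"timestamp\":\"0\",\"windows\":"
                + "[{\"ops\":[{\"opType\":\"listStatus\",\"totalCount\":10}]}]}");
    }
}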

Example 65 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache hadoop project.

From the class TestNameNodeMXBean, method testQueueLength.

@Test(timeout = 120000)
public void testQueueLength() throws Exception {
    final Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        cluster.waitActive();
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanNameFs = new ObjectName("Hadoop:service=NameNode,name=FSNamesystem");
        int queueLength = (int) mbs.getAttribute(mxbeanNameFs, "LockQueueLength");
        assertEquals(0, queueLength);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), MBeanServer (javax.management.MBeanServer), ObjectName (javax.management.ObjectName), Test (org.junit.Test)

Aggregations

MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 507
Test (org.junit.Test): 429
Configuration (org.apache.hadoop.conf.Configuration): 403
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 312
Path (org.apache.hadoop.fs.Path): 290
FileSystem (org.apache.hadoop.fs.FileSystem): 211
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 183
IOException (java.io.IOException): 107
File (java.io.File): 83
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 64
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 53
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 35
RandomAccessFile (java.io.RandomAccessFile): 33
MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 33
URI (java.net.URI): 31
ArrayList (java.util.ArrayList): 29
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 28
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 26
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 25
HttpServerFunctionalTest (org.apache.hadoop.http.HttpServerFunctionalTest): 24