Use of javax.management.MBeanServer in project hadoop by apache: class TestFSNamesystemMBean, method testFsEditLogMetrics.
@Test(timeout = 120000)
public void testFsEditLogMetrics() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanNameFs = new ObjectName(
        "Hadoop:service=NameNode,name=FSNamesystemState");
    FileSystem fs = cluster.getFileSystem();
    final int NUM_OPS = 10;
    for (int i = 0; i < NUM_OPS; i++) {
      final Path path = new Path(String.format("/user%d", i));
      fs.mkdirs(path);
    }
    long syncCount = (long) mbs.getAttribute(mxbeanNameFs, "TotalSyncCount");
    String syncTimes = (String) mbs.getAttribute(mxbeanNameFs, "TotalSyncTimes");
    assertTrue(syncCount > 0);
    assertNotNull(syncTimes);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
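The test above reads two attributes by name from the platform MBeanServer. When you do not know up front which attributes a Hadoop bean exposes, the same server can describe the bean for you. A minimal sketch, using only standard javax.management calls and the same ObjectName as the test; it assumes it runs inside the process that registered the bean (e.g. a MiniDFSCluster test or the NameNode JVM), since the platform MBeanServer is per-process:

import java.lang.management.ManagementFactory;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class ListFsNamesystemStateAttributes {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    // Same bean the test queries; only visible inside the JVM that registered it.
    ObjectName name = new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
    for (MBeanAttributeInfo attr : mbs.getMBeanInfo(name).getAttributes()) {
      // Print each attribute's name, declared type and current value.
      System.out.println(attr.getName() + " (" + attr.getType() + ") = "
          + mbs.getAttribute(name, attr.getName()));
    }
  }
}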
Use of javax.management.MBeanServer in project hadoop by apache: class TestDataNodeMXBean, method testDataNodeMXBean.
@Test
public void testDataNodeMXBean() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    List<DataNode> datanodes = cluster.getDataNodes();
    Assert.assertEquals(datanodes.size(), 1);
    DataNode datanode = datanodes.get(0);
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=DataNode,name=DataNodeInfo");
    // get attribute "ClusterId"
    String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
    Assert.assertEquals(datanode.getClusterId(), clusterId);
    // get attribute "Version"
    String version = (String) mbs.getAttribute(mxbeanName, "Version");
    Assert.assertEquals(datanode.getVersion(), version);
// get attribute "SotfwareVersion"
    String softwareVersion = (String) mbs.getAttribute(mxbeanName, "SoftwareVersion");
    Assert.assertEquals(datanode.getSoftwareVersion(), softwareVersion);
    Assert.assertEquals(version, softwareVersion + ", r" + datanode.getRevision());
    // get attribute "RpcPort"
    String rpcPort = (String) mbs.getAttribute(mxbeanName, "RpcPort");
    Assert.assertEquals(datanode.getRpcPort(), rpcPort);
    // get attribute "HttpPort"
    String httpPort = (String) mbs.getAttribute(mxbeanName, "HttpPort");
    Assert.assertEquals(datanode.getHttpPort(), httpPort);
    // get attribute "NamenodeAddresses"
    String namenodeAddresses = (String) mbs.getAttribute(mxbeanName, "NamenodeAddresses");
    Assert.assertEquals(datanode.getNamenodeAddresses(), namenodeAddresses);
// get attribute "getVolumeInfo"
    String volumeInfo = (String) mbs.getAttribute(mxbeanName, "VolumeInfo");
    Assert.assertEquals(replaceDigits(datanode.getVolumeInfo()), replaceDigits(volumeInfo));
    // Ensure mxbean's XceiverCount is same as the DataNode's
    // live value.
    int xceiverCount = (Integer) mbs.getAttribute(mxbeanName, "XceiverCount");
    Assert.assertEquals(datanode.getXceiverCount(), xceiverCount);
    // Ensure mxbean's XmitsInProgress is same as the DataNode's
    // live value.
    int xmitsInProgress = (Integer) mbs.getAttribute(mxbeanName, "XmitsInProgress");
    Assert.assertEquals(datanode.getXmitsInProgress(), xmitsInProgress);
    String bpActorInfo = (String) mbs.getAttribute(mxbeanName, "BPServiceActorInfo");
    Assert.assertEquals(datanode.getBPServiceActorInfo(), bpActorInfo);
    String slowDisks = (String) mbs.getAttribute(mxbeanName, "SlowDisks");
    Assert.assertEquals(datanode.getSlowDisks(), slowDisks);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
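Outside of a single-JVM test like the one above, DataNodeInfo is more commonly read over HTTP through the daemon's /jmx servlet, which serializes the same attributes as JSON. A rough sketch; the port 9864 is only an assumption for a default Hadoop 3.x DataNode web UI and will differ per deployment:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class ReadDataNodeInfoOverHttp {
  public static void main(String[] args) throws Exception {
    // The qry parameter is the same ObjectName the test resolves via JMX.
    URL url = new URL(
        "http://localhost:9864/jmx?qry=Hadoop:service=DataNode,name=DataNodeInfo");
    try (BufferedReader reader = new BufferedReader(
        new InputStreamReader(url.openStream(), StandardCharsets.UTF_8))) {
      // The servlet returns the bean's attributes as a JSON document.
      String line;
      while ((line = reader.readLine()) != null) {
        System.out.println(line);
      }
    }
  }
}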
Use of javax.management.MBeanServer in project hadoop by apache: class TestDataNodeMXBean, method testDataNodeMXBeanSlowDisksEnabled.
@Test
public void testDataNodeMXBeanSlowDisksEnabled() throws Exception {
  Configuration conf = new Configuration();
  conf.setDouble(DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY, 1.0);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    List<DataNode> datanodes = cluster.getDataNodes();
    Assert.assertEquals(datanodes.size(), 1);
    DataNode datanode = datanodes.get(0);
    String slowDiskPath = "test/data1/slowVolume";
    datanode.getDiskMetrics().addSlowDiskForTesting(slowDiskPath);
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=DataNode,name=DataNodeInfo");
    String slowDisks = (String) mbs.getAttribute(mxbeanName, "SlowDisks");
    Assert.assertEquals(datanode.getSlowDisks(), slowDisks);
    Assert.assertTrue(slowDisks.contains(slowDiskPath));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Use of javax.management.MBeanServer in project hadoop by apache: class TestDataNodeVolumeFailureReporting, method testHotSwapOutFailedVolumeAndReporting.
/**
 * Verify DataNode NumFailedVolumes and FailedStorageLocations
 * after hot swap out of failed volume.
 */
@Test
public void testHotSwapOutFailedVolumeAndReporting() throws Exception {
  final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
  final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
  final DataNode dn0 = cluster.getDataNodes().get(0);
  final String oldDataDirs = dn0.getConf().get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
  MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
  ObjectName mxbeanName = new ObjectName(
      "Hadoop:service=DataNode,name=FSDatasetState-" + dn0.getDatanodeUuid());
  int numFailedVolumes = (int) mbs.getAttribute(mxbeanName, "NumFailedVolumes");
  Assert.assertEquals(dn0.getFSDataset().getNumFailedVolumes(), numFailedVolumes);
  checkFailuresAtDataNode(dn0, 0, false, new String[] {});
  // Fail dn0Vol1 first.
  // Verify NumFailedVolumes and FailedStorageLocations are updated.
  DataNodeTestUtils.injectDataDirFailure(dn0Vol1);
  DataNodeTestUtils.waitForDiskError(dn0, DataNodeTestUtils.getVolume(dn0, dn0Vol1));
  numFailedVolumes = (int) mbs.getAttribute(mxbeanName, "NumFailedVolumes");
  Assert.assertEquals(1, numFailedVolumes);
  Assert.assertEquals(dn0.getFSDataset().getNumFailedVolumes(), numFailedVolumes);
  checkFailuresAtDataNode(dn0, 1, true, new String[] { dn0Vol1.getAbsolutePath() });
  // Verify NumFailedVolumes and FailedStorageLocations haven't changed.
  try {
    dn0.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, oldDataDirs);
    fail("Reconfigure with failed disk should throw exception.");
  } catch (ReconfigurationException e) {
    Assert.assertTrue("Reconfigure exception doesn't have expected path!",
        e.getCause().getMessage().contains(dn0Vol1.getAbsolutePath()));
  }
  numFailedVolumes = (int) mbs.getAttribute(mxbeanName, "NumFailedVolumes");
  Assert.assertEquals(1, numFailedVolumes);
  Assert.assertEquals(dn0.getFSDataset().getNumFailedVolumes(), numFailedVolumes);
  checkFailuresAtDataNode(dn0, 1, true, new String[] { dn0Vol1.getAbsolutePath() });
  // Hot swap out the failed volume.
  // Verify NumFailedVolumes and FailedStorageLocations are reset.
  String dataDirs = dn0Vol2.getPath();
  dn0.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirs);
  numFailedVolumes = (int) mbs.getAttribute(mxbeanName, "NumFailedVolumes");
  Assert.assertEquals(0, numFailedVolumes);
  Assert.assertEquals(dn0.getFSDataset().getNumFailedVolumes(), numFailedVolumes);
  checkFailuresAtDataNode(dn0, 0, true, new String[] {});
  // Fix failure volume dn0Vol1 and remount it back.
  // Verify NumFailedVolumes and FailedStorageLocations are empty.
  DataNodeTestUtils.restoreDataDirFromFailure(dn0Vol1);
  dn0.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, oldDataDirs);
  numFailedVolumes = (int) mbs.getAttribute(mxbeanName, "NumFailedVolumes");
  Assert.assertEquals(0, numFailedVolumes);
  Assert.assertEquals(dn0.getFSDataset().getNumFailedVolumes(), numFailedVolumes);
  checkFailuresAtDataNode(dn0, 0, true, new String[] {});
  // Fail dn0Vol2.
  // Verify NumFailedVolumes and FailedStorageLocations are updated.
  DataNodeTestUtils.injectDataDirFailure(dn0Vol2);
  DataNodeTestUtils.waitForDiskError(dn0, DataNodeTestUtils.getVolume(dn0, dn0Vol2));
  numFailedVolumes = (int) mbs.getAttribute(mxbeanName, "NumFailedVolumes");
  Assert.assertEquals(1, numFailedVolumes);
  Assert.assertEquals(dn0.getFSDataset().getNumFailedVolumes(), numFailedVolumes);
  checkFailuresAtDataNode(dn0, 1, true, new String[] { dn0Vol2.getAbsolutePath() });
  // Verify DataNode tolerating one disk failure.
  assertTrue(dn0.shouldRun());
}
Use of javax.management.MBeanServer in project hadoop by apache: class TestStartup, method testStorageBlockContentsStaleAfterNNRestart.
/**
 * Verify the following scenario.
 * 1. NN restarts.
 * 2. Heartbeat RPC will retry and succeed. NN asks DN to reregister.
 * 3. After reregistration completes, DN will send Heartbeat, followed by
 *    Blockreport.
 * 4. NN will mark DatanodeStorageInfo#blockContentsStale to false.
 * @throws Exception
 */
@Test(timeout = 60000)
public void testStorageBlockContentsStaleAfterNNRestart() throws Exception {
  MiniDFSCluster dfsCluster = null;
  try {
    Configuration config = new Configuration();
    dfsCluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    dfsCluster.waitActive();
    dfsCluster.restartNameNode(true);
    BlockManagerTestUtil.checkHeartbeat(dfsCluster.getNamesystem().getBlockManager());
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanNameFsns = new ObjectName(
        "Hadoop:service=NameNode,name=FSNamesystemState");
    Integer numStaleStorages = (Integer) mbs.getAttribute(mxbeanNameFsns, "NumStaleStorages");
    assertEquals(0, numStaleStorages.intValue());
  } finally {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
  }
  return;
}
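All of the snippets in this section query the platform MBeanServer from inside the JVM that registered the beans. To read the same NameNode attributes from a separate process, the standard javax.management.remote API can be used instead. A hedged sketch: the host, port and the assumption that remote JMX was enabled on the NameNode (via the usual com.sun.management.jmxremote.* system properties) are illustrative, not part of the Hadoop code above:

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class ReadNumStaleStoragesRemotely {
  public static void main(String[] args) throws Exception {
    // Hypothetical host and port; the NameNode must have remote JMX enabled.
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://namenode.example.com:8004/jmxrmi");
    try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
      MBeanServerConnection connection = connector.getMBeanServerConnection();
      ObjectName fsns = new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
      // Same attribute the restart test asserts on.
      Integer numStaleStorages =
          (Integer) connection.getAttribute(fsns, "NumStaleStorages");
      System.out.println("NumStaleStorages = " + numStaleStorages);
    }
  }
}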