Use of org.apache.hadoop.yarn.util.ProcfsBasedProcessTree in project hadoop by apache.
Example: the testProcessTreeLimits method of the class TestContainersMonitor.
/**
 * Test to verify the check for whether a process tree is over limit or not.
 *
 * @throws IOException
 *           if there was a problem setting up the fake procfs directories or
 *           files.
 */
@Test
public void testProcessTreeLimits() throws IOException {
  // set up a dummy proc file system
  File procfsRootDir = new File(localDir, "proc");
  String[] pids = { "100", "200", "300", "400", "500", "600", "700" };
  try {
    TestProcfsBasedProcessTree.setupProcfsRootDir(procfsRootDir);
    // create pid dirs.
    TestProcfsBasedProcessTree.setupPidDirs(procfsRootDir, pids);
    // create process infos.
    TestProcfsBasedProcessTree.ProcessStatInfo[] procs =
        new TestProcfsBasedProcessTree.ProcessStatInfo[7];
    // assume pids 100 and 500 are in one tree,
    // 200, 300 and 400 are in another,
    // and 600 and 700 are in a third.
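    // Each ProcessStatInfo takes, in order: pid, command name, ppid,
    // pgrpId, session and vmem in bytes; the ppid field (third entry)
    // is what links the processes into the three trees described above.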
    procs[0] = new TestProcfsBasedProcessTree.ProcessStatInfo(
        new String[] { "100", "proc1", "1", "100", "100", "100000" });
    procs[1] = new TestProcfsBasedProcessTree.ProcessStatInfo(
        new String[] { "200", "proc2", "1", "200", "200", "200000" });
    procs[2] = new TestProcfsBasedProcessTree.ProcessStatInfo(
        new String[] { "300", "proc3", "200", "200", "200", "300000" });
    procs[3] = new TestProcfsBasedProcessTree.ProcessStatInfo(
        new String[] { "400", "proc4", "200", "200", "200", "400000" });
    procs[4] = new TestProcfsBasedProcessTree.ProcessStatInfo(
        new String[] { "500", "proc5", "100", "100", "100", "1500000" });
    procs[5] = new TestProcfsBasedProcessTree.ProcessStatInfo(
        new String[] { "600", "proc6", "1", "600", "600", "100000" });
    procs[6] = new TestProcfsBasedProcessTree.ProcessStatInfo(
        new String[] { "700", "proc7", "600", "600", "600", "100000" });
    // write stat files.
    TestProcfsBasedProcessTree.writeStatFiles(procfsRootDir, pids, procs, null);
    // vmem limit
    long limit = 700000;
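    // For reference, summing the vmem values above: the tree rooted at
    // 100 totals 1,600,000 (more than twice the limit), the tree rooted
    // at 200 totals 900,000 (over the limit, but not twice over), and
    // the tree rooted at 600 totals 200,000 (under the limit).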
    ContainersMonitorImpl test = new ContainersMonitorImpl(null, null, null);
    // create process trees.
    // The tree rooted at 100 is over limit immediately, as it is
    // more than twice over the mem limit.
    ProcfsBasedProcessTree pTree =
        new ProcfsBasedProcessTree("100", procfsRootDir.getAbsolutePath());
    pTree.updateProcessTree();
    assertTrue("tree rooted at 100 should be over limit after first iteration.",
        test.isProcessTreeOverLimit(pTree, "dummyId", limit));
    // the tree rooted at 200 is initially below limit.
    pTree = new ProcfsBasedProcessTree("200", procfsRootDir.getAbsolutePath());
    pTree.updateProcessTree();
    assertFalse("tree rooted at 200 shouldn't be over limit after one iteration.",
        test.isProcessTreeOverLimit(pTree, "dummyId", limit));
    // second iteration - now the tree has been over limit twice,
    // hence it should be declared over limit.
    pTree.updateProcessTree();
    assertTrue("tree rooted at 200 should be over limit after 2 iterations",
        test.isProcessTreeOverLimit(pTree, "dummyId", limit));
    // the tree rooted at 600 is never over limit.
    pTree = new ProcfsBasedProcessTree("600", procfsRootDir.getAbsolutePath());
    pTree.updateProcessTree();
    assertFalse("tree rooted at 600 should never be over limit.",
        test.isProcessTreeOverLimit(pTree, "dummyId", limit));
    // another iteration does not make any difference.
    pTree.updateProcessTree();
    assertFalse("tree rooted at 600 should never be over limit.",
        test.isProcessTreeOverLimit(pTree, "dummyId", limit));
  } finally {
    FileUtil.fullyDelete(procfsRootDir);
  }
}
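Taken together, the assertions pin down the shape of the check that ContainersMonitorImpl.isProcessTreeOverLimit applies: a tree that is grossly over the limit (rooted at 100, more than twice the limit) is flagged on the first iteration, a tree that is merely over the limit (rooted at 200) is flagged only after it has stayed over for more than one monitoring interval, and a tree under the limit (rooted at 600) is never flagged. The following is a minimal sketch of that rule as the test exercises it, not the actual ContainersMonitorImpl source; the method name and the currentUsage/agedUsage parameters are illustrative.

// Sketch of the over-limit rule implied by the assertions above.
// "agedUsage" stands for memory attributed to processes that have
// survived more than one monitoring interval; it is zero on the
// first update of a freshly created tree.
static boolean isOverLimitSketch(long currentUsage, long agedUsage,
    long limit) {
  if (currentUsage > 2 * limit) {
    // Grossly over limit: flag immediately, without waiting an
    // interval (the tree rooted at 100: 1,600,000 > 2 * 700,000).
    return true;
  }
  // Otherwise flag only usage that has persisted past one interval
  // (the tree rooted at 200 on its second update).
  return agedUsage > limit;
}

With the test's numbers: isOverLimitSketch(1600000, 0, 700000) is true immediately, isOverLimitSketch(900000, 0, 700000) is false after one iteration, and isOverLimitSketch(900000, 900000, 700000) is true on the second.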