Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project hadoop by apache:
the setup method of the class TestUnmanagedAMLauncher.
@BeforeClass
public static void setup() throws InterruptedException, IOException {
  LOG.info("Starting up YARN cluster");
  conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128);
  if (yarnCluster == null) {
    yarnCluster = new MiniYARNCluster(TestUnmanagedAMLauncher.class.getSimpleName(), 1, 1, 1);
    yarnCluster.init(conf);
    yarnCluster.start();
    // Read back the config the mini cluster actually bound to (ports are dynamic).
    Configuration yarnClusterConfig = yarnCluster.getConfig();
    LOG.info("MiniYARN ResourceManager published address: " + yarnClusterConfig.get(YarnConfiguration.RM_ADDRESS));
    LOG.info("MiniYARN ResourceManager published web address: " + yarnClusterConfig.get(YarnConfiguration.RM_WEBAPP_ADDRESS));
    String webapp = yarnClusterConfig.get(YarnConfiguration.RM_WEBAPP_ADDRESS);
    assertTrue("Web app address still unbound to a host at " + webapp, !webapp.startsWith("0.0.0.0"));
    LOG.info("Yarn webapp is at " + webapp);
    URL url = Thread.currentThread().getContextClassLoader().getResource("yarn-site.xml");
    if (url == null) {
      throw new RuntimeException("Could not find 'yarn-site.xml' dummy file in classpath");
    }
    // Write the document to a buffer (not directly to the file, as that
    // can cause the file being written to get read - which will then fail).
    ByteArrayOutputStream bytesOut = new ByteArrayOutputStream();
    yarnClusterConfig.writeXml(bytesOut);
    bytesOut.close();
    // Write the bytes to the file in the classpath; try-with-resources
    // guarantees the stream is closed even if write() throws.
    try (OutputStream os = new FileOutputStream(new File(url.getPath()))) {
      os.write(bytesOut.toByteArray());
    }
  }
  try {
    // Give the cluster a moment to settle before tests run.
    Thread.sleep(2000);
  } catch (InterruptedException e) {
    // Restore the interrupt flag so callers can still observe the interruption.
    Thread.currentThread().interrupt();
    LOG.info("setup thread sleep interrupted. message=" + e.getMessage());
  }
}
Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project hadoop by apache:
the testDirsFailures method of the class TestDiskFailures.
/**
 * Starts a fresh one-node mini YARN cluster and verifies that the NodeManager
 * detects failing nm-local-dirs or nm-log-dirs, prunes them from its good-dir
 * list, and flips the node health to unhealthy once too many dirs have failed.
 *
 * @param localORLogDirs true to exercise nm-local-dirs, false for nm-log-dirs
 */
private void testDirsFailures(boolean localORLogDirs) throws IOException {
  String dirType = localORLogDirs ? "local" : "log";
  String dirsProperty = localORLogDirs ? YarnConfiguration.NM_LOCAL_DIRS : YarnConfiguration.NM_LOG_DIRS;
  Configuration conf = new Configuration();
  // set disk health check interval to a small value (say 1 sec).
  conf.setLong(YarnConfiguration.NM_DISK_HEALTH_CHECK_INTERVAL_MS, DISK_HEALTH_CHECK_INTERVAL);
  // If 2 out of the total 4 local-dirs fail OR if 2 Out of the total 4
  // log-dirs fail, then the node's health status should become unhealthy.
  conf.setFloat(YarnConfiguration.NM_MIN_HEALTHY_DISKS_FRACTION, 0.60F);
  if (yarnCluster != null) {
    yarnCluster.stop();
    FileUtil.fullyDelete(localFSDirBase);
    localFSDirBase.mkdirs();
  }
  LOG.info("Starting up YARN cluster");
  yarnCluster = new MiniYARNCluster(TestDiskFailures.class.getName(), 1, numLocalDirs, numLogDirs);
  yarnCluster.init(conf);
  yarnCluster.start();
  NodeManager nm = yarnCluster.getNodeManager(0);
  LOG.info("Configured nm-" + dirType + "-dirs=" + nm.getConfig().get(dirsProperty));
  dirsHandler = nm.getNodeHealthChecker().getDiskHandler();
  List<String> list = localORLogDirs ? dirsHandler.getLocalDirs() : dirsHandler.getLogDirs();
  String[] dirs = list.toArray(new String[list.size()]);
  // BUGFIX: compare against the count matching the dir type under test;
  // the original always compared against numLocalDirs, which only passed
  // because numLocalDirs happened to equal numLogDirs.
  int expectedNumDirs = localORLogDirs ? numLocalDirs : numLogDirs;
  Assert.assertEquals("Number of nm-" + dirType + "-dirs is wrong.", expectedNumDirs, dirs.length);
  String expectedDirs = StringUtils.join(",", list);
  // validate the health of disks initially
  verifyDisksHealth(localORLogDirs, expectedDirs, true);
  // Make 1 nm-local-dir fail and verify if "the nodemanager can identify
  // the disk failure(s) and can update the list of good nm-local-dirs.
  prepareDirToFail(dirs[2]);
  expectedDirs = dirs[0] + "," + dirs[1] + "," + dirs[3];
  verifyDisksHealth(localORLogDirs, expectedDirs, true);
  // Now, make 1 more nm-local-dir/nm-log-dir fail and verify if "the
  // nodemanager can identify the disk failures and can update the list of
  // good nm-local-dirs/nm-log-dirs and can update the overall health status
  // of the node to unhealthy".
  prepareDirToFail(dirs[0]);
  expectedDirs = dirs[1] + "," + dirs[3];
  verifyDisksHealth(localORLogDirs, expectedDirs, false);
  // Fail the remaining 2 local-dirs/log-dirs and verify if NM remains with
  // empty list of local-dirs/log-dirs and the overall health status is
  // unhealthy.
  prepareDirToFail(dirs[1]);
  prepareDirToFail(dirs[3]);
  expectedDirs = "";
  verifyDisksHealth(localORLogDirs, expectedDirs, false);
}
Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project apex-core by apache:
the setup method of the class StramMiniClusterTest.
@BeforeClass
public static void setup() throws InterruptedException, IOException {
  LOG.info("Starting up YARN cluster");
  conf = StramClientUtils.addDTDefaultResources(conf);
  conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
  // workaround to avoid containers being killed because java allocated too much vmem
  conf.setInt("yarn.nodemanager.vmem-pmem-ratio", 20);
  conf.setStrings("yarn.scheduler.capacity.root.queues", "default");
  conf.setStrings("yarn.scheduler.capacity.root.default.capacity", "100");
  StringBuilder adminEnv = new StringBuilder(1024);
  if (System.getenv("JAVA_HOME") == null) {
    adminEnv.append("JAVA_HOME=").append(System.getProperty("java.home"));
    adminEnv.append(",");
  }
  // see MAPREDUCE-3068, MAPREDUCE-3065
  adminEnv.append("MALLOC_ARENA_MAX=4");
  adminEnv.append(",");
  adminEnv.append("CLASSPATH=").append(getTestRuntimeClasspath());
  conf.set(YarnConfiguration.NM_ADMIN_USER_ENV, adminEnv.toString());
  if (yarnCluster == null) {
    yarnCluster = new MiniYARNCluster(StramMiniClusterTest.class.getName(), 1, 1, 1);
    yarnCluster.init(conf);
    yarnCluster.start();
  }
  // Pick up the dynamically-bound addresses from the running cluster.
  conf = yarnCluster.getConfig();
  URL url = Thread.currentThread().getContextClassLoader().getResource("yarn-site.xml");
  if (url == null) {
    LOG.error("Could not find 'yarn-site.xml' dummy file in classpath");
    throw new RuntimeException("Could not find 'yarn-site.xml' dummy file in classpath");
  }
  File confFile = new File(url.getPath());
  yarnCluster.getConfig().set("yarn.application.classpath", confFile.getParent());
  LOG.debug("Conf file: {}", confFile);
  // try-with-resources guarantees the stream is closed even if writeXml throws.
  try (OutputStream os = new FileOutputStream(confFile)) {
    yarnCluster.getConfig().writeXml(os);
  }
  try {
    // Give the cluster a moment to settle before tests run.
    Thread.sleep(2000);
  } catch (InterruptedException e) {
    // Restore the interrupt flag so callers can still observe the interruption.
    Thread.currentThread().interrupt();
    LOG.info("setup thread sleep interrupted. message=" + e.getMessage());
  }
}
Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project asterixdb by apache:
the setup method of the class YARNCluster.
/**
 * Instantiates the (Mini) YARN cluster with the configured number of nodes.
 * Post instantiation, data is loaded to HDFS.
 * Called prior to running the Runtime test suite.
 *
 * NOTE(review): only init(conf) is invoked here — miniCluster.start() is not
 * called in this method; presumably the cluster is started elsewhere. Confirm.
 */
public void setup() throws Exception {
conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/core-site.xml"));
conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/mapred-site.xml"));
conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/hdfs-site.xml"));
conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
// Use the simple FIFO scheduler for deterministic test scheduling.
conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "target/integrationts/data");
// Remove any leftover state from a previous run before (re)creating the cluster.
cleanupLocal();
//this constructor is deprecated in hadoop 2x
//dfsCluster = new MiniDFSCluster(nameNodePort, conf, numDataNodes, true, true, StartupOption.REGULAR, null);
miniCluster = new MiniYARNCluster("Asterix_testing", numDataNodes, 1, 1);
miniCluster.init(conf);
}
Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project kitten by cloudera:
the setup method of the class TestKittenDistributedShell.
@BeforeClass
public static void setup() throws InterruptedException, IOException {
  LOG.info("Starting up YARN cluster");
  conf.setInt("yarn.scheduler.fifo.minimum-allocation-mb", 128);
  conf.set("yarn.nodemanager.vmem-pmem-ratio", "20.0");
  if (yarnCluster == null) {
    yarnCluster = new MiniYARNCluster(TestKittenDistributedShell.class.getName(), 1, 1, 1);
    yarnCluster.init(conf);
    yarnCluster.start();
    // Pick up the dynamically-bound addresses from the running cluster.
    conf = yarnCluster.getConfig();
  }
  try {
    // Give the cluster a moment to settle before tests run.
    Thread.sleep(2000);
  } catch (InterruptedException e) {
    // Restore the interrupt flag so callers can still observe the interruption.
    Thread.currentThread().interrupt();
    LOG.info("setup thread sleep interrupted. message=" + e.getMessage());
  }
}
Aggregations