Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project cdap by caskdata.
The class YarnOperationalStatsTest, method createYarnCluster.
@Override
protected MiniYARNCluster createYarnCluster() throws IOException, InterruptedException, YarnException {
  MiniYARNCluster yarnCluster = new MiniYARNCluster(getClass().getName(), 1, 1, 1);
  yarnCluster.init(new Configuration());
  yarnCluster.start();
  return yarnCluster;
}
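The abstract base test presumably stops the cluster after the run; the counterpart below is only a minimal sketch of such a teardown, not part of the CDAP snippet above (the method name stopYarnCluster is assumed).

@Override
protected void stopYarnCluster(MiniYARNCluster yarnCluster) {
  // MiniYARNCluster is a CompositeService, so stop() shuts down the embedded
  // ResourceManager and NodeManager started above.
  yarnCluster.stop();
}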
Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project cdap by caskdata.
The class YarnRMHAOperationalStatsTest, method createYarnCluster.
@Override
protected MiniYARNCluster createYarnCluster() throws IOException, InterruptedException, YarnException {
  Configuration hConf = new Configuration();
  hConf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
  String hostname = MiniYARNCluster.getHostname();
  for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
    hConf.set(HAUtil.addSuffix(confKey, "rm0"), hostname + ":" + Networks.getRandomPort());
    hConf.set(HAUtil.addSuffix(confKey, "rm1"), hostname + ":" + Networks.getRandomPort());
  }
  MiniYARNCluster yarnCluster = new MiniYARNCluster(getClass().getName(), 2, 2, 2, 2);
  yarnCluster.init(hConf);
  yarnCluster.start();
  yarnCluster.getResourceManager(0).getRMContext().getRMAdminService()
    .transitionToActive(new HAServiceProtocol.StateChangeRequestInfo(HAServiceProtocol.RequestSource.REQUEST_BY_USER));
  return yarnCluster;
}
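After the manual transitionToActive call, a test might want to confirm that rm0 really is the active ResourceManager before proceeding. The check below is a sketch of one way to do that, not part of the CDAP test; it assumes the mini cluster's RMContext exposes the HA state.

HAServiceProtocol.HAServiceState state =
    yarnCluster.getResourceManager(0).getRMContext().getHAServiceState();
if (state != HAServiceProtocol.HAServiceState.ACTIVE) {
  // Fail fast if the transition above did not take effect.
  throw new IllegalStateException("rm0 did not become active, state=" + state);
}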
Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project kitten by cloudera.
The class TestKittenDistributedShell, method setup.
@BeforeClass
public static void setup() throws InterruptedException, IOException {
  LOG.info("Starting up YARN cluster");
  conf.setInt("yarn.scheduler.fifo.minimum-allocation-mb", 128);
  conf.set("yarn.nodemanager.vmem-pmem-ratio", "20.0");
  if (yarnCluster == null) {
    yarnCluster = new MiniYARNCluster(TestKittenDistributedShell.class.getName(), 1, 1, 1);
    yarnCluster.init(conf);
    yarnCluster.start();
    conf = yarnCluster.getConfig();
  }
  try {
    Thread.sleep(2000);
  } catch (InterruptedException e) {
    LOG.info("setup thread sleep interrupted. message=" + e.getMessage());
  }
}
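Once setup() has replaced conf with the cluster's own configuration, test methods can talk to the mini cluster through an ordinary YarnClient. The helper below is a hedged sketch of such a check, not part of the Kitten test (the method name assertClusterIsEmpty is invented for illustration, and assertTrue is assumed to be statically imported from JUnit).

private static void assertClusterIsEmpty() throws IOException, YarnException {
  YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();
  try {
    // A freshly started mini cluster should not report any applications yet.
    assertTrue(yarnClient.getApplications().isEmpty());
  } finally {
    yarnClient.stop();
  }
}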
Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project metron by apache.
The class YarnComponent, method start.
@Override
public void start() throws UnableToStartException {
  conf = new YarnConfiguration();
  conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128);
  conf.set("yarn.log.dir", "target");
  conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
  conf.set(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class.getName());
  conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
  try {
    yarnCluster = new MiniYARNCluster(testName, 1, NUM_NMS, 1, 1, true);
    yarnCluster.init(conf);
    yarnCluster.start();
    waitForNMsToRegister();
    URL url = Thread.currentThread().getContextClassLoader().getResource("yarn-site.xml");
    if (url == null) {
      throw new RuntimeException("Could not find 'yarn-site.xml' dummy file in classpath");
    }
    Configuration yarnClusterConfig = yarnCluster.getConfig();
    yarnClusterConfig.set("yarn.application.classpath", new File(url.getPath()).getParent());
    // Write the configuration to a buffer first (writing directly to the file
    // risks it being read while only partially written, which would then fail).
    ByteArrayOutputStream bytesOut = new ByteArrayOutputStream();
    yarnClusterConfig.writeXml(bytesOut);
    bytesOut.close();
    // Write the buffered bytes to the yarn-site.xml file on the classpath.
    OutputStream os = new FileOutputStream(new File(url.getPath()));
    os.write(bytesOut.toByteArray());
    os.close();
    FileContext fsContext = FileContext.getLocalFSFileContext();
    fsContext.delete(new Path(conf.get("yarn.timeline-service.leveldb-timeline-store.path")), true);
    try {
      Thread.sleep(2000);
    } catch (InterruptedException e) {
      // Best-effort settle time after startup; interruption here is non-fatal.
    }
  } catch (Exception e) {
    throw new UnableToStartException("Exception setting up yarn cluster", e);
  }
}
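waitForNMsToRegister() is called above but not included in the snippet. A minimal sketch, assuming it simply polls the ResourceManager until NUM_NMS NodeManagers have registered (the timings are illustrative), could look like this:

private void waitForNMsToRegister() throws Exception {
  int attempts = 60;
  while (attempts-- > 0) {
    // The RM context tracks every NodeManager that has registered so far.
    if (yarnCluster.getResourceManager().getRMContext().getRMNodes().size() >= NUM_NMS) {
      return;
    }
    Thread.sleep(1000);
  }
  throw new IllegalStateException("NodeManagers did not register within the expected time");
}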
Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project zeppelin by apache.
The class MiniHadoopCluster, method start.
public void start() throws IOException {
  LOGGER.info("Starting MiniHadoopCluster ...");
  new File(configPath).mkdirs();
  // Start MiniDFSCluster.
  this.dfsCluster = new MiniDFSCluster.Builder(hadoopConf)
      .numDataNodes(2)
      .format(true)
      .waitSafeMode(true)
      .build();
  this.dfsCluster.waitActive();
  saveConfig(hadoopConf, configPath + "/core-site.xml");
  // Start MiniYARNCluster.
  YarnConfiguration baseConfig = new YarnConfiguration(hadoopConf);
  baseConfig.set("yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage", "98");
  baseConfig.set("yarn.scheduler.capacity.maximum-am-resource-percent", "1.0");
  // The following two configs are required by Hadoop 3.
  baseConfig.set("yarn.scheduler.capacity.root.queues", "default");
  baseConfig.set("yarn.scheduler.capacity.root.default.capacity", "100");
  this.yarnCluster = new MiniYARNCluster(getClass().getName(), 2, 1, 1);
  yarnCluster.init(baseConfig);
  // Install a shutdown hook to stop the service and kill all running applications.
  Runtime.getRuntime().addShutdownHook(new Thread() {
    @Override
    public void run() {
      yarnCluster.stop();
    }
  });
  yarnCluster.start();
  // Workaround for YARN-2642: poll until the RM address in the config carries a non-zero port.
  Configuration yarnConfig = yarnCluster.getConfig();
  long start = System.currentTimeMillis();
  while (System.currentTimeMillis() - start < 30 * 1000) {
    try {
      Thread.sleep(100);
    } catch (InterruptedException e) {
      throw new IOException(e);
    }
    if (!yarnConfig.get(YarnConfiguration.RM_ADDRESS).split(":")[1].equals("0")) {
      break;
    }
  }
  if (yarnConfig.get(YarnConfiguration.RM_ADDRESS).split(":")[1].equals("0")) {
    throw new IOException("RM not up yet");
  }
  LOGGER.info("RM address in configuration is " + yarnConfig.get(YarnConfiguration.RM_ADDRESS));
  saveConfig(yarnConfig, configPath + "/yarn-site.xml");
}
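saveConfig(...) is called twice above but not shown in the snippet. A minimal sketch, assuming it does nothing more than serialize the Configuration to an XML site file under configPath, might be:

private void saveConfig(Configuration conf, String dest) throws IOException {
  try (OutputStream out = new FileOutputStream(new File(dest))) {
    // Writes the configuration in standard Hadoop *-site.xml form.
    conf.writeXml(out);
  }
}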