Usage of org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster in the Apache Hadoop project: class DummySocketFactory, method initAndStartMiniMRYarnCluster.
/**
 * Builds a single-node {@link MiniMRYarnCluster}, initializes it with the
 * supplied job configuration, and starts it.
 *
 * @param jobConf configuration used to initialize the mini-cluster
 * @return the started mini MR/YARN cluster
 */
private MiniMRYarnCluster initAndStartMiniMRYarnCluster(JobConf jobConf) {
  // One NodeManager is enough for these socket-factory tests.
  final MiniMRYarnCluster cluster =
      new MiniMRYarnCluster(this.getClass().getName(), 1);
  cluster.init(jobConf);
  cluster.start();
  return cluster;
}
Usage of org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster in the Apache Hadoop project: class TestMRAMWithNonNormalizedCapabilities, method setup.
@Before
public void setup() throws Exception {
  // The MR app-master jar must have been built first; without it this
  // test class cannot run, so silently skip.
  final File appJar = new File(MiniMRYarnCluster.APPJAR);
  if (!appJar.exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  // Bring up the shared mini MR/YARN cluster lazily, once per test class.
  if (mrCluster == null) {
    mrCluster = new MiniMRYarnCluster(getClass().getSimpleName());
    mrCluster.init(new Configuration());
    mrCluster.start();
  }
  // Copy MRAppJar and make it private. TODO: FIXME. This is a hack to
  // workaround the absent public discache.
  localFs.copyFromLocalFile(new Path(MiniMRYarnCluster.APPJAR), APP_JAR);
  localFs.setPermission(APP_JAR, new FsPermission("700"));
}
Usage of org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster in the Apache Hadoop project: class TestMRTimelineEventHandling, method testMRTimelineEventHandling.
@Test
public void testMRTimelineEventHandling() throws Exception {
// Verifies that MR jobs (one succeeding, one failing) publish their
// lifecycle events to the v1 timeline service hosted by the mini-cluster.
Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
MiniMRYarnCluster cluster = null;
try {
// Single-NM cluster; the timeline store lives in the AHS it starts.
cluster = new MiniMRYarnCluster(TestMRTimelineEventHandling.class.getSimpleName(), 1);
cluster.init(conf);
cluster.start();
// Point clients at the actual (dynamically assigned) timeline web address.
conf.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS, MiniYARNCluster.getHostname() + ":" + cluster.getApplicationHistoryServer().getPort());
TimelineStore ts = cluster.getApplicationHistoryServer().getTimelineStore();
String localPathRoot = System.getProperty("test.build.data", "build/test/data");
Path inDir = new Path(localPathRoot, "input");
Path outDir = new Path(localPathRoot, "output");
// Job 1: expected to succeed and produce one MAPREDUCE_JOB entity.
RunningJob job = UtilsForTests.runJobSucceed(new JobConf(conf), inDir, outDir);
Assert.assertEquals(JobStatus.SUCCEEDED, job.getJobStatus().getState().getValue());
TimelineEntities entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null);
Assert.assertEquals(1, entities.getEntities().size());
TimelineEntity tEntity = entities.getEntities().get(0);
Assert.assertEquals(job.getID().toString(), tEntity.getEntityId());
Assert.assertEquals("MAPREDUCE_JOB", tEntity.getEntityType());
// Events are returned newest-first: AM_STARTED is last, JOB_FINISHED first.
Assert.assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(tEntity.getEvents().size() - 1).getEventType());
Assert.assertEquals(EventType.JOB_FINISHED.toString(), tEntity.getEvents().get(0).getEventType());
// Job 2: expected to fail; the store should now hold two entities.
job = UtilsForTests.runJobFail(new JobConf(conf), inDir, outDir);
Assert.assertEquals(JobStatus.FAILED, job.getJobStatus().getState().getValue());
entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null);
Assert.assertEquals(2, entities.getEntities().size());
// Entities are returned newest-first, so index 0 is the failed job.
tEntity = entities.getEntities().get(0);
Assert.assertEquals(job.getID().toString(), tEntity.getEntityId());
Assert.assertEquals("MAPREDUCE_JOB", tEntity.getEntityType());
Assert.assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(tEntity.getEvents().size() - 1).getEventType());
Assert.assertEquals(EventType.JOB_FAILED.toString(), tEntity.getEvents().get(0).getEventType());
} finally {
// Always tear the cluster down, even when an assertion failed mid-test.
if (cluster != null) {
cluster.stop();
}
}
}
Usage of org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster in the Apache Hadoop project: class TestMRTimelineEventHandling, method testMRNewTimelineServiceEventHandling.
@SuppressWarnings("deprecation")
@Test
public void testMRNewTimelineServiceEventHandling() throws Exception {
  // Verifies that MR jobs publish events to the v2 ("new") timeline service
  // using the file-system writer backend and aux-service based collectors.
  LOG.info("testMRNewTimelineServiceEventHandling start.");
  String testDir = new File("target", getClass().getSimpleName() + "-test_dir").getAbsolutePath();
  String storageDir = testDir + File.separator + "timeline_service_data";
  Configuration conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
  // enable new timeline service
  conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
  conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS, FileSystemTimelineWriterImpl.class, TimelineWriter.class);
  conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
  // set the file system root directory
  conf.set(FileSystemTimelineWriterImpl.TIMELINE_SERVICE_STORAGE_DIR_ROOT, storageDir);
  // enable aux-service based timeline collectors
  conf.set(YarnConfiguration.NM_AUX_SERVICES, TIMELINE_AUX_SERVICE_NAME);
  conf.set(YarnConfiguration.NM_AUX_SERVICES + "." + TIMELINE_AUX_SERVICE_NAME + ".class", PerNodeTimelineCollectorsAuxService.class.getName());
  conf.setBoolean(YarnConfiguration.SYSTEM_METRICS_PUBLISHER_ENABLED, true);
  MiniMRYarnCluster cluster = null;
  try {
    cluster = new MiniMRYarnCluster(TestMRTimelineEventHandling.class.getSimpleName(), 1, true);
    cluster.init(conf);
    cluster.start();
    LOG.info("A MiniMRYarnCluster get start.");
    Path inDir = new Path(testDir, "input");
    Path outDir = new Path(testDir, "output");
    LOG.info("Run 1st job which should be successful.");
    JobConf successConf = new JobConf(conf);
    // Oversized config values exercise the timeline writer's handling of
    // large (and too-large) configuration entries.
    successConf.set("dummy_conf1", UtilsForTests.createConfigValue(51 * 1024));
    successConf.set("dummy_conf2", UtilsForTests.createConfigValue(51 * 1024));
    successConf.set("huge_dummy_conf1", UtilsForTests.createConfigValue(101 * 1024));
    successConf.set("huge_dummy_conf2", UtilsForTests.createConfigValue(101 * 1024));
    RunningJob job = UtilsForTests.runJobSucceed(successConf, inDir, outDir);
    Assert.assertEquals(JobStatus.SUCCEEDED, job.getJobStatus().getState().getValue());
    // NOTE(review): yarnClient is never stopped; consider stop() in a
    // finally block to release its resources — confirm and fix separately.
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new Configuration(cluster.getConfig()));
    yarnClient.start();
    EnumSet<YarnApplicationState> appStates = EnumSet.allOf(YarnApplicationState.class);
    ApplicationId firstAppId = null;
    List<ApplicationReport> apps = yarnClient.getApplications(appStates);
    // JUnit's assertEquals takes (expected, actual) — original had them swapped.
    Assert.assertEquals(1, apps.size());
    ApplicationReport appReport = apps.get(0);
    firstAppId = appReport.getApplicationId();
    UtilsForTests.waitForAppFinished(job, cluster);
    checkNewTimelineEvent(firstAppId, appReport, storageDir);
    LOG.info("Run 2nd job which should be failed.");
    job = UtilsForTests.runJobFail(new JobConf(conf), inDir, outDir);
    Assert.assertEquals(JobStatus.FAILED, job.getJobStatus().getState().getValue());
    apps = yarnClient.getApplications(appStates);
    // (expected, actual) order fixed here as well.
    Assert.assertEquals(2, apps.size());
    // Re-fetch the first app's report regardless of result ordering.
    appReport = apps.get(0).getApplicationId().equals(firstAppId) ? apps.get(0) : apps.get(1);
    checkNewTimelineEvent(firstAppId, appReport, storageDir);
  } finally {
    if (cluster != null) {
      cluster.stop();
    }
    // Cleanup test file
    File testDirFolder = new File(testDir);
    if (testDirFolder.isDirectory()) {
      FileUtils.deleteDirectory(testDirFolder);
    }
  }
}
Usage of org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster in the Apache Hadoop project: class MiniMRClientClusterFactory, method create.
/**
 * Creates and starts a {@link MiniMRClientCluster} backed by a
 * {@link MiniMRYarnCluster}, staging the MR app-master jar and the caller's
 * jar on the test filesystem and adding both to the job classpath.
 *
 * @param caller class whose jar is located (via JarFinder) and staged
 * @param identifier unique name used for the cluster and its temp dir
 * @param noOfNMs number of NodeManagers to start
 * @param conf base configuration; a fresh one is created when null
 * @return an adapter wrapping the started mini MR/YARN cluster
 * @throws IOException on filesystem or jar-staging failures
 */
public static MiniMRClientCluster create(Class<?> caller, String identifier, int noOfNMs, Configuration conf) throws IOException {
  if (conf == null) {
    conf = new Configuration();
  }
  FileSystem fs = FileSystem.get(conf);
  // fs.makeQualified(path) replaces the deprecated Path.makeQualified(FileSystem);
  // it qualifies against the same URI and working directory.
  Path testRootDir = fs.makeQualified(new Path("target", identifier + "-tmpDir"));
  Path appJar = new Path(testRootDir, "MRAppJar.jar");
  // Copy MRAppJar and make it private.
  Path appMasterJar = new Path(MiniMRYarnCluster.APPJAR);
  fs.copyFromLocalFile(appMasterJar, appJar);
  fs.setPermission(appJar, new FsPermission("744"));
  Job job = Job.getInstance(conf);
  job.addFileToClassPath(appJar);
  // Stage the caller's own jar so its classes are visible to tasks.
  Path callerJar = new Path(JarFinder.getJar(caller));
  Path remoteCallerJar = new Path(testRootDir, callerJar.getName());
  fs.copyFromLocalFile(callerJar, remoteCallerJar);
  fs.setPermission(remoteCallerJar, new FsPermission("744"));
  job.addFileToClassPath(remoteCallerJar);
  MiniMRYarnCluster miniMRYarnCluster = new MiniMRYarnCluster(identifier, noOfNMs);
  // Record the caller name and NM count so the adapter can restart the
  // cluster with identical settings.
  job.getConfiguration().set("minimrclientcluster.caller.name", identifier);
  job.getConfiguration().setInt("minimrclientcluster.nodemanagers.number", noOfNMs);
  miniMRYarnCluster.init(job.getConfiguration());
  miniMRYarnCluster.start();
  return new MiniMRYarnClusterAdapter(miniMRYarnCluster);
}
Aggregations