
Example 1 with MiniMRYarnCluster

Use of org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster in the Apache Hadoop project.

From the class DummySocketFactory, the method initAndStartMiniMRYarnCluster:

private MiniMRYarnCluster initAndStartMiniMRYarnCluster(JobConf jobConf) {
    // A single NodeManager is enough here; init() and start() below bring up the MR mini cluster.
    MiniMRYarnCluster miniMRYarnCluster = new MiniMRYarnCluster(this.getClass().getName(), 1);
    miniMRYarnCluster.init(jobConf);
    miniMRYarnCluster.start();
    return miniMRYarnCluster;
}
Also used: MiniMRYarnCluster (org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster)
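
For orientation, a minimal sketch of how a helper like this is usually wired into a JUnit 4 test lifecycle. The field and method names below are illustrative, not taken from DummySocketFactory, and imports (org.junit.Before/After, org.apache.hadoop.mapred.JobConf) are omitted as in the excerpts above:

// Hypothetical pairing of the helper with a JUnit lifecycle.
private MiniMRYarnCluster miniMRYarnCluster;

@Before
public void startCluster() {
    // Any JobConf the test needs can be passed in; an empty one is used here as a placeholder.
    miniMRYarnCluster = initAndStartMiniMRYarnCluster(new JobConf());
}

@After
public void stopCluster() {
    if (miniMRYarnCluster != null) {
        // stop() shuts down the YARN daemons brought up by start().
        miniMRYarnCluster.stop();
    }
}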

Example 2 with MiniMRYarnCluster

Use of org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster in the Apache Hadoop project.

From the class TestMRAMWithNonNormalizedCapabilities, the method setup:

@Before
public void setup() throws Exception {
    if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
        LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
        return;
    }
    if (mrCluster == null) {
        mrCluster = new MiniMRYarnCluster(getClass().getSimpleName());
        mrCluster.init(new Configuration());
        mrCluster.start();
    }
    // Copy MRAppJar and make it private. TODO: FIXME. This is a hack to
    // work around the absent public dist cache.
    localFs.copyFromLocalFile(new Path(MiniMRYarnCluster.APPJAR), APP_JAR);
    localFs.setPermission(APP_JAR, new FsPermission("700"));
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), MiniMRYarnCluster (org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster), FsPermission (org.apache.hadoop.fs.permission.FsPermission), File (java.io.File), Before (org.junit.Before)
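
A setup like this is normally paired with a teardown that releases the cluster. A minimal sketch, assuming the same mrCluster field; the @After hook below is illustrative rather than copied from the test:

@After
public void tearDown() {
    if (mrCluster != null) {
        // Shut down the NodeManagers, ResourceManager and JobHistoryServer started in setup().
        mrCluster.stop();
        mrCluster = null;
    }
}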

Example 3 with MiniMRYarnCluster

Use of org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster in the Apache Hadoop project.

From the class TestMRTimelineEventHandling, the method testMRTimelineEventHandling:

@Test
public void testMRTimelineEventHandling() throws Exception {
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
    conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
    MiniMRYarnCluster cluster = null;
    try {
        cluster = new MiniMRYarnCluster(TestMRTimelineEventHandling.class.getSimpleName(), 1);
        cluster.init(conf);
        cluster.start();
        conf.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS, MiniYARNCluster.getHostname() + ":" + cluster.getApplicationHistoryServer().getPort());
        TimelineStore ts = cluster.getApplicationHistoryServer().getTimelineStore();
        String localPathRoot = System.getProperty("test.build.data", "build/test/data");
        Path inDir = new Path(localPathRoot, "input");
        Path outDir = new Path(localPathRoot, "output");
        RunningJob job = UtilsForTests.runJobSucceed(new JobConf(conf), inDir, outDir);
        Assert.assertEquals(JobStatus.SUCCEEDED, job.getJobStatus().getState().getValue());
        TimelineEntities entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(1, entities.getEntities().size());
        TimelineEntity tEntity = entities.getEntities().get(0);
        Assert.assertEquals(job.getID().toString(), tEntity.getEntityId());
        Assert.assertEquals("MAPREDUCE_JOB", tEntity.getEntityType());
        Assert.assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(tEntity.getEvents().size() - 1).getEventType());
        Assert.assertEquals(EventType.JOB_FINISHED.toString(), tEntity.getEvents().get(0).getEventType());
        job = UtilsForTests.runJobFail(new JobConf(conf), inDir, outDir);
        Assert.assertEquals(JobStatus.FAILED, job.getJobStatus().getState().getValue());
        entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(2, entities.getEntities().size());
        tEntity = entities.getEntities().get(0);
        Assert.assertEquals(job.getID().toString(), tEntity.getEntityId());
        Assert.assertEquals("MAPREDUCE_JOB", tEntity.getEntityType());
        Assert.assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(tEntity.getEvents().size() - 1).getEventType());
        Assert.assertEquals(EventType.JOB_FAILED.toString(), tEntity.getEvents().get(0).getEventType());
    } finally {
        if (cluster != null) {
            cluster.stop();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), Configuration (org.apache.hadoop.conf.Configuration), MiniMRYarnCluster (org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster), TimelineEntities (org.apache.hadoop.yarn.api.records.timeline.TimelineEntities), TimelineEntity (org.apache.hadoop.yarn.api.records.timeline.TimelineEntity), TimelineStore (org.apache.hadoop.yarn.server.timeline.TimelineStore), Test (org.junit.Test)
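
The entity checks after the succeeded and failed runs are identical apart from the terminal event, and the assertions above imply the events come back newest-first (index 0 is the terminal event, the last index is AM_STARTED). A small helper could factor them out; checkJobEvents is a hypothetical name, and the accessors are the same TimelineEntity calls used above:

private static void checkJobEvents(TimelineEntity entity, String expectedJobId, EventType expectedTerminalEvent) {
    Assert.assertEquals(expectedJobId, entity.getEntityId());
    Assert.assertEquals("MAPREDUCE_JOB", entity.getEntityType());
    // The oldest event (AM_STARTED) sits at the end of the list, the terminal event at index 0.
    Assert.assertEquals(EventType.AM_STARTED.toString(), entity.getEvents().get(entity.getEvents().size() - 1).getEventType());
    Assert.assertEquals(expectedTerminalEvent.toString(), entity.getEvents().get(0).getEventType());
}

With it, the two assertion blocks above reduce to checkJobEvents(tEntity, job.getID().toString(), EventType.JOB_FINISHED) and checkJobEvents(tEntity, job.getID().toString(), EventType.JOB_FAILED).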

Example 4 with MiniMRYarnCluster

Use of org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster in the Apache Hadoop project.

From the class TestMRTimelineEventHandling, the method testMRNewTimelineServiceEventHandling:

@SuppressWarnings("deprecation")
@Test
public void testMRNewTimelineServiceEventHandling() throws Exception {
    LOG.info("testMRNewTimelineServiceEventHandling start.");
    String testDir = new File("target", getClass().getSimpleName() + "-test_dir").getAbsolutePath();
    String storageDir = testDir + File.separator + "timeline_service_data";
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
    // enable new timeline service
    conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
    conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS, FileSystemTimelineWriterImpl.class, TimelineWriter.class);
    conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
    // set the file system root directory
    conf.set(FileSystemTimelineWriterImpl.TIMELINE_SERVICE_STORAGE_DIR_ROOT, storageDir);
    // enable aux-service based timeline collectors
    conf.set(YarnConfiguration.NM_AUX_SERVICES, TIMELINE_AUX_SERVICE_NAME);
    conf.set(YarnConfiguration.NM_AUX_SERVICES + "." + TIMELINE_AUX_SERVICE_NAME + ".class", PerNodeTimelineCollectorsAuxService.class.getName());
    conf.setBoolean(YarnConfiguration.SYSTEM_METRICS_PUBLISHER_ENABLED, true);
    MiniMRYarnCluster cluster = null;
    try {
        cluster = new MiniMRYarnCluster(TestMRTimelineEventHandling.class.getSimpleName(), 1, true);
        cluster.init(conf);
        cluster.start();
        LOG.info("A MiniMRYarnCluster get start.");
        Path inDir = new Path(testDir, "input");
        Path outDir = new Path(testDir, "output");
        LOG.info("Run 1st job which should be successful.");
        JobConf successConf = new JobConf(conf);
        successConf.set("dummy_conf1", UtilsForTests.createConfigValue(51 * 1024));
        successConf.set("dummy_conf2", UtilsForTests.createConfigValue(51 * 1024));
        successConf.set("huge_dummy_conf1", UtilsForTests.createConfigValue(101 * 1024));
        successConf.set("huge_dummy_conf2", UtilsForTests.createConfigValue(101 * 1024));
        RunningJob job = UtilsForTests.runJobSucceed(successConf, inDir, outDir);
        Assert.assertEquals(JobStatus.SUCCEEDED, job.getJobStatus().getState().getValue());
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(new Configuration(cluster.getConfig()));
        yarnClient.start();
        EnumSet<YarnApplicationState> appStates = EnumSet.allOf(YarnApplicationState.class);
        ApplicationId firstAppId = null;
        List<ApplicationReport> apps = yarnClient.getApplications(appStates);
        Assert.assertEquals(1, apps.size());
        ApplicationReport appReport = apps.get(0);
        firstAppId = appReport.getApplicationId();
        UtilsForTests.waitForAppFinished(job, cluster);
        checkNewTimelineEvent(firstAppId, appReport, storageDir);
        LOG.info("Run 2nd job which should be failed.");
        job = UtilsForTests.runJobFail(new JobConf(conf), inDir, outDir);
        Assert.assertEquals(JobStatus.FAILED, job.getJobStatus().getState().getValue());
        apps = yarnClient.getApplications(appStates);
        Assert.assertEquals(2, apps.size());
        appReport = apps.get(0).getApplicationId().equals(firstAppId) ? apps.get(0) : apps.get(1);
        checkNewTimelineEvent(firstAppId, appReport, storageDir);
    } finally {
        if (cluster != null) {
            cluster.stop();
        }
        // Clean up the test directory.
        File testDirFolder = new File(testDir);
        if (testDirFolder.isDirectory()) {
            FileUtils.deleteDirectory(testDirFolder);
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), Configuration (org.apache.hadoop.conf.Configuration), MiniMRYarnCluster (org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster), YarnApplicationState (org.apache.hadoop.yarn.api.records.YarnApplicationState), YarnClient (org.apache.hadoop.yarn.client.api.YarnClient), ApplicationReport (org.apache.hadoop.yarn.api.records.ApplicationReport), PerNodeTimelineCollectorsAuxService (org.apache.hadoop.yarn.server.timelineservice.collector.PerNodeTimelineCollectorsAuxService), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), File (java.io.File), Test (org.junit.Test)
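
Most of this test is configuration for the v2 timeline service. A sketch of pulling that block into a reusable helper; newTimelineV2Conf is a hypothetical name, TIMELINE_AUX_SERVICE_NAME is the constant from the enclosing test, and every key is one already set above:

private static Configuration newTimelineV2Conf(String storageDir) {
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
    // Select the v2 timeline service with the file-system based writer.
    conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
    conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS, FileSystemTimelineWriterImpl.class, TimelineWriter.class);
    conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
    conf.set(FileSystemTimelineWriterImpl.TIMELINE_SERVICE_STORAGE_DIR_ROOT, storageDir);
    // Run the per-node timeline collectors as an NM auxiliary service.
    conf.set(YarnConfiguration.NM_AUX_SERVICES, TIMELINE_AUX_SERVICE_NAME);
    conf.set(YarnConfiguration.NM_AUX_SERVICES + "." + TIMELINE_AUX_SERVICE_NAME + ".class",
        PerNodeTimelineCollectorsAuxService.class.getName());
    conf.setBoolean(YarnConfiguration.SYSTEM_METRICS_PUBLISHER_ENABLED, true);
    return conf;
}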

Example 5 with MiniMRYarnCluster

Use of org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster in the Apache Hadoop project.

From the class MiniMRClientClusterFactory, the method create:

public static MiniMRClientCluster create(Class<?> caller, String identifier, int noOfNMs, Configuration conf) throws IOException {
    if (conf == null) {
        conf = new Configuration();
    }
    FileSystem fs = FileSystem.get(conf);
    Path testRootDir = new Path("target", identifier + "-tmpDir").makeQualified(fs);
    Path appJar = new Path(testRootDir, "MRAppJar.jar");
    // Copy MRAppJar and make it private.
    Path appMasterJar = new Path(MiniMRYarnCluster.APPJAR);
    fs.copyFromLocalFile(appMasterJar, appJar);
    fs.setPermission(appJar, new FsPermission("744"));
    Job job = Job.getInstance(conf);
    job.addFileToClassPath(appJar);
    Path callerJar = new Path(JarFinder.getJar(caller));
    Path remoteCallerJar = new Path(testRootDir, callerJar.getName());
    fs.copyFromLocalFile(callerJar, remoteCallerJar);
    fs.setPermission(remoteCallerJar, new FsPermission("744"));
    job.addFileToClassPath(remoteCallerJar);
    MiniMRYarnCluster miniMRYarnCluster = new MiniMRYarnCluster(identifier, noOfNMs);
    job.getConfiguration().set("minimrclientcluster.caller.name", identifier);
    job.getConfiguration().setInt("minimrclientcluster.nodemanagers.number", noOfNMs);
    miniMRYarnCluster.init(job.getConfiguration());
    miniMRYarnCluster.start();
    return new MiniMRYarnClusterAdapter(miniMRYarnCluster);
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), MiniMRYarnCluster (org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster), FileSystem (org.apache.hadoop.fs.FileSystem), FsPermission (org.apache.hadoop.fs.permission.FsPermission), Job (org.apache.hadoop.mapreduce.Job)
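
For completeness, a sketch of driving the factory from a test. MyTest is a hypothetical caller class, and the getConfig()/stop() calls assume the MiniMRClientCluster interface from Hadoop's test support; treat the exact lifecycle here as illustrative:

@Test
public void runsAgainstMiniCluster() throws Exception {
    // One NodeManager; the caller class is used to locate the test jar that is shipped to the cluster.
    MiniMRClientCluster mrClientCluster =
        MiniMRClientClusterFactory.create(MyTest.class, "MyTest", 1, new Configuration());
    try {
        // Jobs submitted with this configuration run against the mini cluster.
        Configuration clusterConf = mrClientCluster.getConfig();
        // ... submit and verify jobs here ...
    } finally {
        mrClientCluster.stop();
    }
}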

Aggregations

Classes used together with MiniMRYarnCluster across the indexed examples, with usage counts:

MiniMRYarnCluster (org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster): 11
Configuration (org.apache.hadoop.conf.Configuration): 10
Path (org.apache.hadoop.fs.Path): 8
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 7
Test (org.junit.Test): 5
FileSystem (org.apache.hadoop.fs.FileSystem): 4
File (java.io.File): 3
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 3
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 2
TimelineEntities (org.apache.hadoop.yarn.api.records.timeline.TimelineEntities): 2
TimelineEntity (org.apache.hadoop.yarn.api.records.timeline.TimelineEntity): 2
TimelineStore (org.apache.hadoop.yarn.server.timeline.TimelineStore): 2
BeforeClass (org.junit.BeforeClass): 2
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 1
JobClient (org.apache.hadoop.mapred.JobClient): 1
JobConf (org.apache.hadoop.mapred.JobConf): 1
JobStatus (org.apache.hadoop.mapred.JobStatus): 1
Job (org.apache.hadoop.mapreduce.Job): 1
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 1
ApplicationReport (org.apache.hadoop.yarn.api.records.ApplicationReport): 1