
Example 6 with MiniMRYarnCluster

use of org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster in project hadoop by apache.

the class MiniMRYarnClusterAdapter method restart.

@Override
public void restart() {
    if (!miniMRYarnCluster.getServiceState().equals(STATE.STARTED)) {
        LOG.warn("Cannot restart the mini cluster, start it first");
        return;
    }
    // Snapshot the live configuration; it holds the addresses the cluster is
    // currently using, plus the caller name and NM count recorded at creation.
    Configuration oldConf = new Configuration(getConfig());
    String callerName = oldConf.get("minimrclientcluster.caller.name", this.getClass().getName());
    int noOfNMs = oldConf.getInt("minimrclientcluster.nodemanagers.number", 1);
    // Pin the RM and JobHistoryServer to their current ports so the restarted
    // cluster comes back on the same addresses that clients already hold.
    oldConf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
    oldConf.setBoolean(JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS, true);
    stop();
    miniMRYarnCluster = new MiniMRYarnCluster(callerName, noOfNMs);
    miniMRYarnCluster.init(oldConf);
    miniMRYarnCluster.start();
}
Also used : YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.hadoop.conf.Configuration) MiniMRYarnCluster(org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster)
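
For orientation, here is a minimal, hedged sketch of how this restart path is typically reached through the MiniMRClientCluster facade. It assumes the hadoop-mapreduce-client-jobclient test artifact is on the classpath; the class name RestartSketch is hypothetical and not part of the source above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.MiniMRClientCluster;
import org.apache.hadoop.mapred.MiniMRClientClusterFactory;

public class RestartSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // The factory is expected to init and start the underlying MiniMRYarnCluster,
        // recording the caller name and node-manager count in the
        // "minimrclientcluster.*" keys that restart() reads back above.
        MiniMRClientCluster cluster =
                MiniMRClientClusterFactory.create(RestartSketch.class, 1, conf);
        // ... submit jobs against cluster.getConfig() ...
        cluster.restart();  // stops and re-creates the cluster on the same ports
        cluster.stop();
    }
}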

Example 7 with MiniMRYarnCluster

use of org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster in project hadoop by apache.

the class DummySocketFactory method testSocketFactory.

/**
   * Check that we can reach a NameNode or Resource Manager using a specific
   * socket factory
   */
@Test
public void testSocketFactory() throws IOException {
    // Create a standard mini-cluster
    Configuration sconf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(sconf).numDataNodes(1).build();
    final int nameNodePort = cluster.getNameNodePort();
    // Get a reference to its DFS directly
    FileSystem fs = cluster.getFileSystem();
    Assert.assertTrue(fs instanceof DistributedFileSystem);
    DistributedFileSystem directDfs = (DistributedFileSystem) fs;
    Configuration cconf = getCustomSocketConfigs(nameNodePort);
    fs = FileSystem.get(cconf);
    Assert.assertTrue(fs instanceof DistributedFileSystem);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    JobClient client = null;
    MiniMRYarnCluster miniMRYarnCluster = null;
    try {
        // This will test RPC to the NameNode only.
        // could we test Client-DataNode connections?
        Path filePath = new Path("/dir");
        Assert.assertFalse(directDfs.exists(filePath));
        Assert.assertFalse(dfs.exists(filePath));
        directDfs.mkdirs(filePath);
        Assert.assertTrue(directDfs.exists(filePath));
        Assert.assertTrue(dfs.exists(filePath));
        // This will test RPC to a Resource Manager
        fs = FileSystem.get(sconf);
        JobConf jobConf = new JobConf();
        FileSystem.setDefaultUri(jobConf, fs.getUri().toString());
        miniMRYarnCluster = initAndStartMiniMRYarnCluster(jobConf);
        JobConf jconf = new JobConf(miniMRYarnCluster.getConfig());
        jconf.set("hadoop.rpc.socket.factory.class.default", "org.apache.hadoop.ipc.DummySocketFactory");
        jconf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
        String rmAddress = jconf.get(YarnConfiguration.RM_ADDRESS);
        String[] split = rmAddress.split(":");
        jconf.set(YarnConfiguration.RM_ADDRESS, split[0] + ':' + (Integer.parseInt(split[1]) + 10));
        client = new JobClient(jconf);
        JobStatus[] jobs = client.jobsToComplete();
        Assert.assertTrue(jobs.length == 0);
    } finally {
        closeClient(client);
        closeDfs(dfs);
        closeDfs(directDfs);
        stopMiniMRYarnCluster(miniMRYarnCluster);
        shutdownDFSCluster(cluster);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.hadoop.conf.Configuration) MiniMRYarnCluster(org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) JobClient(org.apache.hadoop.mapred.JobClient) JobStatus(org.apache.hadoop.mapred.JobStatus) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) FileSystem(org.apache.hadoop.fs.FileSystem) JobConf(org.apache.hadoop.mapred.JobConf) Test(org.junit.Test)
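
The test relies on a DummySocketFactory registered under hadoop.rpc.socket.factory.class.default, whose source is not shown here. The sketch below is a hypothetical equivalent (the name CountingSocketFactory is mine, not from the source): it extends Hadoop's StandardSocketFactory and counts connections, which is the usual way to prove that RPC traffic really went through a custom factory. Hadoop instantiates socket factories reflectively, so a public no-argument constructor is required.

import java.io.IOException;
import java.net.Socket;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.net.StandardSocketFactory;

public class CountingSocketFactory extends StandardSocketFactory {
    private static final AtomicInteger SOCKETS = new AtomicInteger();

    public CountingSocketFactory() {
        // Public no-arg constructor: Hadoop creates the factory via reflection.
    }

    @Override
    public Socket createSocket() throws IOException {
        SOCKETS.incrementAndGet();   // record every outgoing RPC connection
        return super.createSocket();
    }

    public static int socketCount() {
        return SOCKETS.get();
    }
}

A client would then point RPC at it with conf.set("hadoop.rpc.socket.factory.class.default", CountingSocketFactory.class.getName()) before calling FileSystem.get(conf), which is presumably what getCustomSocketConfigs(nameNodePort) does in the test above.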

Example 8 with MiniMRYarnCluster

use of org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster in project hadoop by apache.

the class TestMRTimelineEventHandling method testTimelineServiceStartInMiniCluster.

@Test
public void testTimelineServiceStartInMiniCluster() throws Exception {
    Configuration conf = new YarnConfiguration();
    /*
     * Timeline service should not start if the config is set to false,
     * regardless of the value of MAPREDUCE_JOB_EMIT_TIMELINE_DATA.
     */
    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false);
    conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
    MiniMRYarnCluster cluster = null;
    try {
        cluster = new MiniMRYarnCluster(TestMRTimelineEventHandling.class.getSimpleName(), 1);
        cluster.init(conf);
        cluster.start();
        // Verify that the timeline service is not started.
        Assert.assertNull("Timeline Service should not have been started", cluster.getApplicationHistoryServer());
    } finally {
        if (cluster != null) {
            cluster.stop();
        }
    }
    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false);
    conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, false);
    cluster = null;
    try {
        cluster = new MiniMRYarnCluster(TestJobHistoryEventHandler.class.getSimpleName(), 1);
        cluster.init(conf);
        cluster.start();
        // Verify that the timeline service is not started.
        Assert.assertNull("Timeline Service should not have been started", cluster.getApplicationHistoryServer());
    } finally {
        if (cluster != null) {
            cluster.stop();
        }
    }
}
Also used : YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) MiniMRYarnCluster(org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster) Test(org.junit.Test)
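
By contrast, the positive case is not shown in this example; the sketch below is an assumption based on how the same APIs are used in Example 9: with the timeline service enabled, getApplicationHistoryServer() should return a started server.

    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
    MiniMRYarnCluster cluster = new MiniMRYarnCluster("TimelineEnabledSketch", 1);
    try {
        cluster.init(conf);
        cluster.start();
        // With the timeline service enabled, the mini cluster should expose the
        // ApplicationHistoryServer that hosts the timeline store.
        Assert.assertNotNull("Timeline Service should have been started",
                cluster.getApplicationHistoryServer());
    } finally {
        cluster.stop();
    }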

Example 9 with MiniMRYarnCluster

use of org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster in project hadoop by apache.

the class TestMRTimelineEventHandling method testMapreduceJobTimelineServiceEnabled.

@Test
public void testMapreduceJobTimelineServiceEnabled() throws Exception {
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
    conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, false);
    MiniMRYarnCluster cluster = null;
    FileSystem fs = null;
    Path inDir = new Path(GenericTestUtils.getTempPath("input"));
    Path outDir = new Path(GenericTestUtils.getTempPath("output"));
    try {
        fs = FileSystem.get(conf);
        cluster = new MiniMRYarnCluster(TestMRTimelineEventHandling.class.getSimpleName(), 1);
        cluster.init(conf);
        cluster.start();
        conf.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS, MiniYARNCluster.getHostname() + ":" + cluster.getApplicationHistoryServer().getPort());
        TimelineStore ts = cluster.getApplicationHistoryServer().getTimelineStore();
        RunningJob job = UtilsForTests.runJobSucceed(new JobConf(conf), inDir, outDir);
        Assert.assertEquals(JobStatus.SUCCEEDED, job.getJobStatus().getState().getValue());
        TimelineEntities entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(0, entities.getEntities().size());
        conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
        job = UtilsForTests.runJobSucceed(new JobConf(conf), inDir, outDir);
        Assert.assertEquals(JobStatus.SUCCEEDED, job.getJobStatus().getState().getValue());
        entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(1, entities.getEntities().size());
        TimelineEntity tEntity = entities.getEntities().get(0);
        Assert.assertEquals(job.getID().toString(), tEntity.getEntityId());
    } finally {
        if (cluster != null) {
            cluster.stop();
        }
        deletePaths(fs, inDir, outDir);
    }
    conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
    conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
    cluster = null;
    try {
        cluster = new MiniMRYarnCluster(TestJobHistoryEventHandler.class.getSimpleName(), 1);
        cluster.init(conf);
        cluster.start();
        conf.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS, MiniYARNCluster.getHostname() + ":" + cluster.getApplicationHistoryServer().getPort());
        TimelineStore ts = cluster.getApplicationHistoryServer().getTimelineStore();
        conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, false);
        RunningJob job = UtilsForTests.runJobSucceed(new JobConf(conf), inDir, outDir);
        Assert.assertEquals(JobStatus.SUCCEEDED, job.getJobStatus().getState().getValue());
        TimelineEntities entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(0, entities.getEntities().size());
        conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
        job = UtilsForTests.runJobSucceed(new JobConf(conf), inDir, outDir);
        Assert.assertEquals(JobStatus.SUCCEEDED, job.getJobStatus().getState().getValue());
        entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(1, entities.getEntities().size());
        TimelineEntity tEntity = entities.getEntities().get(0);
        Assert.assertEquals(job.getID().toString(), tEntity.getEntityId());
    } finally {
        if (cluster != null) {
            cluster.stop();
        }
        deletePaths(fs, inDir, outDir);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) MiniMRYarnCluster(org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster) TimelineEntities(org.apache.hadoop.yarn.api.records.timeline.TimelineEntities) FileSystem(org.apache.hadoop.fs.FileSystem) TimelineEntity(org.apache.hadoop.yarn.api.records.timeline.TimelineEntity) TimelineStore(org.apache.hadoop.yarn.server.timeline.TimelineStore) Test(org.junit.Test)
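
The switch being toggled throughout this test is MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA: the cluster-wide TIMELINE_SERVICE_ENABLED flag only brings up the server, while a job appears in the timeline store only when its own configuration carries the emit flag. A minimal sketch of the job-side wiring, assuming a cluster started as above:

    JobConf jobConf = new JobConf(cluster.getConfig());
    // Only jobs submitted with this flag set publish MAPREDUCE_JOB entities to
    // the timeline store; without it the store stays empty, as asserted above.
    jobConf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
    RunningJob job = UtilsForTests.runJobSucceed(jobConf, inDir, outDir);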

Example 10 with MiniMRYarnCluster

use of org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster in project hadoop by apache.

the class TestMiniMRChildTask method setup.

@BeforeClass
public static void setup() throws IOException {
    // create configuration, dfs, file system and mapred cluster 
    dfs = new MiniDFSCluster.Builder(conf).build();
    fileSys = dfs.getFileSystem();
    if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
        LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
        return;
    }
    if (mr == null) {
        mr = new MiniMRYarnCluster(TestMiniMRChildTask.class.getName());
        Configuration conf = new Configuration();
        mr.init(conf);
        mr.start();
    }
    // Copy MRAppJar and make it private. TODO: FIXME. This is a hack to
    // work around the absent public distributed cache.
    localFs.copyFromLocalFile(new Path(MiniMRYarnCluster.APPJAR), APP_JAR);
    localFs.setPermission(APP_JAR, new FsPermission("700"));
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) MiniMRYarnCluster(org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster) FsPermission(org.apache.hadoop.fs.permission.FsPermission) File(java.io.File) BeforeClass(org.junit.BeforeClass)
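
The source here shows only setup(); a matching teardown (names assumed, not part of the example) would stop the MiniMRYarnCluster and shut down the MiniDFSCluster so later test classes start from a clean slate.

@AfterClass
public static void tearDown() throws IOException {
    if (mr != null) {
        mr.stop();       // stops the RM, NMs and the MR job history server
        mr = null;
    }
    if (dfs != null) {
        dfs.shutdown();  // stops the NameNode and DataNodes
        dfs = null;
    }
}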

Aggregations

MiniMRYarnCluster (org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster): 11 usages
Configuration (org.apache.hadoop.conf.Configuration): 10 usages
Path (org.apache.hadoop.fs.Path): 8 usages
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 7 usages
Test (org.junit.Test): 5 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 4 usages
File (java.io.File): 3 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 3 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 2 usages
TimelineEntities (org.apache.hadoop.yarn.api.records.timeline.TimelineEntities): 2 usages
TimelineEntity (org.apache.hadoop.yarn.api.records.timeline.TimelineEntity): 2 usages
TimelineStore (org.apache.hadoop.yarn.server.timeline.TimelineStore): 2 usages
BeforeClass (org.junit.BeforeClass): 2 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 1 usage
JobClient (org.apache.hadoop.mapred.JobClient): 1 usage
JobConf (org.apache.hadoop.mapred.JobConf): 1 usage
JobStatus (org.apache.hadoop.mapred.JobStatus): 1 usage
Job (org.apache.hadoop.mapreduce.Job): 1 usage
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 1 usage
ApplicationReport (org.apache.hadoop.yarn.api.records.ApplicationReport): 1 usage