Use of org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster in project hadoop by apache.
The class MiniMRYarnClusterAdapter, method restart().
@Override
public void restart() {
  if (!miniMRYarnCluster.getServiceState().equals(STATE.STARTED)) {
    LOG.warn("Cannot restart the mini cluster, start it first");
    return;
  }
  Configuration oldConf = new Configuration(getConfig());
  String callerName = oldConf.get("minimrclientcluster.caller.name",
      this.getClass().getName());
  int noOfNMs = oldConf.getInt("minimrclientcluster.nodemanagers.number", 1);
  oldConf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
  oldConf.setBoolean(JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS, true);
  stop();
  miniMRYarnCluster = new MiniMRYarnCluster(callerName, noOfNMs);
  miniMRYarnCluster.init(oldConf);
  miniMRYarnCluster.start();
}
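A minimal usage sketch of the adapter above, assuming Hadoop's test-side MiniMRClientClusterFactory API; the class name RestartSketch and the one-NodeManager setup are illustrative, not part of the original test code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.MiniMRClientCluster;
import org.apache.hadoop.mapred.MiniMRClientClusterFactory;

public class RestartSketch {
  public static void main(String[] args) throws Exception {
    // Bring up a one-NodeManager mini cluster through the factory; an
    // adapter like the one above typically backs the returned handle.
    MiniMRClientCluster cluster =
        MiniMRClientClusterFactory.create(RestartSketch.class, 1, new Configuration());
    try {
      // restart() stops the underlying MiniMRYarnCluster and starts a new
      // one on the same (now fixed) ports, as implemented above.
      cluster.restart();
    } finally {
      cluster.stop();
    }
  }
}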
Use of org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster in project hadoop by apache.
The class DummySocketFactory, method testSocketFactory().
/**
 * Check that we can reach a NameNode or Resource Manager using a specific
 * socket factory.
 */
@Test
public void testSocketFactory() throws IOException {
  // Create a standard mini-cluster
  Configuration sconf = new Configuration();
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(sconf).numDataNodes(1).build();
  final int nameNodePort = cluster.getNameNodePort();

  // Get a reference to its DFS directly
  FileSystem fs = cluster.getFileSystem();
  Assert.assertTrue(fs instanceof DistributedFileSystem);
  DistributedFileSystem directDfs = (DistributedFileSystem) fs;

  Configuration cconf = getCustomSocketConfigs(nameNodePort);
  fs = FileSystem.get(cconf);
  Assert.assertTrue(fs instanceof DistributedFileSystem);
  DistributedFileSystem dfs = (DistributedFileSystem) fs;

  JobClient client = null;
  MiniMRYarnCluster miniMRYarnCluster = null;
  try {
    // This will test RPC to the NameNode only.
    // could we test Client-DataNode connections?
    Path filePath = new Path("/dir");
    Assert.assertFalse(directDfs.exists(filePath));
    Assert.assertFalse(dfs.exists(filePath));
    directDfs.mkdirs(filePath);
    Assert.assertTrue(directDfs.exists(filePath));
    Assert.assertTrue(dfs.exists(filePath));

    // This will test RPC to a Resource Manager
    fs = FileSystem.get(sconf);
    JobConf jobConf = new JobConf();
    FileSystem.setDefaultUri(jobConf, fs.getUri().toString());
    miniMRYarnCluster = initAndStartMiniMRYarnCluster(jobConf);
    JobConf jconf = new JobConf(miniMRYarnCluster.getConfig());
    jconf.set("hadoop.rpc.socket.factory.class.default",
        "org.apache.hadoop.ipc.DummySocketFactory");
    jconf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
    String rmAddress = jconf.get(YarnConfiguration.RM_ADDRESS);
    String[] split = rmAddress.split(":");
    jconf.set(YarnConfiguration.RM_ADDRESS,
        split[0] + ':' + (Integer.parseInt(split[1]) + 10));
    client = new JobClient(jconf);
    JobStatus[] jobs = client.jobsToComplete();
    Assert.assertTrue(jobs.length == 0);
  } finally {
    closeClient(client);
    closeDfs(dfs);
    closeDfs(directDfs);
    stopMiniMRYarnCluster(miniMRYarnCluster);
    shutdownDFSCluster(cluster);
  }
}
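For context, a hedged sketch of the kind of socket factory the test wires in through hadoop.rpc.socket.factory.class.default. The real DummySocketFactory lives in the Hadoop test tree; this reconstruction only illustrates the pattern (delegate to the standard factory while recording that it was used), and the counter field is an assumption:

import java.io.IOException;
import java.net.Socket;
import org.apache.hadoop.net.StandardSocketFactory;

public class DummySocketFactory extends StandardSocketFactory {

  // Illustrative counter: lets a test assert that RPC connections really
  // went through this factory rather than the default one.
  private static volatile int socketsCreated = 0;

  @Override
  public Socket createSocket() throws IOException {
    socketsCreated++;
    return super.createSocket();
  }

  public static int getSocketsCreated() {
    return socketsCreated;
  }
}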
Use of org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster in project hadoop by apache.
The class TestMRTimelineEventHandling, method testTimelineServiceStartInMiniCluster().
@Test
public void testTimelineServiceStartInMiniCluster() throws Exception {
  Configuration conf = new YarnConfiguration();

  /*
   * The timeline service should not start if the config is set to false,
   * regardless of the value of MAPREDUCE_JOB_EMIT_TIMELINE_DATA.
   */
  conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false);
  conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);

  MiniMRYarnCluster cluster = null;
  try {
    cluster = new MiniMRYarnCluster(
        TestMRTimelineEventHandling.class.getSimpleName(), 1);
    cluster.init(conf);
    cluster.start();

    // Verify that the timeline service is not started.
    Assert.assertNull("Timeline Service should not have been started",
        cluster.getApplicationHistoryServer());
  } finally {
    if (cluster != null) {
      cluster.stop();
    }
  }

  conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false);
  conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, false);
  cluster = null;
  try {
    cluster = new MiniMRYarnCluster(
        TestJobHistoryEventHandler.class.getSimpleName(), 1);
    cluster.init(conf);
    cluster.start();

    // Verify that the timeline service is not started.
    Assert.assertNull("Timeline Service should not have been started",
        cluster.getApplicationHistoryServer());
  } finally {
    if (cluster != null) {
      cluster.stop();
    }
  }
}
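A hedged counterpart sketch: with the timeline service switched on, the mini cluster is expected to bring up an ApplicationHistoryServer. It reuses the same MiniMRYarnCluster and YarnConfiguration calls as the test above; the cluster name string is illustrative:

Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
MiniMRYarnCluster cluster = new MiniMRYarnCluster("timeline-enabled-sketch", 1);
try {
  cluster.init(conf);
  cluster.start();
  // With the service enabled, the history server should exist.
  Assert.assertNotNull("Timeline Service should have been started",
      cluster.getApplicationHistoryServer());
} finally {
  cluster.stop();
}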
Use of org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster in project hadoop by apache.
The class TestMRTimelineEventHandling, method testMapreduceJobTimelineServiceEnabled().
@Test
public void testMapreduceJobTimelineServiceEnabled() throws Exception {
  Configuration conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
  conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, false);

  MiniMRYarnCluster cluster = null;
  FileSystem fs = null;
  Path inDir = new Path(GenericTestUtils.getTempPath("input"));
  Path outDir = new Path(GenericTestUtils.getTempPath("output"));
  try {
    fs = FileSystem.get(conf);
    cluster = new MiniMRYarnCluster(
        TestMRTimelineEventHandling.class.getSimpleName(), 1);
    cluster.init(conf);
    cluster.start();
    conf.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
        MiniYARNCluster.getHostname() + ":"
            + cluster.getApplicationHistoryServer().getPort());
    TimelineStore ts = cluster.getApplicationHistoryServer().getTimelineStore();

    RunningJob job = UtilsForTests.runJobSucceed(new JobConf(conf), inDir, outDir);
    Assert.assertEquals(JobStatus.SUCCEEDED,
        job.getJobStatus().getState().getValue());
    TimelineEntities entities = ts.getEntities("MAPREDUCE_JOB", null, null,
        null, null, null, null, null, null, null);
    Assert.assertEquals(0, entities.getEntities().size());

    conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
    job = UtilsForTests.runJobSucceed(new JobConf(conf), inDir, outDir);
    Assert.assertEquals(JobStatus.SUCCEEDED,
        job.getJobStatus().getState().getValue());
    entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null,
        null, null, null, null);
    Assert.assertEquals(1, entities.getEntities().size());
    TimelineEntity tEntity = entities.getEntities().get(0);
    Assert.assertEquals(job.getID().toString(), tEntity.getEntityId());
  } finally {
    if (cluster != null) {
      cluster.stop();
    }
    deletePaths(fs, inDir, outDir);
  }

  conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
  conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
  cluster = null;
  try {
    cluster = new MiniMRYarnCluster(
        TestJobHistoryEventHandler.class.getSimpleName(), 1);
    cluster.init(conf);
    cluster.start();
    conf.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
        MiniYARNCluster.getHostname() + ":"
            + cluster.getApplicationHistoryServer().getPort());
    TimelineStore ts = cluster.getApplicationHistoryServer().getTimelineStore();

    conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, false);
    RunningJob job = UtilsForTests.runJobSucceed(new JobConf(conf), inDir, outDir);
    Assert.assertEquals(JobStatus.SUCCEEDED,
        job.getJobStatus().getState().getValue());
    TimelineEntities entities = ts.getEntities("MAPREDUCE_JOB", null, null,
        null, null, null, null, null, null, null);
    Assert.assertEquals(0, entities.getEntities().size());

    conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
    job = UtilsForTests.runJobSucceed(new JobConf(conf), inDir, outDir);
    Assert.assertEquals(JobStatus.SUCCEEDED,
        job.getJobStatus().getState().getValue());
    entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null,
        null, null, null, null);
    Assert.assertEquals(1, entities.getEntities().size());
    TimelineEntity tEntity = entities.getEntities().get(0);
    Assert.assertEquals(job.getID().toString(), tEntity.getEntityId());
  } finally {
    if (cluster != null) {
      cluster.stop();
    }
    deletePaths(fs, inDir, outDir);
  }
}
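The test relies on a private deletePaths helper for cleanup. A hedged reconstruction of what such a helper could look like; the varargs signature and the ignore-failures behavior are assumptions, not the original implementation:

private static void deletePaths(FileSystem fs, Path... paths) {
  if (fs == null) {
    return;
  }
  for (Path p : paths) {
    try {
      // Recursive delete of the temporary input/output directories.
      fs.delete(p, true);
    } catch (IOException e) {
      // Best-effort cleanup; never let it mask the real test outcome.
    }
  }
}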
Use of org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster in project hadoop by apache.
The class TestMiniMRChildTask, method setup().
@BeforeClass
public static void setup() throws IOException {
  // Create configuration, DFS, file system and mapred cluster.
  dfs = new MiniDFSCluster.Builder(conf).build();
  fileSys = dfs.getFileSystem();

  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
        + " not found. Not running test.");
    return;
  }

  if (mr == null) {
    mr = new MiniMRYarnCluster(TestMiniMRChildTask.class.getName());
    Configuration conf = new Configuration();
    mr.init(conf);
    mr.start();
  }

  // Copy MRAppJar and make it private. TODO: FIXME. This is a hack to
  // work around the absent public distributed cache.
  localFs.copyFromLocalFile(new Path(MiniMRYarnCluster.APPJAR), APP_JAR);
  localFs.setPermission(APP_JAR, new FsPermission("700"));
}
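A hedged sketch of the matching teardown (the original test class has its own; this version simply stops the MR mini cluster and shuts down the DFS cluster, assuming the same static mr and dfs fields used in setup()):

@AfterClass
public static void tearDown() {
  if (mr != null) {
    // Stops the MiniMRYarnCluster and its NodeManager/ResourceManager.
    mr.stop();
    mr = null;
  }
  if (dfs != null) {
    // Shuts down the MiniDFSCluster started in setup().
    dfs.shutdown();
    dfs = null;
  }
}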