Use of org.apache.hadoop.yarn.conf.YarnConfiguration in project Hadoop by Apache.
From the class TestMapReduceTrackingUriPlugin, the method testProducesHistoryServerUriForAppId:
@Test
public void testProducesHistoryServerUriForAppId() throws URISyntaxException {
  final String historyAddress = "example.net:424242";
  YarnConfiguration conf = new YarnConfiguration();
  conf.set(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS, historyAddress);
  MapReduceTrackingUriPlugin plugin = new MapReduceTrackingUriPlugin();
  plugin.setConf(conf);
  ApplicationId id = ApplicationId.newInstance(6384623L, 5);
  String jobSuffix = id.toString().replaceFirst("^application_", "job_");
  URI expected =
      new URI("http://" + historyAddress + "/jobhistory/job/" + jobSuffix);
  URI actual = plugin.getTrackingUri(id);
  assertEquals(expected, actual);
}
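As a minimal usage sketch outside JUnit (reusing only the APIs exercised above; the host name is a placeholder, and 19888 is the default mapreduce.jobhistory.webapp.address port), the plugin rewrites the application_ prefix of the application ID to job_ and prepends the configured history web address:

YarnConfiguration conf = new YarnConfiguration();
conf.set(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS, "historyhost:19888");
MapReduceTrackingUriPlugin plugin = new MapReduceTrackingUriPlugin();
plugin.setConf(conf);
// application_1317705587717_0001 maps to
// http://historyhost:19888/jobhistory/job/job_1317705587717_0001
URI historyUri =
    plugin.getTrackingUri(ApplicationId.newInstance(1317705587717L, 1));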
Use of org.apache.hadoop.yarn.conf.YarnConfiguration in project Hadoop by Apache.
From the class TestMRTimelineEventHandling, the method testMRTimelineEventHandling:
@Test
public void testMRTimelineEventHandling() throws Exception {
  Configuration conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
  conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
  MiniMRYarnCluster cluster = null;
  try {
    cluster = new MiniMRYarnCluster(
        TestMRTimelineEventHandling.class.getSimpleName(), 1);
    cluster.init(conf);
    cluster.start();
    conf.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
        MiniYARNCluster.getHostname() + ":"
            + cluster.getApplicationHistoryServer().getPort());
    TimelineStore ts =
        cluster.getApplicationHistoryServer().getTimelineStore();
    String localPathRoot =
        System.getProperty("test.build.data", "build/test/data");
    Path inDir = new Path(localPathRoot, "input");
    Path outDir = new Path(localPathRoot, "output");
    RunningJob job =
        UtilsForTests.runJobSucceed(new JobConf(conf), inDir, outDir);
    Assert.assertEquals(JobStatus.SUCCEEDED,
        job.getJobStatus().getState().getValue());
    TimelineEntities entities = ts.getEntities("MAPREDUCE_JOB", null, null,
        null, null, null, null, null, null, null);
    Assert.assertEquals(1, entities.getEntities().size());
    TimelineEntity tEntity = entities.getEntities().get(0);
    Assert.assertEquals(job.getID().toString(), tEntity.getEntityId());
    Assert.assertEquals("MAPREDUCE_JOB", tEntity.getEntityType());
    // Events are returned newest-first: the terminal event sits at index 0
    // and AM_STARTED, the oldest event, sits at the end of the list.
    Assert.assertEquals(EventType.AM_STARTED.toString(),
        tEntity.getEvents().get(tEntity.getEvents().size() - 1).getEventType());
    Assert.assertEquals(EventType.JOB_FINISHED.toString(),
        tEntity.getEvents().get(0).getEventType());
    job = UtilsForTests.runJobFail(new JobConf(conf), inDir, outDir);
    Assert.assertEquals(JobStatus.FAILED,
        job.getJobStatus().getState().getValue());
    entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null,
        null, null, null, null);
    Assert.assertEquals(2, entities.getEntities().size());
    tEntity = entities.getEntities().get(0);
    Assert.assertEquals(job.getID().toString(), tEntity.getEntityId());
    Assert.assertEquals("MAPREDUCE_JOB", tEntity.getEntityType());
    Assert.assertEquals(EventType.AM_STARTED.toString(),
        tEntity.getEvents().get(tEntity.getEvents().size() - 1).getEventType());
    Assert.assertEquals(EventType.JOB_FAILED.toString(),
        tEntity.getEvents().get(0).getEventType());
  } finally {
    if (cluster != null) {
      cluster.stop();
    }
  }
}
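The success and failure passes above repeat the same four assertions; a small helper (a sketch — this method is not part of the Hadoop test, and only the expected terminal event varies between the two passes) would keep them symmetric:

private static void verifyJobEntity(TimelineEntity tEntity, RunningJob job,
    String expectedTerminalEvent) {
  Assert.assertEquals(job.getID().toString(), tEntity.getEntityId());
  Assert.assertEquals("MAPREDUCE_JOB", tEntity.getEntityType());
  // The oldest event (AM start) sits at the end of the newest-first list.
  Assert.assertEquals(EventType.AM_STARTED.toString(),
      tEntity.getEvents().get(tEntity.getEvents().size() - 1).getEventType());
  Assert.assertEquals(expectedTerminalEvent,
      tEntity.getEvents().get(0).getEventType());
}

The success pass would then call verifyJobEntity(tEntity, job, EventType.JOB_FINISHED.toString()) and the failure pass verifyJobEntity(tEntity, job, EventType.JOB_FAILED.toString()).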
Use of org.apache.hadoop.yarn.conf.YarnConfiguration in project Hadoop by Apache.
From the class TestMRTimelineEventHandling, the method testMRNewTimelineServiceEventHandling:
@SuppressWarnings("deprecation")
@Test
public void testMRNewTimelineServiceEventHandling() throws Exception {
  LOG.info("testMRNewTimelineServiceEventHandling start.");
  String testDir = new File("target",
      getClass().getSimpleName() + "-test_dir").getAbsolutePath();
  String storageDir = testDir + File.separator + "timeline_service_data";
  Configuration conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
  // Enable the new (v2) timeline service.
  conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
  conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
      FileSystemTimelineWriterImpl.class, TimelineWriter.class);
  conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
  // Set the file system root directory for the timeline writer.
  conf.set(FileSystemTimelineWriterImpl.TIMELINE_SERVICE_STORAGE_DIR_ROOT,
      storageDir);
  // Enable aux-service based timeline collectors.
  conf.set(YarnConfiguration.NM_AUX_SERVICES, TIMELINE_AUX_SERVICE_NAME);
  conf.set(YarnConfiguration.NM_AUX_SERVICES + "." + TIMELINE_AUX_SERVICE_NAME
      + ".class", PerNodeTimelineCollectorsAuxService.class.getName());
  conf.setBoolean(YarnConfiguration.SYSTEM_METRICS_PUBLISHER_ENABLED, true);
  MiniMRYarnCluster cluster = null;
  try {
    cluster = new MiniMRYarnCluster(
        TestMRTimelineEventHandling.class.getSimpleName(), 1, true);
    cluster.init(conf);
    cluster.start();
    LOG.info("MiniMRYarnCluster started.");
    Path inDir = new Path(testDir, "input");
    Path outDir = new Path(testDir, "output");
    LOG.info("Run the 1st job, which should succeed.");
    JobConf successConf = new JobConf(conf);
    successConf.set("dummy_conf1", UtilsForTests.createConfigValue(51 * 1024));
    successConf.set("dummy_conf2", UtilsForTests.createConfigValue(51 * 1024));
    successConf.set("huge_dummy_conf1",
        UtilsForTests.createConfigValue(101 * 1024));
    successConf.set("huge_dummy_conf2",
        UtilsForTests.createConfigValue(101 * 1024));
    RunningJob job = UtilsForTests.runJobSucceed(successConf, inDir, outDir);
    Assert.assertEquals(JobStatus.SUCCEEDED,
        job.getJobStatus().getState().getValue());
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new Configuration(cluster.getConfig()));
    yarnClient.start();
    EnumSet<YarnApplicationState> appStates =
        EnumSet.allOf(YarnApplicationState.class);
    ApplicationId firstAppId = null;
    List<ApplicationReport> apps = yarnClient.getApplications(appStates);
    Assert.assertEquals(1, apps.size());
    ApplicationReport appReport = apps.get(0);
    firstAppId = appReport.getApplicationId();
    UtilsForTests.waitForAppFinished(job, cluster);
    checkNewTimelineEvent(firstAppId, appReport, storageDir);
    LOG.info("Run the 2nd job, which should fail.");
    job = UtilsForTests.runJobFail(new JobConf(conf), inDir, outDir);
    Assert.assertEquals(JobStatus.FAILED,
        job.getJobStatus().getState().getValue());
    apps = yarnClient.getApplications(appStates);
    Assert.assertEquals(2, apps.size());
    // Pick the report that belongs to the first application again.
    appReport = apps.get(0).getApplicationId().equals(firstAppId)
        ? apps.get(0) : apps.get(1);
    checkNewTimelineEvent(firstAppId, appReport, storageDir);
  } finally {
    if (cluster != null) {
      cluster.stop();
    }
    // Clean up the test directory.
    File testDirFolder = new File(testDir);
    if (testDirFolder.isDirectory()) {
      FileUtils.deleteDirectory(testDirFolder);
    }
  }
}
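Stripped of the MiniMRYarnCluster scaffolding, the timeline v2 wiring above reduces to a handful of configuration keys. A minimal sketch (the storage path is a placeholder, and TIMELINE_AUX_SERVICE_NAME is the aux-service name constant the test class defines):

Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
// Select timeline service v2 with a filesystem-backed writer, which is
// convenient for local runs and tests.
conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
    FileSystemTimelineWriterImpl.class, TimelineWriter.class);
conf.set(FileSystemTimelineWriterImpl.TIMELINE_SERVICE_STORAGE_DIR_ROOT,
    "/tmp/timeline_service_data");  // placeholder path
// Run a per-node timeline collector as a NodeManager auxiliary service.
conf.set(YarnConfiguration.NM_AUX_SERVICES, TIMELINE_AUX_SERVICE_NAME);
conf.set(YarnConfiguration.NM_AUX_SERVICES + "." + TIMELINE_AUX_SERVICE_NAME
    + ".class", PerNodeTimelineCollectorsAuxService.class.getName());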
Use of org.apache.hadoop.yarn.conf.YarnConfiguration in project Hadoop by Apache.
From the class TestLogsCLI, the method testUnknownApplicationId:
@Test(timeout = 5000L)
public void testUnknownApplicationId() throws Exception {
  Configuration conf = new YarnConfiguration();
  YarnClient mockYarnClient = createMockYarnClientUnknownApp();
  LogsCLI cli = new LogsCLIForTest(mockYarnClient);
  cli.setConf(conf);
  int exitCode = cli.run(new String[] { "-applicationId",
      ApplicationId.newInstance(1, 1).toString() });
  // Error, since the application is unknown and no logs exist for it.
  assertTrue(exitCode != 0);
  assertTrue(sysErrStream.toString().startsWith(
      "Unable to get ApplicationState"));
}
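createMockYarnClientUnknownApp is not shown here; a plausible sketch (an assumption, using Mockito's mock/doThrow/any static imports, and assuming the CLI surfaces ApplicationNotFoundException as the "Unable to get ApplicationState" error asserted above) is:

private YarnClient createMockYarnClientUnknownApp() throws Exception {
  // Sketch: stub a YarnClient whose report lookup fails for any application.
  YarnClient mockClient = mock(YarnClient.class);
  doThrow(new ApplicationNotFoundException("Unknown ApplicationId"))
      .when(mockClient).getApplicationReport(any(ApplicationId.class));
  return mockClient;
}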
Use of org.apache.hadoop.yarn.conf.YarnConfiguration in project Hadoop by Apache.
From the class TestLogsCLI, the method testHelpMessage:
@Test(timeout = 5000L)
public void testHelpMessage() throws Exception {
  Configuration conf = new YarnConfiguration();
  YarnClient mockYarnClient = createMockYarnClient(
      YarnApplicationState.FINISHED,
      UserGroupInformation.getCurrentUser().getShortUserName());
  LogsCLI dumper = new LogsCLIForTest(mockYarnClient);
  dumper.setConf(conf);
  int exitCode = dumper.run(new String[] {});
  assertTrue(exitCode == -1);
  // Build the expected help text verbatim and compare it against what the
  // CLI printed to stdout.
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintWriter pw = new PrintWriter(baos);
  pw.println("Retrieve logs for YARN applications.");
  pw.println("usage: yarn logs -applicationId <application ID> [OPTIONS]");
  pw.println();
  pw.println("general options are:");
  pw.println(" -am <AM Containers>                     Prints the AM Container logs for");
  pw.println("                                         this application. Specify");
  pw.println("                                         comma-separated value to get logs");
  pw.println("                                         for related AM Container. For");
  pw.println("                                         example, If we specify -am 1,2,");
  pw.println("                                         we will get the logs for the");
  pw.println("                                         first AM Container as well as the");
  pw.println("                                         second AM Container. To get logs");
  pw.println("                                         for all AM Containers, use -am");
  pw.println("                                         ALL. To get logs for the latest");
  pw.println("                                         AM Container, use -am -1. By");
  pw.println("                                         default, it will print all");
  pw.println("                                         available logs. Work with");
  pw.println("                                         -log_files to get only specific");
  pw.println("                                         logs.");
  pw.println(" -appOwner <Application Owner>           AppOwner (assumed to be current");
  pw.println("                                         user if not specified)");
  pw.println(" -containerId <Container ID>             ContainerId. By default, it will");
  pw.println("                                         print all available logs. Work");
  pw.println("                                         with -log_files to get only");
  pw.println("                                         specific logs. If specified, the");
  pw.println("                                         applicationId can be omitted");
  pw.println(" -help                                   Displays help for all commands.");
  pw.println(" -list_nodes                             Show the list of nodes that");
  pw.println("                                         successfully aggregated logs.");
  pw.println("                                         This option can only be used with");
  pw.println("                                         finished applications.");
  pw.println(" -log_files <Log File Name>              Specify comma-separated value to");
  pw.println("                                         get exact matched log files. Use");
  pw.println("                                         \"ALL\" or \"*\" to fetch all the log");
  pw.println("                                         files for the container.");
  pw.println(" -log_files_pattern <Log File Pattern>   Specify comma-separated value to");
  pw.println("                                         get matched log files by using");
  pw.println("                                         java regex. Use \".*\" to fetch all");
  pw.println("                                         the log files for the container.");
  pw.println(" -nodeAddress <Node Address>             NodeAddress in the format");
  pw.println("                                         nodename:port");
  pw.println(" -out <Local Directory>                  Local directory for storing");
  pw.println("                                         individual container logs. The");
  pw.println("                                         container logs will be stored");
  pw.println("                                         based on the node the container");
  pw.println("                                         ran on.");
  pw.println(" -show_application_log_info              Show the containerIds which");
  pw.println("                                         belong to the specific");
  pw.println("                                         Application. You can combine this");
  pw.println("                                         with --nodeAddress to get");
  pw.println("                                         containerIds for all the");
  pw.println("                                         containers on the specific");
  pw.println("                                         NodeManager.");
  pw.println(" -show_container_log_info                Show the container log metadata,");
  pw.println("                                         including log-file names, the");
  pw.println("                                         size of the log files. You can");
  pw.println("                                         combine this with --containerId");
  pw.println("                                         to get log metadata for the");
  pw.println("                                         specific container, or with");
  pw.println("                                         --nodeAddress to get log metadata");
  pw.println("                                         for all the containers on the");
  pw.println("                                         specific NodeManager.");
  pw.println(" -size <size>                            Prints the log file's first 'n'");
  pw.println("                                         bytes or the last 'n' bytes. Use");
  pw.println("                                         negative values as bytes to read");
  pw.println("                                         from the end and positive values");
  pw.println("                                         as bytes to read from the");
  pw.println("                                         beginning.");
  pw.close();
  String appReportStr = baos.toString("UTF-8");
  Assert.assertEquals(appReportStr, sysOutStream.toString());
}
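Both TestLogsCLI snippets assert against sysOutStream and sysErrStream, which the test class presumably wires up in a fixture. A typical sketch of that wiring (the field names match the usage above, but the @Before body is an assumption):

private ByteArrayOutputStream sysOutStream;
private ByteArrayOutputStream sysErrStream;

@Before
public void setUp() {
  // Redirect System.out/System.err so tests can assert on CLI output.
  sysOutStream = new ByteArrayOutputStream();
  sysErrStream = new ByteArrayOutputStream();
  System.setOut(new PrintStream(sysOutStream));
  System.setErr(new PrintStream(sysErrStream));
}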