Usage of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in the Apache Hadoop project:
class TestNetworkedJob, method testNetworkedJob.
/**
 * End-to-end test of the deprecated {@code JobClient} / {@code NetworkedJob}
 * API against a {@code MiniMRClientCluster} configured with the capacity
 * scheduler. Submits a map-only identity job, then exercises the job getters,
 * task reports, {@code ClusterStatus} (including its Writable round-trip),
 * queue information, and queue ACLs.
 * @throws Exception if the mini cluster, file system, or job submission fails
 */
@SuppressWarnings("deprecation")
@Test(timeout = 500000)
public void testNetworkedJob() throws Exception {
// mock creation
MiniMRClientCluster mr = null;
FileSystem fileSys = null;
try {
mr = createMiniClusterWithCapacityScheduler();
JobConf job = new JobConf(mr.getConfig());
fileSys = FileSystem.get(job);
// Start from a clean test directory and create a small single-line input file.
fileSys.delete(testDir, true);
FSDataOutputStream out = fileSys.create(inFile, true);
out.writeBytes("This is a test file");
out.close();
FileInputFormat.setInputPaths(job, inFile);
FileOutputFormat.setOutputPath(job, outDir);
job.setInputFormat(TextInputFormat.class);
job.setOutputFormat(TextOutputFormat.class);
job.setMapperClass(IdentityMapper.class);
job.setReducerClass(IdentityReducer.class);
// Map-only job: no reduce phase, so reduceProgress stays at 0.
job.setNumReduceTasks(0);
JobClient client = new JobClient(mr.getConfig());
RunningJob rj = client.submitJob(job);
JobID jobId = rj.getID();
NetworkedJob runningJob = (NetworkedJob) client.getJob(jobId);
runningJob.setJobPriority(JobPriority.HIGH.name());
// test getters
// "0001" is the sequence number of the first job submitted to the cluster.
assertTrue(runningJob.getConfiguration().toString().endsWith("0001/job.xml"));
assertEquals(jobId, runningJob.getID());
assertEquals(jobId.toString(), runningJob.getJobID());
// "N/A" because the job name is not resolved at this point in the job's
// lifecycle — TODO confirm against NetworkedJob.getJobName().
assertEquals("N/A", runningJob.getJobName());
assertTrue(runningJob.getJobFile().endsWith(".staging/" + runningJob.getJobID() + "/job.xml"));
assertTrue(runningJob.getTrackingURL().length() > 0);
// The job was just submitted, so all progress counters are still zero.
assertTrue(runningJob.mapProgress() == 0.0f);
assertTrue(runningJob.reduceProgress() == 0.0f);
assertTrue(runningJob.cleanupProgress() == 0.0f);
assertTrue(runningJob.setupProgress() == 0.0f);
TaskCompletionEvent[] tce = runningJob.getTaskCompletionEvents(0);
assertEquals(tce.length, 0);
assertEquals("", runningJob.getHistoryUrl());
assertFalse(runningJob.isRetired());
assertEquals("", runningJob.getFailureInfo());
assertEquals("N/A", runningJob.getJobStatus().getJobName());
assertEquals(0, client.getMapTaskReports(jobId).length);
// JOB_SETUP and JOB_CLEANUP are not recognized task types in YARN, so these
// calls are expected to throw. NOTE(review): if no exception is thrown the
// try/catch passes silently — consider adding fail() after each call.
try {
client.getSetupTaskReports(jobId);
} catch (YarnRuntimeException e) {
assertEquals("Unrecognized task type: JOB_SETUP", e.getMessage());
}
try {
client.getCleanupTaskReports(jobId);
} catch (YarnRuntimeException e) {
assertEquals("Unrecognized task type: JOB_CLEANUP", e.getMessage());
}
assertEquals(0, client.getReduceTaskReports(jobId).length);
// test ClusterStatus
ClusterStatus status = client.getClusterStatus(true);
assertEquals(2, status.getActiveTrackerNames().size());
// These blacklist-related methods are not implemented for YARN and always
// return an empty collection or zero.
assertEquals(0, status.getBlacklistedTrackers());
assertEquals(0, status.getBlacklistedTrackerNames().size());
assertEquals(0, status.getBlackListedTrackersInfo().size());
assertEquals(JobTrackerStatus.RUNNING, status.getJobTrackerStatus());
assertEquals(1, status.getMapTasks());
assertEquals(20, status.getMaxMapTasks());
assertEquals(4, status.getMaxReduceTasks());
assertEquals(0, status.getNumExcludedNodes());
assertEquals(1, status.getReduceTasks());
assertEquals(2, status.getTaskTrackers());
assertEquals(0, status.getTTExpiryInterval());
assertEquals(JobTrackerStatus.RUNNING, status.getJobTrackerStatus());
assertEquals(0, status.getGraylistedTrackers());
// Round-trip ClusterStatus through its Writable serialization and verify
// the key fields survive write/readFields.
ByteArrayOutputStream dataOut = new ByteArrayOutputStream();
status.write(new DataOutputStream(dataOut));
ClusterStatus status2 = new ClusterStatus();
status2.readFields(new DataInputStream(new ByteArrayInputStream(dataOut.toByteArray())));
assertEquals(status.getActiveTrackerNames(), status2.getActiveTrackerNames());
assertEquals(status.getBlackListedTrackersInfo(), status2.getBlackListedTrackersInfo());
assertEquals(status.getMapTasks(), status2.getMapTasks());
// Verify the task-output filter can be stored in and read back from the conf.
JobClient.setTaskOutputFilter(job, TaskStatusFilter.ALL);
assertEquals(TaskStatusFilter.ALL, JobClient.getTaskOutputFilter(job));
// runningJob.setJobPriority(JobPriority.HIGH.name());
// Default map/reduce slot counts reported by the mini cluster.
assertEquals(20, client.getDefaultMaps());
assertEquals(4, client.getDefaultReduces());
assertEquals("jobSubmitDir", client.getSystemDir().getName());
// test queue information
JobQueueInfo[] rootQueueInfo = client.getRootQueues();
assertEquals(1, rootQueueInfo.length);
assertEquals("default", rootQueueInfo[0].getQueueName());
JobQueueInfo[] qinfo = client.getQueues();
assertEquals(1, qinfo.length);
assertEquals("default", qinfo[0].getQueueName());
assertEquals(0, client.getChildQueues("default").length);
assertEquals(1, client.getJobsFromQueue("default").length);
assertTrue(client.getJobsFromQueue("default")[0].getJobFile().endsWith("/job.xml"));
JobQueueInfo qi = client.getQueueInfo("default");
assertEquals("default", qi.getQueueName());
assertEquals("running", qi.getQueueState());
// The capacity scheduler exposes ACLs for both the root and default queues.
QueueAclsInfo[] aai = client.getQueueAclsForCurrentUser();
assertEquals(2, aai.length);
assertEquals("root", aai[0].getQueueName());
assertEquals("default", aai[1].getQueueName());
// test JobClient
// The following asserts read JobStatus twice and ensure the returned
// JobStatus objects correspond to the same Job.
assertEquals("Expected matching JobIDs", jobId, client.getJob(jobId).getJobStatus().getJobID());
assertEquals("Expected matching startTimes", rj.getJobStatus().getStartTime(), client.getJob(jobId).getJobStatus().getStartTime());
} finally {
// Clean up HDFS test data and shut the mini cluster down even on failure.
if (fileSys != null) {
fileSys.delete(testDir, true);
}
if (mr != null) {
mr.stop();
}
}
}
Usage of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in the Apache Hadoop project:
class WebServer, method serviceStart.
/**
 * Starts the NodeManager web application.
 * <p>
 * Resolves the web app bind address, optionally enables the CORS filter,
 * ensures the Hadoop authentication filter initializer is registered (so
 * that "user.name" in request URLs is parsed to identify the HTTP user),
 * and then starts the web app with SPNEGO, CSRF, and XFS protection keys.
 *
 * @throws YarnRuntimeException if the web app fails to start
 * @throws Exception if the superclass start fails
 */
@Override
protected void serviceStart() throws Exception {
  Configuration conf = getConfig();
  String bindAddress = WebAppUtils.getWebAppBindURL(conf,
      YarnConfiguration.NM_BIND_HOST,
      WebAppUtils.getNMWebAppURLWithoutScheme(conf));
  boolean enableCors = conf.getBoolean(
      YarnConfiguration.NM_WEBAPP_ENABLE_CORS_FILTER,
      YarnConfiguration.DEFAULT_NM_WEBAPP_ENABLE_CORS_FILTER);
  if (enableCors) {
    // Use the already-captured conf (was a redundant getConfig() call).
    conf.setBoolean(HttpCrossOriginFilterInitializer.PREFIX
        + HttpCrossOriginFilterInitializer.ENABLED_SUFFIX, true);
  }
  // Always load pseudo authentication filter to parse "user.name" in an URL
  // to identify a HTTP request's user.
  boolean hasHadoopAuthFilterInitializer = false;
  String filterInitializerConfKey = "hadoop.http.filter.initializers";
  Class<?>[] initializersClasses = conf.getClasses(filterInitializerConfKey);
  List<String> targets = new ArrayList<String>();
  if (initializersClasses != null) {
    for (Class<?> initializer : initializersClasses) {
      if (initializer.getName().equals(
          AuthenticationFilterInitializer.class.getName())) {
        hasHadoopAuthFilterInitializer = true;
        break;
      }
      targets.add(initializer.getName());
    }
  }
  if (!hasHadoopAuthFilterInitializer) {
    // Append the auth filter initializer, preserving already-configured ones.
    targets.add(AuthenticationFilterInitializer.class.getName());
    conf.set(filterInitializerConfKey, StringUtils.join(",", targets));
  }
  LOG.info("Instantiating NMWebApp at " + bindAddress);
  try {
    this.webApp =
        WebApps.$for("node", Context.class, this.nmContext, "ws")
            .at(bindAddress)
            .with(conf)
            .withHttpSpnegoPrincipalKey(
                YarnConfiguration.NM_WEBAPP_SPNEGO_USER_NAME_KEY)
            .withHttpSpnegoKeytabKey(
                YarnConfiguration.NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY)
            .withCSRFProtection(YarnConfiguration.NM_CSRF_PREFIX)
            .withXFSProtection(YarnConfiguration.NM_XFS_PREFIX)
            .start(this.nmWebApp);
    // Port 0 may have been requested; record the actual bound port.
    this.port = this.webApp.httpServer().getConnectorAddress(0).getPort();
  } catch (Exception e) {
    String msg = "NMWebapps failed to start.";
    LOG.error(msg, e);
    // Preserve the cause so the startup failure is diagnosable.
    throw new YarnRuntimeException(msg, e);
  }
  super.serviceStart();
}
Usage of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in the Apache Hadoop project:
class TestLocalDirsHandlerService, method testValidPathsDirHandlerService.
/**
 * Verifies that LocalDirsHandlerService refuses to initialize when one of
 * the configured NM local dirs uses a non-local (hdfs://) URI, and that the
 * service is left in the STOPPED state afterwards.
 */
@Test
public void testValidPathsDirHandlerService() throws Exception {
  Configuration conf = new YarnConfiguration();
  // One valid file:// local dir and one invalid hdfs:// local dir.
  String validLocalDir = new File("file:///" + testDir, "localDir1").getPath();
  String invalidLocalDir = new File("hdfs:///" + testDir, "localDir2").getPath();
  conf.set(YarnConfiguration.NM_LOCAL_DIRS, validLocalDir + "," + invalidLocalDir);
  String validLogDir = new File("file:///" + testDir, "logDir1").getPath();
  conf.set(YarnConfiguration.NM_LOG_DIRS, validLogDir);
  LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
  try {
    dirsHandler.init(conf);
    Assert.fail("Service should have thrown an exception due to wrong URI");
  } catch (YarnRuntimeException expected) {
    // Expected: the hdfs:// path is not a valid local directory URI.
  }
  Assert.assertEquals("Service should not be inited",
      STATE.STOPPED, dirsHandler.getServiceState());
  dirsHandler.close();
}
Usage of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in the Apache Hadoop project:
class LeveldbRMStateStore, method createAttemptState.
/**
 * Parses an application attempt state record loaded from the leveldb store.
 *
 * @param itemName the store key, expected to be the attempt id in string form
 * @param data the serialized ApplicationAttemptStateDataProto bytes
 * @return the deserialized attempt state
 * @throws IOException if the protobuf bytes cannot be parsed
 * @throws YarnRuntimeException if the key and the stored attempt id disagree
 */
private ApplicationAttemptStateData createAttemptState(String itemName,
    byte[] data) throws IOException {
  ApplicationAttemptId expectedAttemptId =
      ApplicationAttemptId.fromString(itemName);
  ApplicationAttemptStateDataPBImpl parsedState =
      new ApplicationAttemptStateDataPBImpl(
          ApplicationAttemptStateDataProto.parseFrom(data));
  ApplicationAttemptId storedAttemptId = parsedState.getAttemptId();
  // Guard against a corrupted store: the key must match the payload.
  if (!expectedAttemptId.equals(storedAttemptId)) {
    throw new YarnRuntimeException("The database entry for "
        + expectedAttemptId + " contains data for " + storedAttemptId);
  }
  return parsedState;
}
Usage of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in the Apache Hadoop project:
class MemoryRMStateStore, method removeApplicationStateInternal.
/**
 * Removes the stored state for the given application from the in-memory map.
 *
 * @param appState state whose application id identifies the entry to remove
 * @throws YarnRuntimeException if no state is stored for that application
 */
@Override
public synchronized void removeApplicationStateInternal(
    ApplicationStateData appState) throws Exception {
  final ApplicationId appId =
      appState.getApplicationSubmissionContext().getApplicationId();
  // Map.remove returns null when the key was absent.
  if (state.appState.remove(appId) == null) {
    throw new YarnRuntimeException("Removing non-existing application state");
  }
}
Aggregations