Use of org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse in project hadoop by apache.
In the class ClientServiceDelegate, the method getLogFilePath retrieves a JobReport (through a GetJobReportResponse) and, for a completed job, builds the LogParams needed to locate the logs of either a specific task attempt or the last application master attempt:
public LogParams getLogFilePath(JobID oldJobID, TaskAttemptID oldTaskAttemptID) throws IOException {
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId = TypeConverter.toYarn(oldJobID);
  GetJobReportRequest request = recordFactory.newRecordInstance(GetJobReportRequest.class);
  request.setJobId(jobId);
  JobReport report =
      ((GetJobReportResponse) invoke("getJobReport", GetJobReportRequest.class, request))
          .getJobReport();
  if (EnumSet.of(JobState.SUCCEEDED, JobState.FAILED, JobState.KILLED, JobState.ERROR)
      .contains(report.getJobState())) {
    if (oldTaskAttemptID != null) {
      GetTaskAttemptReportRequest taRequest =
          recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
      taRequest.setTaskAttemptId(TypeConverter.toYarn(oldTaskAttemptID));
      TaskAttemptReport taReport =
          ((GetTaskAttemptReportResponse) invoke("getTaskAttemptReport",
              GetTaskAttemptReportRequest.class, taRequest)).getTaskAttemptReport();
      if (taReport.getContainerId() == null || taReport.getNodeManagerHost() == null) {
        throw new IOException("Unable to get log information for task: " + oldTaskAttemptID);
      }
      return new LogParams(
          taReport.getContainerId().toString(),
          taReport.getContainerId().getApplicationAttemptId().getApplicationId().toString(),
          NodeId.newInstance(taReport.getNodeManagerHost(), taReport.getNodeManagerPort()).toString(),
          report.getUser());
    } else {
      if (report.getAMInfos() == null || report.getAMInfos().size() == 0) {
        throw new IOException("Unable to get log information for job: " + oldJobID);
      }
      AMInfo amInfo = report.getAMInfos().get(report.getAMInfos().size() - 1);
      return new LogParams(
          amInfo.getContainerId().toString(),
          amInfo.getAppAttemptId().getApplicationId().toString(),
          NodeId.newInstance(amInfo.getNodeManagerHost(), amInfo.getNodeManagerPort()).toString(),
          report.getUser());
    }
  } else {
    throw new IOException("Cannot get log path for an in-progress job");
  }
}
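For context, a caller typically reaches this method through the MapReduce client APIs rather than invoking the delegate directly. The sketch below is a minimal, hypothetical usage example: the way the clientServiceDelegate instance is obtained, the ID strings, and the LogParams getter names (inferred from the constructor arguments above) are all assumptions, not code from the Hadoop sources.

// Hypothetical caller-side sketch: locating the logs of a finished task attempt.
JobID jobId = JobID.forName("job_1501234567890_0042");                    // illustrative ID
TaskAttemptID attemptId =
    TaskAttemptID.forName("attempt_1501234567890_0042_m_000000_0");       // illustrative ID
LogParams logParams = clientServiceDelegate.getLogFilePath(jobId, attemptId);
// LogParams carries everything the log tooling needs to find the aggregated logs.
System.out.println("container:   " + logParams.getContainerId());
System.out.println("application: " + logParams.getApplicationId());
System.out.println("node:        " + logParams.getNodeId());
System.out.println("owner:       " + logParams.getOwner());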
Use of org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse in project hadoop by apache.
In the class ClientServiceDelegate, the method getJobStatus retrieves a JobReport through a GetJobReportResponse and converts it into a JobStatus, filling in the job file and tracking URL when the report does not supply them:
public JobStatus getJobStatus(JobID oldJobID) throws IOException {
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId = TypeConverter.toYarn(oldJobID);
  GetJobReportRequest request = recordFactory.newRecordInstance(GetJobReportRequest.class);
  request.setJobId(jobId);
  JobReport report =
      ((GetJobReportResponse) invoke("getJobReport", GetJobReportRequest.class, request))
          .getJobReport();
  JobStatus jobStatus = null;
  if (report != null) {
    if (StringUtils.isEmpty(report.getJobFile())) {
      String jobFile = MRApps.getJobFile(conf, report.getUser(), oldJobID);
      report.setJobFile(jobFile);
    }
    String historyTrackingUrl = report.getTrackingUrl();
    String url = StringUtils.isNotEmpty(historyTrackingUrl) ? historyTrackingUrl : trackingUrl;
    jobStatus = TypeConverter.fromYarn(report, url);
  }
  return jobStatus;
}
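A similar hypothetical sketch for the status path: it assumes an existing ClientServiceDelegate instance and an illustrative job ID, and only demonstrates which JobStatus fields are populated from the JobReport carried in the GetJobReportResponse.

// Hypothetical caller-side sketch; how the delegate instance is created is assumed.
JobID jobId = JobID.forName("job_1501234567890_0042");   // illustrative ID
JobStatus status = clientServiceDelegate.getJobStatus(jobId);
if (status != null) {
  // State, tracking URL and job file all come from the JobReport fetched above.
  System.out.println("state:        " + status.getState());
  System.out.println("tracking URL: " + status.getTrackingUrl());
  System.out.println("job file:     " + status.getJobFile());
}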
Use of org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse in project hadoop by apache.
In the class NotRunningJob, the method getJobReport synthesizes a GetJobReportResponse from the YARN ApplicationReport, so that clients still receive a usable JobReport when no application master is running:
@Override
public GetJobReportResponse getJobReport(GetJobReportRequest request) throws IOException {
  JobReport jobReport = recordFactory.newRecordInstance(JobReport.class);
  jobReport.setJobId(request.getJobId());
  jobReport.setJobState(jobState);
  jobReport.setUser(applicationReport.getUser());
  jobReport.setStartTime(applicationReport.getStartTime());
  YarnApplicationState state = applicationReport.getYarnApplicationState();
  if (state == YarnApplicationState.KILLED || state == YarnApplicationState.FAILED
      || state == YarnApplicationState.FINISHED) {
    jobReport.setDiagnostics(applicationReport.getDiagnostics());
  }
  jobReport.setJobName(applicationReport.getName());
  jobReport.setTrackingUrl(applicationReport.getTrackingUrl());
  jobReport.setFinishTime(applicationReport.getFinishTime());
  GetJobReportResponse resp = recordFactory.newRecordInstance(GetJobReportResponse.class);
  resp.setJobReport(jobReport);
  return resp;
}
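Because NotRunningJob implements the same MRClientProtocol contract as a live application master, a caller can issue a GetJobReportRequest against it and read the synthesized report out of the GetJobReportResponse. A minimal sketch, assuming a notRunningJob instance and a YARN JobId are already available (neither is shown in this excerpt):

// Minimal round-trip sketch; the notRunningJob instance and jobId are assumed.
GetJobReportRequest request = Records.newRecord(GetJobReportRequest.class);
request.setJobId(jobId);   // e.g. TypeConverter.toYarn(oldJobID)
GetJobReportResponse response = notRunningJob.getJobReport(request);
JobReport report = response.getJobReport();
// These fields were populated above from the YARN ApplicationReport.
System.out.println("job:         " + report.getJobId());
System.out.println("state:       " + report.getJobState());
System.out.println("diagnostics: " + report.getDiagnostics());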
Use of org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse in project hadoop by apache.
In the class TestClientServiceDelegate, the method testRetriesOnAMConnectionFailures verifies that the client keeps retrying getJobReport against the application master after transient failures and does not consume its configured retry budget:
@Test
public void testRetriesOnAMConnectionFailures() throws Exception {
  if (!isAMReachableFromClient) {
    return;
  }
  ResourceMgrDelegate rm = mock(ResourceMgrDelegate.class);
  when(rm.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId()))
      .thenReturn(getRunningApplicationReport("am1", 78));
  // throw exception in 1st, 2nd, 3rd and 4th call of getJobReport, and
  // succeed in the 5th call.
  final MRClientProtocol amProxy = mock(MRClientProtocol.class);
  when(amProxy.getJobReport(any(GetJobReportRequest.class)))
      .thenThrow(new RuntimeException("11"))
      .thenThrow(new RuntimeException("22"))
      .thenThrow(new RuntimeException("33"))
      .thenThrow(new RuntimeException("44"))
      .thenReturn(getJobReportResponse());
  Configuration conf = new YarnConfiguration();
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  conf.setBoolean(MRJobConfig.JOB_AM_ACCESS_DISABLED, !isAMReachableFromClient);
  ClientServiceDelegate clientServiceDelegate =
      new ClientServiceDelegate(conf, rm, oldJobId, null) {

        @Override
        MRClientProtocol instantiateAMProxy(final InetSocketAddress serviceAddr)
            throws IOException {
          super.instantiateAMProxy(serviceAddr);
          return amProxy;
        }
      };
  JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
  Assert.assertNotNull(jobStatus);
  // assert maxClientRetry is not decremented.
  Assert.assertEquals(
      conf.getInt(MRJobConfig.MR_CLIENT_MAX_RETRIES, MRJobConfig.DEFAULT_MR_CLIENT_MAX_RETRIES),
      clientServiceDelegate.getMaxClientRetry());
  verify(amProxy, times(5)).getJobReport(any(GetJobReportRequest.class));
}
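The getJobReportResponse() helper stubbed into the final thenReturn(...) above is not part of this excerpt. Modeled on the history-server variant shown below, a minimal stand-in might look like the following; the field values and the use of a SUCCEEDED state are illustrative assumptions, not the actual Hadoop test code.

// Hypothetical stand-in for the getJobReportResponse() helper referenced in the test above.
private GetJobReportResponse getJobReportResponse() {
  GetJobReportResponse response = Records.newRecord(GetJobReportResponse.class);
  JobReport report = Records.newRecord(JobReport.class);
  report.setJobId(jobId);                  // the test class's JobId field is assumed here
  report.setJobState(JobState.SUCCEEDED);  // assumed terminal state for the stub
  response.setJobReport(report);
  return response;
}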
Use of org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse in project hadoop by apache.
In the class TestClientServiceDelegate, the method getJobReportResponseFromHistoryServer builds the stub GetJobReportResponse that the mocked history-server proxy returns in these tests:
private GetJobReportResponse getJobReportResponseFromHistoryServer() {
  GetJobReportResponse jobReportResponse = Records.newRecord(GetJobReportResponse.class);
  JobReport jobReport = Records.newRecord(JobReport.class);
  jobReport.setJobId(jobId);
  jobReport.setJobState(JobState.SUCCEEDED);
  jobReport.setMapProgress(1.0f);
  jobReport.setReduceProgress(1.0f);
  jobReport.setJobFile("TestJobFilePath");
  jobReport.setTrackingUrl("http://TestTrackingUrl");
  jobReportResponse.setJobReport(jobReport);
  return jobReportResponse;
}
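In the surrounding test class, a helper like this is typically served from a mocked history-server proxy so that ClientServiceDelegate can fall back to it. Below is a short wiring sketch in the same Mockito style as the test above; the historyServerProxy variable name is an assumption.

// Sketch: serving the stub response from a mocked history-server proxy.
MRClientProtocol historyServerProxy = mock(MRClientProtocol.class);
when(historyServerProxy.getJobReport(any(GetJobReportRequest.class)))
    .thenReturn(getJobReportResponseFromHistoryServer());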