Use of org.apache.hadoop.mapreduce.JobStatus in project hadoop by apache.
Class TestClientServiceDelegate, method testReconnectOnAMRestart.
@Test
public void testReconnectOnAMRestart() throws IOException {
  // Not applicable when the AM is unreachable from the client,
  // as instantiateAMProxy is not called at all.
  if (!isAMReachableFromClient) {
    return;
  }

  MRClientProtocol historyServerProxy = mock(MRClientProtocol.class);

  // The RM returns the AM1 url, null, null and the AM2 url on successive
  // invocations. The nulls simulate the window in which AM2 is still restarting.
  ResourceMgrDelegate rmDelegate = mock(ResourceMgrDelegate.class);
  try {
    when(rmDelegate.getApplicationReport(jobId.getAppId()))
        .thenReturn(getRunningApplicationReport("am1", 78))
        .thenReturn(getRunningApplicationReport(null, 0))
        .thenReturn(getRunningApplicationReport(null, 0))
        .thenReturn(getRunningApplicationReport("am2", 90));
  } catch (YarnException e) {
    throw new IOException(e);
  }

  // The first AM generation returns a report with jobName "jobName-firstGen"
  // and simulates an AM shutdown on the second invocation.
  GetJobReportResponse jobReportResponse1 = mock(GetJobReportResponse.class);
  when(jobReportResponse1.getJobReport())
      .thenReturn(MRBuilderUtils.newJobReport(jobId, "jobName-firstGen", "user",
          JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "anything", null, false, ""));
  MRClientProtocol firstGenAMProxy = mock(MRClientProtocol.class);
  when(firstGenAMProxy.getJobReport(any(GetJobReportRequest.class)))
      .thenReturn(jobReportResponse1)
      .thenThrow(new RuntimeException("AM is down!"));

  // The second AM generation returns a report with jobName "jobName-secondGen".
  GetJobReportResponse jobReportResponse2 = mock(GetJobReportResponse.class);
  when(jobReportResponse2.getJobReport())
      .thenReturn(MRBuilderUtils.newJobReport(jobId, "jobName-secondGen", "user",
          JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "anything", null, false, ""));
  MRClientProtocol secondGenAMProxy = mock(MRClientProtocol.class);
  when(secondGenAMProxy.getJobReport(any(GetJobReportRequest.class)))
      .thenReturn(jobReportResponse2);

  ClientServiceDelegate clientServiceDelegate =
      spy(getClientServiceDelegate(historyServerProxy, rmDelegate));
  // The first connection should go to AM1, the next to AM2. Further requests
  // should reuse the same proxy to AM2, so instantiateAMProxy should not be
  // called again.
  doReturn(firstGenAMProxy).doReturn(secondGenAMProxy)
      .when(clientServiceDelegate).instantiateAMProxy(any(InetSocketAddress.class));

  JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
  Assert.assertNotNull(jobStatus);
  Assert.assertEquals("jobName-firstGen", jobStatus.getJobName());

  jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
  Assert.assertNotNull(jobStatus);
  Assert.assertEquals("jobName-secondGen", jobStatus.getJobName());

  jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
  Assert.assertNotNull(jobStatus);
  Assert.assertEquals("jobName-secondGen", jobStatus.getJobName());

  verify(clientServiceDelegate, times(2))
      .instantiateAMProxy(any(InetSocketAddress.class));
}
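The restart simulation above hinges on Mockito's consecutive stubbing: each chained thenReturn/thenThrow answer is consumed by exactly one invocation, in order. A minimal, self-contained sketch of that pattern, using an illustrative Endpoint interface rather than any Hadoop type:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class ConsecutiveStubbingSketch {

  // Illustrative interface standing in for the RM delegate; not a Hadoop type.
  interface Endpoint {
    String amHost();
  }

  public static void main(String[] args) {
    Endpoint endpoint = mock(Endpoint.class);
    // One answer per invocation, in order: am1, two nulls while AM2 restarts, then am2.
    when(endpoint.amHost())
        .thenReturn("am1")
        .thenReturn(null)
        .thenReturn(null)
        .thenReturn("am2");

    System.out.println(endpoint.amHost()); // am1
    System.out.println(endpoint.amHost()); // null
    System.out.println(endpoint.amHost()); // null
    System.out.println(endpoint.amHost()); // am2
  }
}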
Use of org.apache.hadoop.mapreduce.JobStatus in project hadoop by apache.
Class TestClientServiceDelegate, method testUnknownAppInRM.
@Test
public void testUnknownAppInRM() throws Exception {
  MRClientProtocol historyServerProxy = mock(MRClientProtocol.class);
  when(historyServerProxy.getJobReport(getJobReportRequest()))
      .thenReturn(getJobReportResponse());
  ClientServiceDelegate clientServiceDelegate =
      getClientServiceDelegate(historyServerProxy, getRMDelegate());

  JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
  Assert.assertNotNull(jobStatus);
}
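The getRMDelegate(), getJobReportRequest() and getJobReportResponse() helpers are not part of this excerpt. A hedged sketch of how an RM delegate could be stubbed so that the RM reports the application as unknown and the delegate falls back to the history server; the helper name and the use of ApplicationNotFoundException are assumptions, not the test's actual code:

import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.mapred.ResourceMgrDelegate;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;

public class UnknownAppRMDelegateSketch {

  // Hypothetical helper: an RM that does not know the application at all.
  static ResourceMgrDelegate getUnknownAppRMDelegate() throws Exception {
    ResourceMgrDelegate rm = mock(ResourceMgrDelegate.class);
    when(rm.getApplicationReport(any(ApplicationId.class)))
        .thenThrow(new ApplicationNotFoundException("unknown application"));
    return rm;
  }
}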
Use of org.apache.hadoop.mapreduce.JobStatus in project hadoop by apache.
Class TestClientServiceDelegate, method testRetriesOnConnectionFailure.
@Test
public void testRetriesOnConnectionFailure() throws Exception {
  MRClientProtocol historyServerProxy = mock(MRClientProtocol.class);
  when(historyServerProxy.getJobReport(getJobReportRequest()))
      .thenThrow(new RuntimeException("1"))
      .thenThrow(new RuntimeException("2"))
      .thenReturn(getJobReportResponse());

  ResourceMgrDelegate rm = mock(ResourceMgrDelegate.class);
  when(rm.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId()))
      .thenReturn(null);

  ClientServiceDelegate clientServiceDelegate =
      getClientServiceDelegate(historyServerProxy, rm);
  JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
  Assert.assertNotNull(jobStatus);
  verify(historyServerProxy, times(3)).getJobReport(any(GetJobReportRequest.class));
}
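The three invocations verified above correspond to the delegate retrying twice before the third attempt succeeds. As an assumption about how the omitted getClientServiceDelegate() helper is configured, the client-side retry budget would typically be set through MRJobConfig.MR_CLIENT_MAX_RETRIES; a minimal configuration sketch:

import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class RetryConfigSketch {
  public static void main(String[] args) {
    // Sketch: allow up to three getJobReport attempts before the client gives up.
    // The value the omitted test helper actually uses is an assumption here.
    YarnConfiguration conf = new YarnConfiguration();
    conf.setInt(MRJobConfig.MR_CLIENT_MAX_RETRIES, 3);
    System.out.println(conf.getInt(MRJobConfig.MR_CLIENT_MAX_RETRIES, -1));
  }
}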
Use of org.apache.hadoop.mapreduce.JobStatus in project hadoop by apache.
Class ClientServiceDelegate, method getJobStatus.
public JobStatus getJobStatus(JobID oldJobID) throws IOException {
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
      TypeConverter.toYarn(oldJobID);
  GetJobReportRequest request =
      recordFactory.newRecordInstance(GetJobReportRequest.class);
  request.setJobId(jobId);
  JobReport report = ((GetJobReportResponse) invoke("getJobReport",
      GetJobReportRequest.class, request)).getJobReport();
  JobStatus jobStatus = null;
  if (report != null) {
    if (StringUtils.isEmpty(report.getJobFile())) {
      String jobFile = MRApps.getJobFile(conf, report.getUser(), oldJobID);
      report.setJobFile(jobFile);
    }
    String historyTrackingUrl = report.getTrackingUrl();
    String url = StringUtils.isNotEmpty(historyTrackingUrl)
        ? historyTrackingUrl : trackingUrl;
    jobStatus = TypeConverter.fromYarn(report, url);
  }
  return jobStatus;
}
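Because getJobStatus returns null when no report is available, callers have to guard against that. A caller-side sketch, illustrative rather than Hadoop's actual client code, that polls the delegate until the job reaches a terminal state or the report disappears:

import java.io.IOException;

import org.apache.hadoop.mapred.ClientServiceDelegate;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.JobStatus;

public final class JobStatusPollingSketch {

  // Illustrative helper: poll once a second until the job completes
  // (SUCCEEDED, FAILED or KILLED) or getJobStatus returns null.
  static JobStatus waitForCompletion(ClientServiceDelegate delegate, JobID jobId)
      throws IOException, InterruptedException {
    JobStatus status = delegate.getJobStatus(jobId);
    while (status != null && !status.isJobComplete()) {
      Thread.sleep(1000L);
      status = delegate.getJobStatus(jobId);
    }
    return status;
  }
}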
Use of org.apache.hadoop.mapreduce.JobStatus in project hadoop by apache.
Class YARNRunner, method killJob.
@Override
public void killJob(JobID arg0) throws IOException, InterruptedException {
  /* check if the status is not running, if not send kill to RM */
  JobStatus status = clientCache.getClient(arg0).getJobStatus(arg0);
  ApplicationId appId = TypeConverter.toYarn(arg0).getAppId();

  // get status from RM and return
  if (status == null) {
    killUnFinishedApplication(appId);
    return;
  }
  if (status.getState() != JobStatus.State.RUNNING) {
    killApplication(appId);
    return;
  }

  try {
    /* send a kill to the AM */
    clientCache.getClient(arg0).killJob(arg0);
    long currentTimeMillis = System.currentTimeMillis();
    long timeKillIssued = currentTimeMillis;
    long killTimeOut = conf.getLong(MRJobConfig.MR_AM_HARD_KILL_TIMEOUT_MS,
        MRJobConfig.DEFAULT_MR_AM_HARD_KILL_TIMEOUT_MS);
    while ((currentTimeMillis < timeKillIssued + killTimeOut)
        && !isJobInTerminalState(status)) {
      try {
        Thread.sleep(1000L);
      } catch (InterruptedException ie) {
        /* interrupted, just break */
        break;
      }
      currentTimeMillis = System.currentTimeMillis();
      status = clientCache.getClient(arg0).getJobStatus(arg0);
      if (status == null) {
        killUnFinishedApplication(appId);
        return;
      }
    }
  } catch (IOException io) {
    LOG.debug("Error when checking for application status", io);
  }

  if (status != null && !isJobInTerminalState(status)) {
    killApplication(appId);
  }
}
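The isJobInTerminalState helper is not part of this excerpt. A sketch of what such a check plausibly looks like, assuming "terminal" means succeeded, failed, or killed; the actual YARNRunner helper may differ:

import org.apache.hadoop.mapreduce.JobStatus;

public final class TerminalStateSketch {

  // Assumption: a job is terminal once it has succeeded, failed, or been killed.
  static boolean isJobInTerminalState(JobStatus status) {
    JobStatus.State state = status.getState();
    return state == JobStatus.State.KILLED
        || state == JobStatus.State.FAILED
        || state == JobStatus.State.SUCCEEDED;
  }
}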