Use of org.apache.hadoop.mapreduce.Counters in project hadoop by apache.
From the class TestTaskImpl, the method testCountersWithSpeculation:
@Test
public void testCountersWithSpeculation() {
  mockTask = new MockTaskImpl(jobId, partition, dispatcher.getEventHandler(),
      remoteJobConfFile, conf, taskAttemptListener, jobToken, credentials,
      clock, startCount, metrics, appContext, TaskType.MAP) {
    @Override
    protected int getMaxAttempts() {
      return 1;
    }
  };
  TaskId taskId = getNewTaskID();
  scheduleTaskAttempt(taskId);
  launchTaskAttempt(getLastAttempt().getAttemptId());
  updateLastAttemptState(TaskAttemptState.RUNNING);
  MockTaskAttemptImpl baseAttempt = getLastAttempt();

  // add a speculative attempt
  mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),
      TaskEventType.T_ADD_SPEC_ATTEMPT));
  launchTaskAttempt(getLastAttempt().getAttemptId());
  updateLastAttemptState(TaskAttemptState.RUNNING);
  MockTaskAttemptImpl specAttempt = getLastAttempt();
  assertEquals(2, taskAttempts.size());

  // give the speculative attempt a CPU counter value
  Counters specAttemptCounters = new Counters();
  Counter cpuCounter = specAttemptCounters.findCounter(TaskCounter.CPU_MILLISECONDS);
  cpuCounter.setValue(1000);
  specAttempt.setCounters(specAttemptCounters);

  // let the speculative attempt succeed while the base attempt is also at 1.0 progress
  commitTaskAttempt(specAttempt.getAttemptId());
  specAttempt.setProgress(1.0f);
  specAttempt.setState(TaskAttemptState.SUCCEEDED);
  mockTask.handle(new TaskTAttemptEvent(specAttempt.getAttemptId(),
      TaskEventType.T_ATTEMPT_SUCCEEDED));
  assertEquals(TaskState.SUCCEEDED, mockTask.getState());
  baseAttempt.setProgress(1.0f);

  // the task must report the counters of the successful (speculative) attempt
  Counters taskCounters = mockTask.getCounters();
  assertEquals("wrong counters for task", specAttemptCounters, taskCounters);
}
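For reference, a minimal standalone sketch of the Counters calls the test leans on: findCounter creates the named counter on first access, and setValue, increment, and getValue read and write it. The class name CountersDemo is illustrative, not from the Hadoop sources.

import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskCounter;

public class CountersDemo {
  public static void main(String[] args) {
    Counters counters = new Counters();
    // findCounter lazily creates the counter keyed by the TaskCounter enum
    Counter cpu = counters.findCounter(TaskCounter.CPU_MILLISECONDS);
    cpu.setValue(1000);  // overwrite the current value
    cpu.increment(500);  // add to the current value
    System.out.println(cpu.getName() + " = " + cpu.getValue());  // ... = 1500
  }
}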
Use of org.apache.hadoop.mapreduce.Counters in project hadoop by apache.
From the class CompletedTask, the method constructTaskReport:
private void constructTaskReport() {
  loadAllTaskAttempts();
  this.report = Records.newRecord(TaskReport.class);
  report.setTaskId(taskId);
  // the task's start time is the earliest launch time across all attempts
  long minLaunchTime = Long.MAX_VALUE;
  for (TaskAttempt attempt : attempts.values()) {
    minLaunchTime = Math.min(minLaunchTime, attempt.getLaunchTime());
  }
  minLaunchTime = minLaunchTime == Long.MAX_VALUE ? -1 : minLaunchTime;
  report.setStartTime(minLaunchTime);
  report.setFinishTime(taskInfo.getFinishTime());
  report.setTaskState(getState());
  report.setProgress(getProgress());
  // never expose a null Counters object to report consumers
  Counters counters = getCounters();
  if (counters == null) {
    counters = EMPTY_COUNTERS;
  }
  report.setRawCounters(counters);
  if (successfulAttempt != null) {
    report.setSuccessfulAttempt(successfulAttempt);
  }
  report.addAllDiagnostics(reportDiagnostics);
  report.addAllRunningAttempts(new ArrayList<TaskAttemptId>(attempts.keySet()));
}
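CompletedTask hands the report a single Counters object, but when per-attempt counters need to be summed into a task-level total, the incrementAllCounters method on Counters can do the aggregation. A minimal sketch under that assumption (not taken from CompletedTask itself):

import java.util.Arrays;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskCounter;

public class CounterAggregationDemo {
  public static void main(String[] args) {
    Counters attempt1 = new Counters();
    attempt1.findCounter(TaskCounter.CPU_MILLISECONDS).setValue(700);
    Counters attempt2 = new Counters();
    attempt2.findCounter(TaskCounter.CPU_MILLISECONDS).setValue(300);

    // incrementAllCounters adds every counter in the argument to this instance
    Counters total = new Counters();
    for (Counters c : Arrays.asList(attempt1, attempt2)) {
      total.incrementAllCounters(c);
    }
    // prints 1000
    System.out.println(total.findCounter(TaskCounter.CPU_MILLISECONDS).getValue());
  }
}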
Use of org.apache.hadoop.mapreduce.Counters in project hadoop by apache.
From the class TestJobHistoryParsing, the method testMultipleFailedTasks:
@Test
public void testMultipleFailedTasks() throws Exception {
  JobHistoryParser parser =
      new JobHistoryParser(Mockito.mock(FSDataInputStream.class));
  EventReader reader = Mockito.mock(EventReader.class);
  // Hack!
  final AtomicInteger numEventsRead = new AtomicInteger(0);
  final org.apache.hadoop.mapreduce.TaskType taskType =
      org.apache.hadoop.mapreduce.TaskType.MAP;
  final TaskID[] tids = new TaskID[2];
  final JobID jid = new JobID("1", 1);
  tids[0] = new TaskID(jid, taskType, 0);
  tids[1] = new TaskID(jid, taskType, 1);
  Mockito.when(reader.getNextEvent()).thenAnswer(new Answer<HistoryEvent>() {
    public HistoryEvent answer(InvocationOnMock invocation) throws IOException {
      // send two task start and two task fail events for tasks 0 and 1
      int eventId = numEventsRead.getAndIncrement();
      TaskID tid = tids[eventId & 0x1];
      if (eventId < 2) {
        return new TaskStartedEvent(tid, 0, taskType, "");
      }
      if (eventId < 4) {
        TaskFailedEvent tfe = new TaskFailedEvent(tid, 0, taskType,
            "failed", "FAILED", null, new Counters());
        tfe.setDatum(tfe.getDatum());
        return tfe;
      }
      if (eventId < 5) {
        JobUnsuccessfulCompletionEvent juce = new JobUnsuccessfulCompletionEvent(
            jid, 100L, 2, 0, "JOB_FAILED",
            Collections.singletonList("Task failed: " + tids[0].toString()));
        return juce;
      }
      return null;
    }
  });
  JobInfo info = parser.parse(reader);
  assertTrue("Task 0 not implicated",
      info.getErrorInfo().contains(tids[0].toString()));
}
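The TaskFailedEvent above carries a Counters payload. For readers unfamiliar with the type: Counters is Iterable over its CounterGroups, and each group is Iterable over its Counters, so the whole structure can be walked with nested for-each loops. A minimal sketch (PrintCounters is an illustrative name):

import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.CounterGroup;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskCounter;

public class PrintCounters {
  public static void main(String[] args) {
    Counters counters = new Counters();
    counters.findCounter(TaskCounter.MAP_INPUT_RECORDS).increment(42);
    // walk every group and every counter inside it
    for (CounterGroup group : counters) {
      System.out.println(group.getDisplayName());
      for (Counter counter : group) {
        System.out.println("  " + counter.getDisplayName() + " = " + counter.getValue());
      }
    }
  }
}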
Use of org.apache.hadoop.mapreduce.Counters in project hadoop by apache.
From the class TestMRJobs, the method verifySleepJobCounters:
protected void verifySleepJobCounters(Job job) throws InterruptedException,
    IOException {
  Counters counters = job.getCounters();
  Assert.assertEquals(3,
      counters.findCounter(JobCounter.OTHER_LOCAL_MAPS).getValue());
  Assert.assertEquals(3,
      counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS).getValue());
  Assert.assertEquals(numSleepReducers,
      counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES).getValue());
}
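The assertions above read framework-maintained JobCounter values from the finished job; user code feeds custom counters through the same machinery via the task context. A hedged sketch of the mapper side (CountingMapper and the RECORDS_SEEN enum are illustrative, not part of the Hadoop tests):

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class CountingMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
  // a custom counter enum; the framework creates and aggregates it automatically
  enum MyCounters { RECORDS_SEEN }

  @Override
  protected void map(LongWritable key, Text value, Context context)
      throws IOException, InterruptedException {
    context.getCounter(MyCounters.RECORDS_SEEN).increment(1);
    context.write(value, new LongWritable(1));
  }
}

On the client, the aggregated value would then be read back with job.getCounters().findCounter(MyCounters.RECORDS_SEEN).getValue(), exactly as the built-in counters are checked above.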
Use of org.apache.hadoop.mapreduce.Counters in project hadoop by apache.
From the class TestUberAM, the method verifyRandomWriterCounters:
@Override
protected void verifyRandomWriterCounters(Job job) throws InterruptedException,
    IOException {
  super.verifyRandomWriterCounters(job);
  Counters counters = job.getCounters();
  Assert.assertEquals(3,
      counters.findCounter(JobCounter.NUM_UBER_SUBMAPS).getValue());
  Assert.assertEquals(3,
      counters.findCounter(JobCounter.TOTAL_LAUNCHED_UBERTASKS).getValue());
}