Use of org.apache.hadoop.mapreduce.v2.api.records.Counters in project hadoop by apache.
The class JHEventHandlerForSigtermTest, method mockJob:
private Job mockJob() {
  Job mockJob = mock(Job.class);
  when(mockJob.getAllCounters()).thenReturn(new Counters());
  when(mockJob.getTotalMaps()).thenReturn(10);
  when(mockJob.getTotalReduces()).thenReturn(10);
  when(mockJob.getName()).thenReturn("mockjob");
  return mockJob;
}
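A brief usage sketch for this mock, assuming the usual JUnit and Mockito static imports; the test name and assertions below are illustrative additions, not part of the Hadoop test itself.

@Test
public void mockJobReturnsCannedValues() {
  // The stubbed getters return fixed values, so code under test sees a
  // consistent 10-map / 10-reduce job named "mockjob" with empty counters.
  Job job = mockJob();
  assertEquals("mockjob", job.getName());
  assertEquals(10, job.getTotalMaps());
  assertEquals(10, job.getTotalReduces());
  assertNotNull(job.getAllCounters());
}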
Use of org.apache.hadoop.mapreduce.v2.api.records.Counters in project hadoop by apache.
The class JHEventHandlerForSigtermTest, method testProcessDoneFilesOnLastAMRetry:
// For all event types, process the done files if this is the last AM retry.
@Test(timeout = 50000)
public void testProcessDoneFilesOnLastAMRetry() throws Exception {
  TestParams t = new TestParams(true);
  Configuration conf = new Configuration();
  JHEvenHandlerForTest realJheh = new JHEvenHandlerForTest(t.mockAppContext, 0);
  JHEvenHandlerForTest jheh = spy(realJheh);
  jheh.init(conf);
  EventWriter mockWriter = null;
  try {
    jheh.start();
    handleEvent(jheh, new JobHistoryEvent(t.jobId,
        new AMStartedEvent(t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1)));
    verify(jheh, times(0)).processDoneFiles(any(JobId.class));
    handleEvent(jheh, new JobHistoryEvent(t.jobId,
        new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0,
            JobStateInternal.ERROR.toString())));
    verify(jheh, times(1)).processDoneFiles(any(JobId.class));
    handleEvent(jheh, new JobHistoryEvent(t.jobId,
        new JobFinishedEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0,
            new Counters(), new Counters(), new Counters())));
    verify(jheh, times(2)).processDoneFiles(any(JobId.class));
    handleEvent(jheh, new JobHistoryEvent(t.jobId,
        new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0,
            JobStateInternal.FAILED.toString())));
    verify(jheh, times(3)).processDoneFiles(any(JobId.class));
    handleEvent(jheh, new JobHistoryEvent(t.jobId,
        new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0,
            JobStateInternal.KILLED.toString())));
    verify(jheh, times(4)).processDoneFiles(any(JobId.class));
    mockWriter = jheh.getEventWriter();
    verify(mockWriter, times(5)).write(any(HistoryEvent.class));
  } finally {
    jheh.stop();
    verify(mockWriter).close();
  }
}
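The handleEvent helper is not shown in this excerpt. A sketch of what it plausibly does, assuming the test subclass exposes the handler's event queue as Hadoop's TestJobHistoryEventHandler does: enqueue the event, then drain it synchronously so the verify() counts above are deterministic.

private void handleEvent(JHEvenHandlerForTest jheh, JobHistoryEvent event)
    throws InterruptedException {
  // Enqueue the event, then process it on the calling thread so each
  // subsequent verify(...) observes a settled handler state.
  jheh.handle(event);
  jheh.handleEvent(jheh.eventQueue.poll());
}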
Use of org.apache.hadoop.mapreduce.v2.api.records.Counters in project hadoop by apache.
The class TestTaskAttemptListenerImpl, method testCheckpointIDTracking:
@Test
public void testCheckpointIDTracking() throws IOException, InterruptedException {
  SystemClock clock = SystemClock.getInstance();
  org.apache.hadoop.mapreduce.v2.app.job.Task mockTask =
      mock(org.apache.hadoop.mapreduce.v2.app.job.Task.class);
  when(mockTask.canCommit(any(TaskAttemptId.class))).thenReturn(true);
  Job mockJob = mock(Job.class);
  when(mockJob.getTask(any(TaskId.class))).thenReturn(mockTask);
  Dispatcher dispatcher = mock(Dispatcher.class);
  @SuppressWarnings("unchecked")
  EventHandler<Event> ea = mock(EventHandler.class);
  when(dispatcher.getEventHandler()).thenReturn(ea);
  RMHeartbeatHandler rmHeartbeatHandler = mock(RMHeartbeatHandler.class);
  AppContext appCtx = mock(AppContext.class);
  when(appCtx.getJob(any(JobId.class))).thenReturn(mockJob);
  when(appCtx.getClock()).thenReturn(clock);
  when(appCtx.getEventHandler()).thenReturn(ea);
  JobTokenSecretManager secret = mock(JobTokenSecretManager.class);
  final TaskHeartbeatHandler hbHandler = mock(TaskHeartbeatHandler.class);
  when(appCtx.getEventHandler()).thenReturn(ea);
  CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
  policy.init(appCtx);
  TaskAttemptListenerImpl listener =
      new MockTaskAttemptListenerImpl(appCtx, secret, rmHeartbeatHandler, policy) {
        @Override
        protected void registerHeartbeatHandler(Configuration conf) {
          taskHeartbeatHandler = hbHandler;
        }
      };
  Configuration conf = new Configuration();
  conf.setBoolean(MRJobConfig.TASK_PREEMPTION, true);
  //conf.setBoolean("preemption.reduce", true);
  listener.init(conf);
  listener.start();
  TaskAttemptID tid = new TaskAttemptID("12345", 1, TaskType.REDUCE, 1, 0);
  List<Path> partialOut = new ArrayList<Path>();
  partialOut.add(new Path("/prev1"));
  partialOut.add(new Path("/prev2"));
  Counters counters = mock(Counters.class);
  final long CBYTES = 64L * 1024 * 1024;
  final long CTIME = 4344L;
  final Path CLOC = new Path("/test/1");
  Counter cbytes = mock(Counter.class);
  when(cbytes.getValue()).thenReturn(CBYTES);
  Counter ctime = mock(Counter.class);
  when(ctime.getValue()).thenReturn(CTIME);
  when(counters.findCounter(eq(EnumCounter.CHECKPOINT_BYTES))).thenReturn(cbytes);
  when(counters.findCounter(eq(EnumCounter.CHECKPOINT_MS))).thenReturn(ctime);
  // propagate a task status that contains a checkpoint ID
  TaskCheckpointID incid = new TaskCheckpointID(new FSCheckpointID(CLOC), partialOut, counters);
  listener.setCheckpointID(org.apache.hadoop.mapred.TaskID.downgrade(tid.getTaskID()), incid);
  // and try to get it back
  CheckpointID outcid = listener.getCheckpointID(tid.getTaskID());
  TaskCheckpointID tcid = (TaskCheckpointID) outcid;
  assertEquals(CBYTES, tcid.getCheckpointBytes());
  assertEquals(CTIME, tcid.getCheckpointTime());
  assertTrue(partialOut.containsAll(tcid.getPartialCommittedOutput()));
  assertTrue(tcid.getPartialCommittedOutput().containsAll(partialOut));
  // assert it worked
  assert outcid == incid;
  listener.stop();
}
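The round trip above boils down to a per-task map from TaskID to the most recent reported checkpoint. A minimal sketch of that contract (the class name CheckpointStore is hypothetical, not a Hadoop type):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.mapred.TaskID;
import org.apache.hadoop.mapreduce.checkpoint.CheckpointID;

// Hypothetical illustration of the behavior the test exercises:
// setCheckpointID stores the latest checkpoint per task, and
// getCheckpointID hands back the very same instance (hence outcid == incid).
class CheckpointStore {
  private final Map<TaskID, CheckpointID> ids = new ConcurrentHashMap<>();

  void setCheckpointID(TaskID tid, CheckpointID cid) {
    ids.put(tid, cid);
  }

  CheckpointID getCheckpointID(TaskID tid) {
    return ids.get(tid);
  }
}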
Use of org.apache.hadoop.mapreduce.v2.api.records.Counters in project hadoop by apache.
The class MockJobs, method newTaskAttempt:
public static TaskAttempt newTaskAttempt(TaskId tid, int i) {
  final TaskAttemptId taid = Records.newRecord(TaskAttemptId.class);
  taid.setTaskId(tid);
  taid.setId(i);
  final TaskAttemptReport report = newTaskAttemptReport(taid);
  return new TaskAttempt() {

    @Override
    public NodeId getNodeId() throws UnsupportedOperationException {
      throw new UnsupportedOperationException();
    }

    @Override
    public TaskAttemptId getID() {
      return taid;
    }

    @Override
    public TaskAttemptReport getReport() {
      return report;
    }

    @Override
    public long getLaunchTime() {
      return report.getStartTime();
    }

    @Override
    public long getFinishTime() {
      return report.getFinishTime();
    }

    @Override
    public int getShufflePort() {
      return ShuffleHandler.DEFAULT_SHUFFLE_PORT;
    }

    @Override
    public Counters getCounters() {
      if (report != null && report.getCounters() != null) {
        return new Counters(TypeConverter.fromYarn(report.getCounters()));
      }
      return null;
    }

    @Override
    public float getProgress() {
      return report.getProgress();
    }

    @Override
    public Phase getPhase() {
      return report.getPhase();
    }

    @Override
    public TaskAttemptState getState() {
      return report.getTaskAttemptState();
    }

    @Override
    public boolean isFinished() {
      switch (report.getTaskAttemptState()) {
        case SUCCEEDED:
        case FAILED:
        case KILLED:
          return true;
      }
      return false;
    }

    @Override
    public ContainerId getAssignedContainerID() {
      ApplicationAttemptId appAttemptId =
          ApplicationAttemptId.newInstance(taid.getTaskId().getJobId().getAppId(), 0);
      ContainerId id = ContainerId.newContainerId(appAttemptId, 0);
      return id;
    }

    @Override
    public String getNodeHttpAddress() {
      return "localhost:8042";
    }

    @Override
    public List<String> getDiagnostics() {
      return Lists.newArrayList(report.getDiagnosticInfo());
    }

    @Override
    public String getAssignedContainerMgrAddress() {
      return "localhost:9998";
    }

    @Override
    public long getShuffleFinishTime() {
      return report.getShuffleFinishTime();
    }

    @Override
    public long getSortFinishTime() {
      return report.getSortFinishTime();
    }

    @Override
    public String getNodeRackName() {
      return "/default-rack";
    }
  };
}
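A short driver for this factory, assuming the usual JUnit assertions; the hand-built IDs below are illustrative only (MockJobs provides its own ID factories), and the fully qualified TaskType avoids a clash with org.apache.hadoop.mapreduce.TaskType.

@Test
public void newTaskAttemptSmokeTest() {
  // Build a TaskId by hand; illustrative only.
  ApplicationId appId = ApplicationId.newInstance(1234L, 1);
  JobId jobId = Records.newRecord(JobId.class);
  jobId.setAppId(appId);
  jobId.setId(1);
  TaskId taskId = Records.newRecord(TaskId.class);
  taskId.setJobId(jobId);
  taskId.setId(1);
  taskId.setTaskType(org.apache.hadoop.mapreduce.v2.api.records.TaskType.MAP);
  TaskAttempt attempt = MockJobs.newTaskAttempt(taskId, 0);
  // Host and rack values are canned constants in the anonymous class.
  assertEquals("localhost:8042", attempt.getNodeHttpAddress());
  assertEquals("/default-rack", attempt.getNodeRackName());
  assertEquals(0, attempt.getID().getId());
}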
Use of org.apache.hadoop.mapreduce.v2.api.records.Counters in project hadoop by apache.
The class MockJobs, method getCounters:
public static Counters getCounters(Collection<Task> tasks) {
  List<Task> completedTasks = new ArrayList<Task>();
  for (Task task : tasks) {
    if (task.getCounters() != null) {
      completedTasks.add(task);
    }
  }
  Counters counters = new Counters();
  return JobImpl.incrTaskCounters(counters, completedTasks);
}
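For context, JobImpl.incrTaskCounters folds each task's counters into the accumulator and returns it. A sketch consistent with that method's signature in the same project (treat the body as a paraphrase, not a verbatim copy of JobImpl):

public static Counters incrTaskCounters(Counters counters, Collection<Task> tasks) {
  // Fold each task's counters into the running total and return it.
  for (Task task : tasks) {
    counters.incrAllCounters(task.getCounters());
  }
  return counters;
}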