Use of org.apache.hadoop.mapreduce.v2.app.AppContext in project hadoop by apache.
From the class TestTaskAttemptListenerImpl, method testGetTask.
@Test(timeout = 5000)
public void testGetTask() throws IOException {
  AppContext appCtx = mock(AppContext.class);
  JobTokenSecretManager secret = mock(JobTokenSecretManager.class);
  RMHeartbeatHandler rmHeartbeatHandler = mock(RMHeartbeatHandler.class);
  TaskHeartbeatHandler hbHandler = mock(TaskHeartbeatHandler.class);
  Dispatcher dispatcher = mock(Dispatcher.class);
  @SuppressWarnings("unchecked")
  EventHandler<Event> ea = mock(EventHandler.class);
  when(dispatcher.getEventHandler()).thenReturn(ea);
  when(appCtx.getEventHandler()).thenReturn(ea);
  CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
  policy.init(appCtx);
  MockTaskAttemptListenerImpl listener = new MockTaskAttemptListenerImpl(
      appCtx, secret, rmHeartbeatHandler, hbHandler, policy);
  Configuration conf = new Configuration();
  listener.init(conf);
  listener.start();
  JVMId id = new JVMId("foo", 1, true, 1);
  WrappedJvmID wid = new WrappedJvmID(id.getJobId(), id.isMap, id.getId());

  // Verify ask before registration: the JVM ID has not been registered
  // yet, so the JVM should be told to die.
  JvmContext context = new JvmContext();
  context.jvmId = id;
  JvmTask result = listener.getTask(context);
  assertNotNull(result);
  assertTrue(result.shouldDie);

  // Verify ask after registration but before launch: don't kill,
  // the result should be null.
  TaskAttemptId attemptID = mock(TaskAttemptId.class);
  Task task = mock(Task.class);
  listener.registerPendingTask(task, wid);
  result = listener.getTask(context);
  assertNull(result);
  // Unregister for more testing.
  listener.unregister(attemptID, wid);

  // Verify ask after registration and launch.
  listener.registerPendingTask(task, wid);
  listener.registerLaunchedTask(attemptID, wid);
  verify(hbHandler).register(attemptID);
  result = listener.getTask(context);
  assertNotNull(result);
  assertFalse(result.shouldDie);
  // Don't unregister yet: asking a second time for the same JVM
  // should tell it to die.
  result = listener.getTask(context);
  assertNotNull(result);
  assertTrue(result.shouldDie);
  listener.unregister(attemptID, wid);

  // Verify ask after unregistration.
  result = listener.getTask(context);
  assertNotNull(result);
  assertTrue(result.shouldDie);
  listener.stop();

  // Test JVMId parsing.
  JVMId jvmid = JVMId.forName("jvm_001_002_m_004");
  assertNotNull(jvmid);
  try {
    JVMId.forName("jvm_001_002_m_004_006");
    fail();
  } catch (IllegalArgumentException e) {
    assertEquals("TaskId string : jvm_001_002_m_004_006 is not properly formed",
        e.getMessage());
  }
}
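The tail of the test exercises JVMId.forName, which parses identifiers of the form jvm_<jobtrackerID>_<jobID>_<m|r>_<jvmID>. A minimal standalone sketch of the same round trip (the class name is illustrative, not from Hadoop):

import org.apache.hadoop.mapred.JVMId;

public class JvmIdParseSketch {
  public static void main(String[] args) {
    // Exactly four underscore-separated fields after the "jvm" prefix
    // parse cleanly, as the test's first forName call shows.
    JVMId id = JVMId.forName("jvm_001_002_m_004");
    System.out.println(id.getId()); // 4: the trailing field is the JVM number

    // A fifth field makes the string malformed, as the test asserts.
    try {
      JVMId.forName("jvm_001_002_m_004_006");
    } catch (IllegalArgumentException expected) {
      System.out.println(expected.getMessage());
    }
  }
}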
Use of org.apache.hadoop.mapreduce.v2.app.AppContext in project hadoop by apache.
From the class TaskSpeculationPredicate, method canSpeculate.
boolean canSpeculate(AppContext context, TaskId taskID) {
  // This class rejects speculating any task that already has speculations,
  // or isn't running. Subclasses should call
  // TaskSpeculationPredicate.canSpeculate(...), but can be even more
  // restrictive.
  JobId jobID = taskID.getJobId();
  Job job = context.getJob(jobID);
  Task task = job.getTask(taskID);
  return task.getAttempts().size() == 1;
}
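The comment invites subclasses to call the base check and then narrow it further. A hypothetical subclass sketch (the class name and the map-only restriction are illustrative and not part of Hadoop; it is placed in the speculate package because canSpeculate is package-private):

package org.apache.hadoop.mapreduce.v2.app.speculate;

import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;

// Hypothetical: keeps the base "no prior speculation" check and
// additionally refuses to speculate reduce tasks.
class MapOnlySpeculationPredicate extends TaskSpeculationPredicate {

  @Override
  boolean canSpeculate(AppContext context, TaskId taskID) {
    return super.canSpeculate(context, taskID)
        && taskID.getTaskType() == TaskType.MAP;
  }
}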
Use of org.apache.hadoop.mapreduce.v2.app.AppContext in project hadoop by apache.
From the class JobCounterInfo, method getCounters.
private void getCounters(AppContext ctx, Job job) {
  if (job == null) {
    return;
  }
  total = job.getAllCounters();
  // If the job does not supply aggregate counters, build the total
  // from the per-task counters in the loop below.
  boolean needTotalCounters = false;
  if (total == null) {
    total = new Counters();
    needTotalCounters = true;
  }
  map = new Counters();
  reduce = new Counters();
  // Get all types of counters.
  Map<TaskId, Task> tasks = job.getTasks();
  for (Task t : tasks.values()) {
    Counters counters = t.getCounters();
    if (counters == null) {
      continue;
    }
    switch (t.getType()) {
      case MAP:
        map.incrAllCounters(counters);
        break;
      case REDUCE:
        reduce.incrAllCounters(counters);
        break;
    }
    if (needTotalCounters) {
      total.incrAllCounters(counters);
    }
  }
}
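The per-task aggregation relies on Counters.incrAllCounters, which folds every counter of its argument into the receiver. A self-contained sketch of the same pattern (the counter values are illustrative):

import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskCounter;

public class CounterAggregationSketch {
  public static void main(String[] args) {
    // Two per-task counter sets, filled by hand for illustration.
    Counters task1 = new Counters();
    task1.findCounter(TaskCounter.MAP_INPUT_RECORDS).increment(10);
    Counters task2 = new Counters();
    task2.findCounter(TaskCounter.MAP_INPUT_RECORDS).increment(5);

    // incrAllCounters adds every counter of the argument into the
    // receiver, which is how the loop above builds map/reduce/total.
    Counters total = new Counters();
    total.incrAllCounters(task1);
    total.incrAllCounters(task2);
    System.out.println(
        total.findCounter(TaskCounter.MAP_INPUT_RECORDS).getValue()); // 15
  }
}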
Use of org.apache.hadoop.mapreduce.v2.app.AppContext in project hadoop by apache.
From the class JHEventHandlerForSigtermTest, method mockAppContext.
private AppContext mockAppContext(Class<? extends AppContext> contextClass,
    ApplicationId appId, boolean isLastAMRetry) {
  JobId jobId = TypeConverter.toYarn(TypeConverter.fromYarn(appId));
  AppContext mockContext = mock(contextClass);
  Job mockJob = mockJob();
  when(mockContext.getJob(jobId)).thenReturn(mockJob);
  when(mockContext.getApplicationID()).thenReturn(appId);
  when(mockContext.isLastAMRetry()).thenReturn(isLastAMRetry);
  if (mockContext instanceof RunningAppContext) {
    when(((RunningAppContext) mockContext).getTimelineClient())
        .thenReturn(TimelineClient.createTimelineClient());
    when(((RunningAppContext) mockContext).getTimelineV2Client())
        .thenReturn(TimelineV2Client.createTimelineClient(
            ApplicationId.newInstance(0, 1)));
  }
  return mockContext;
}
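Because the helper is parameterized by contextClass, the same setup serves both plain AppContext mocks and RunningAppContext mocks that also need timeline clients. A hypothetical companion test in the same class (the method name and values are illustrative):

@Test
public void testMockAppContextVariants() {
  ApplicationId appId = ApplicationId.newInstance(0, 1);

  // Plain mock: the RunningAppContext branch is skipped, so no
  // timeline clients are stubbed.
  AppContext basic = mockAppContext(AppContext.class, appId, false);
  assertFalse(basic.isLastAMRetry());

  // RunningAppContext mock: the instanceof check matches, so
  // getTimelineClient() and getTimelineV2Client() are stubbed too.
  AppContext running = mockAppContext(RunningAppContext.class, appId, true);
  assertTrue(running.isLastAMRetry());
  assertNotNull(((RunningAppContext) running).getTimelineClient());
}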
Use of org.apache.hadoop.mapreduce.v2.app.AppContext in project hadoop by apache.
From the class MRApp, method createJob.
@Override
protected Job createJob(Configuration conf, JobStateInternal forcedState,
    String diagnostic) {
  UserGroupInformation currentUser = null;
  try {
    currentUser = UserGroupInformation.getCurrentUser();
  } catch (IOException e) {
    throw new YarnRuntimeException(e);
  }
  Job newJob = new TestJob(getJobId(), getAttemptID(), conf,
      getDispatcher().getEventHandler(), getTaskAttemptListener(),
      getContext().getClock(), getCommitter(), isNewApiCommitter(),
      currentUser.getUserName(), getContext(), forcedState, diagnostic);
  ((AppContext) getContext()).getAllJobs().put(newJob.getID(), newJob);
  // Stop the MRApp as soon as the job reports completion.
  getDispatcher().register(JobFinishEvent.Type.class,
      new EventHandler<JobFinishEvent>() {
        @Override
        public void handle(JobFinishEvent event) {
          stop();
        }
      });
  return newJob;
}
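The JobFinishEvent handler registered here stops the MRApp once the test job completes, so a test driver only needs to submit the job and wait for its final state. A sketch of typical use (the method name and the constructor arguments, 1 map and 1 reduce with auto-completing attempts, are illustrative):

@Test
public void testDriverSketch() throws Exception {
  // 1 map, 1 reduce, auto-complete task attempts, clean staging on start.
  MRApp app = new MRApp(1, 1, true, "testDriverSketch", true);
  Job job = app.submit(new Configuration());
  // The JobFinishEvent handler registered in createJob() stops the app
  // once this state is reached.
  app.waitForState(job, JobState.SUCCEEDED);
}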