use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project hadoop by apache.
the class TestTaskAttemptListenerImpl method testGetTask.
@Test(timeout = 5000)
public void testGetTask() throws IOException {
  AppContext appCtx = mock(AppContext.class);
  JobTokenSecretManager secret = mock(JobTokenSecretManager.class);
  RMHeartbeatHandler rmHeartbeatHandler = mock(RMHeartbeatHandler.class);
  TaskHeartbeatHandler hbHandler = mock(TaskHeartbeatHandler.class);
  Dispatcher dispatcher = mock(Dispatcher.class);
  @SuppressWarnings("unchecked")
  EventHandler<Event> ea = mock(EventHandler.class);
  when(dispatcher.getEventHandler()).thenReturn(ea);
  when(appCtx.getEventHandler()).thenReturn(ea);
  CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
  policy.init(appCtx);
  MockTaskAttemptListenerImpl listener =
      new MockTaskAttemptListenerImpl(appCtx, secret, rmHeartbeatHandler, hbHandler, policy);
  Configuration conf = new Configuration();
  listener.init(conf);
  listener.start();
  JVMId id = new JVMId("foo", 1, true, 1);
  WrappedJvmID wid = new WrappedJvmID(id.getJobId(), id.isMap, id.getId());
  // Verify ask before registration: the JVM ID has not been registered yet,
  // so the JVM should be told to die.
  JvmContext context = new JvmContext();
  context.jvmId = id;
  JvmTask result = listener.getTask(context);
  assertNotNull(result);
  assertTrue(result.shouldDie);
  // Verify ask after registration but before launch:
  // no kill order, but no task handed out either, so the result is null.
  TaskAttemptId attemptID = mock(TaskAttemptId.class);
  Task task = mock(Task.class);
  // Register a pending task under this JVM ID.
  listener.registerPendingTask(task, wid);
  result = listener.getTask(context);
  assertNull(result);
  // Unregister for more testing.
  listener.unregister(attemptID, wid);
  // Verify ask after registration and launch.
  listener.registerPendingTask(task, wid);
  listener.registerLaunchedTask(attemptID, wid);
  verify(hbHandler).register(attemptID);
  result = listener.getTask(context);
  assertNotNull(result);
  assertFalse(result.shouldDie);
  // Don't unregister yet, for more testing:
  // a second ask for the same JVM is told to die.
  result = listener.getTask(context);
  assertNotNull(result);
  assertTrue(result.shouldDie);
  listener.unregister(attemptID, wid);
  // Verify ask after unregistration.
  result = listener.getTask(context);
  assertNotNull(result);
  assertTrue(result.shouldDie);
  listener.stop();
  // Test JVMId parsing: a well-formed ID parses, a malformed one throws.
  JVMId jvmid = JVMId.forName("jvm_001_002_m_004");
  assertNotNull(jvmid);
  try {
    JVMId.forName("jvm_001_002_m_004_006");
    fail();
  } catch (IllegalArgumentException e) {
    assertEquals("TaskId string : jvm_001_002_m_004_006 is not properly formed",
        e.getMessage());
  }
}
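The last assertions exercise JVMId's string form. Below is a minimal standalone sketch of that round trip, assuming only the hadoop-mapreduce-client jars on the classpath; the class name is illustrative and the printed value is approximate.

import org.apache.hadoop.mapred.JVMId;

public class JvmIdRoundTrip {
  public static void main(String[] args) {
    // Same shape as the test above: identifier "foo", job 1, map JVM, sequence 1.
    JVMId built = new JVMId("foo", 1, true, 1);
    // The string form is "jvm_<identifier>_<jobId>_<m|r>_<seq>", zero-padded,
    // and forName parses it back.
    JVMId parsed = JVMId.forName(built.toString());
    System.out.println(parsed.getJobId());
    // A fifth trailing segment does not fit the format, so forName throws
    // IllegalArgumentException, as the test's failure case shows.
    JVMId.forName("jvm_001_002_m_004_006");
  }
}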
use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project hadoop by apache.
the class StartEndTimesBase method updateAttempt.
@Override
public void updateAttempt(TaskAttemptStatus status, long timestamp) {
  TaskAttemptId attemptID = status.id;
  TaskId taskID = attemptID.getTaskId();
  JobId jobID = taskID.getJobId();
  Job job = context.getJob(jobID);
  if (job == null) {
    return;
  }
  Task task = job.getTask(taskID);
  if (task == null) {
    return;
  }
  Long boxedStart = startTimes.get(attemptID);
  long start = boxedStart == null ? Long.MIN_VALUE : boxedStart;
  TaskAttempt taskAttempt = task.getAttempt(attemptID);
  if (taskAttempt.getState() == TaskAttemptState.SUCCEEDED) {
    boolean isNew = false;
    // Is this a new success?
    synchronized (doneTasks) {
      if (!doneTasks.contains(task)) {
        doneTasks.add(task);
        isNew = true;
      }
    }
    // If a task completes twice (because of a previous speculation and a
    // race, or a success followed by loss of the machine holding the
    // local data), we only count the first completion.
    if (isNew) {
      long finish = timestamp;
      if (start > 1L && finish > 1L && start <= finish) {
        long duration = finish - start;
        DataStatistics statistics = dataStatisticsForTask(taskID);
        if (statistics != null) {
          statistics.add(duration);
        }
      }
    }
  }
}
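The guard around the duration is the subtle part: a missing start time is modeled as Long.MIN_VALUE, values of 0 or 1 are treated as unset, and out-of-order pairs are dropped, so only plausible timestamps reach the statistics. A hypothetical helper (not part of StartEndTimesBase) restating just that guard:

// Returns the attempt duration, or -1 when either timestamp is
// missing/implausible or the pair is out of order, mirroring the
// "start > 1L && finish > 1L && start <= finish" check above.
static long safeDuration(Long boxedStart, long finish) {
  long start = boxedStart == null ? Long.MIN_VALUE : boxedStart;
  if (start > 1L && finish > 1L && start <= finish) {
    return finish - start;
  }
  return -1L;
}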
use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project hadoop by apache.
the class AMWebServices method killJobTaskAttempt.
protected Response killJobTaskAttempt(TaskAttempt ta, UserGroupInformation callerUGI,
    HttpServletRequest hsr) throws IOException, InterruptedException {
  Preconditions.checkNotNull(ta, "ta cannot be null");
  String userName = callerUGI.getUserName();
  final TaskAttemptId attemptId = ta.getID();
  try {
    callerUGI.doAs(new PrivilegedExceptionAction<KillTaskAttemptResponse>() {
      @Override
      public KillTaskAttemptResponse run() throws IOException, YarnException {
        KillTaskAttemptRequest req = new KillTaskAttemptRequestPBImpl();
        req.setTaskAttemptId(attemptId);
        return service.forceKillTaskAttempt(req);
      }
    });
  } catch (UndeclaredThrowableException ue) {
    // Answer 403 for access-control failures; bubble anything else up to the user.
    if (ue.getCause() instanceof YarnException) {
      YarnException ye = (YarnException) ue.getCause();
      if (ye.getCause() instanceof AccessControlException) {
        String taId = attemptId.toString();
        String msg = "Unauthorized attempt to kill task attempt " + taId
            + " by remote user " + userName;
        return Response.status(Status.FORBIDDEN).entity(msg).build();
      } else {
        throw ue;
      }
    } else {
      throw ue;
    }
  }
  JobTaskAttemptState ret = new JobTaskAttemptState();
  ret.setState(TaskAttemptState.KILLED.toString());
  return Response.status(Status.OK).entity(ret).build();
}
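The catch block's shape follows from how UserGroupInformation.doAs reports failures: IOException, InterruptedException, RuntimeException and Error are rethrown as-is, while any other checked exception thrown by the action (here, YarnException) comes back wrapped in UndeclaredThrowableException, so the code unwraps two levels of causes to find the AccessControlException. A self-contained illustration of that unwrapping pattern, with plain stand-ins for the Hadoop exception types:

import java.lang.reflect.UndeclaredThrowableException;

public class UnwrapDemo {

  // Stand-in for YarnException: a checked exception that doAs would wrap.
  static class CheckedFailure extends Exception {
    CheckedFailure(String msg, Throwable cause) { super(msg, cause); }
  }

  public static void main(String[] args) {
    try {
      // doAs-style wrapping of an undeclared checked exception whose own
      // cause is the access-control failure (SecurityException stands in
      // for Hadoop's AccessControlException here).
      throw new UndeclaredThrowableException(
          new CheckedFailure("kill rejected", new SecurityException("not authorized")));
    } catch (UndeclaredThrowableException ue) {
      // Two-level unwrap, same shape as the catch block above.
      if (ue.getCause() instanceof CheckedFailure
          && ue.getCause().getCause() instanceof SecurityException) {
        System.out.println("would answer 403: " + ue.getCause().getCause().getMessage());
      } else {
        throw ue;
      }
    }
  }
}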
use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project hadoop by apache.
the class TestCheckpointPreemptionPolicy method setup.
@Before
@SuppressWarnings("rawtypes") // mocked generics
public void setup() {
  ApplicationId appId = ApplicationId.newInstance(200, 1);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
  jid = MRBuilderUtils.newJobId(appId, 1);
  mActxt = mock(RunningAppContext.class);
  @SuppressWarnings("unchecked")
  EventHandler<Event> ea = mock(EventHandler.class);
  when(mActxt.getEventHandler()).thenReturn(ea);
  for (int i = 0; i < 40; ++i) {
    ContainerId cId = ContainerId.newContainerId(appAttemptId, i);
    if (0 == i % 7) {
      preemptedContainers.add(cId);
    }
    TaskId tId = 0 == i % 2
        ? MRBuilderUtils.newTaskId(jid, i / 2, TaskType.MAP)
        : MRBuilderUtils.newTaskId(jid, i / 2 + 1, TaskType.REDUCE);
    assignedContainers.put(cId, MRBuilderUtils.newTaskAttemptId(tId, 0));
    contToResourceMap.put(cId, Resource.newInstance(2 * minAlloc, 2));
  }
  for (Map.Entry<ContainerId, TaskAttemptId> ent : assignedContainers.entrySet()) {
    System.out.println("cont:" + ent.getKey().getContainerId()
        + " type:" + ent.getValue().getTaskId().getTaskType()
        + " res:" + contToResourceMap.get(ent.getKey()).getMemorySize() + "MB");
  }
}
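Everything above hangs off the MRv2 ID hierarchy: an ApplicationId is wrapped into a JobId, which anchors TaskIds, which anchor TaskAttemptIds. A minimal standalone sketch of that chain using the same MRBuilderUtils calls (the class name is illustrative and the printed form is approximate):

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;

public class IdChain {
  public static void main(String[] args) {
    // ApplicationId -> JobId -> TaskId -> TaskAttemptId, the same chain
    // the setup above walks when assigning containers to attempts.
    ApplicationId appId = ApplicationId.newInstance(200, 1);
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
    System.out.println(attemptId); // e.g. attempt_200_0001_m_000000_0
  }
}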
use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project hadoop by apache.
the class TestCheckpointPreemptionPolicy method validatePreemption.
private List<TaskAttemptId> validatePreemption(PreemptionMessage pM,
    CheckpointAMPreemptionPolicy policy, int supposedMemPreemption) {
  Resource effectivelyPreempted = Resource.newInstance(0, 0);
  List<TaskAttemptId> preempting = new ArrayList<TaskAttemptId>();
  for (Map.Entry<ContainerId, TaskAttemptId> ent : assignedContainers.entrySet()) {
    if (policy.isPreempted(ent.getValue())) {
      Resources.addTo(effectivelyPreempted, contToResourceMap.get(ent.getKey()));
      // only reducers should be preempted
      assertEquals(TaskType.REDUCE, ent.getValue().getTaskId().getTaskType());
      preempting.add(ent.getValue());
    }
  }
  // enough memory must have been preempted...
  assert effectivelyPreempted.getMemorySize() >= supposedMemPreemption
      : "preempted: " + effectivelyPreempted.getMemorySize();
  // ...but not much more than requested (at most one extra minimum allocation)
  assert effectivelyPreempted.getMemorySize() <= supposedMemPreemption + minAlloc;
  return preempting;
}
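The two trailing asserts bound the preempted memory from both sides: at least supposedMemPreemption must be preempted, and since containers are preempted whole, an overshoot of up to one minimum allocation is tolerated. A hypothetical helper restating that invariant:

// Hypothetical helper restating the bounds checked above: preempt at
// least wantedMb, and overshoot by no more than one minimum allocation.
static boolean withinPreemptionBounds(long preemptedMb, long wantedMb, long minAllocMb) {
  return preemptedMb >= wantedMb && preemptedMb <= wantedMb + minAllocMb;
}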