Use of org.apache.hadoop.mapreduce.v2.api.records.TaskType in project hadoop by apache.
The class ClientServiceDelegate, method getTaskReports.
public org.apache.hadoop.mapreduce.TaskReport[] getTaskReports(JobID oldJobID,
    TaskType taskType) throws IOException {
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
      TypeConverter.toYarn(oldJobID);
  GetTaskReportsRequest request =
      recordFactory.newRecordInstance(GetTaskReportsRequest.class);
  request.setJobId(jobId);
  request.setTaskType(TypeConverter.toYarn(taskType));
  List<org.apache.hadoop.mapreduce.v2.api.records.TaskReport> taskReports =
      ((GetTaskReportsResponse) invoke("getTaskReports",
          GetTaskReportsRequest.class, request)).getTaskReportList();
  return TypeConverter.fromYarn(taskReports)
      .toArray(new org.apache.hadoop.mapreduce.TaskReport[0]);
}
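The delegate converts the client-facing TaskType to the v2 records enum before building the RPC request, and converts the reports back on the way out. Below is a minimal, hedged sketch of that conversion in isolation, assuming the hadoop-mapreduce-client jars are on the classpath; TaskTypeRoundTrip is a made-up demo class, not part of Hadoop.

import org.apache.hadoop.mapreduce.TypeConverter;

public class TaskTypeRoundTrip {
  public static void main(String[] args) {
    // Client-side enum -> v2 wire enum. Assumption: only MAP and REDUCE
    // have a v2 counterpart; other client-side task types do not convert.
    org.apache.hadoop.mapreduce.v2.api.records.TaskType yarn =
        TypeConverter.toYarn(org.apache.hadoop.mapreduce.TaskType.REDUCE);
    // ...and back to the client-side enum.
    org.apache.hadoop.mapreduce.TaskType old = TypeConverter.fromYarn(yarn);
    System.out.println(yarn + " <-> " + old); // REDUCE <-> REDUCE
  }
}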
Use of org.apache.hadoop.mapreduce.v2.api.records.TaskType in project hadoop by apache.
The class TestHsWebServicesAttempts, method verifyTaskAttemptGeneric.
public void verifyTaskAttemptGeneric(TaskAttempt ta, TaskType ttype, String id,
    String state, String type, String rack, String nodeHttpAddress,
    String diagnostics, String assignedContainerId, long startTime,
    long finishTime, long elapsedTime, float progress) {
  TaskAttemptId attid = ta.getID();
  String attemptId = MRApps.toString(attid);
  WebServicesTestUtils.checkStringMatch("id", attemptId, id);
  WebServicesTestUtils.checkStringMatch("type", ttype.toString(), type);
  WebServicesTestUtils.checkStringMatch("state", ta.getState().toString(), state);
  WebServicesTestUtils.checkStringMatch("rack", ta.getNodeRackName(), rack);
  WebServicesTestUtils.checkStringMatch("nodeHttpAddress",
      ta.getNodeHttpAddress(), nodeHttpAddress);
  // Build the expected diagnostics string by concatenating the attempt's
  // diagnostics entries; skip the work when no diagnostics are expected.
  String expectDiag = "";
  List<String> diagnosticsList = ta.getDiagnostics();
  if (diagnosticsList != null && !diagnostics.isEmpty()) {
    StringBuilder b = new StringBuilder();
    for (String diag : diagnosticsList) {
      b.append(diag);
    }
    expectDiag = b.toString();
  }
  WebServicesTestUtils.checkStringMatch("diagnostics", expectDiag, diagnostics);
  WebServicesTestUtils.checkStringMatch("assignedContainerId",
      ta.getAssignedContainerID().toString(), assignedContainerId);
  assertEquals("startTime wrong", ta.getLaunchTime(), startTime);
  assertEquals("finishTime wrong", ta.getFinishTime(), finishTime);
  assertEquals("elapsedTime wrong", finishTime - startTime, elapsedTime);
  assertEquals("progress wrong", ta.getProgress() * 100, progress, 1e-3f);
}
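The helper joins the attempt's diagnostics entries with no separator before comparing against the expected string. A self-contained sketch of that concatenation, using hypothetical diagnostics values:

import java.util.Arrays;
import java.util.List;

public class DiagnosticsJoinDemo {
  public static void main(String[] args) {
    // Hypothetical diagnostics entries, as a TaskAttempt might report them.
    List<String> diagnosticsList = Arrays.asList("Container killed; ", "exit code 137");
    // Concatenate with no separator, mirroring the loop in the test helper.
    StringBuilder b = new StringBuilder();
    for (String diag : diagnosticsList) {
      b.append(diag);
    }
    System.out.println(b); // Container killed; exit code 137
  }
}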
Use of org.apache.hadoop.mapreduce.v2.api.records.TaskType in project hadoop by apache.
The class TestHsWebServicesTasks, method testTasksQueryInvalid.
@Test
public void testTasksQueryInvalid() throws JSONException, Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    // tasktype must be exactly either "m" or "r"
    String tasktype = "reduce";
    try {
      r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
          .path(jobId).path("tasks").queryParam("type", tasktype)
          .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
      fail("should have thrown exception on invalid uri");
    } catch (UniformInterfaceException ue) {
      ClientResponse response = ue.getResponse();
      assertResponseStatusCode(Status.BAD_REQUEST, response.getStatusInfo());
      assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
          response.getType().toString());
      // The error body is a JSON RemoteException with exactly three fields:
      // message, exception, and javaClassName.
      JSONObject msg = response.getEntity(JSONObject.class);
      JSONObject exception = msg.getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, exception.length());
      String message = exception.getString("message");
      String type = exception.getString("exception");
      String classname = exception.getString("javaClassName");
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: tasktype must be either m or r", message);
      WebServicesTestUtils.checkStringMatch("exception type",
          "BadRequestException", type);
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.BadRequestException", classname);
    }
  }
}
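For reference, the request under test resolves to GET ws/v1/history/mapreduce/jobs/{jobid}/tasks?type=reduce, which the history server rejects with HTTP 400 because only "m" and "r" are accepted. A hedged sketch of the same query with plain java.net, assuming a JobHistoryServer on its default web port 19888 and a hypothetical job id:

import java.net.HttpURLConnection;
import java.net.URL;

public class InvalidTaskTypeQuery {
  public static void main(String[] args) throws Exception {
    String jobId = "job_1326821518301_0005"; // hypothetical job id
    // "reduce" is not a legal value for the type parameter; only "m" or "r" is.
    URL url = new URL("http://localhost:19888/ws/v1/history/mapreduce/jobs/"
        + jobId + "/tasks?type=reduce");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");
    System.out.println(conn.getResponseCode()); // expected: 400
  }
}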
Use of org.apache.hadoop.mapreduce.v2.api.records.TaskType in project hadoop by apache.
The class TestMRApps, method testTaskIDtoString.
//TODO_get.set
@Test(timeout = 120000)
public void testTaskIDtoString() {
  TaskId tid = RecordFactoryProvider.getRecordFactory(null)
      .newRecordInstance(TaskId.class);
  tid.setJobId(RecordFactoryProvider.getRecordFactory(null)
      .newRecordInstance(JobId.class));
  tid.getJobId().setAppId(ApplicationId.newInstance(0, 0));
  tid.setTaskType(TaskType.MAP);
  TaskType type = tid.getTaskType();
  System.err.println(type);
  // Debug output only: reassigning the local does not change the record.
  type = TaskType.REDUCE;
  System.err.println(type);
  System.err.println(tid.getTaskType());
  // A map task with all-zero ids renders as task_0_0000_m_000000...
  assertEquals("task_0_0000_m_000000", MRApps.toString(tid));
  // ...and switching the type to REDUCE only changes the "m" to an "r".
  tid.setTaskType(TaskType.REDUCE);
  assertEquals("task_0_0000_r_000000", MRApps.toString(tid));
}
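The assertions pin down the string layout MRApps.toString produces: task_&lt;clusterTimestamp&gt;_&lt;jobSeq&gt;_&lt;m|r&gt;_&lt;taskSeq&gt;. A stand-alone sketch of that layout, using a hand-rolled formatter for illustration rather than the MRApps implementation:

import org.apache.hadoop.mapreduce.v2.api.records.TaskType;

public class TaskIdFormatDemo {
  // Hypothetical helper reproducing the observed format: the job sequence is
  // zero-padded to 4 digits and the task sequence to 6.
  static String render(long ts, int jobSeq, TaskType type, int taskSeq) {
    char t = (type == TaskType.MAP) ? 'm' : 'r';
    return String.format("task_%d_%04d_%c_%06d", ts, jobSeq, t, taskSeq);
  }

  public static void main(String[] args) {
    System.out.println(render(0, 0, TaskType.MAP, 0));    // task_0_0000_m_000000
    System.out.println(render(0, 0, TaskType.REDUCE, 0)); // task_0_0000_r_000000
  }
}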
Use of org.apache.hadoop.mapreduce.v2.api.records.TaskType in project hadoop by apache.
The class TestCheckpointPreemptionPolicy, method testStrictPreemptionContract.
@Test
public void testStrictPreemptionContract() {
  final Map<ContainerId, TaskAttemptId> containers = assignedContainers;
  AMPreemptionPolicy.Context mPctxt = new AMPreemptionPolicy.Context() {

    @Override
    public TaskAttemptId getTaskAttempt(ContainerId cId) {
      return containers.get(cId);
    }

    @Override
    public List<Container> getContainers(TaskType t) {
      // Return the assigned containers whose task attempts match the
      // requested TaskType.
      List<Container> p = new ArrayList<Container>();
      for (Map.Entry<ContainerId, TaskAttemptId> ent
          : assignedContainers.entrySet()) {
        if (ent.getValue().getTaskId().getTaskType().equals(t)) {
          p.add(Container.newInstance(ent.getKey(), null, null,
              contToResourceMap.get(ent.getKey()),
              Priority.newInstance(0), null));
        }
      }
      return p;
    }
  };
  PreemptionMessage pM = generatePreemptionMessage(preemptedContainers,
      contToResourceMap, Resource.newInstance(1024, 1), true);
  CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
  policy.init(mActxt);
  policy.preempt(mPctxt, pM);
  // Map attempts must not be flagged as preempted; all other attempts must be.
  for (ContainerId c : preemptedContainers) {
    TaskAttemptId t = assignedContainers.get(c);
    if (TaskType.MAP.equals(t.getTaskId().getTaskType())) {
      assertFalse(policy.isPreempted(t));
    } else {
      assertTrue(policy.isPreempted(t));
    }
  }
}
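The anonymous Context above selects assigned containers by the TaskType of their task attempts. The same filtering idea in a self-contained sketch, with plain strings standing in for ContainerId and TaskAttemptId:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.mapreduce.v2.api.records.TaskType;

public class TaskTypeFilterDemo {
  public static void main(String[] args) {
    // Hypothetical assignment of containers to task types.
    Map<String, TaskType> assigned = new LinkedHashMap<>();
    assigned.put("container_1", TaskType.MAP);
    assigned.put("container_2", TaskType.REDUCE);
    assigned.put("container_3", TaskType.MAP);
    // Keep only the containers whose attempt matches the requested type.
    List<String> maps = new ArrayList<>();
    for (Map.Entry<String, TaskType> e : assigned.entrySet()) {
      if (e.getValue() == TaskType.MAP) {
        maps.add(e.getKey());
      }
    }
    System.out.println(maps); // [container_1, container_3]
  }
}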