Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project Hadoop by Apache.
The class RMContainerAllocator, method handleUpdatedNodes:
@SuppressWarnings("unchecked")
private void handleUpdatedNodes(AllocateResponse response) {
  // send event to the job about the updated nodes
  List<NodeReport> updatedNodes = response.getUpdatedNodes();
  if (!updatedNodes.isEmpty()) {
    // send event to the job to act upon completed tasks
    eventHandler.handle(
        new JobUpdatedNodesEvent(getJob().getID(), updatedNodes));
    // act upon running tasks
    HashSet<NodeId> unusableNodes = new HashSet<NodeId>();
    for (NodeReport nr : updatedNodes) {
      NodeState nodeState = nr.getNodeState();
      if (nodeState.isUnusable()) {
        unusableNodes.add(nr.getNodeId());
      }
    }
    // pass 0 covers the assigned map attempts, pass 1 the reduce attempts
    for (int i = 0; i < 2; ++i) {
      HashMap<TaskAttemptId, Container> taskSet =
          i == 0 ? assignedRequests.maps : assignedRequests.reduces;
      // kill running containers
      for (Map.Entry<TaskAttemptId, Container> entry : taskSet.entrySet()) {
        TaskAttemptId tid = entry.getKey();
        NodeId taskAttemptNodeId = entry.getValue().getNodeId();
        if (unusableNodes.contains(taskAttemptNodeId)) {
          LOG.info("Killing taskAttempt:" + tid
              + " because it is running on unusable node:"
              + taskAttemptNodeId);
          // If map, reschedule the next task attempt.
          boolean rescheduleNextAttempt = (i == 0);
          eventHandler.handle(new TaskAttemptKillEvent(tid,
              "TaskAttempt killed because it ran on unusable node "
                  + taskAttemptNodeId, rescheduleNextAttempt));
        }
      }
    }
  }
}
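NodeState.isUnusable() is what drives the filtering above; it reports a node as unusable once it has been decommissioned, lost, or marked unhealthy. Note also that in the two-pass loop only map attempts (pass 0) request a reschedule of the next attempt. As a minimal sketch, the filtering step can be expressed on its own like this (the helper class and method names are hypothetical; only the YARN record types are real):

import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeReport;

// Hypothetical helper, not part of Hadoop: collects the NodeIds of all
// updated nodes whose state makes them unusable.
final class UnusableNodeFilter {

  static Set<NodeId> unusableNodes(List<NodeReport> updatedNodes) {
    Set<NodeId> unusable = new HashSet<NodeId>();
    for (NodeReport nr : updatedNodes) {
      // isUnusable() is true for decommissioned, lost, and unhealthy nodes
      if (nr.getNodeState().isUnusable()) {
        unusable.add(nr.getNodeId());
      }
    }
    return unusable;
  }
}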
Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project Hadoop by Apache.
The class MRAppMaster, method parsePreviousJobHistory:
private void parsePreviousJobHistory() throws IOException {
  FSDataInputStream in =
      getPreviousJobHistoryStream(getConfig(), appAttemptID);
  JobHistoryParser parser = new JobHistoryParser(in);
  JobInfo jobInfo = parser.parse();
  Exception parseException = parser.getParseException();
  if (parseException != null) {
    LOG.info("Got an error parsing job-history file, "
        + "ignoring incomplete events.", parseException);
  }
  Map<org.apache.hadoop.mapreduce.TaskID, TaskInfo> taskInfos =
      jobInfo.getAllTasks();
  for (TaskInfo taskInfo : taskInfos.values()) {
    if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) {
      // drop attempts that never completed; only completed attempts of a
      // succeeded task are recovered
      Iterator<Entry<TaskAttemptID, TaskAttemptInfo>> taskAttemptIterator =
          taskInfo.getAllTaskAttempts().entrySet().iterator();
      while (taskAttemptIterator.hasNext()) {
        Map.Entry<TaskAttemptID, TaskAttemptInfo> currentEntry =
            taskAttemptIterator.next();
        if (!jobInfo.getAllCompletedTaskAttempts()
            .containsKey(currentEntry.getKey())) {
          taskAttemptIterator.remove();
        }
      }
      completedTasksFromPreviousRun.put(
          TypeConverter.toYarn(taskInfo.getTaskId()), taskInfo);
      LOG.info("Read from history task "
          + TypeConverter.toYarn(taskInfo.getTaskId()));
    }
  }
  LOG.info("Read completed tasks from history "
      + completedTasksFromPreviousRun.size());
  recoveredJobStartTime = jobInfo.getLaunchTime();
  // recover AMInfos
  List<JobHistoryParser.AMInfo> jhAmInfoList = jobInfo.getAMInfos();
  if (jhAmInfoList != null) {
    for (JobHistoryParser.AMInfo jhAmInfo : jhAmInfoList) {
      AMInfo amInfo = MRBuilderUtils.newAMInfo(jhAmInfo.getAppAttemptId(),
          jhAmInfo.getStartTime(), jhAmInfo.getContainerId(),
          jhAmInfo.getNodeManagerHost(), jhAmInfo.getNodeManagerPort(),
          jhAmInfo.getNodeManagerHttpPort());
      amInfos.add(amInfo);
    }
  }
}
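JobHistoryParser can also be driven standalone, outside the AM. Below is a self-contained sketch (the file path is an assumption; point it at any .jhist file) that parses a history file and prints each task's status, tolerating a truncated file the same way parsePreviousJobHistory does:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;

public class HistoryDump {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path historyFile = new Path(args[0]); // e.g. a .jhist file (assumption)
    FileSystem fs = historyFile.getFileSystem(conf);
    JobHistoryParser parser = new JobHistoryParser(fs, historyFile);
    JobInfo jobInfo = parser.parse();
    // As in parsePreviousJobHistory, a parse error is not fatal: the parser
    // returns whatever events it managed to read.
    if (parser.getParseException() != null) {
      System.err.println("Incomplete history: " + parser.getParseException());
    }
    for (TaskInfo taskInfo : jobInfo.getAllTasks().values()) {
      System.out.println(taskInfo.getTaskId() + " -> "
          + taskInfo.getTaskStatus());
    }
  }
}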
Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project Hadoop by Apache.
The class TaskImpl, method handleTaskAttemptCompletion:
// always called inside a transition, in turn inside the Write Lock
private void handleTaskAttemptCompletion(TaskAttemptId attemptId, TaskAttemptCompletionEventStatus status) {
  TaskAttempt attempt = attempts.get(attemptId);
  // raise the completion event only if the attempt was actually assigned a
  // container, i.e. its node HTTP address is known
  if (attempt.getNodeHttpAddress() != null) {
    TaskAttemptCompletionEvent tce =
        recordFactory.newRecordInstance(TaskAttemptCompletionEvent.class);
    tce.setEventId(-1);
    String scheme = (encryptedShuffle) ? "https://" : "http://";
    tce.setMapOutputServerAddress(StringInterner.weakIntern(scheme
        + attempt.getNodeHttpAddress().split(":")[0] + ":"
        + attempt.getShufflePort()));
    tce.setStatus(status);
    tce.setAttemptId(attempt.getID());
    int runTime = 0;
    if (attempt.getFinishTime() != 0 && attempt.getLaunchTime() != 0) {
      runTime = (int) (attempt.getFinishTime() - attempt.getLaunchTime());
    }
    tce.setAttemptRunTime(runTime);
    // raise the event to the job so that it adds the completion event to
    // its data structures
    eventHandler.handle(new JobTaskAttemptCompletedEvent(tce));
  }
}
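The least obvious step above is the map output server address: the attempt's node HTTP address ("host:port") contributes only its host part, and the port is replaced by the shuffle handler's port. A tiny runnable sketch with invented values:

// Hedged sketch of the address construction above; host and ports are
// invented for illustration only.
public class MapOutputAddressDemo {
  public static void main(String[] args) {
    String nodeHttpAddress = "worker-17:8042"; // hypothetical NM web address
    int shufflePort = 13562;                   // hypothetical shuffle port
    boolean encryptedShuffle = false;
    String scheme = encryptedShuffle ? "https://" : "http://";
    // keep the host, swap the NM web port for the shuffle port
    String mapOutputServer =
        scheme + nodeHttpAddress.split(":")[0] + ":" + shufflePort;
    System.out.println(mapOutputServer); // prints http://worker-17:13562
  }
}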
Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project Hadoop by Apache.
The class MockJobs, method newTask:
public static Task newTask(JobId jid, int i, int m, final boolean hasFailedTasks) {
  final TaskId tid = Records.newRecord(TaskId.class);
  tid.setJobId(jid);
  tid.setId(i);
  tid.setTaskType(TASK_TYPES.next());
  final TaskReport report = newTaskReport(tid);
  final Map<TaskAttemptId, TaskAttempt> attempts = newTaskAttempts(tid, m);
  return new Task() {

    @Override
    public TaskId getID() {
      return tid;
    }

    @Override
    public TaskReport getReport() {
      return report;
    }

    @Override
    public Counters getCounters() {
      if (hasFailedTasks) {
        return null;
      }
      return new Counters(TypeConverter.fromYarn(report.getCounters()));
    }

    @Override
    public float getProgress() {
      return report.getProgress();
    }

    @Override
    public TaskType getType() {
      return tid.getTaskType();
    }

    @Override
    public Map<TaskAttemptId, TaskAttempt> getAttempts() {
      return attempts;
    }

    @Override
    public TaskAttempt getAttempt(TaskAttemptId attemptID) {
      return attempts.get(attemptID);
    }

    @Override
    public boolean isFinished() {
      switch (report.getTaskState()) {
        case SUCCEEDED:
        case KILLED:
        case FAILED:
          return true;
        default:
          return false;
      }
    }

    @Override
    public boolean canCommit(TaskAttemptId taskAttemptID) {
      return false;
    }

    @Override
    public TaskState getState() {
      return report.getTaskState();
    }
  };
}
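A hedged usage sketch of the factory above, as it might appear in a test (it assumes the MockJobs test helper is on the classpath; the IDs are arbitrary):

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.util.Records;

public class MockTaskDemo {
  public static void main(String[] args) {
    // build a JobId the same way these snippets build their records
    JobId jid = Records.newRecord(JobId.class);
    jid.setAppId(ApplicationId.newInstance(System.currentTimeMillis(), 1));
    jid.setId(1);
    // MockJobs is assumed to be in scope (a MapReduce AM test helper)
    // task index 0, three mock attempts, no failed tasks
    Task task = MockJobs.newTask(jid, 0, 3, false);
    System.out.println(task.getID() + " type=" + task.getType()
        + " finished=" + task.isFinished());
    for (TaskAttemptId attemptId : task.getAttempts().keySet()) {
      System.out.println("  attempt " + attemptId);
    }
  }
}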
Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project Hadoop by Apache.
The class MockJobs, method newTaskAttemptReport:
public static TaskAttemptReport newTaskAttemptReport(TaskAttemptId id) {
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
      id.getTaskId().getJobId().getAppId(), 0);
  ContainerId containerId = ContainerId.newContainerId(appAttemptId, 0);
  TaskAttemptReport report = Records.newRecord(TaskAttemptReport.class);
  report.setTaskAttemptId(id);
  report.setStartTime(System.currentTimeMillis() - (int) (Math.random() * DT));
  report.setFinishTime(System.currentTimeMillis()
      + (int) (Math.random() * DT) + 1);
  if (id.getTaskId().getTaskType() == TaskType.REDUCE) {
    report.setShuffleFinishTime(
        (report.getFinishTime() + report.getStartTime()) / 2);
    report.setSortFinishTime(
        (report.getFinishTime() + report.getShuffleFinishTime()) / 2);
  }
  report.setPhase(PHASES.next());
  report.setTaskAttemptState(TASK_ATTEMPT_STATES.next());
  report.setProgress((float) Math.random());
  report.setCounters(TypeConverter.toYarn(newCounters()));
  report.setContainerId(containerId);
  report.setDiagnosticInfo(DIAGS.next());
  report.setStateString("Moving average " + Math.random());
  return report;
}
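And a matching sketch for newTaskAttemptReport: build a TaskAttemptId by hand (the IDs are arbitrary) and inspect the generated report. Choosing TaskType.REDUCE exercises the shuffle/sort-finish-time branch above:

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.util.Records;

public class MockAttemptReportDemo {
  public static void main(String[] args) {
    JobId jobId = Records.newRecord(JobId.class);
    jobId.setAppId(ApplicationId.newInstance(System.currentTimeMillis(), 1));
    jobId.setId(1);
    TaskId taskId = Records.newRecord(TaskId.class);
    taskId.setJobId(jobId);
    taskId.setId(0);
    taskId.setTaskType(TaskType.REDUCE); // exercises the shuffle/sort branch
    TaskAttemptId attemptId = Records.newRecord(TaskAttemptId.class);
    attemptId.setTaskId(taskId);
    attemptId.setId(0);
    // MockJobs is assumed to be in scope (a MapReduce AM test helper)
    TaskAttemptReport report = MockJobs.newTaskAttemptReport(attemptId);
    System.out.println(report.getTaskAttemptState()
        + " progress=" + report.getProgress());
  }
}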