Example 6 with TaskType

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskType in project hadoop by apache.

From the class TestCheckpointPreemptionPolicy, method testPreemptionContract.

@Test
public void testPreemptionContract() {
    final Map<ContainerId, TaskAttemptId> containers = assignedContainers;
    AMPreemptionPolicy.Context mPctxt = new AMPreemptionPolicy.Context() {

        @Override
        public TaskAttemptId getTaskAttempt(ContainerId cId) {
            return containers.get(cId);
        }

        @Override
        public List<Container> getContainers(TaskType t) {
            List<Container> p = new ArrayList<Container>();
            for (Map.Entry<ContainerId, TaskAttemptId> ent : assignedContainers.entrySet()) {
                if (ent.getValue().getTaskId().getTaskType().equals(t)) {
                    p.add(Container.newInstance(ent.getKey(), null, null, contToResourceMap.get(ent.getKey()), Priority.newInstance(0), null));
                }
            }
            return p;
        }
    };
    PreemptionMessage pM = generatePreemptionMessage(preemptedContainers, contToResourceMap, Resource.newInstance(minAlloc, 1), false);
    CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
    policy.init(mActxt);
    int supposedMemPreemption =
        (int) pM.getContract().getResourceRequest().get(0).getResourceRequest()
            .getCapability().getMemorySize()
        * pM.getContract().getResourceRequest().get(0).getResourceRequest()
            .getNumContainers();
    // first round of preemption
    policy.preempt(mPctxt, pM);
    List<TaskAttemptId> preempting = validatePreemption(pM, policy, supposedMemPreemption);
    // redundant message
    policy.preempt(mPctxt, pM);
    List<TaskAttemptId> preempting2 = validatePreemption(pM, policy, supposedMemPreemption);
    // check that nothing got added
    assert preempting2.equals(preempting);
    // simulate 2 task completions/successful preemption
    policy.handleCompletedContainer(preempting.get(0));
    policy.handleCompletedContainer(preempting.get(1));
    // remove from assignedContainers
    Iterator<Map.Entry<ContainerId, TaskAttemptId>> it = assignedContainers.entrySet().iterator();
    while (it.hasNext()) {
        Map.Entry<ContainerId, TaskAttemptId> ent = it.next();
        if (ent.getValue().equals(preempting.get(0)) || ent.getValue().equals(preempting.get(1)))
            it.remove();
    }
    // one more message asking for preemption
    policy.preempt(mPctxt, pM);
    // triggers preemption of 2 more containers (i.e., the preemption set changes)
    List<TaskAttemptId> preempting3 = validatePreemption(pM, policy, supposedMemPreemption);
    assert !preempting3.equals(preempting2);
}
Also used : RunningAppContext(org.apache.hadoop.mapreduce.v2.app.MRAppMaster.RunningAppContext) PreemptionMessage(org.apache.hadoop.yarn.api.records.PreemptionMessage) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) ArrayList(java.util.ArrayList) PreemptionContainer(org.apache.hadoop.yarn.api.records.PreemptionContainer) Container(org.apache.hadoop.yarn.api.records.Container) CheckpointAMPreemptionPolicy(org.apache.hadoop.mapreduce.v2.app.rm.preemption.CheckpointAMPreemptionPolicy) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) TaskType(org.apache.hadoop.mapreduce.v2.api.records.TaskType) AMPreemptionPolicy(org.apache.hadoop.mapreduce.v2.app.rm.preemption.AMPreemptionPolicy) CheckpointAMPreemptionPolicy(org.apache.hadoop.mapreduce.v2.app.rm.preemption.CheckpointAMPreemptionPolicy) HashMap(java.util.HashMap) Map(java.util.Map) Test(org.junit.Test)

Example 7 with TaskType

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskType in project hadoop by apache.

From the class MRBuilderUtils, method newTaskId.

public static TaskId newTaskId(JobId jobId, int id, TaskType taskType) {
    TaskId taskId = Records.newRecord(TaskId.class);
    taskId.setJobId(jobId);
    taskId.setId(id);
    taskId.setTaskType(taskType);
    return taskId;
}
Also used : TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId)
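
In tests this builder is typically combined with MRBuilderUtils.newJobId and MRBuilderUtils.newTaskAttemptId to fabricate complete identifiers; a minimal sketch, assuming the same packages named above (the timestamp and numeric ids are illustrative):

ApplicationId appId = ApplicationId.newInstance(123456789L, 1);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
// Task 0 of the job, typed as a map task.
TaskId mapTask = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
// First attempt of that task.
TaskAttemptId attempt = MRBuilderUtils.newTaskAttemptId(mapTask, 0);

Example 9 below shows the same pattern wrapped in a test helper.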

Example 8 with TaskType

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskType in project hadoop by apache.

From the class CLI, method run.

public int run(String[] argv) throws Exception {
    int exitCode = -1;
    if (argv.length < 1) {
        displayUsage("");
        return exitCode;
    }
    // process arguments
    String cmd = argv[0];
    String submitJobFile = null;
    String jobid = null;
    String taskid = null;
    String historyFileOrJobId = null;
    String historyOutFile = null;
    String historyOutFormat = HistoryViewer.HUMAN_FORMAT;
    String counterGroupName = null;
    String counterName = null;
    JobPriority jp = null;
    String taskType = null;
    String taskState = null;
    int fromEvent = 0;
    int nEvents = 0;
    int jpvalue = 0;
    String configOutFile = null;
    boolean getStatus = false;
    boolean getCounter = false;
    boolean killJob = false;
    boolean listEvents = false;
    boolean viewHistory = false;
    boolean viewAllHistory = false;
    boolean listJobs = false;
    boolean listAllJobs = false;
    boolean listActiveTrackers = false;
    boolean listBlacklistedTrackers = false;
    boolean displayTasks = false;
    boolean killTask = false;
    boolean failTask = false;
    boolean setJobPriority = false;
    boolean logs = false;
    boolean downloadConfig = false;
    if ("-submit".equals(cmd)) {
        if (argv.length != 2) {
            displayUsage(cmd);
            return exitCode;
        }
        submitJobFile = argv[1];
    } else if ("-status".equals(cmd)) {
        if (argv.length != 2) {
            displayUsage(cmd);
            return exitCode;
        }
        jobid = argv[1];
        getStatus = true;
    } else if ("-counter".equals(cmd)) {
        if (argv.length != 4) {
            displayUsage(cmd);
            return exitCode;
        }
        getCounter = true;
        jobid = argv[1];
        counterGroupName = argv[2];
        counterName = argv[3];
    } else if ("-kill".equals(cmd)) {
        if (argv.length != 2) {
            displayUsage(cmd);
            return exitCode;
        }
        jobid = argv[1];
        killJob = true;
    } else if ("-set-priority".equals(cmd)) {
        if (argv.length != 3) {
            displayUsage(cmd);
            return exitCode;
        }
        jobid = argv[1];
        try {
            jp = JobPriority.valueOf(argv[2]);
        } catch (IllegalArgumentException iae) {
            try {
                jpvalue = Integer.parseInt(argv[2]);
            } catch (NumberFormatException ne) {
                LOG.info(ne);
                displayUsage(cmd);
                return exitCode;
            }
        }
        setJobPriority = true;
    } else if ("-events".equals(cmd)) {
        if (argv.length != 4) {
            displayUsage(cmd);
            return exitCode;
        }
        jobid = argv[1];
        fromEvent = Integer.parseInt(argv[2]);
        nEvents = Integer.parseInt(argv[3]);
        listEvents = true;
    } else if ("-history".equals(cmd)) {
        viewHistory = true;
        if (argv.length < 2 || argv.length > 7) {
            displayUsage(cmd);
            return exitCode;
        }
        // Some arguments are optional while others are not, and some require
        // second arguments.  Due to this, the indexing can vary depending on
        // what's specified and what's left out, as summarized in the below table:
        // [all] <jobHistoryFile|jobId> [-outfile <file>] [-format <human|json>]
        //   1                  2            3       4         5         6
        //   1                  2            3       4
        //   1                  2                              3         4
        //   1                  2
        //                      1            2       3         4         5
        //                      1            2       3
        //                      1                              2         3
        //                      1
        // "all" is optional, but comes first if specified
        int index = 1;
        if ("all".equals(argv[index])) {
            index++;
            viewAllHistory = true;
            if (argv.length == 2) {
                displayUsage(cmd);
                return exitCode;
            }
        }
        // Get the job history file or job id argument
        historyFileOrJobId = argv[index++];
        // "-outfile" is optional, but if specified requires a second argument
        if (argv.length > index + 1 && "-outfile".equals(argv[index])) {
            index++;
            historyOutFile = argv[index++];
        }
        // "-format" is optional, but if specified required a second argument
        if (argv.length > index + 1 && "-format".equals(argv[index])) {
            index++;
            historyOutFormat = argv[index++];
        }
        // Check for any extra arguments that don't belong here
        if (argv.length > index) {
            displayUsage(cmd);
            return exitCode;
        }
    } else if ("-list".equals(cmd)) {
        if (argv.length != 1 && !(argv.length == 2 && "all".equals(argv[1]))) {
            displayUsage(cmd);
            return exitCode;
        }
        if (argv.length == 2 && "all".equals(argv[1])) {
            listAllJobs = true;
        } else {
            listJobs = true;
        }
    } else if ("-kill-task".equals(cmd)) {
        if (argv.length != 2) {
            displayUsage(cmd);
            return exitCode;
        }
        killTask = true;
        taskid = argv[1];
    } else if ("-fail-task".equals(cmd)) {
        if (argv.length != 2) {
            displayUsage(cmd);
            return exitCode;
        }
        failTask = true;
        taskid = argv[1];
    } else if ("-list-active-trackers".equals(cmd)) {
        if (argv.length != 1) {
            displayUsage(cmd);
            return exitCode;
        }
        listActiveTrackers = true;
    } else if ("-list-blacklisted-trackers".equals(cmd)) {
        if (argv.length != 1) {
            displayUsage(cmd);
            return exitCode;
        }
        listBlacklistedTrackers = true;
    } else if ("-list-attempt-ids".equals(cmd)) {
        if (argv.length != 4) {
            displayUsage(cmd);
            return exitCode;
        }
        jobid = argv[1];
        taskType = argv[2];
        taskState = argv[3];
        displayTasks = true;
        if (!taskTypes.contains(org.apache.hadoop.util.StringUtils.toUpperCase(taskType))) {
            System.out.println("Error: Invalid task-type: " + taskType);
            displayUsage(cmd);
            return exitCode;
        }
        if (!taskStates.contains(org.apache.hadoop.util.StringUtils.toLowerCase(taskState))) {
            System.out.println("Error: Invalid task-state: " + taskState);
            displayUsage(cmd);
            return exitCode;
        }
    } else if ("-logs".equals(cmd)) {
        if (argv.length == 2 || argv.length == 3) {
            logs = true;
            jobid = argv[1];
            if (argv.length == 3) {
                taskid = argv[2];
            } else {
                taskid = null;
            }
        } else {
            displayUsage(cmd);
            return exitCode;
        }
    } else if ("-config".equals(cmd)) {
        downloadConfig = true;
        if (argv.length != 3) {
            displayUsage(cmd);
            return exitCode;
        }
        jobid = argv[1];
        configOutFile = argv[2];
    } else {
        displayUsage(cmd);
        return exitCode;
    }
    // initialize cluster
    cluster = createCluster();
    // Submit the request
    try {
        if (submitJobFile != null) {
            Job job = Job.getInstance(new JobConf(submitJobFile));
            job.submit();
            System.out.println("Created job " + job.getJobID());
            exitCode = 0;
        } else if (getStatus) {
            Job job = getJob(JobID.forName(jobid));
            if (job == null) {
                System.out.println("Could not find job " + jobid);
            } else {
                Counters counters = job.getCounters();
                System.out.println();
                System.out.println(job);
                if (counters != null) {
                    System.out.println(counters);
                } else {
                    System.out.println("Counters not available. Job is retired.");
                }
                exitCode = 0;
            }
        } else if (getCounter) {
            Job job = getJob(JobID.forName(jobid));
            if (job == null) {
                System.out.println("Could not find job " + jobid);
            } else {
                Counters counters = job.getCounters();
                if (counters == null) {
                    System.out.println("Counters not available for retired job " + jobid);
                    exitCode = -1;
                } else {
                    System.out.println(getCounter(counters, counterGroupName, counterName));
                    exitCode = 0;
                }
            }
        } else if (killJob) {
            Job job = getJob(JobID.forName(jobid));
            if (job == null) {
                System.out.println("Could not find job " + jobid);
            } else {
                JobStatus jobStatus = job.getStatus();
                if (jobStatus.getState() == JobStatus.State.FAILED) {
                    System.out.println("Could not mark the job " + jobid + " as killed, as it has already failed.");
                    exitCode = -1;
                } else if (jobStatus.getState() == JobStatus.State.KILLED) {
                    System.out.println("The job " + jobid + " has already been killed.");
                    exitCode = -1;
                } else if (jobStatus.getState() == JobStatus.State.SUCCEEDED) {
                    System.out.println("Could not kill the job " + jobid + ", as it has already succeeded.");
                    exitCode = -1;
                } else {
                    job.killJob();
                    System.out.println("Killed job " + jobid);
                    exitCode = 0;
                }
            }
        } else if (setJobPriority) {
            Job job = getJob(JobID.forName(jobid));
            if (job == null) {
                System.out.println("Could not find job " + jobid);
            } else {
                if (jp != null) {
                    job.setPriority(jp);
                } else {
                    job.setPriorityAsInteger(jpvalue);
                }
                System.out.println("Changed job priority.");
                exitCode = 0;
            }
        } else if (viewHistory) {
            // If the argument ends with ".jhist", treat it as a history file; otherwise treat it as a job ID
            if (historyFileOrJobId.endsWith(".jhist")) {
                viewHistory(historyFileOrJobId, viewAllHistory, historyOutFile, historyOutFormat);
                exitCode = 0;
            } else {
                Job job = getJob(JobID.forName(historyFileOrJobId));
                if (job == null) {
                    System.out.println("Could not find job " + jobid);
                } else {
                    String historyUrl = job.getHistoryUrl();
                    if (historyUrl == null || historyUrl.isEmpty()) {
                        System.out.println("History file for job " + historyFileOrJobId + " is currently unavailable.");
                    } else {
                        viewHistory(historyUrl, viewAllHistory, historyOutFile, historyOutFormat);
                        exitCode = 0;
                    }
                }
            }
        } else if (listEvents) {
            Job job = getJob(JobID.forName(jobid));
            if (job == null) {
                System.out.println("Could not find job " + jobid);
            } else {
                listEvents(job, fromEvent, nEvents);
                exitCode = 0;
            }
        } else if (listJobs) {
            listJobs(cluster);
            exitCode = 0;
        } else if (listAllJobs) {
            listAllJobs(cluster);
            exitCode = 0;
        } else if (listActiveTrackers) {
            listActiveTrackers(cluster);
            exitCode = 0;
        } else if (listBlacklistedTrackers) {
            listBlacklistedTrackers(cluster);
            exitCode = 0;
        } else if (displayTasks) {
            Job job = getJob(JobID.forName(jobid));
            if (job == null) {
                System.out.println("Could not find job " + jobid);
            } else {
                displayTasks(job, taskType, taskState);
                exitCode = 0;
            }
        } else if (killTask) {
            TaskAttemptID taskID = TaskAttemptID.forName(taskid);
            Job job = getJob(taskID.getJobID());
            if (job == null) {
                System.out.println("Could not find job " + jobid);
            } else if (job.killTask(taskID, false)) {
                System.out.println("Killed task " + taskid);
                exitCode = 0;
            } else {
                System.out.println("Could not kill task " + taskid);
                exitCode = -1;
            }
        } else if (failTask) {
            TaskAttemptID taskID = TaskAttemptID.forName(taskid);
            Job job = getJob(taskID.getJobID());
            if (job == null) {
                System.out.println("Could not find job " + jobid);
            } else if (job.killTask(taskID, true)) {
                System.out.println("Killed task " + taskID + " by failing it");
                exitCode = 0;
            } else {
                System.out.println("Could not fail task " + taskid);
                exitCode = -1;
            }
        } else if (logs) {
            JobID jobID = JobID.forName(jobid);
            if (getJob(jobID) == null) {
                System.out.println("Could not find job " + jobid);
            } else {
                try {
                    TaskAttemptID taskAttemptID = TaskAttemptID.forName(taskid);
                    LogParams logParams = cluster.getLogParams(jobID, taskAttemptID);
                    LogCLIHelpers logDumper = new LogCLIHelpers();
                    logDumper.setConf(getConf());
                    exitCode = logDumper.dumpAContainersLogs(logParams.getApplicationId(), logParams.getContainerId(), logParams.getNodeId(), logParams.getOwner());
                } catch (IOException e) {
                    if (e instanceof RemoteException) {
                        throw e;
                    }
                    System.out.println(e.getMessage());
                }
            }
        } else if (downloadConfig) {
            Job job = getJob(JobID.forName(jobid));
            if (job == null) {
                System.out.println("Could not find job " + jobid);
            } else {
                String jobFile = job.getJobFile();
                if (jobFile == null || jobFile.isEmpty()) {
                    System.out.println("Config file for job " + jobFile + " could not be found.");
                } else {
                    Path configPath = new Path(jobFile);
                    FileSystem fs = FileSystem.get(getConf());
                    fs.copyToLocalFile(configPath, new Path(configOutFile));
                    exitCode = 0;
                }
            }
        }
    } catch (RemoteException re) {
        IOException unwrappedException = re.unwrapRemoteException();
        if (unwrappedException instanceof AccessControlException) {
            System.out.println(unwrappedException.getMessage());
        } else {
            throw re;
        }
    } finally {
        cluster.close();
    }
    return exitCode;
}
Also used : Path(org.apache.hadoop.fs.Path) TaskAttemptID(org.apache.hadoop.mapreduce.TaskAttemptID) JobPriority(org.apache.hadoop.mapreduce.JobPriority) AccessControlException(org.apache.hadoop.security.AccessControlException) IOException(java.io.IOException) LogParams(org.apache.hadoop.mapreduce.v2.LogParams) JobStatus(org.apache.hadoop.mapreduce.JobStatus) FileSystem(org.apache.hadoop.fs.FileSystem) LogCLIHelpers(org.apache.hadoop.yarn.logaggregation.LogCLIHelpers) Counters(org.apache.hadoop.mapreduce.Counters) Job(org.apache.hadoop.mapreduce.Job) RemoteException(org.apache.hadoop.ipc.RemoteException) JobConf(org.apache.hadoop.mapred.JobConf) JobID(org.apache.hadoop.mapreduce.JobID)
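
The run method above backs the "mapred job" command and can also be driven programmatically through org.apache.hadoop.util.ToolRunner; a minimal sketch, assuming a default Configuration (the argument values are illustrative):

public static void main(String[] args) throws Exception {
    // Equivalent to running "mapred job -list all" from the shell.
    int exitCode = ToolRunner.run(new Configuration(), new CLI(), new String[] { "-list", "all" });
    System.exit(exitCode);
}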

Example 9 with TaskType

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskType in project hadoop by apache.

From the class TestContainerLauncherImpl, method makeTaskAttemptId.

public static TaskAttemptId makeTaskAttemptId(long ts, int appId, int taskId, TaskType taskType, int id) {
    ApplicationId aID = ApplicationId.newInstance(ts, appId);
    JobId jID = MRBuilderUtils.newJobId(aID, id);
    TaskId tID = MRBuilderUtils.newTaskId(jID, taskId, taskType);
    return MRBuilderUtils.newTaskAttemptId(tID, id);
}
Also used : TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId)
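
A brief usage sketch of this helper (the numeric arguments are illustrative):

// Fabricates a MAP attempt: cluster timestamp 1234, app id 1, task 0, attempt 0.
TaskAttemptId mapAttempt = makeTaskAttemptId(1234L, 1, 0, TaskType.MAP, 0);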

Example 10 with TaskType

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskType in project hadoop by apache.

From the class DefaultSpeculator, method maybeScheduleASpeculation.

private int maybeScheduleASpeculation(TaskType type) {
    int successes = 0;
    long now = clock.getTime();
    ConcurrentMap<JobId, AtomicInteger> containerNeeds = type == TaskType.MAP ? mapContainerNeeds : reduceContainerNeeds;
    for (ConcurrentMap.Entry<JobId, AtomicInteger> jobEntry : containerNeeds.entrySet()) {
        // If the job still needs containers for its regular tasks, skip it; speculating now could take a container prematurely.
        if (jobEntry.getValue().get() > 0) {
            continue;
        }
        int numberSpeculationsAlready = 0;
        int numberRunningTasks = 0;
        // loop through the tasks of this type
        Job job = context.getJob(jobEntry.getKey());
        Map<TaskId, Task> tasks = job.getTasks(type);
        int numberAllowedSpeculativeTasks = (int) Math.max(minimumAllowedSpeculativeTasks, proportionTotalTasksSpeculatable * tasks.size());
        TaskId bestTaskID = null;
        long bestSpeculationValue = -1L;
        // TODO track the tasks that are potentially worth looking at
        for (Map.Entry<TaskId, Task> taskEntry : tasks.entrySet()) {
            long mySpeculationValue = speculationValue(taskEntry.getKey(), now);
            if (mySpeculationValue == ALREADY_SPECULATING) {
                ++numberSpeculationsAlready;
            }
            if (mySpeculationValue != NOT_RUNNING) {
                ++numberRunningTasks;
            }
            if (mySpeculationValue > bestSpeculationValue) {
                bestTaskID = taskEntry.getKey();
                bestSpeculationValue = mySpeculationValue;
            }
        }
        numberAllowedSpeculativeTasks = (int) Math.max(numberAllowedSpeculativeTasks, proportionRunningTasksSpeculatable * numberRunningTasks);
        // If we found a speculation target, fire it off
        if (bestTaskID != null && numberAllowedSpeculativeTasks > numberSpeculationsAlready) {
            addSpeculativeAttempt(bestTaskID);
            ++successes;
        }
    }
    return successes;
}
Also used : Task(org.apache.hadoop.mapreduce.v2.app.job.Task) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) ConcurrentMap(java.util.concurrent.ConcurrentMap) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) ConcurrentMap(java.util.concurrent.ConcurrentMap) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId)
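
The cap on concurrent speculative attempts combines a fixed floor with proportions of the total and currently running tasks. A worked sketch in the same shape as the code above, assuming defaults of 10, 0.01 and 0.1 for minimumAllowedSpeculativeTasks, proportionTotalTasksSpeculatable and proportionRunningTasksSpeculatable (the defaults and task counts here are assumptions for illustration):

int minimumAllowedSpeculativeTasks = 10;         // assumed default floor
double proportionTotalTasksSpeculatable = 0.01;  // assumed default
double proportionRunningTasksSpeculatable = 0.1; // assumed default
int totalTasks = 2000;
int runningTasks = 300;

// First bound: max(10, 0.01 * 2000) = 20.
int cap = (int) Math.max(minimumAllowedSpeculativeTasks, proportionTotalTasksSpeculatable * totalTasks);
// Second bound: max(20, 0.1 * 300) = 30.
cap = (int) Math.max(cap, proportionRunningTasksSpeculatable * runningTasks);
// A new speculative attempt is scheduled only while cap exceeds the number already speculating.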

Aggregations

TaskType (org.apache.hadoop.mapreduce.v2.api.records.TaskType): 15
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 8
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 8
Task (org.apache.hadoop.mapreduce.v2.app.job.Task): 8
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 7
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 5
Test (org.junit.Test): 5
HashMap (java.util.HashMap): 4
TaskInfo (org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo): 4
Map (java.util.Map): 3
Counters (org.apache.hadoop.mapreduce.Counters): 3
ClientResponse (com.sun.jersey.api.client.ClientResponse): 2
UniformInterfaceException (com.sun.jersey.api.client.UniformInterfaceException): 2
WebResource (com.sun.jersey.api.client.WebResource): 2
ArrayList (java.util.ArrayList): 2
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 2
GET (javax.ws.rs.GET): 2
Path (javax.ws.rs.Path): 2
Produces (javax.ws.rs.Produces): 2
Path (org.apache.hadoop.fs.Path): 2