use of org.apache.hadoop.mapred.JobClient in project incubator-systemml by apache.
the class InfrastructureAnalyzer method getClusterUtilization.
/**
 * Gets the fraction of running map/reduce tasks to existing
 * map/reduce task slots.
 *
 * NOTE: on YARN the number of slots is a spurious indicator
 * because containers are purely scheduled based on memory.
 *
 * @param mapOnly if true, only look at map tasks
 * @return cluster utilization (current / capacity)
 * @throws IOException if IOException occurs
 */
public static double getClusterUtilization(boolean mapOnly) throws IOException {
    //in local mode, the cluster utilization is always 0.0
    JobConf job = ConfigurationManager.getCachedJobConf();
    JobClient client = new JobClient(job);
    ClusterStatus stat = client.getClusterStatus();
    double ret = 0.0;
    if (stat != null) { //if in cluster mode
        if (mapOnly) {
            int capacity = stat.getMaxMapTasks();
            int current = stat.getMapTasks();
            ret = ((double) current) / capacity;
        } else {
            int capacity = stat.getMaxMapTasks() + stat.getMaxReduceTasks();
            int current = stat.getMapTasks() + stat.getReduceTasks();
            ret = ((double) current) / capacity;
        }
    }
    return ret;
}
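As a usage illustration, here is a minimal sketch of gating work on the utilization value. The UtilizationCheck class, the clusterBusy helper, and the 0.9 threshold are all hypothetical; only getClusterUtilization itself comes from SystemML:

import java.io.IOException;

public class UtilizationCheck {
    // illustrative threshold, not from the original source
    private static final double MAX_UTILIZATION = 0.9;

    // returns true if more than 90% of map slots are currently occupied
    public static boolean clusterBusy() throws IOException {
        return InfrastructureAnalyzer.getClusterUtilization(true) > MAX_UTILIZATION;
    }
}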
use of org.apache.hadoop.mapred.JobClient in project incubator-systemml by apache.
the class MRJobConfiguration method setNumReducers.
public static void setNumReducers(JobConf job, long numReducerGroups, int numFromCompiler) throws IOException {
    JobClient client = new JobClient(job);
    int n = client.getClusterStatus().getMaxReduceTasks();
    //correct the max number of reducers on YARN clusters
    if (InfrastructureAnalyzer.isYarnEnabled())
        n = (int) Math.max(n, YarnClusterAnalyzer.getNumCores() / 2);
    n = Math.min(n, ConfigurationManager.getNumReducers());
    n = Math.min(n, numFromCompiler);
    if (numReducerGroups > 0)
        n = (int) Math.min(n, numReducerGroups);
    job.setNumReduceTasks(n);
}
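A hedged usage sketch follows; the group count and compiler hint are illustrative values, not taken from the SystemML compiler:

// the final reducer count is bounded by cluster capacity, the configured
// maximum, the compiler's request, and (if positive) the number of groups
JobConf job = new JobConf();
MRJobConfiguration.setNumReducers(job, 100 /* numReducerGroups */, 10 /* numFromCompiler */);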
use of org.apache.hadoop.mapred.JobClient in project voldemort by voldemort.
the class AbstractHadoopJob method run.
public void run(JobConf conf) throws Exception {
    _runningJob = new JobClient(conf).submitJob(conf);
    info("See " + _runningJob.getTrackingURL() + " for details.");
    _runningJob.waitForCompletion();
    if (!_runningJob.isSuccessful()) {
        throw new Exception("Hadoop job:" + getId() + " failed!");
    }
    // dump all counters
    Counters counters = _runningJob.getCounters();
    for (String groupName : counters.getGroupNames()) {
        Counters.Group group = counters.getGroup(groupName);
        info("Group: " + group.getDisplayName());
        for (Counter counter : group)
            info(counter.getDisplayName() + ":\t" + counter.getValue());
    }
}
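A minimal sketch of preparing a JobConf for this run method. The mapper/reducer classes and paths are hypothetical placeholders; FileInputFormat/FileOutputFormat are the org.apache.hadoop.mapred variants and Path is org.apache.hadoop.fs.Path:

JobConf conf = new JobConf();
conf.setJobName("example-job");
conf.setMapperClass(ExampleMapper.class);     // hypothetical mapper
conf.setReducerClass(ExampleReducer.class);   // hypothetical reducer
FileInputFormat.setInputPaths(conf, new Path("/in"));   // illustrative input path
FileOutputFormat.setOutputPath(conf, new Path("/out")); // illustrative output path
run(conf); // submits the job, blocks until completion, and logs all counters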
use of org.apache.hadoop.mapred.JobClient in project compiler by boalang.
the class BoaOutputCommitter method abortJob.
@Override
public void abortJob(JobContext context, JobStatus.State runState) throws java.io.IOException {
    super.abortJob(context, runState);
    final JobClient jobClient = new JobClient(new JobConf(context.getConfiguration()));
    final RunningJob job = jobClient.getJob((org.apache.hadoop.mapred.JobID) JobID.forName(context.getConfiguration().get("mapred.job.id")));
    String diag = "";
    for (final TaskCompletionEvent event : job.getTaskCompletionEvents(0))
        switch (event.getTaskStatus()) {
            case SUCCEEDED:
                break;
            case FAILED:
            case KILLED:
            case OBSOLETE:
            case TIPFAILED:
                diag += "Diagnostics for: " + event.getTaskTrackerHttp() + "\n";
                for (final String s : job.getTaskDiagnostics(event.getTaskAttemptId()))
                    diag += s + "\n";
                diag += "\n";
                break;
        }
    updateStatus(diag, context.getConfiguration().getInt("boa.hadoop.jobid", 0));
}
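One caveat: RunningJob.getTaskCompletionEvents(0) may return only the first batch of events, so for large jobs the diagnostics above can be incomplete. A hedged sketch of paging through all batches:

int from = 0;
TaskCompletionEvent[] events;
do {
    events = job.getTaskCompletionEvents(from);
    for (final TaskCompletionEvent event : events) {
        // inspect event.getTaskStatus() as in abortJob above
    }
    from += events.length;
} while (events.length > 0);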
use of org.apache.hadoop.mapred.JobClient in project cdap by caskdata.
the class MRJobClient method getMRJobInfo.
/**
 * @param runId for which information will be returned.
 * @return a {@link MRJobInfo} containing information about a particular MapReduce program run.
 * @throws IOException if there is failure to communicate through the JobClient.
 * @throws NotFoundException if a Job with the given runId is not found.
 */
public MRJobInfo getMRJobInfo(Id.Run runId) throws IOException, NotFoundException {
    Preconditions.checkArgument(ProgramType.MAPREDUCE.equals(runId.getProgram().getType()));
    JobClient jobClient = new JobClient(hConf);
    JobStatus[] jobs = jobClient.getAllJobs();
    JobStatus thisJob = findJobForRunId(jobs, runId.toEntityId());
    RunningJob runningJob = jobClient.getJob(thisJob.getJobID());
    if (runningJob == null) {
        throw new IllegalStateException(String.format(
                "JobClient returned null for RunId: '%s', JobId: '%s'", runId, thisJob.getJobID()));
    }
    Counters counters = runningJob.getCounters();
    TaskReport[] mapTaskReports = jobClient.getMapTaskReports(thisJob.getJobID());
    TaskReport[] reduceTaskReports = jobClient.getReduceTaskReports(thisJob.getJobID());
    return new MRJobInfo(runningJob.mapProgress(), runningJob.reduceProgress(),
            groupToMap(counters.getGroup(TaskCounter.class.getName())),
            toMRTaskInfos(mapTaskReports), toMRTaskInfos(reduceTaskReports), true);
}
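For comparison, a self-contained sketch of the same JobClient pattern: look up a RunningJob by id and poll its progress. The job id string is illustrative, and JobID here is org.apache.hadoop.mapred.JobID:

JobClient jobClient = new JobClient(new JobConf()); // throws IOException
RunningJob job = jobClient.getJob(JobID.forName("job_201301010000_0001"));
if (job != null) {
    System.out.println("map progress:    " + job.mapProgress());    // 0.0f - 1.0f
    System.out.println("reduce progress: " + job.reduceProgress()); // 0.0f - 1.0f
}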