Use of org.apache.hadoop.mapred.JobStatus in project cdap by caskdata.
In class MRJobClient, method getMRJobInfo:
/**
* @param runId the program run for which information will be returned.
* @return a {@link MRJobInfo} containing information about a particular MapReduce program run.
* @throws IOException if there is failure to communicate through the JobClient.
* @throws NotFoundException if a Job with the given runId is not found.
*/
public MRJobInfo getMRJobInfo(Id.Run runId) throws IOException, NotFoundException {
  Preconditions.checkArgument(ProgramType.MAPREDUCE.equals(runId.getProgram().getType()));
  JobClient jobClient = new JobClient(hConf);
  JobStatus[] jobs = jobClient.getAllJobs();
  JobStatus thisJob = findJobForRunId(jobs, runId.toEntityId());
  RunningJob runningJob = jobClient.getJob(thisJob.getJobID());
  if (runningJob == null) {
    throw new IllegalStateException(String.format("JobClient returned null for RunId: '%s', JobId: '%s'",
                                                  runId, thisJob.getJobID()));
  }
  Counters counters = runningJob.getCounters();
  TaskReport[] mapTaskReports = jobClient.getMapTaskReports(thisJob.getJobID());
  TaskReport[] reduceTaskReports = jobClient.getReduceTaskReports(thisJob.getJobID());
  return new MRJobInfo(runningJob.mapProgress(), runningJob.reduceProgress(),
                       groupToMap(counters.getGroup(TaskCounter.class.getName())),
                       toMRTaskInfos(mapTaskReports), toMRTaskInfos(reduceTaskReports), true);
}
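The helpers findJobForRunId, groupToMap, and toMRTaskInfos are CDAP-internal and not shown here. As a minimal, self-contained sketch of the same JobClient/JobStatus calls, assuming only the stock Hadoop mapred API (the class name and output format below are illustrative, not part of CDAP):

import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobStatus;

public class JobStatusLister {
  public static void main(String[] args) throws IOException {
    // Reads cluster settings from the default configuration on the classpath.
    JobClient jobClient = new JobClient(new JobConf());
    JobStatus[] jobs = jobClient.getAllJobs();
    for (JobStatus job : jobs) {
      System.out.printf("%s  %s  map %.0f%%  reduce %.0f%%%n",
          job.getJobID(),
          JobStatus.getJobRunState(job.getRunState()),
          job.mapProgress() * 100,
          job.reduceProgress() * 100);
    }
    jobClient.close();
  }
}

JobStatus.getJobRunState(int) translates the integer run state into a readable name, which is convenient when scanning getAllJobs() output the way getMRJobInfo does before narrowing to a single run.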
Use of org.apache.hadoop.mapred.JobStatus in project hive by apache.
In class StatusDelegator, method makeStatus:
static QueueStatusBean makeStatus(WebHCatJTShim tracker, JobID jobid, JobState state)
    throws BadParam, IOException {
  JobStatus status = tracker.getJobStatus(jobid);
  JobProfile profile = tracker.getJobProfile(jobid);
  if (status == null || profile == null) {
    // No such job.
    throw new BadParam("Could not find job " + jobid);
  }
  return new QueueStatusBean(state, status, profile);
}
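WebHCatJTShim and QueueStatusBean are Hive/WebHCat types; the essential pattern is "look the job up, fail fast on null". A hypothetical equivalent against a plain JobClient (the class name, method name, and choice of exception are assumptions for illustration):

import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.RunningJob;

public final class JobLookup {
  private JobLookup() {
  }

  // Mirrors the null check in makeStatus(): getJob() returns null for an
  // unknown id rather than throwing, so the caller must handle that case.
  static RunningJob requireJob(JobClient client, JobID jobid) throws IOException {
    RunningJob job = client.getJob(jobid);
    if (job == null) {
      throw new IOException("Could not find job " + jobid);
    }
    return job;
  }
}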
Use of org.apache.hadoop.mapred.JobStatus in project hive by apache.
In class ListDelegator, method listJobs:
/*
 * Gets the list of job ids and calls getJobStatus to get the status for each id.
 */
public List<JobItemBean> listJobs(String user, boolean showall, String jobId,
                                  int numRecords, boolean showDetails)
    throws NotAuthorizedException, BadParam, IOException, InterruptedException {
  UserGroupInformation ugi = UgiFactory.getUgi(user);
  WebHCatJTShim tracker = null;
  ArrayList<String> ids = new ArrayList<String>();
  try {
    tracker = ShimLoader.getHadoopShims().getWebHCatShim(appConf, ugi);
    JobStatus[] jobs = tracker.getAllJobs();
    if (jobs != null) {
      for (JobStatus job : jobs) {
        String id = job.getJobID().toString();
        if (showall || user.equals(job.getUsername())) {
          ids.add(id);
        }
      }
    }
  } catch (IllegalStateException e) {
    throw new BadParam(e.getMessage());
  } finally {
    if (tracker != null) {
      tracker.close();
    }
  }
  return getJobStatus(ids, user, showall, jobId, numRecords, showDetails);
}
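listJobs() only collects each job id as a string before delegating to getJobStatus. If such strings later need to become mapred JobID objects again, JobID.forName handles the parse; a small hypothetical round trip (the sample id below is made up, in the standard job_<cluster>_<sequence> format):

import org.apache.hadoop.mapred.JobID;

public class JobIdRoundTrip {
  public static void main(String[] args) {
    String id = "job_1500000000000_0001"; // made-up id for illustration
    // forName() throws IllegalArgumentException if the string is malformed.
    JobID jobId = JobID.forName(id);
    System.out.println(jobId.getJtIdentifier() + " / " + jobId.getId());
  }
}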
Use of org.apache.hadoop.mapred.JobStatus in project hadoop by apache.
In class DummySocketFactory, method testSocketFactory:
/**
* Check that we can reach a NameNode or Resource Manager using a specific
* socket factory
*/
@Test
public void testSocketFactory() throws IOException {
  // Create a standard mini-cluster
  Configuration sconf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(sconf).numDataNodes(1).build();
  final int nameNodePort = cluster.getNameNodePort();
  // Get a reference to its DFS directly
  FileSystem fs = cluster.getFileSystem();
  Assert.assertTrue(fs instanceof DistributedFileSystem);
  DistributedFileSystem directDfs = (DistributedFileSystem) fs;
  Configuration cconf = getCustomSocketConfigs(nameNodePort);
  fs = FileSystem.get(cconf);
  Assert.assertTrue(fs instanceof DistributedFileSystem);
  DistributedFileSystem dfs = (DistributedFileSystem) fs;
  JobClient client = null;
  MiniMRYarnCluster miniMRYarnCluster = null;
  try {
    // This will test RPC to the NameNode only.
    // could we test Client-DataNode connections?
    Path filePath = new Path("/dir");
    Assert.assertFalse(directDfs.exists(filePath));
    Assert.assertFalse(dfs.exists(filePath));
    directDfs.mkdirs(filePath);
    Assert.assertTrue(directDfs.exists(filePath));
    Assert.assertTrue(dfs.exists(filePath));
    // This will test RPC to a Resource Manager
    fs = FileSystem.get(sconf);
    JobConf jobConf = new JobConf();
    FileSystem.setDefaultUri(jobConf, fs.getUri().toString());
    miniMRYarnCluster = initAndStartMiniMRYarnCluster(jobConf);
    JobConf jconf = new JobConf(miniMRYarnCluster.getConfig());
    jconf.set("hadoop.rpc.socket.factory.class.default",
              "org.apache.hadoop.ipc.DummySocketFactory");
    jconf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
    String rmAddress = jconf.get(YarnConfiguration.RM_ADDRESS);
    String[] split = rmAddress.split(":");
    jconf.set(YarnConfiguration.RM_ADDRESS,
              split[0] + ':' + (Integer.parseInt(split[1]) + 10));
    client = new JobClient(jconf);
    JobStatus[] jobs = client.jobsToComplete();
    Assert.assertTrue(jobs.length == 0);
  } finally {
    closeClient(client);
    closeDfs(dfs);
    closeDfs(directDfs);
    stopMiniMRYarnCluster(miniMRYarnCluster);
    shutdownDFSCluster(cluster);
  }
}
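The test wires "org.apache.hadoop.ipc.DummySocketFactory" into hadoop.rpc.socket.factory.class.default, but that class itself is not shown in this snippet. A minimal sketch of what such a factory could look like, assuming it extends Hadoop's StandardSocketFactory and only records that RPC actually used it (the actual class in the Hadoop test sources may differ):

import java.io.IOException;
import java.net.Socket;

import org.apache.hadoop.net.StandardSocketFactory;

public class DummySocketFactory extends StandardSocketFactory {
  private static volatile boolean used = false;

  @Override
  public Socket createSocket() throws IOException {
    // Flag the call so a test can assert the custom factory was picked up.
    used = true;
    return super.createSocket();
  }

  public static boolean wasUsed() {
    return used;
  }
}

Hadoop instantiates the configured factory reflectively, so a public no-arg constructor (inherited here) is required.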