Use of org.apache.hadoop.mapreduce.v2.api.records.JobState in project hadoop by Apache.
The class HsWebServices, method getJobs:
@GET
@Path("/mapreduce/jobs")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public JobsInfo getJobs(@QueryParam("user") String userQuery,
    @QueryParam("limit") String count,
    @QueryParam("state") String stateQuery,
    @QueryParam("queue") String queueQuery,
    @QueryParam("startedTimeBegin") String startedBegin,
    @QueryParam("startedTimeEnd") String startedEnd,
    @QueryParam("finishedTimeBegin") String finishBegin,
    @QueryParam("finishedTimeEnd") String finishEnd) {
  Long countParam = null;
  init();
  if (count != null && !count.isEmpty()) {
    try {
      countParam = Long.parseLong(count);
    } catch (NumberFormatException e) {
      throw new BadRequestException(e.getMessage());
    }
    if (countParam <= 0) {
      throw new BadRequestException("limit value must be greater than 0");
    }
  }
  Long sBegin = null;
  if (startedBegin != null && !startedBegin.isEmpty()) {
    try {
      sBegin = Long.parseLong(startedBegin);
    } catch (NumberFormatException e) {
      throw new BadRequestException("Invalid number format: " + e.getMessage());
    }
    if (sBegin < 0) {
      throw new BadRequestException("startedTimeBegin must be greater than or equal to 0");
    }
  }
  Long sEnd = null;
  if (startedEnd != null && !startedEnd.isEmpty()) {
    try {
      sEnd = Long.parseLong(startedEnd);
    } catch (NumberFormatException e) {
      throw new BadRequestException("Invalid number format: " + e.getMessage());
    }
    if (sEnd < 0) {
      throw new BadRequestException("startedTimeEnd must be greater than or equal to 0");
    }
  }
  if (sBegin != null && sEnd != null && sBegin > sEnd) {
    throw new BadRequestException("startedTimeEnd must be greater than startedTimeBegin");
  }
  Long fBegin = null;
  if (finishBegin != null && !finishBegin.isEmpty()) {
    try {
      fBegin = Long.parseLong(finishBegin);
    } catch (NumberFormatException e) {
      throw new BadRequestException("Invalid number format: " + e.getMessage());
    }
    if (fBegin < 0) {
      throw new BadRequestException("finishedTimeBegin must be greater than or equal to 0");
    }
  }
  Long fEnd = null;
  if (finishEnd != null && !finishEnd.isEmpty()) {
    try {
      fEnd = Long.parseLong(finishEnd);
    } catch (NumberFormatException e) {
      throw new BadRequestException("Invalid number format: " + e.getMessage());
    }
    if (fEnd < 0) {
      throw new BadRequestException("finishedTimeEnd must be greater than or equal to 0");
    }
  }
  if (fBegin != null && fEnd != null && fBegin > fEnd) {
    throw new BadRequestException("finishedTimeEnd must be greater than finishedTimeBegin");
  }
  JobState jobState = null;
  if (stateQuery != null) {
    // valueOf throws IllegalArgumentException for an unknown state name
    jobState = JobState.valueOf(stateQuery);
  }
  return ctx.getPartialJobs(0L, countParam, userQuery, queueQuery, sBegin,
      sEnd, fBegin, fEnd, jobState);
}
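A minimal sketch of exercising this endpoint over HTTP. The /ws/v1/history/mapreduce/jobs path is the JobHistory Server REST entry point that this method backs; the host and port (localhost:19888) and the query values are assumptions for illustration.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class JobsQueryExample {
  public static void main(String[] args) throws Exception {
    HttpClient client = HttpClient.newHttpClient();
    // Request at most 10 SUCCEEDED jobs; a non-numeric or non-positive
    // limit would trip the BadRequestException paths shown above.
    HttpRequest request = HttpRequest.newBuilder()
        .uri(URI.create("http://localhost:19888/ws/v1/history/mapreduce/jobs"
            + "?state=SUCCEEDED&limit=10"))
        .header("Accept", "application/json")
        .build();
    HttpResponse<String> response =
        client.send(request, HttpResponse.BodyHandlers.ofString());
    System.out.println(response.statusCode());
    System.out.println(response.body());
  }
}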
Use of org.apache.hadoop.mapreduce.v2.api.records.JobState in project hadoop by Apache.
The class MockJobs, method newJob:
public static Job newJob(ApplicationId appID, int i, int n, int m,
    Path confFile, boolean hasFailedTasks) {
  final JobId id = newJobID(appID, i);
  final String name = newJobName();
  final JobReport report = newJobReport(id);
  final Map<TaskId, Task> tasks = newTasks(id, n, m, hasFailedTasks);
  final TaskCount taskCount = getTaskCount(tasks.values());
  final Counters counters = getCounters(tasks.values());
  final Path configFile = confFile;
  final Configuration conf = new Configuration();
  conf.set(JobACL.VIEW_JOB.getAclName(), "testuser");
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
  JobACLsManager aclsManager = new JobACLsManager(conf);
  final Map<JobACL, AccessControlList> jobACLs =
      aclsManager.constructJobACLs(conf);
  return new Job() {

    @Override
    public JobId getID() {
      return id;
    }

    @Override
    public String getName() {
      return name;
    }

    @Override
    public JobState getState() {
      return report.getJobState();
    }

    @Override
    public JobReport getReport() {
      return report;
    }

    @Override
    public float getProgress() {
      return 0;
    }

    @Override
    public Counters getAllCounters() {
      return counters;
    }

    @Override
    public Map<TaskId, Task> getTasks() {
      return tasks;
    }

    @Override
    public Task getTask(TaskId taskID) {
      return tasks.get(taskID);
    }

    @Override
    public int getTotalMaps() {
      return taskCount.maps;
    }

    @Override
    public int getTotalReduces() {
      return taskCount.reduces;
    }

    @Override
    public int getCompletedMaps() {
      return taskCount.completedMaps;
    }

    @Override
    public int getCompletedReduces() {
      return taskCount.completedReduces;
    }

    @Override
    public boolean isUber() {
      return false;
    }

    @Override
    public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(
        int fromEventId, int maxEvents) {
      return null;
    }

    @Override
    public TaskCompletionEvent[] getMapAttemptCompletionEvents(
        int startIndex, int maxEvents) {
      return null;
    }

    @Override
    public Map<TaskId, Task> getTasks(TaskType taskType) {
      throw new UnsupportedOperationException("Not supported yet.");
    }

    @Override
    public List<String> getDiagnostics() {
      return Collections.<String>emptyList();
    }

    @Override
    public boolean checkAccess(UserGroupInformation callerUGI,
        JobACL jobOperation) {
      return true;
    }

    @Override
    public String getUserName() {
      return "mock";
    }

    @Override
    public String getQueueName() {
      return "mockqueue";
    }

    @Override
    public Path getConfFile() {
      return configFile;
    }

    @Override
    public Map<JobACL, AccessControlList> getJobACLs() {
      return jobACLs;
    }

    @Override
    public List<AMInfo> getAMInfos() {
      List<AMInfo> amInfoList = new LinkedList<AMInfo>();
      amInfoList.add(createAMInfo(1));
      amInfoList.add(createAMInfo(2));
      return amInfoList;
    }

    @Override
    public Configuration loadConfFile() throws IOException {
      FileContext fc = FileContext.getFileContext(configFile.toUri(), conf);
      Configuration jobConf = new Configuration(false);
      jobConf.addResource(fc.open(configFile), configFile.toString());
      return jobConf;
    }

    @Override
    public void setQueueName(String queueName) {
      // do nothing
    }

    @Override
    public void setJobPriority(Priority priority) {
      // do nothing
    }
  };
}
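A hypothetical test sketch using this factory. The JUnit asserts and the ApplicationId/Path imports are assumed, and the task counts (4 maps, 2 reduces) are illustrative arguments; the assertions rely only on behavior visible in the anonymous Job above.

@Test
public void exercisesMockJob() {
  ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
  Job job = MockJobs.newJob(appId, 1, 4, 2, new Path("/tmp/job.xml"), false);
  assertEquals("mock", job.getUserName());
  assertEquals("mockqueue", job.getQueueName());
  // checkAccess is hard-wired to true in the mock
  assertTrue(job.checkAccess(null, JobACL.VIEW_JOB));
  assertFalse(job.isUber());
}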
Use of org.apache.hadoop.mapreduce.v2.api.records.JobState in project hadoop by Apache.
The class CachedHistoryStorage, method getPartialJobs:
public static JobsInfo getPartialJobs(Collection<Job> jobs, Long offset,
    Long count, String user, String queue, Long sBegin, Long sEnd,
    Long fBegin, Long fEnd, JobState jobState) {
  JobsInfo allJobs = new JobsInfo();
  if (sBegin == null || sBegin < 0) {
    sBegin = 0L;
  }
  if (sEnd == null) {
    sEnd = Long.MAX_VALUE;
  }
  if (fBegin == null || fBegin < 0) {
    fBegin = 0L;
  }
  if (fEnd == null) {
    fEnd = Long.MAX_VALUE;
  }
  if (offset == null || offset < 0) {
    offset = 0L;
  }
  if (count == null) {
    count = Long.MAX_VALUE;
  }
  if (offset > jobs.size()) {
    return allJobs;
  }
  long at = 0;
  long end = offset + count - 1;
  if (end < 0) {
    // due to overflow
    end = Long.MAX_VALUE;
  }
  for (Job job : jobs) {
    if (at > end) {
      break;
    }
    // can't really validate queue is a valid one since queues could change
    if (queue != null && !queue.isEmpty()) {
      if (!job.getQueueName().equals(queue)) {
        continue;
      }
    }
    if (user != null && !user.isEmpty()) {
      if (!job.getUserName().equals(user)) {
        continue;
      }
    }
    JobReport report = job.getReport();
    if (report.getStartTime() < sBegin || report.getStartTime() > sEnd) {
      continue;
    }
    if (report.getFinishTime() < fBegin || report.getFinishTime() > fEnd) {
      continue;
    }
    if (jobState != null && jobState != report.getJobState()) {
      continue;
    }
    at++;
    if ((at - 1) < offset) {
      continue;
    }
    JobInfo jobInfo = new JobInfo(job);
    allJobs.add(jobInfo);
  }
  return allJobs;
}
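A hypothetical call illustrating the paging arithmetic above: with offset=20 and count=20 the method skips the first 20 matches and returns matches 21 through 40. The jobs collection, queue name, and state filter are assumptions for the example.

JobsInfo secondPage = CachedHistoryStorage.getPartialJobs(
    jobs, 20L, 20L, null, "default", null, null, null, null,
    JobState.SUCCEEDED);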
Use of org.apache.hadoop.mapreduce.v2.api.records.JobState in project hadoop by Apache.
The class TestJobHistory, method testRefreshLoadedJobCacheUnSupportedOperation:
@Test
public void testRefreshLoadedJobCacheUnSupportedOperation() {
  jobHistory = spy(new JobHistory());
  HistoryStorage storage = new HistoryStorage() {

    @Override
    public void setHistoryFileManager(HistoryFileManager hsManager) {
      // unused in this test
    }

    @Override
    public JobsInfo getPartialJobs(Long offset, Long count, String user,
        String queue, Long sBegin, Long sEnd, Long fBegin, Long fEnd,
        JobState jobState) {
      // unused in this test
      return null;
    }

    @Override
    public Job getFullJob(JobId jobId) {
      // unused in this test
      return null;
    }

    @Override
    public Map<JobId, Job> getAllPartialJobs() {
      // unused in this test
      return null;
    }
  };
  doReturn(storage).when(jobHistory).createHistoryStorage();
  jobHistory.init(new Configuration());
  jobHistory.start();
  Throwable th = null;
  try {
    jobHistory.refreshLoadedJobCache();
  } catch (Exception e) {
    th = e;
  }
  assertTrue(th instanceof UnsupportedOperationException);
}
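On JUnit 4.13 or later the same expectation can be written more compactly with assertThrows; a sketch, assuming the same jobHistory setup as the test above:

assertThrows(UnsupportedOperationException.class,
    () -> jobHistory.refreshLoadedJobCache());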
Use of org.apache.hadoop.mapreduce.v2.api.records.JobState in project hadoop by Apache.
The class NotRunningJob, method getJobReport:
@Override
public GetJobReportResponse getJobReport(GetJobReportRequest request)
    throws IOException {
  JobReport jobReport = recordFactory.newRecordInstance(JobReport.class);
  jobReport.setJobId(request.getJobId());
  jobReport.setJobState(jobState);
  jobReport.setUser(applicationReport.getUser());
  jobReport.setStartTime(applicationReport.getStartTime());
  YarnApplicationState state = applicationReport.getYarnApplicationState();
  if (state == YarnApplicationState.KILLED
      || state == YarnApplicationState.FAILED
      || state == YarnApplicationState.FINISHED) {
    jobReport.setDiagnostics(applicationReport.getDiagnostics());
  }
  jobReport.setJobName(applicationReport.getName());
  jobReport.setTrackingUrl(applicationReport.getTrackingUrl());
  jobReport.setFinishTime(applicationReport.getFinishTime());
  GetJobReportResponse resp =
      recordFactory.newRecordInstance(GetJobReportResponse.class);
  resp.setJobReport(jobReport);
  return resp;
}
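A hypothetical caller sketch; notRunningJob, jobId, and recordFactory are assumed to come from the surrounding context (in Hadoop, ClientServiceDelegate constructs NotRunningJob instances):

GetJobReportRequest request =
    recordFactory.newRecordInstance(GetJobReportRequest.class);
request.setJobId(jobId);
GetJobReportResponse response = notRunningJob.getJobReport(request);
System.out.println(response.getJobReport().getJobState());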