use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest in project hadoop by apache.
the class TestGetApplicationsRequest method testGetApplicationsRequest.
@Test
public void testGetApplicationsRequest() {
  GetApplicationsRequest request = GetApplicationsRequest.newInstance();
  EnumSet<YarnApplicationState> appStates =
      EnumSet.of(YarnApplicationState.ACCEPTED);
  request.setApplicationStates(appStates);
  Set<String> tags = new HashSet<String>();
  tags.add("tag1");
  request.setApplicationTags(tags);
  Set<String> types = new HashSet<String>();
  types.add("type1");
  request.setApplicationTypes(types);
  long startBegin = System.currentTimeMillis();
  long startEnd = System.currentTimeMillis() + 1;
  request.setStartRange(startBegin, startEnd);
  long finishBegin = System.currentTimeMillis() + 2;
  long finishEnd = System.currentTimeMillis() + 3;
  request.setFinishRange(finishBegin, finishEnd);
  long limit = 100L;
  request.setLimit(limit);
  Set<String> queues = new HashSet<String>();
  queues.add("queue1");
  request.setQueues(queues);
  Set<String> users = new HashSet<String>();
  users.add("user1");
  request.setUsers(users);
  ApplicationsRequestScope scope = ApplicationsRequestScope.ALL;
  request.setScope(scope);
  // Round-trip the request through its protobuf representation.
  GetApplicationsRequest requestFromProto = new GetApplicationsRequestPBImpl(
      ((GetApplicationsRequestPBImpl) request).getProto());
  // Verify the whole record equals the original record.
  Assert.assertEquals(requestFromProto, request);
  // Verify every property matches the original request.
  Assert.assertEquals(
      "ApplicationStates from proto is not the same as in the original request",
      requestFromProto.getApplicationStates(), appStates);
  Assert.assertEquals(
      "ApplicationTags from proto is not the same as in the original request",
      requestFromProto.getApplicationTags(), tags);
  Assert.assertEquals(
      "ApplicationTypes from proto is not the same as in the original request",
      requestFromProto.getApplicationTypes(), types);
  Assert.assertEquals(
      "StartRange from proto is not the same as in the original request",
      requestFromProto.getStartRange(), new LongRange(startBegin, startEnd));
  Assert.assertEquals(
      "FinishRange from proto is not the same as in the original request",
      requestFromProto.getFinishRange(), new LongRange(finishBegin, finishEnd));
  Assert.assertEquals(
      "Limit from proto is not the same as in the original request",
      requestFromProto.getLimit(), limit);
  Assert.assertEquals(
      "Queues from proto is not the same as in the original request",
      requestFromProto.getQueues(), queues);
  Assert.assertEquals(
      "Users from proto is not the same as in the original request",
      requestFromProto.getUsers(), users);
}
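The conversion at the end of this test is the same record-to-proto round trip the RPC layer performs. Below is a minimal standalone sketch of that pattern, using only the yarn-api record and the yarn-common GetApplicationsRequestPBImpl shown above; the class name and the roundTrip helper are made up for illustration.

import java.util.EnumSet;

import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsRequestPBImpl;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;

public class GetApplicationsRequestRoundTrip {

  // Hypothetical helper: rebuild a request record from its protobuf form,
  // exactly as the test above does inline.
  static GetApplicationsRequest roundTrip(GetApplicationsRequest request) {
    return new GetApplicationsRequestPBImpl(
        ((GetApplicationsRequestPBImpl) request).getProto());
  }

  public static void main(String[] args) {
    GetApplicationsRequest request = GetApplicationsRequest.newInstance();
    request.setApplicationStates(EnumSet.of(YarnApplicationState.RUNNING));
    request.setLimit(10);

    GetApplicationsRequest copy = roundTrip(request);
    // The rebuilt record carries the same filters as the original.
    System.out.println(copy.getApplicationStates() + " limit=" + copy.getLimit());
  }
}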
use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest in project hadoop by apache.
the class AppsBlock method fetchData.
protected void fetchData() throws YarnException, IOException,
    InterruptedException {
  // Parse the requested application states from the query parameter.
  reqAppStates = EnumSet.noneOf(YarnApplicationState.class);
  String reqStateString = $(APP_STATE);
  if (reqStateString != null && !reqStateString.isEmpty()) {
    String[] appStateStrings = reqStateString.split(",");
    for (String stateString : appStateStrings) {
      reqAppStates.add(YarnApplicationState.valueOf(stateString.trim()));
    }
  }
  callerUGI = getCallerUGI();
  final GetApplicationsRequest request =
      GetApplicationsRequest.newInstance(reqAppStates);
  String appsNumStr = $(APPS_NUM);
  if (appsNumStr != null && !appsNumStr.isEmpty()) {
    long appsNum = Long.parseLong(appsNumStr);
    request.setLimit(appsNum);
  }
  // Validate and apply the started-time range filter.
  String appStartedTimeBeginStr = $(APP_START_TIME_BEGIN);
  long appStartedTimeBegin = 0;
  if (appStartedTimeBeginStr != null && !appStartedTimeBeginStr.isEmpty()) {
    appStartedTimeBegin = Long.parseLong(appStartedTimeBeginStr);
    if (appStartedTimeBegin < 0) {
      throw new BadRequestException(
          "app.started-time.begin must be greater than or equal to 0");
    }
  }
  String appStartedTimeEndStr = $(APP_START_TIME_END);
  long appStartedTimeEnd = Long.MAX_VALUE;
  if (appStartedTimeEndStr != null && !appStartedTimeEndStr.isEmpty()) {
    appStartedTimeEnd = Long.parseLong(appStartedTimeEndStr);
    if (appStartedTimeEnd < 0) {
      throw new BadRequestException(
          "app.started-time.end must be greater than or equal to 0");
    }
  }
  if (appStartedTimeBegin > appStartedTimeEnd) {
    throw new BadRequestException(
        "app.started-time.end must be greater than app.started-time.begin");
  }
  request.setStartRange(new LongRange(appStartedTimeBegin, appStartedTimeEnd));
  // Fetch the application reports, impersonating the caller if one is known.
  if (callerUGI == null) {
    appReports = appBaseProt.getApplications(request).getApplicationList();
  } else {
    appReports = callerUGI.doAs(
        new PrivilegedExceptionAction<Collection<ApplicationReport>>() {
          @Override
          public Collection<ApplicationReport> run() throws Exception {
            return appBaseProt.getApplications(request).getApplicationList();
          }
        });
  }
}
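fetchData() validates the app.started-time.begin/end pair inline before building the LongRange. Below is a minimal sketch of that validation factored into a reusable helper; the class and method names are hypothetical, and LongRange is the commons-lang 2 type this version of the API accepts in setStartRange(LongRange).

import org.apache.commons.lang.math.LongRange;

// Hypothetical helper, not part of AppsBlock: turn an optional begin/end
// parameter pair (as fetchData() reads for app.started-time.*) into the
// LongRange passed to GetApplicationsRequest.setStartRange(LongRange).
final class TimeRangeParam {

  static LongRange parse(String beginStr, String endStr) {
    long begin = (beginStr == null || beginStr.isEmpty())
        ? 0L : Long.parseLong(beginStr);
    long end = (endStr == null || endStr.isEmpty())
        ? Long.MAX_VALUE : Long.parseLong(endStr);
    if (begin < 0 || end < 0) {
      throw new IllegalArgumentException("time values must be non-negative");
    }
    if (begin > end) {
      throw new IllegalArgumentException(
          "end time must not be earlier than begin time");
    }
    return new LongRange(begin, end);
  }
}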
use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest in project hadoop by apache.
the class TestRM method testInvalidateAMHostPortWhenAMFailedOrKilled.
// This is to test AM Host and rpc port are invalidated after the am attempt
// is killed or failed, so that client doesn't get the wrong information.
@Test(timeout = 80000)
public void testInvalidateAMHostPortWhenAMFailedOrKilled() throws Exception {
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
  MockRM rm1 = new MockRM(conf);
  rm1.start();
  // a succeeded app
  RMApp app1 = rm1.submitApp(200);
  MockNM nm1 =
      new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
  nm1.registerNode();
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
  MockRM.finishAMAndVerifyAppState(app1, rm1, nm1, am1);
  // a failed app
  RMApp app2 = rm1.submitApp(200);
  MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm1);
  nm1.nodeHeartbeat(am2.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
  rm1.waitForState(am2.getApplicationAttemptId(), RMAppAttemptState.FAILED);
  rm1.waitForState(app2.getApplicationId(), RMAppState.FAILED);
  // a killed app
  RMApp app3 = rm1.submitApp(200);
  MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm1);
  rm1.killApp(app3.getApplicationId());
  rm1.waitForState(app3.getApplicationId(), RMAppState.KILLED);
  rm1.waitForState(am3.getApplicationAttemptId(), RMAppAttemptState.KILLED);
  GetApplicationsRequest request1 = GetApplicationsRequest.newInstance(
      EnumSet.of(YarnApplicationState.FINISHED, YarnApplicationState.KILLED,
          YarnApplicationState.FAILED));
  GetApplicationsResponse response1 =
      rm1.getClientRMService().getApplications(request1);
  List<ApplicationReport> appList1 = response1.getApplicationList();
  Assert.assertEquals(3, appList1.size());
  for (ApplicationReport report : appList1) {
    // The killed/failed apps' host and rpc port are invalidated.
    if (report.getApplicationId().equals(app2.getApplicationId())
        || report.getApplicationId().equals(app3.getApplicationId())) {
      Assert.assertEquals("N/A", report.getHost());
      Assert.assertEquals(-1, report.getRpcPort());
    }
    // The succeeded app's host and rpc port are not invalidated.
    if (report.getApplicationId().equals(app1.getApplicationId())) {
      Assert.assertFalse(report.getHost().equals("N/A"));
      Assert.assertTrue(report.getRpcPort() != -1);
    }
  }
}
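Outside of MockRM, the same terminal-state filter can be issued through the public YarnClient API, which builds the GetApplicationsRequest internally. The sketch below is illustrative only; the class name and the printed output are not taken from the test.

import java.io.IOException;
import java.util.EnumSet;
import java.util.List;

import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;

public class ListCompletedApps {

  public static void main(String[] args) throws IOException, YarnException {
    YarnClient client = YarnClient.createYarnClient();
    client.init(new YarnConfiguration());
    client.start();
    try {
      // Same terminal-state filter the test passes to ClientRMService.
      List<ApplicationReport> reports = client.getApplications(
          EnumSet.of(YarnApplicationState.FINISHED,
              YarnApplicationState.KILLED, YarnApplicationState.FAILED));
      for (ApplicationReport report : reports) {
        // Killed/failed attempts report "N/A" and -1 once invalidated.
        System.out.println(report.getApplicationId() + " host="
            + report.getHost() + " rpcPort=" + report.getRpcPort());
      }
    } finally {
      client.stop();
    }
  }
}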
use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest in project hive by apache.
the class WebHCatJTShim23 method getYarnChildJobs.
/**
 * Queries RM for the list of applications with the given tag that have
 * started after the given timestamp.
 */
private Set<ApplicationId> getYarnChildJobs(String tag, long timestamp) {
  Set<ApplicationId> childYarnJobs = new HashSet<ApplicationId>();
  LOG.info(String.format("Querying RM for tag = %s, starting with ts = %s",
      tag, timestamp));
  GetApplicationsRequest gar = GetApplicationsRequest.newInstance();
  gar.setScope(ApplicationsRequestScope.OWN);
  gar.setStartRange(timestamp, System.currentTimeMillis());
  gar.setApplicationTags(Collections.singleton(tag));
  try {
    ApplicationClientProtocol proxy =
        ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class);
    GetApplicationsResponse apps = proxy.getApplications(gar);
    List<ApplicationReport> appsList = apps.getApplicationList();
    for (ApplicationReport appReport : appsList) {
      childYarnJobs.add(appReport.getApplicationId());
    }
  } catch (IOException ioe) {
    throw new RuntimeException("Exception occurred while finding child jobs", ioe);
  } catch (YarnException ye) {
    throw new RuntimeException("Exception occurred while finding child jobs", ye);
  }
  return childYarnJobs;
}
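A common follow-up to collecting the tagged child application ids is to ask the RM to kill them over the same protocol. The helper below is an illustrative sketch, not the shim's actual code; it reuses only the ClientRMProxy and ApplicationClientProtocol calls shown above plus KillApplicationRequest.

import java.io.IOException;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.client.ClientRMProxy;
import org.apache.hadoop.yarn.exceptions.YarnException;

final class ChildAppKiller {

  // Illustrative helper: ask the RM to kill each child application found by a
  // tag query like getYarnChildJobs() above.
  static void killChildApps(Configuration conf, Set<ApplicationId> childApps)
      throws IOException, YarnException {
    ApplicationClientProtocol proxy =
        ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class);
    for (ApplicationId appId : childApps) {
      proxy.forceKillApplication(KillApplicationRequest.newInstance(appId));
    }
  }
}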
use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest in project hadoop by apache.
the class YarnClientImpl method getApplications.
@Override
public List<ApplicationReport> getApplications(Set<String> queues,
    Set<String> users, Set<String> applicationTypes,
    EnumSet<YarnApplicationState> applicationStates)
    throws YarnException, IOException {
  GetApplicationsRequest request =
      GetApplicationsRequest.newInstance(applicationTypes, applicationStates);
  request.setQueues(queues);
  request.setUsers(users);
  GetApplicationsResponse response = rmClient.getApplications(request);
  return response.getApplicationList();
}
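A possible call site for this overload through the public YarnClient facade is sketched below; the queue, user, and application-type values are placeholders for illustration.

import java.util.Collections;
import java.util.EnumSet;
import java.util.List;

import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class FilteredAppQuery {

  public static void main(String[] args) throws Exception {
    YarnClient client = YarnClient.createYarnClient();
    client.init(new YarnConfiguration());
    client.start();
    try {
      // Placeholder filter values for illustration.
      List<ApplicationReport> reports = client.getApplications(
          Collections.singleton("default"),          // queues
          Collections.singleton("alice"),            // users
          Collections.singleton("MAPREDUCE"),        // application types
          EnumSet.of(YarnApplicationState.RUNNING)); // states
      System.out.println("matched " + reports.size() + " applications");
    } finally {
      client.stop();
    }
  }
}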