Use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse in project hadoop by apache.
From the class TestResourceMgrDelegate, method tesAllJobs.
@Test
public void tesAllJobs() throws Exception {
    // Stub the RM client protocol so that any GetApplicationsRequest yields a
    // fixed set of four application reports in known terminal states.
    final ApplicationClientProtocol rmProtocol = Mockito.mock(ApplicationClientProtocol.class);
    List<ApplicationReport> reports = new ArrayList<ApplicationReport>();
    reports.add(getApplicationReport(YarnApplicationState.FINISHED, FinalApplicationStatus.FAILED));
    reports.add(getApplicationReport(YarnApplicationState.FINISHED, FinalApplicationStatus.SUCCEEDED));
    reports.add(getApplicationReport(YarnApplicationState.FINISHED, FinalApplicationStatus.KILLED));
    reports.add(getApplicationReport(YarnApplicationState.FAILED, FinalApplicationStatus.FAILED));
    GetApplicationsResponse cannedResponse = Records.newRecord(GetApplicationsResponse.class);
    cannedResponse.setApplicationList(reports);
    Mockito.when(rmProtocol.getApplications(Mockito.any(GetApplicationsRequest.class))).thenReturn(cannedResponse);
    // Wire the mocked protocol into the delegate's YarnClient during service start,
    // before any RPC would normally be attempted.
    ResourceMgrDelegate delegate = new ResourceMgrDelegate(new YarnConfiguration()) {

        @Override
        protected void serviceStart() throws Exception {
            Assert.assertTrue(this.client instanceof YarnClientImpl);
            ((YarnClientImpl) this.client).setRMClient(rmProtocol);
        }
    };
    // getAllJobs() should translate each YARN report into the matching JobStatus state.
    JobStatus[] jobs = delegate.getAllJobs();
    Assert.assertEquals(State.FAILED, jobs[0].getState());
    Assert.assertEquals(State.SUCCEEDED, jobs[1].getState());
    Assert.assertEquals(State.KILLED, jobs[2].getState());
    Assert.assertEquals(State.FAILED, jobs[3].getState());
}
Use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse in project hadoop by apache.
From the class AHSClientImpl, method getApplications.
@Override
public List<ApplicationReport> getApplications() throws YarnException, IOException {
    // Passing null for both application types and states requests every application
    // known to the Application History Server, unfiltered.
    GetApplicationsRequest unfiltered = GetApplicationsRequest.newInstance(null, null);
    return ahsClient.getApplications(unfiltered).getApplicationList();
}
Use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse in project oozie by apache.
From the class LauncherMainException, method getChildYarnJobs.
/**
 * Finds the YARN applications spawned by this action, identified by the job tag
 * stored under {@code CHILD_MAPREDUCE_JOB_TAGS} and started within
 * [startTime, now], widened if clock skew is detected.
 *
 * @param actionConf configuration holding the child-job tag property
 * @param scope      which applications the RM should consider (e.g. OWN)
 * @param startTime  earliest launch time (ms) of interest, per the Oozie server clock
 * @return the matching application ids; empty if the tag property is absent or nothing matches
 * @throws RuntimeException wrapping any YarnException/IOException from the RM call
 */
public static Set<ApplicationId> getChildYarnJobs(Configuration actionConf, ApplicationsRequestScope scope, long startTime) {
    Set<ApplicationId> childYarnJobs = new HashSet<ApplicationId>();
    String tag = actionConf.get(CHILD_MAPREDUCE_JOB_TAGS);
    if (tag == null) {
        // Fix: was System.out.print, which left the warning without a trailing
        // newline so the next output line ran into it; every other message in
        // this method uses println.
        System.out.println("Could not find YARN tags property " + CHILD_MAPREDUCE_JOB_TAGS);
        return childYarnJobs;
    }
    System.out.println("tag id : " + tag);
    GetApplicationsRequest gar = GetApplicationsRequest.newInstance();
    gar.setScope(scope);
    gar.setApplicationTags(Collections.singleton(tag));
    long endTime = System.currentTimeMillis();
    if (startTime > endTime) {
        System.out.println("WARNING: Clock skew between the Oozie server host and this host detected. Please fix this. " + "Attempting to work around...");
        // We don't know which one is wrong (relative to the RM), so to be safe, let's assume they're both wrong and add an
        // offset in both directions
        long diff = 2 * (startTime - endTime);
        startTime = startTime - diff;
        endTime = endTime + diff;
    }
    gar.setStartRange(startTime, endTime);
    try {
        ApplicationClientProtocol proxy = ClientRMProxy.createRMProxy(actionConf, ApplicationClientProtocol.class);
        GetApplicationsResponse apps = proxy.getApplications(gar);
        List<ApplicationReport> appsList = apps.getApplicationList();
        for (ApplicationReport appReport : appsList) {
            childYarnJobs.add(appReport.getApplicationId());
        }
    } catch (YarnException | IOException ioe) {
        // Preserve the cause so the RM failure is diagnosable from the stack trace.
        throw new RuntimeException("Exception occurred while finding child jobs", ioe);
    }
    if (childYarnJobs.isEmpty()) {
        System.out.println("No child applications found");
    } else {
        System.out.println("Found child YARN applications: " + StringUtils.join(childYarnJobs, ","));
    }
    return childYarnJobs;
}
Use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse in project hive by apache.
From the class KillQueryImpl, method getChildYarnJobs.
/**
 * Collects the ids of YARN applications tagged with the given query tag that the
 * caller is allowed to see: an admin sees all of them, otherwise only those tagged
 * with the matching {@code userid=<doAs>} entry.
 */
public static Set<ApplicationId> getChildYarnJobs(Configuration conf, String tag, String doAs, boolean doAsAdmin) throws IOException, YarnException {
    // Ask the RM for applications owned by this user and carrying the query tag.
    GetApplicationsRequest request = GetApplicationsRequest.newInstance();
    request.setScope(ApplicationsRequestScope.OWN);
    request.setApplicationTags(Collections.singleton(tag));
    ApplicationClientProtocol rmProxy = ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class);
    GetApplicationsResponse response = rmProxy.getApplications(request);
    Set<ApplicationId> children = new HashSet<>();
    for (ApplicationReport report : response.getApplicationList()) {
        if (doAsAdmin) {
            // Admins see every tagged application.
            children.add(report.getApplicationId());
        } else if (StringUtils.isNotBlank(doAs) && report.getApplicationTags().contains(QueryState.USERID_TAG + "=" + doAs)) {
            // Non-admins only see applications tagged as launched on their behalf.
            children.add(report.getApplicationId());
        }
    }
    if (children.isEmpty()) {
        LOG.info("No child applications found");
    } else {
        LOG.info("Found child YARN applications: " + StringUtils.join(children, ","));
    }
    return children;
}
Use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse in project zeppelin by apache.
From the class FlinkIntegrationTest, method testYarnApplicationMode.
@Test
public void testYarnApplicationMode() throws IOException, InterpreterException, YarnException {
// yarn-application deployment only exists in Flink 1.11+, so skip for 1.10.
if (flinkVersion.startsWith("1.10")) {
LOGGER.info("Skip yarn application mode test for flink 1.10");
return;
}
// Configure the flink interpreter to submit to the mini YARN cluster in
// yarn-application mode; these properties must all be set before the
// interpreter is first used below.
InterpreterSetting flinkInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("flink");
flinkInterpreterSetting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());
flinkInterpreterSetting.setProperty("FLINK_HOME", flinkHome);
flinkInterpreterSetting.setProperty("PATH", hadoopHome + "/bin:" + System.getenv("PATH"));
flinkInterpreterSetting.setProperty("ZEPPELIN_CONF_DIR", zeppelin.getZeppelinConfDir().getAbsolutePath());
flinkInterpreterSetting.setProperty("flink.execution.mode", "yarn-application");
// parameters with whitespace
flinkInterpreterSetting.setProperty("flink.yarn.appName", "hello flink");
flinkInterpreterSetting.setProperty("zeppelin.flink.run.asLoginUser", "false");
// Runs paragraphs through the interpreter, which launches the YARN application.
testInterpreterBasics();
// 1 yarn application launched
GetApplicationsRequest request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
assertEquals(1, response.getApplicationList().size());
// Verify the app name with whitespace survived submission intact.
assertEquals("hello flink", response.getApplicationList().get(0).getName());
interpreterSettingManager.close();
}
Aggregations