Use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest in the Apache Hive project.
From the class KillQueryImpl, method getChildYarnJobs:
/**
 * Finds the YARN applications spawned on behalf of a query, identified by the given tag.
 *
 * @param conf      configuration used to create the ResourceManager proxy
 * @param doAs      user name matched against the application's USERID_TAG (may be blank)
 * @param tag       application tag the child jobs were submitted with
 * @param doAsAdmin when {@code true}, every tagged application is returned regardless of owner
 * @return the set of matching application ids (possibly empty, never {@code null})
 * @throws IOException   if the ResourceManager proxy cannot be created
 * @throws YarnException if the getApplications RPC fails
 */
public static Set<ApplicationId> getChildYarnJobs(Configuration conf, String tag, String doAs, boolean doAsAdmin) throws IOException, YarnException {
Set<ApplicationId> childYarnJobs = new HashSet<>();
GetApplicationsRequest gar = GetApplicationsRequest.newInstance();
// Restrict the query to applications owned by the current RM scope and carrying the tag.
gar.setScope(ApplicationsRequestScope.OWN);
gar.setApplicationTags(Collections.singleton(tag));
// NOTE(review): this proxy is never released; consider RPC.stopProxy(proxy) in a finally block — confirm lifecycle expectations.
ApplicationClientProtocol proxy = ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class);
GetApplicationsResponse apps = proxy.getApplications(gar);
List<ApplicationReport> appsList = apps.getApplicationList();
for (ApplicationReport appReport : appsList) {
if (doAsAdmin) {
// Admins may collect any tagged application, irrespective of the submitting user.
childYarnJobs.add(appReport.getApplicationId());
} else if (StringUtils.isNotBlank(doAs)) {
// Non-admins only collect applications tagged with their own user id.
if (appReport.getApplicationTags().contains(QueryState.USERID_TAG + "=" + doAs)) {
childYarnJobs.add(appReport.getApplicationId());
}
}
}
if (childYarnJobs.isEmpty()) {
LOG.info("No child applications found");
} else {
// Parameterized logging: the joined string is only built when INFO is enabled.
LOG.info("Found child YARN applications: {}", StringUtils.join(childYarnJobs, ","));
}
return childYarnJobs;
}
Use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest in the Apache Zeppelin project.
From the class FlinkIntegrationTest, method testYarnApplicationMode:
/**
 * Verifies that running the Flink interpreter in yarn-application mode launches exactly
 * one YARN application whose name (containing whitespace) is propagated correctly.
 */
@Test
public void testYarnApplicationMode() throws IOException, InterpreterException, YarnException {
if (flinkVersion.startsWith("1.10")) {
// yarn-application mode is not supported by Flink 1.10.
LOGGER.info("Skip yarn application mode test for flink 1.10");
return;
}
InterpreterSetting flinkInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("flink");
flinkInterpreterSetting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());
flinkInterpreterSetting.setProperty("FLINK_HOME", flinkHome);
flinkInterpreterSetting.setProperty("PATH", hadoopHome + "/bin:" + System.getenv("PATH"));
flinkInterpreterSetting.setProperty("ZEPPELIN_CONF_DIR", zeppelin.getZeppelinConfDir().getAbsolutePath());
flinkInterpreterSetting.setProperty("flink.execution.mode", "yarn-application");
// parameters with whitespace
flinkInterpreterSetting.setProperty("flink.yarn.appName", "hello flink");
flinkInterpreterSetting.setProperty("zeppelin.flink.run.asLoginUser", "false");
try {
testInterpreterBasics();
// 1 yarn application launched
GetApplicationsRequest request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
assertEquals(1, response.getApplicationList().size());
assertEquals("hello flink", response.getApplicationList().get(0).getName());
} finally {
// Always release the interpreter setting so a failed assertion does not leak
// the running interpreter into subsequent tests (matches the Spark tests' pattern).
interpreterSettingManager.close();
}
}
Use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest in the Apache Zeppelin project.
From the class SparkIntegrationTest, method testYarnClusterMode:
/**
 * Verifies yarn-cluster mode: exactly one YARN application is launched with the
 * expected (whitespace-containing) name, and after the interpreter is closed the
 * application finishes with FinalApplicationStatus.SUCCEEDED.
 */
@Test
public void testYarnClusterMode() throws IOException, YarnException, InterruptedException, InterpreterException, XmlPullParserException {
assumeTrue("Hadoop version mismatch, skip test", isHadoopVersionMatch());
InterpreterSetting sparkInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("spark");
sparkInterpreterSetting.setProperty("spark.master", "yarn-cluster");
sparkInterpreterSetting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());
sparkInterpreterSetting.setProperty("SPARK_HOME", sparkHome);
sparkInterpreterSetting.setProperty("ZEPPELIN_CONF_DIR", zeppelin.getZeppelinConfDir().getAbsolutePath());
sparkInterpreterSetting.setProperty("zeppelin.spark.useHiveContext", "false");
sparkInterpreterSetting.setProperty("zeppelin.pyspark.useIPython", "false");
sparkInterpreterSetting.setProperty("PYSPARK_PYTHON", getPythonExec());
sparkInterpreterSetting.setProperty("spark.driver.memory", "512m");
sparkInterpreterSetting.setProperty("zeppelin.spark.scala.color", "false");
sparkInterpreterSetting.setProperty("zeppelin.spark.deprecatedMsg.show", "false");
sparkInterpreterSetting.setProperty("spark.user.name", "#{user}");
sparkInterpreterSetting.setProperty("zeppelin.spark.run.asLoginUser", "false");
// parameters with whitespace
sparkInterpreterSetting.setProperty("spark.app.name", "hello spark");
String yarnAppId = null;
try {
setUpSparkInterpreterSetting(sparkInterpreterSetting);
testInterpreterBasics();
// 1 yarn application launched
GetApplicationsRequest request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
assertEquals(1, response.getApplicationList().size());
assertEquals("hello spark", response.getApplicationList().get(0).getName());
// Remember the id so the finally block can check its final status after shutdown.
yarnAppId = response.getApplicationList().get(0).getApplicationId().toString();
} finally {
interpreterSettingManager.close();
// Closing the interpreter stops the driver; give the app up to 30s to leave RUNNING.
waitForYarnAppCompleted(30 * 1000);
if (yarnAppId != null) {
// ensure yarn app is finished with SUCCEEDED status.
final String finalYarnAppId = yarnAppId;
GetApplicationsRequest request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.FINISHED));
GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
List<ApplicationReport> apps = response.getApplicationList().stream().filter(app -> app.getApplicationId().toString().equals(finalYarnAppId)).collect(Collectors.toList());
assertEquals(1, apps.size());
assertEquals(FinalApplicationStatus.SUCCEEDED, apps.get(0).getFinalApplicationStatus());
}
}
}
Use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest in the Apache Zeppelin project.
From the class SparkIntegrationTest, method testYarnClientMode:
/**
 * Verifies yarn-client mode: running the interpreter must result in exactly one
 * YARN application in RUNNING state.
 */
@Test
public void testYarnClientMode() throws IOException, YarnException, InterruptedException, InterpreterException, XmlPullParserException {
assumeTrue("Hadoop version mismatch, skip test", isHadoopVersionMatch());
InterpreterSetting setting = interpreterSettingManager.getInterpreterSettingByName("spark");
// Configure the interpreter for a yarn-client run against the mini cluster.
setting.setProperty("spark.master", "yarn-client");
setting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());
setting.setProperty("SPARK_HOME", sparkHome);
setting.setProperty("ZEPPELIN_CONF_DIR", zeppelin.getZeppelinConfDir().getAbsolutePath());
setting.setProperty("zeppelin.spark.useHiveContext", "false");
setting.setProperty("zeppelin.pyspark.useIPython", "false");
setting.setProperty("PYSPARK_PYTHON", getPythonExec());
setting.setProperty("spark.driver.memory", "512m");
setting.setProperty("zeppelin.spark.scala.color", "false");
setting.setProperty("zeppelin.spark.deprecatedMsg.show", "false");
setting.setProperty("spark.user.name", "#{user}");
setting.setProperty("zeppelin.spark.run.asLoginUser", "false");
try {
setUpSparkInterpreterSetting(setting);
testInterpreterBasics();
// Exactly one application should be RUNNING at this point.
GetApplicationsRequest runningAppsRequest = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
GetApplicationsResponse runningApps = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(runningAppsRequest);
assertEquals(1, runningApps.getApplicationList().size());
} finally {
interpreterSettingManager.close();
waitForYarnAppCompleted(30 * 1000);
}
}
Use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest in the Apache Zeppelin project.
From the class SparkIntegrationTest, method waitForYarnAppCompleted:
/**
 * Polls the mini YARN cluster until no application remains in RUNNING state, or the
 * timeout elapses. Fails the calling test if the applications do not finish in time.
 *
 * @param timeout maximum time to wait, in milliseconds
 * @throws YarnException if the getApplications RPC fails
 */
private void waitForYarnAppCompleted(int timeout) throws YarnException {
long start = System.currentTimeMillis();
boolean yarnAppCompleted = false;
while ((System.currentTimeMillis() - start) < timeout) {
GetApplicationsRequest request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
if (response.getApplicationList().isEmpty()) {
yarnAppCompleted = true;
break;
}
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
// Restore the interrupt flag rather than swallowing it, and stop polling;
// the assertion below will then report the timeout failure.
Thread.currentThread().interrupt();
break;
}
}
assertTrue("Yarn app is not completed in " + timeout + " milliseconds.", yarnAppCompleted);
}
Aggregations