Search in sources:

Example 21 with GetApplicationsRequest

use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest in project hive by apache.

From the class KillQueryImpl, method getChildYarnJobs.

/**
 * Finds the YARN applications that were launched on behalf of a query, identified by
 * the given application tag.
 *
 * @param conf      Hadoop configuration used to build the ResourceManager proxy
 * @param tag       application tag the child jobs were submitted with
 * @param doAs      user the query was run as; when non-blank (and the caller is not an
 *                  admin) only applications tagged with that user id are returned
 * @param doAsAdmin when true, every tagged application owned by the current user is
 *                  returned regardless of the doAs user
 * @return the set of matching application ids (possibly empty, never null)
 * @throws IOException   on RPC/proxy creation failure
 * @throws YarnException on ResourceManager errors
 */
public static Set<ApplicationId> getChildYarnJobs(Configuration conf, String tag, String doAs, boolean doAsAdmin) throws IOException, YarnException {
    // Query the RM for applications owned by the current user that carry the tag.
    GetApplicationsRequest request = GetApplicationsRequest.newInstance();
    request.setScope(ApplicationsRequestScope.OWN);
    request.setApplicationTags(Collections.singleton(tag));
    ApplicationClientProtocol rmClient = ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class);
    GetApplicationsResponse response = rmClient.getApplications(request);

    Set<ApplicationId> childYarnJobs = new HashSet<>();
    // Non-admin callers with a doAs user only see apps tagged with that user's id.
    boolean restrictToDoAsUser = !doAsAdmin && StringUtils.isNotBlank(doAs);
    String doAsUserTag = QueryState.USERID_TAG + "=" + doAs;
    for (ApplicationReport report : response.getApplicationList()) {
        if (doAsAdmin || (restrictToDoAsUser && report.getApplicationTags().contains(doAsUserTag))) {
            childYarnJobs.add(report.getApplicationId());
        }
    }

    if (childYarnJobs.isEmpty()) {
        LOG.info("No child applications found");
    } else {
        LOG.info("Found child YARN applications: " + StringUtils.join(childYarnJobs, ","));
    }
    return childYarnJobs;
}
Also used : ApplicationReport(org.apache.hadoop.yarn.api.records.ApplicationReport) GetApplicationsResponse(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) ApplicationClientProtocol(org.apache.hadoop.yarn.api.ApplicationClientProtocol) GetApplicationsRequest(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest) HashSet(java.util.HashSet)

Example 22 with GetApplicationsRequest

use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest in project zeppelin by apache.

From the class FlinkIntegrationTest, method testYarnApplicationMode.

// Verifies that running the Flink interpreter in yarn-application mode launches exactly
// one RUNNING YARN application, and that the application carries the configured name
// (including whitespace) from flink.yarn.appName.
@Test
public void testYarnApplicationMode() throws IOException, InterpreterException, YarnException {
    // This mode is not exercised for Flink 1.10 builds.
    if (flinkVersion.startsWith("1.10")) {
        LOGGER.info("Skip yarn application mode test for flink 1.10");
        return;
    }
    InterpreterSetting setting = interpreterSettingManager.getInterpreterSettingByName("flink");
    setting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());
    setting.setProperty("FLINK_HOME", flinkHome);
    setting.setProperty("PATH", hadoopHome + "/bin:" + System.getenv("PATH"));
    setting.setProperty("ZEPPELIN_CONF_DIR", zeppelin.getZeppelinConfDir().getAbsolutePath());
    setting.setProperty("flink.execution.mode", "yarn-application");
    // App name deliberately contains whitespace to cover parameter quoting.
    setting.setProperty("flink.yarn.appName", "hello flink");
    setting.setProperty("zeppelin.flink.run.asLoginUser", "false");

    testInterpreterBasics();

    // Exactly one YARN application should be running, named after flink.yarn.appName.
    GetApplicationsRequest runningAppsRequest = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
    GetApplicationsResponse runningApps = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(runningAppsRequest);
    assertEquals(1, runningApps.getApplicationList().size());
    assertEquals("hello flink", runningApps.getApplicationList().get(0).getName());

    interpreterSettingManager.close();
}
Also used : GetApplicationsResponse(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse) InterpreterSetting(org.apache.zeppelin.interpreter.InterpreterSetting) GetApplicationsRequest(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest) Test(org.junit.Test)

Example 23 with GetApplicationsRequest

use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest in project zeppelin by apache.

From the class SparkIntegrationTest, method testYarnClusterMode.

// Verifies yarn-cluster mode end to end: exactly one RUNNING YARN application is
// launched with the configured name, and after the interpreter is closed the same
// application finishes with final status SUCCEEDED.
@Test
public void testYarnClusterMode() throws IOException, YarnException, InterruptedException, InterpreterException, XmlPullParserException {
    assumeTrue("Hadoop version mismatch, skip test", isHadoopVersionMatch());
    InterpreterSetting setting = interpreterSettingManager.getInterpreterSettingByName("spark");
    setting.setProperty("spark.master", "yarn-cluster");
    setting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());
    setting.setProperty("SPARK_HOME", sparkHome);
    setting.setProperty("ZEPPELIN_CONF_DIR", zeppelin.getZeppelinConfDir().getAbsolutePath());
    setting.setProperty("zeppelin.spark.useHiveContext", "false");
    setting.setProperty("zeppelin.pyspark.useIPython", "false");
    setting.setProperty("PYSPARK_PYTHON", getPythonExec());
    setting.setProperty("spark.driver.memory", "512m");
    setting.setProperty("zeppelin.spark.scala.color", "false");
    setting.setProperty("zeppelin.spark.deprecatedMsg.show", "false");
    setting.setProperty("spark.user.name", "#{user}");
    setting.setProperty("zeppelin.spark.run.asLoginUser", "false");
    // App name deliberately contains whitespace to cover parameter quoting.
    setting.setProperty("spark.app.name", "hello spark");

    String yarnAppId = null;
    try {
        setUpSparkInterpreterSetting(setting);
        testInterpreterBasics();

        // Exactly one YARN application should be running, named after spark.app.name.
        GetApplicationsRequest runningAppsRequest = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
        GetApplicationsResponse runningApps = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(runningAppsRequest);
        assertEquals(1, runningApps.getApplicationList().size());
        assertEquals("hello spark", runningApps.getApplicationList().get(0).getName());
        yarnAppId = runningApps.getApplicationList().get(0).getApplicationId().toString();
    } finally {
        interpreterSettingManager.close();
        waitForYarnAppCompleted(30 * 1000);
        if (yarnAppId != null) {
            // Ensure the app we observed earlier finished with SUCCEEDED status.
            final String finalYarnAppId = yarnAppId;
            GetApplicationsRequest finishedAppsRequest = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.FINISHED));
            GetApplicationsResponse finishedApps = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(finishedAppsRequest);
            List<ApplicationReport> apps = finishedApps.getApplicationList().stream()
                    .filter(app -> app.getApplicationId().toString().equals(finalYarnAppId))
                    .collect(Collectors.toList());
            assertEquals(1, apps.size());
            assertEquals(FinalApplicationStatus.SUCCEEDED, apps.get(0).getFinalApplicationStatus());
        }
    }
}
Also used : ApplicationReport(org.apache.hadoop.yarn.api.records.ApplicationReport) GetApplicationsRequest(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest) InterpreterContext(org.apache.zeppelin.interpreter.InterpreterContext) BeforeClass(org.junit.BeforeClass) LoggerFactory(org.slf4j.LoggerFactory) YarnException(org.apache.hadoop.yarn.exceptions.YarnException) ApplicationReport(org.apache.hadoop.yarn.api.records.ApplicationReport) InterpreterSetting(org.apache.zeppelin.interpreter.InterpreterSetting) InterpreterFactory(org.apache.zeppelin.interpreter.InterpreterFactory) ExecutionContext(org.apache.zeppelin.interpreter.ExecutionContext) DownloadUtils(org.apache.zeppelin.interpreter.integration.DownloadUtils) EnumSet(java.util.EnumSet) Interpreter(org.apache.zeppelin.interpreter.Interpreter) InterpreterException(org.apache.zeppelin.interpreter.InterpreterException) InterpreterSettingManager(org.apache.zeppelin.interpreter.InterpreterSettingManager) AfterClass(org.junit.AfterClass) Logger(org.slf4j.Logger) InterpreterResult(org.apache.zeppelin.interpreter.InterpreterResult) XmlPullParserException(org.codehaus.plexus.util.xml.pull.XmlPullParserException) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) IOException(java.io.IOException) Collectors(java.util.stream.Collectors) File(java.io.File) Assert.assertNotEquals(org.junit.Assert.assertNotEquals) IOUtils(org.apache.commons.io.IOUtils) List(java.util.List) GetApplicationsResponse(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse) FinalApplicationStatus(org.apache.hadoop.yarn.api.records.FinalApplicationStatus) InterpreterOption(org.apache.zeppelin.interpreter.InterpreterOption) YarnApplicationState(org.apache.hadoop.yarn.api.records.YarnApplicationState) Assume.assumeTrue(org.junit.Assume.assumeTrue) FileReader(java.io.FileReader) MavenXpp3Reader(org.apache.maven.model.io.xpp3.MavenXpp3Reader) Assert.assertEquals(org.junit.Assert.assertEquals) 
Model(org.apache.maven.model.Model) GetApplicationsResponse(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse) InterpreterSetting(org.apache.zeppelin.interpreter.InterpreterSetting) GetApplicationsRequest(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest) Test(org.junit.Test)

Example 24 with GetApplicationsRequest

use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest in project zeppelin by apache.

From the class SparkIntegrationTest, method testYarnClientMode.

// Verifies yarn-client mode: after running the interpreter basics, exactly one YARN
// application is in RUNNING state; the interpreter is always closed and the app is
// awaited to completion in the finally block.
@Test
public void testYarnClientMode() throws IOException, YarnException, InterruptedException, InterpreterException, XmlPullParserException {
    assumeTrue("Hadoop version mismatch, skip test", isHadoopVersionMatch());
    InterpreterSetting setting = interpreterSettingManager.getInterpreterSettingByName("spark");
    setting.setProperty("spark.master", "yarn-client");
    setting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());
    setting.setProperty("SPARK_HOME", sparkHome);
    setting.setProperty("ZEPPELIN_CONF_DIR", zeppelin.getZeppelinConfDir().getAbsolutePath());
    setting.setProperty("zeppelin.spark.useHiveContext", "false");
    setting.setProperty("zeppelin.pyspark.useIPython", "false");
    setting.setProperty("PYSPARK_PYTHON", getPythonExec());
    setting.setProperty("spark.driver.memory", "512m");
    setting.setProperty("zeppelin.spark.scala.color", "false");
    setting.setProperty("zeppelin.spark.deprecatedMsg.show", "false");
    setting.setProperty("spark.user.name", "#{user}");
    setting.setProperty("zeppelin.spark.run.asLoginUser", "false");

    try {
        setUpSparkInterpreterSetting(setting);
        testInterpreterBasics();

        // Exactly one YARN application should have been launched.
        GetApplicationsRequest runningAppsRequest = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
        GetApplicationsResponse runningApps = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(runningAppsRequest);
        assertEquals(1, runningApps.getApplicationList().size());
    } finally {
        interpreterSettingManager.close();
        waitForYarnAppCompleted(30 * 1000);
    }
}
Also used : GetApplicationsResponse(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse) InterpreterSetting(org.apache.zeppelin.interpreter.InterpreterSetting) GetApplicationsRequest(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest) Test(org.junit.Test)

Example 25 with GetApplicationsRequest

use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest in project zeppelin by apache.

From the class SparkIntegrationTest, method waitForYarnAppCompleted.

/**
 * Polls the ResourceManager once per second until no YARN application is in RUNNING
 * state, failing the test if any application is still running when {@code timeout}
 * elapses.
 *
 * @param timeout maximum time to wait, in milliseconds
 * @throws YarnException if querying the ResourceManager fails
 */
private void waitForYarnAppCompleted(int timeout) throws YarnException {
    long start = System.currentTimeMillis();
    boolean yarnAppCompleted = false;
    while ((System.currentTimeMillis() - start) < timeout) {
        GetApplicationsRequest request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
        GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
        if (response.getApplicationList().isEmpty()) {
            yarnAppCompleted = true;
            break;
        }
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            // Fix: the original swallowed the interrupt with e.printStackTrace() and
            // kept looping, which busy-spins (sleep throws again immediately) once the
            // thread is interrupted. Restore the interrupt flag and stop polling; the
            // assertion below then reports the app as not completed.
            Thread.currentThread().interrupt();
            break;
        }
    }
    assertTrue("Yarn app is not completed in " + timeout + " milliseconds.", yarnAppCompleted);
}
Also used : GetApplicationsResponse(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse) GetApplicationsRequest(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest)

Aggregations

GetApplicationsRequest (org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest)28 GetApplicationsResponse (org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse)21 Test (org.junit.Test)18 ApplicationReport (org.apache.hadoop.yarn.api.records.ApplicationReport)12 InterpreterSetting (org.apache.zeppelin.interpreter.InterpreterSetting)9 HashSet (java.util.HashSet)8 ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId)8 YarnException (org.apache.hadoop.yarn.exceptions.YarnException)6 ExecutionContext (org.apache.zeppelin.interpreter.ExecutionContext)6 Interpreter (org.apache.zeppelin.interpreter.Interpreter)6 InterpreterContext (org.apache.zeppelin.interpreter.InterpreterContext)6 InterpreterResult (org.apache.zeppelin.interpreter.InterpreterResult)6 IOException (java.io.IOException)5 YarnApplicationState (org.apache.hadoop.yarn.api.records.YarnApplicationState)5 RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp)5 YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration)4 LongRange (org.apache.commons.lang.math.LongRange)3 Configuration (org.apache.hadoop.conf.Configuration)3 UserGroupInformation (org.apache.hadoop.security.UserGroupInformation)3 ApplicationClientProtocol (org.apache.hadoop.yarn.api.ApplicationClientProtocol)3