Use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse in the Apache Zeppelin project:
class SparkIntegrationTest, method testYarnClusterMode.
@Test
public void testYarnClusterMode() throws IOException, YarnException, InterruptedException, InterpreterException, XmlPullParserException {
  assumeTrue("Hadoop version mismatch, skip test", isHadoopVersionMatch());

  // Configure the spark interpreter so the driver runs inside the YARN cluster.
  InterpreterSetting setting = interpreterSettingManager.getInterpreterSettingByName("spark");
  setting.setProperty("spark.master", "yarn-cluster");
  setting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());
  setting.setProperty("SPARK_HOME", sparkHome);
  setting.setProperty("ZEPPELIN_CONF_DIR", zeppelin.getZeppelinConfDir().getAbsolutePath());
  setting.setProperty("zeppelin.spark.useHiveContext", "false");
  setting.setProperty("zeppelin.pyspark.useIPython", "false");
  setting.setProperty("PYSPARK_PYTHON", getPythonExec());
  setting.setProperty("spark.driver.memory", "512m");
  setting.setProperty("zeppelin.spark.scala.color", "false");
  setting.setProperty("zeppelin.spark.deprecatedMsg.show", "false");
  setting.setProperty("spark.user.name", "#{user}");
  setting.setProperty("zeppelin.spark.run.asLoginUser", "false");
  // parameters with whitespace
  setting.setProperty("spark.app.name", "hello spark");

  String appId = null;
  try {
    setUpSparkInterpreterSetting(setting);
    testInterpreterBasics();

    // Exactly one YARN application should be running, with the configured name.
    GetApplicationsRequest runningReq = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
    GetApplicationsResponse runningResp = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(runningReq);
    assertEquals(1, runningResp.getApplicationList().size());
    assertEquals("hello spark", runningResp.getApplicationList().get(0).getName());
    appId = runningResp.getApplicationList().get(0).getApplicationId().toString();
  } finally {
    interpreterSettingManager.close();
    waitForYarnAppCompleted(30 * 1000);
    if (appId != null) {
      // ensure yarn app is finished with SUCCEEDED status.
      final String launchedAppId = appId;
      GetApplicationsRequest finishedReq = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.FINISHED));
      GetApplicationsResponse finishedResp = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(finishedReq);
      List<ApplicationReport> finishedApps = finishedResp.getApplicationList().stream().filter(app -> app.getApplicationId().toString().equals(launchedAppId)).collect(Collectors.toList());
      assertEquals(1, finishedApps.size());
      assertEquals(FinalApplicationStatus.SUCCEEDED, finishedApps.get(0).getFinalApplicationStatus());
    }
  }
}
Use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse in the Apache Zeppelin project:
class SparkIntegrationTest, method testYarnClientMode.
@Test
public void testYarnClientMode() throws IOException, YarnException, InterruptedException, InterpreterException, XmlPullParserException {
  assumeTrue("Hadoop version mismatch, skip test", isHadoopVersionMatch());

  // Configure the spark interpreter so the driver runs locally and executors run on YARN.
  InterpreterSetting setting = interpreterSettingManager.getInterpreterSettingByName("spark");
  setting.setProperty("spark.master", "yarn-client");
  setting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());
  setting.setProperty("SPARK_HOME", sparkHome);
  setting.setProperty("ZEPPELIN_CONF_DIR", zeppelin.getZeppelinConfDir().getAbsolutePath());
  setting.setProperty("zeppelin.spark.useHiveContext", "false");
  setting.setProperty("zeppelin.pyspark.useIPython", "false");
  setting.setProperty("PYSPARK_PYTHON", getPythonExec());
  setting.setProperty("spark.driver.memory", "512m");
  setting.setProperty("zeppelin.spark.scala.color", "false");
  setting.setProperty("zeppelin.spark.deprecatedMsg.show", "false");
  setting.setProperty("spark.user.name", "#{user}");
  setting.setProperty("zeppelin.spark.run.asLoginUser", "false");

  try {
    setUpSparkInterpreterSetting(setting);
    testInterpreterBasics();

    // Exactly one YARN application should have been launched.
    GetApplicationsRequest runningReq = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
    GetApplicationsResponse runningResp = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(runningReq);
    assertEquals(1, runningResp.getApplicationList().size());
  } finally {
    interpreterSettingManager.close();
    waitForYarnAppCompleted(30 * 1000);
  }
}
Use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse in the Apache Zeppelin project:
class SparkIntegrationTest, method waitForYarnAppCompleted.
/**
 * Polls the YARN ResourceManager until no application is in RUNNING state, or until
 * {@code timeout} elapses. Fails the enclosing test if applications are still running.
 *
 * @param timeout maximum time to wait, in milliseconds
 * @throws YarnException if querying the ResourceManager fails
 */
private void waitForYarnAppCompleted(int timeout) throws YarnException {
  long start = System.currentTimeMillis();
  boolean yarnAppCompleted = false;
  while ((System.currentTimeMillis() - start) < timeout) {
    GetApplicationsRequest request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
    GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
    if (response.getApplicationList().isEmpty()) {
      yarnAppCompleted = true;
      break;
    }
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // Restore the interrupt status and stop polling instead of swallowing the
      // interruption (the original printStackTrace() discarded it and kept looping).
      Thread.currentThread().interrupt();
      break;
    }
  }
  assertTrue("Yarn app is not completed in " + timeout + " milliseconds.", yarnAppCompleted);
}
Use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse in the Apache Zeppelin project:
class SparkSubmitIntegrationTest, method testYarnMode.
@Test
public void testYarnMode() throws InterpreterException, YarnException {
  try {
    // test SparkSubmitInterpreterSetting
    Interpreter sparkSubmitInterpreter = interpreterFactory.getInterpreter("spark-submit", new ExecutionContext("user1", "note1", "test"));
    InterpreterContext context = new InterpreterContext.Builder().setNoteId("note1").setParagraphId("paragraph_1").build();

    // Submit SparkPi to the YARN cluster under a known application name.
    String yarnAppName = "yarn_example";
    String submitArgs = "--master yarn-cluster --class org.apache.spark.examples.SparkPi " + "--conf spark.app.name=" + yarnAppName + " --conf spark.driver.memory=512m " + "--conf spark.executor.memory=512m " + sparkHome + "/examples/jars/spark-examples_2.11-2.4.7.jar";
    InterpreterResult result = sparkSubmitInterpreter.interpret(submitArgs, context);
    assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());

    // The finished-application list must contain exactly one app with our name.
    GetApplicationsRequest finishedReq = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.FINISHED));
    GetApplicationsResponse finishedResp = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(finishedReq);
    assertTrue(finishedResp.getApplicationList().size() >= 1);
    List<ApplicationReport> matching = finishedResp.getApplicationList().stream().filter(app -> app.getName().equals(yarnAppName)).collect(Collectors.toList());
    assertEquals(1, matching.size());
  } finally {
    interpreterSettingManager.close();
  }
}
Use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse in the Apache Zeppelin project:
class SparkSubmitIntegrationTest, method testLocalMode.
@Test
public void testLocalMode() throws InterpreterException, YarnException {
  try {
    // test SparkSubmitInterpreterSetting
    Interpreter sparkSubmitInterpreter = interpreterFactory.getInterpreter("spark-submit", new ExecutionContext("user1", "note1", "test"));
    InterpreterContext context = new InterpreterContext.Builder().setNoteId("note1").setParagraphId("paragraph_1").build();

    // No --master flag: spark-submit defaults to local mode, which is what this
    // test verifies. (Passing "--master yarn-cluster" here contradicted the test
    // name and would launch a YARN app, breaking the zero-running-apps assertion.)
    InterpreterResult interpreterResult = sparkSubmitInterpreter.interpret("--class org.apache.spark.examples.SparkPi " + sparkHome + "/examples/jars/spark-examples_2.11-2.4.7.jar", context);
    assertEquals(interpreterResult.toString(), InterpreterResult.Code.SUCCESS, interpreterResult.code());

    // no yarn application launched
    GetApplicationsRequest request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
    GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
    assertEquals(0, response.getApplicationList().size());
  } finally {
    interpreterSettingManager.close();
  }
}
Aggregations