Use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse in project zeppelin by apache.
Class FlinkIntegrationTest, method testYarnMode.
// TODO(zjffdu): enable this test once the yarn integration test works
@Test
public void testYarnMode() throws IOException, InterpreterException, YarnException {
  InterpreterSetting flinkInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("flink");
  flinkInterpreterSetting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());
  flinkInterpreterSetting.setProperty("FLINK_HOME", flinkHome);
  flinkInterpreterSetting.setProperty("PATH", hadoopHome + "/bin:" + System.getenv("PATH"));
  flinkInterpreterSetting.setProperty("ZEPPELIN_CONF_DIR", zeppelin.getZeppelinConfDir().getAbsolutePath());
  flinkInterpreterSetting.setProperty("flink.execution.mode", "yarn");
  flinkInterpreterSetting.setProperty("zeppelin.flink.run.asLoginUser", "false");

  testInterpreterBasics();

  // 1 yarn application launched
  GetApplicationsRequest request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
  GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
  assertEquals(1, response.getApplicationList().size());

  interpreterSettingManager.close();
}
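The RUNNING-state query and count assertion recur in every test in this listing. A minimal helper extracting the pattern could look like the sketch below; the countRunningApps name is hypothetical, while hadoopCluster and its accessors are the ones used by the tests shown here.

// Hypothetical helper (not in the zeppelin source): ask the mini
// cluster's ResourceManager for applications in RUNNING state and
// return how many there are.
private int countRunningApps() throws YarnException, IOException {
  GetApplicationsRequest request =
      GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
  GetApplicationsResponse response = hadoopCluster.getYarnCluster()
      .getResourceManager().getClientRMService().getApplications(request);
  return response.getApplicationList().size();
}

With such a helper, each check in these tests would reduce to a single line like assertEquals(1, countRunningApps()).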
Use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse in project zeppelin by apache.
Class FlinkIntegrationTest, method testLocalMode.
@Test
public void testLocalMode() throws IOException, YarnException, InterpreterException {
  InterpreterSetting flinkInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("flink");
  flinkInterpreterSetting.setProperty("FLINK_HOME", flinkHome);
  flinkInterpreterSetting.setProperty("ZEPPELIN_CONF_DIR", zeppelin.getZeppelinConfDir().getAbsolutePath());
  flinkInterpreterSetting.setProperty("flink.execution.mode", "local");

  testInterpreterBasics();

  // no yarn application launched
  GetApplicationsRequest request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
  GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
  assertEquals(0, response.getApplicationList().size());

  interpreterSettingManager.close();
}
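GetApplicationsRequest can filter on more than application state. If a test needed to count only Flink applications, it could combine an application-type filter with the state filter, as sketched below. The two-argument newInstance overload is part of the YARN API; the "Apache Flink" type string is the application type Flink-on-YARN is known to register, but both values here are illustrative rather than taken from the zeppelin tests.

// Illustrative variant: restrict the query to Flink applications in
// RUNNING state (requires java.util.Collections).
GetApplicationsRequest flinkOnly = GetApplicationsRequest.newInstance(
    Collections.singleton("Apache Flink"),
    EnumSet.of(YarnApplicationState.RUNNING));
GetApplicationsResponse flinkApps = hadoopCluster.getYarnCluster()
    .getResourceManager().getClientRMService().getApplications(flinkOnly);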
Use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse in project zeppelin by apache.
Class SparkIntegrationTest, method testLocalMode.
@Test
public void testLocalMode() throws IOException, YarnException, InterpreterException, XmlPullParserException {
  assumeTrue("Hadoop version mismatch, skip test", isHadoopVersionMatch());
  InterpreterSetting sparkInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("spark");
  sparkInterpreterSetting.setProperty("spark.master", "local[*]");
  sparkInterpreterSetting.setProperty("SPARK_HOME", sparkHome);
  sparkInterpreterSetting.setProperty("ZEPPELIN_CONF_DIR", zeppelin.getZeppelinConfDir().getAbsolutePath());
  sparkInterpreterSetting.setProperty("zeppelin.spark.useHiveContext", "false");
  sparkInterpreterSetting.setProperty("zeppelin.pyspark.useIPython", "false");
  sparkInterpreterSetting.setProperty("zeppelin.spark.scala.color", "false");
  sparkInterpreterSetting.setProperty("zeppelin.spark.deprecatedMsg.show", "false");
  sparkInterpreterSetting.setProperty("spark.user.name", "#{user}");

  try {
    setUpSparkInterpreterSetting(sparkInterpreterSetting);
    testInterpreterBasics();

    // no yarn application launched
    GetApplicationsRequest request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
    GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
    assertEquals(0, response.getApplicationList().size());
  } finally {
    interpreterSettingManager.close();
  }
}
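When a zero-application assertion like the one above fails, the usual cause is an application leaked by an earlier test. An unfiltered GetApplicationsRequest returns applications in every state, which makes such a leak visible; the debugging sketch below is not part of the test, and the printed fields are just the ApplicationReport accessors.

// Debugging sketch: list every application the ResourceManager knows
// about, regardless of state, to see what leaked from earlier tests.
GetApplicationsRequest allApps = GetApplicationsRequest.newInstance();
for (ApplicationReport report : hadoopCluster.getYarnCluster()
    .getResourceManager().getClientRMService()
    .getApplications(allApps).getApplicationList()) {
  System.out.println(report.getApplicationId() + " " + report.getName()
      + " " + report.getYarnApplicationState());
}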
Use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse in project zeppelin by apache.
Class YarnInterpreterLauncherIntegrationTest, method testLaunchShellInYarn.
@Test
public void testLaunchShellInYarn() throws YarnException, InterpreterException, InterruptedException {
  InterpreterSetting shellInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("sh");
  shellInterpreterSetting.setProperty("zeppelin.interpreter.launcher", "yarn");
  shellInterpreterSetting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());

  Interpreter shellInterpreter = interpreterFactory.getInterpreter("sh", new ExecutionContext("user1", "note1", "sh"));
  InterpreterContext context = new InterpreterContext.Builder()
      .setNoteId("note1")
      .setParagraphId("paragraph_1")
      .build();
  InterpreterResult interpreterResult = shellInterpreter.interpret("pwd", context);
  assertEquals(InterpreterResult.Code.SUCCESS, interpreterResult.code());
  // the interpreter runs inside a yarn container, so its working
  // directory is under the node manager's usercache
  assertTrue(interpreterResult.toString(), interpreterResult.message().get(0).getData().contains("/usercache/"));

  // give the ResourceManager a moment to report the application
  Thread.sleep(1000);
  // 1 yarn application launched
  GetApplicationsRequest request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
  GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
  assertEquals(1, response.getApplicationList().size());

  interpreterSettingManager.close();
}
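The fixed Thread.sleep(1000) above is timing-sensitive: if the ResourceManager is slow to report the application, the count assertion fails spuriously. A polling variant would make the wait explicit; the awaitRunningApps helper below is a hypothetical sketch, not part of the zeppelin tests.

// Hypothetical polling helper: retry the RUNNING-application query
// until the expected count appears or the timeout elapses, instead of
// relying on a fixed sleep.
private void awaitRunningApps(int expected, long timeoutMs)
    throws YarnException, IOException, InterruptedException {
  long deadline = System.currentTimeMillis() + timeoutMs;
  while (true) {
    GetApplicationsRequest request =
        GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
    GetApplicationsResponse response = hadoopCluster.getYarnCluster()
        .getResourceManager().getClientRMService().getApplications(request);
    if (response.getApplicationList().size() == expected) {
      return;
    }
    if (System.currentTimeMillis() > deadline) {
      fail("expected " + expected + " RUNNING yarn applications, got "
          + response.getApplicationList().size());
    }
    Thread.sleep(200);
  }
}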
Use of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse in project zeppelin by apache.
Class YarnInterpreterLauncherIntegrationTest, method testJdbcPython_YarnLauncher.
@Test
public void testJdbcPython_YarnLauncher() throws InterpreterException, YarnException, InterruptedException {
  InterpreterSetting jdbcInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("jdbc");
  jdbcInterpreterSetting.setProperty("default.driver", "com.mysql.jdbc.Driver");
  jdbcInterpreterSetting.setProperty("default.url", "jdbc:mysql://localhost:3306/");
  jdbcInterpreterSetting.setProperty("default.user", "root");
  jdbcInterpreterSetting.setProperty("zeppelin.interpreter.launcher", "yarn");
  jdbcInterpreterSetting.setProperty("zeppelin.interpreter.yarn.resource.memory", "512");
  jdbcInterpreterSetting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());

  // restart the jdbc interpreter so the mysql driver dependency is resolved
  Dependency dependency = new Dependency("mysql:mysql-connector-java:5.1.46");
  jdbcInterpreterSetting.setDependencies(Arrays.asList(dependency));
  interpreterSettingManager.restart(jdbcInterpreterSetting.getId());
  jdbcInterpreterSetting.waitForReady(60 * 1000);

  InterpreterSetting pythonInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("python");
  pythonInterpreterSetting.setProperty("zeppelin.interpreter.launcher", "yarn");
  pythonInterpreterSetting.setProperty("zeppelin.interpreter.yarn.resource.memory", "512");
  pythonInterpreterSetting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());

  Interpreter jdbcInterpreter = interpreterFactory.getInterpreter("jdbc", new ExecutionContext("user1", "note1", "test"));
  assertNotNull("JdbcInterpreter is null", jdbcInterpreter);

  InterpreterContext context = new InterpreterContext.Builder()
      .setNoteId("note1")
      .setParagraphId("paragraph_1")
      .setAuthenticationInfo(AuthenticationInfo.ANONYMOUS)
      .build();
  InterpreterResult interpreterResult = jdbcInterpreter.interpret("show databases;", context);
  assertEquals(interpreterResult.toString(), InterpreterResult.Code.SUCCESS, interpreterResult.code());

  // save the query result under the name table_1 so other interpreters can read it
  context.getLocalProperties().put("saveAs", "table_1");
  interpreterResult = jdbcInterpreter.interpret("SELECT 1 as c1, 2 as c2;", context);
  assertEquals(interpreterResult.toString(), InterpreterResult.Code.SUCCESS, interpreterResult.code());
  assertEquals(1, interpreterResult.message().size());
  assertEquals(InterpreterResult.Type.TABLE, interpreterResult.message().get(0).getType());
  assertEquals("c1\tc2\n1\t2\n", interpreterResult.message().get(0).getData());

  // read table_1 from python interpreter
  Interpreter pythonInterpreter = interpreterFactory.getInterpreter("python", new ExecutionContext("user1", "note1", "test"));
  assertNotNull("PythonInterpreter is null", pythonInterpreter);

  context = new InterpreterContext.Builder()
      .setNoteId("note1")
      .setParagraphId("paragraph_1")
      .setAuthenticationInfo(AuthenticationInfo.ANONYMOUS)
      .build();
  interpreterResult = pythonInterpreter.interpret("df=z.getAsDataFrame('table_1')\nz.show(df)", context);
  assertEquals(interpreterResult.toString(), InterpreterResult.Code.SUCCESS, interpreterResult.code());
  assertEquals(1, interpreterResult.message().size());
  assertEquals(InterpreterResult.Type.TABLE, interpreterResult.message().get(0).getType());
  assertEquals("c1\tc2\n1\t2\n", interpreterResult.message().get(0).getData());

  // 2 yarn applications launched, one per interpreter
  GetApplicationsRequest request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
  GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
  assertEquals(2, response.getApplicationList().size());

  interpreterSettingManager.close();

  // sleep for 5 seconds to make sure yarn apps are finished
  Thread.sleep(5 * 1000);
  request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
  response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
  assertEquals(0, response.getApplicationList().size());
}
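Counting applications verifies how many interpreter processes are running, but not which ones. Since GetApplicationsResponse.getApplicationList() returns full ApplicationReport entries, a test could also inspect per-application details, as sketched below; the exact names the yarn launcher assigns to interpreter applications are not shown in this listing, so the print is illustrative.

// Illustrative check: inspect the ApplicationReport entries instead of
// only counting them.
for (ApplicationReport report : response.getApplicationList()) {
  assertEquals(YarnApplicationState.RUNNING, report.getYarnApplicationState());
  System.out.println("launched interpreter app: " + report.getName());
}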