Usage of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest in the project zeppelin by apache.
From the class SparkSubmitIntegrationTest, method testYarnMode.
@Test
public void testYarnMode() throws InterpreterException, YarnException {
  try {
    // Obtain the spark-submit interpreter for user1/note1 and submit SparkPi in yarn-cluster mode.
    Interpreter sparkSubmitInterpreter =
        interpreterFactory.getInterpreter("spark-submit", new ExecutionContext("user1", "note1", "test"));
    InterpreterContext context =
        new InterpreterContext.Builder().setNoteId("note1").setParagraphId("paragraph_1").build();
    String yarnAppName = "yarn_example";
    String submitCommand =
        "--master yarn-cluster --class org.apache.spark.examples.SparkPi "
            + "--conf spark.app.name=" + yarnAppName
            + " --conf spark.driver.memory=512m "
            + "--conf spark.executor.memory=512m "
            + sparkHome + "/examples/jars/spark-examples_2.11-2.4.7.jar";
    InterpreterResult result = sparkSubmitInterpreter.interpret(submitCommand, context);
    assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());

    // The submitted job must appear among the FINISHED applications on the mini YARN cluster.
    GetApplicationsRequest finishedAppsRequest =
        GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.FINISHED));
    GetApplicationsResponse finishedAppsResponse =
        hadoopCluster.getYarnCluster().getResourceManager().getClientRMService()
            .getApplications(finishedAppsRequest);
    assertTrue(finishedAppsResponse.getApplicationList().size() >= 1);

    // Exactly one finished application should carry the name we set via spark.app.name.
    List<ApplicationReport> matchingApps = finishedAppsResponse.getApplicationList().stream()
        .filter(app -> app.getName().equals(yarnAppName))
        .collect(Collectors.toList());
    assertEquals(1, matchingApps.size());
  } finally {
    interpreterSettingManager.close();
  }
}
Usage of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest in the project zeppelin by apache.
From the class SparkSubmitIntegrationTest, method testLocalMode.
@Test
public void testLocalMode() throws InterpreterException, YarnException {
  try {
    // test SparkSubmitInterpreterSetting
    Interpreter sparkSubmitInterpreter =
        interpreterFactory.getInterpreter("spark-submit", new ExecutionContext("user1", "note1", "test"));
    InterpreterContext context =
        new InterpreterContext.Builder().setNoteId("note1").setParagraphId("paragraph_1").build();
    // BUG FIX: this test verifies a local-mode submission, but previously passed
    // "--master yarn-cluster", which launches a YARN application and contradicts the
    // "no yarn application launched" assertion below. Submit with "--master local".
    InterpreterResult interpreterResult = sparkSubmitInterpreter.interpret(
        "--master local --class org.apache.spark.examples.SparkPi "
            + sparkHome + "/examples/jars/spark-examples_2.11-2.4.7.jar", context);
    assertEquals(interpreterResult.toString(), InterpreterResult.Code.SUCCESS, interpreterResult.code());
    // no yarn application launched
    GetApplicationsRequest request =
        GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
    GetApplicationsResponse response =
        hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
    assertEquals(0, response.getApplicationList().size());
  } finally {
    interpreterSettingManager.close();
  }
}
Usage of org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest in the project zeppelin by apache.
From the class SparkSubmitIntegrationTest, method testCancelSparkYarnApp.
@Test
public void testCancelSparkYarnApp() throws InterpreterException, YarnException, TimeoutException, InterruptedException {
  try {
    // test SparkSubmitInterpreterSetting
    Interpreter sparkSubmitInterpreter =
        interpreterFactory.getInterpreter("spark-submit", new ExecutionContext("user1", "note1", "test"));
    InterpreterContext context =
        new InterpreterContext.Builder().setNoteId("note1").setParagraphId("paragraph_1").build();
    final Waiter waiter = new Waiter();
    // Run the blocking interpret() call in a background thread so the main thread
    // can watch the YARN cluster and cancel the paragraph while it is running.
    Thread thread = new Thread() {
      @Override
      public void run() {
        try {
          String yarnAppName = "yarn_cancel_example";
          InterpreterResult interpreterResult = sparkSubmitInterpreter.interpret(
              "--master yarn-cluster --class org.apache.spark.examples.SparkPi "
                  + "--conf spark.app.name=" + yarnAppName
                  + " --conf spark.driver.memory=512m "
                  + "--conf spark.executor.memory=512m "
                  + sparkHome + "/examples/jars/spark-examples_2.11-2.4.7.jar", context);
          // BUG FIX: use the Waiter's assertions, not JUnit's. A plain JUnit
          // AssertionError thrown in this background thread never reaches the test
          // runner; the test would only fail later as an opaque waiter.await timeout.
          waiter.assertEquals(InterpreterResult.Code.INCOMPLETE, interpreterResult.code());
          waiter.assertTrue(interpreterResult.toString().contains("Paragraph received a SIGTERM"));
        } catch (Throwable e) {
          // BUG FIX: previously only InterpreterException was caught, so any other
          // failure in this thread bypassed the waiter and surfaced as a timeout
          // with no diagnostic. Report every failure with its stack trace.
          waiter.fail("Should not throw exception\n" + ExceptionUtils.getStackTrace(e));
        }
        waiter.resume();
      }
    };
    thread.start();
    // Poll (up to 2 minutes, every 5s) until the YARN application reaches RUNNING,
    // then cancel the paragraph and wait for the background thread's verdict.
    long start = System.currentTimeMillis();
    long threshold = 120 * 1000;
    while ((System.currentTimeMillis() - start) < threshold) {
      GetApplicationsRequest request =
          GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
      GetApplicationsResponse response =
          hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
      if (response.getApplicationList().size() >= 1) {
        break;
      }
      Thread.sleep(5 * 1000);
    }
    sparkSubmitInterpreter.cancel(context);
    waiter.await(10000);
  } finally {
    interpreterSettingManager.close();
  }
}
Aggregations