
Example 6 with ExecutionContext

use of org.apache.zeppelin.interpreter.ExecutionContext in project zeppelin by apache.

the class TimeoutLifecycleManagerTest method testTimeout_2.

@Test
public void testTimeout_2() throws InterpreterException, InterruptedException, IOException {
    assertTrue(interpreterFactory.getInterpreter("test.sleep", new ExecutionContext("user1", "note1", "test")) instanceof RemoteInterpreter);
    final RemoteInterpreter remoteInterpreter = (RemoteInterpreter) interpreterFactory.getInterpreter("test.sleep", new ExecutionContext("user1", "note1", "test"));
    // simulate how Zeppelin submits a paragraph
    remoteInterpreter.getScheduler().submit(new Job<Object>("test-job", null) {

        @Override
        public Object getReturn() {
            return null;
        }

        @Override
        public int progress() {
            return 0;
        }

        @Override
        public Map<String, Object> info() {
            return null;
        }

        @Override
        protected Object jobRun() throws Throwable {
            InterpreterContext context = InterpreterContext.builder().setNoteId("noteId").setParagraphId("paragraphId").build();
            return remoteInterpreter.interpret("100000", context);
        }

        @Override
        protected boolean jobAbort() {
            return false;
        }

        @Override
        public void setResult(Object results) {
        }
    });
    while (!remoteInterpreter.isOpened()) {
        Thread.sleep(1000);
        LOGGER.info("Wait for interpreter to be started");
    }
    InterpreterSetting interpreterSetting = interpreterSettingManager.getInterpreterSettingByName("test");
    assertEquals(1, interpreterSetting.getAllInterpreterGroups().size());
    Thread.sleep(15 * 1000);
    // the interpreter group does not time out because getStatus is called periodically.
    assertEquals(1, interpreterSetting.getAllInterpreterGroups().size());
    assertTrue(remoteInterpreter.isOpened());
}
Also used : ExecutionContext(org.apache.zeppelin.interpreter.ExecutionContext) InterpreterSetting(org.apache.zeppelin.interpreter.InterpreterSetting) InterpreterContext(org.apache.zeppelin.interpreter.InterpreterContext) Map(java.util.Map) RemoteInterpreter(org.apache.zeppelin.interpreter.remote.RemoteInterpreter) AbstractInterpreterTest(org.apache.zeppelin.interpreter.AbstractInterpreterTest) Test(org.junit.Test)

Example 7 with ExecutionContext

use of org.apache.zeppelin.interpreter.ExecutionContext in project zeppelin by apache.

the class NotebookTest method testAutoRestartInterpreterAfterSchedule.

// @Test
public void testAutoRestartInterpreterAfterSchedule() throws InterruptedException, IOException, InterpreterNotFoundException {
    // create a note and two paragraphs
    String noteId = notebook.createNote("note1", anonymous);
    // use write lock, because we overwrite the note configuration
    notebook.processNote(noteId, note -> {
        Paragraph p = note.addNewParagraph(AuthenticationInfo.ANONYMOUS);
        Map<String, Object> config = new HashMap<>();
        p.setConfig(config);
        p.setText("%mock1 sleep 1000");
        Paragraph p2 = note.addNewParagraph(AuthenticationInfo.ANONYMOUS);
        p2.setConfig(config);
        p2.setText("%mock2 sleep 500");
        // set cron scheduler to run every 3 seconds
        config = note.getConfig();
        config.put("enabled", true);
        config.put("cron", "1/3 * * * * ?");
        config.put("releaseresource", true);
        note.setConfig(config);
        return null;
    });
    schedulerService.refreshCron(noteId);
    ExecutionContext executionContext = new ExecutionContext(anonymous.getUser(), noteId, "test");
    RemoteInterpreter mock1 = (RemoteInterpreter) interpreterFactory.getInterpreter("mock1", executionContext);
    RemoteInterpreter mock2 = (RemoteInterpreter) interpreterFactory.getInterpreter("mock2", executionContext);
    // wait until interpreters are started
    while (!mock1.isOpened() || !mock2.isOpened()) {
        Thread.yield();
    }
    // wait until interpreters are closed
    while (mock1.isOpened() || mock2.isOpened()) {
        Thread.yield();
    }
    // remove cron scheduler.
    // use write lock because config is overwritten
    notebook.processNote(noteId, note -> {
        Map<String, Object> config = note.getConfig();
        config.put("cron", null);
        note.setConfig(config);
        return null;
    });
    schedulerService.refreshCron(noteId);
    // make sure all paragraphs have been executed
    notebook.processNote(noteId, note -> {
        for (Paragraph p : note.getParagraphs()) {
            assertNotNull(p.getDateFinished());
        }
        return null;
    });
    notebook.removeNote(noteId, anonymous);
}
Also used : ExecutionContext(org.apache.zeppelin.interpreter.ExecutionContext) HashMap(java.util.HashMap) RemoteInterpreter(org.apache.zeppelin.interpreter.remote.RemoteInterpreter)
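
A side note on the two busy-wait loops above: Thread.yield() in a tight loop keeps a core spinning and never fails if an interpreter hangs. A small bounded-polling helper in plain Java (a sketch, not part of Zeppelin; the class and method names are made up here) keeps the same isOpened() checks but adds a timeout:

import java.util.function.BooleanSupplier;

public final class WaitUtil {

    // Poll a condition until it holds or the timeout elapses.
    // Returns true only if the condition became true within timeoutMillis.
    static boolean awaitCondition(BooleanSupplier condition, long timeoutMillis) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (!condition.getAsBoolean()) {
            if (System.currentTimeMillis() >= deadline) {
                return false;
            }
            Thread.sleep(100);
        }
        return true;
    }
}

With such a helper the two loops could become, for example, assertTrue(awaitCondition(() -> mock1.isOpened() && mock2.isOpened(), 60_000)) followed by assertTrue(awaitCondition(() -> !mock1.isOpened() && !mock2.isOpened(), 60_000)), so the test fails with a clear assertion instead of hanging forever.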

Example 8 with ExecutionContext

use of org.apache.zeppelin.interpreter.ExecutionContext in project zeppelin by apache.

the class YarnInterpreterLauncherIntegrationTest method testLaunchShellInYarn.

@Test
public void testLaunchShellInYarn() throws YarnException, InterpreterException, InterruptedException {
    InterpreterSetting shellInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("sh");
    shellInterpreterSetting.setProperty("zeppelin.interpreter.launcher", "yarn");
    shellInterpreterSetting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());
    Interpreter shellInterpreter = interpreterFactory.getInterpreter("sh", new ExecutionContext("user1", "note1", "sh"));
    InterpreterContext context = new InterpreterContext.Builder().setNoteId("note1").setParagraphId("paragraph_1").build();
    InterpreterResult interpreterResult = shellInterpreter.interpret("pwd", context);
    assertEquals(InterpreterResult.Code.SUCCESS, interpreterResult.code());
    assertTrue(interpreterResult.toString(), interpreterResult.message().get(0).getData().contains("/usercache/"));
    Thread.sleep(1000);
    // 1 yarn application launched
    GetApplicationsRequest request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
    GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
    assertEquals(1, response.getApplicationList().size());
    interpreterSettingManager.close();
}
Also used : Interpreter(org.apache.zeppelin.interpreter.Interpreter) ExecutionContext(org.apache.zeppelin.interpreter.ExecutionContext) GetApplicationsResponse(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse) InterpreterSetting(org.apache.zeppelin.interpreter.InterpreterSetting) InterpreterResult(org.apache.zeppelin.interpreter.InterpreterResult) InterpreterContext(org.apache.zeppelin.interpreter.InterpreterContext) GetApplicationsRequest(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest) Test(org.junit.Test)

Example 9 with ExecutionContext

use of org.apache.zeppelin.interpreter.ExecutionContext in project zeppelin by apache.

the class YarnInterpreterLauncherIntegrationTest method testJdbcPython_YarnLauncher.

@Test
public void testJdbcPython_YarnLauncher() throws InterpreterException, YarnException, InterruptedException {
    InterpreterSetting jdbcInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("jdbc");
    jdbcInterpreterSetting.setProperty("default.driver", "com.mysql.jdbc.Driver");
    jdbcInterpreterSetting.setProperty("default.url", "jdbc:mysql://localhost:3306/");
    jdbcInterpreterSetting.setProperty("default.user", "root");
    jdbcInterpreterSetting.setProperty("zeppelin.interpreter.launcher", "yarn");
    jdbcInterpreterSetting.setProperty("zeppelin.interpreter.yarn.resource.memory", "512");
    jdbcInterpreterSetting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());
    Dependency dependency = new Dependency("mysql:mysql-connector-java:5.1.46");
    jdbcInterpreterSetting.setDependencies(Arrays.asList(dependency));
    interpreterSettingManager.restart(jdbcInterpreterSetting.getId());
    jdbcInterpreterSetting.waitForReady(60 * 1000);
    InterpreterSetting pythonInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("python");
    pythonInterpreterSetting.setProperty("zeppelin.interpreter.launcher", "yarn");
    pythonInterpreterSetting.setProperty("zeppelin.interpreter.yarn.resource.memory", "512");
    pythonInterpreterSetting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());
    Interpreter jdbcInterpreter = interpreterFactory.getInterpreter("jdbc", new ExecutionContext("user1", "note1", "test"));
    assertNotNull("JdbcInterpreter is null", jdbcInterpreter);
    InterpreterContext context = new InterpreterContext.Builder().setNoteId("note1").setParagraphId("paragraph_1").setAuthenticationInfo(AuthenticationInfo.ANONYMOUS).build();
    InterpreterResult interpreterResult = jdbcInterpreter.interpret("show databases;", context);
    assertEquals(interpreterResult.toString(), InterpreterResult.Code.SUCCESS, interpreterResult.code());
    context.getLocalProperties().put("saveAs", "table_1");
    interpreterResult = jdbcInterpreter.interpret("SELECT 1 as c1, 2 as c2;", context);
    assertEquals(interpreterResult.toString(), InterpreterResult.Code.SUCCESS, interpreterResult.code());
    assertEquals(1, interpreterResult.message().size());
    assertEquals(InterpreterResult.Type.TABLE, interpreterResult.message().get(0).getType());
    assertEquals("c1\tc2\n1\t2\n", interpreterResult.message().get(0).getData());
    // read table_1 from python interpreter
    Interpreter pythonInterpreter = interpreterFactory.getInterpreter("python", new ExecutionContext("user1", "note1", "test"));
    assertNotNull("PythonInterpreter is null", pythonInterpreter);
    context = new InterpreterContext.Builder().setNoteId("note1").setParagraphId("paragraph_1").setAuthenticationInfo(AuthenticationInfo.ANONYMOUS).build();
    interpreterResult = pythonInterpreter.interpret("df=z.getAsDataFrame('table_1')\nz.show(df)", context);
    assertEquals(interpreterResult.toString(), InterpreterResult.Code.SUCCESS, interpreterResult.code());
    assertEquals(1, interpreterResult.message().size());
    assertEquals(InterpreterResult.Type.TABLE, interpreterResult.message().get(0).getType());
    assertEquals("c1\tc2\n1\t2\n", interpreterResult.message().get(0).getData());
    // 2 yarn applications launched
    GetApplicationsRequest request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
    GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
    assertEquals(2, response.getApplicationList().size());
    interpreterSettingManager.close();
    // sleep for 5 seconds to make sure yarn apps are finished
    Thread.sleep(5 * 1000);
    request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
    response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
    assertEquals(0, response.getApplicationList().size());
}
Also used : Interpreter(org.apache.zeppelin.interpreter.Interpreter) ExecutionContext(org.apache.zeppelin.interpreter.ExecutionContext) GetApplicationsResponse(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse) InterpreterSetting(org.apache.zeppelin.interpreter.InterpreterSetting) InterpreterResult(org.apache.zeppelin.interpreter.InterpreterResult) Dependency(org.apache.zeppelin.dep.Dependency) InterpreterContext(org.apache.zeppelin.interpreter.InterpreterContext) GetApplicationsRequest(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest) Test(org.junit.Test)

Example 10 with ExecutionContext

use of org.apache.zeppelin.interpreter.ExecutionContext in project zeppelin by apache.

the class Paragraph method isValidInterpreter.

public boolean isValidInterpreter(String replName) {
    try {
        ExecutionContext executionContext = note.getExecutionContext();
        executionContext.setUser(user);
        executionContext.setInterpreterGroupId(interpreterGroupId);
        return note.getInterpreterFactory().getInterpreter(replName, executionContext) != null;
    } catch (InterpreterNotFoundException e) {
        return false;
    }
}
Also used : ExecutionContext(org.apache.zeppelin.interpreter.ExecutionContext) InterpreterNotFoundException(org.apache.zeppelin.interpreter.InterpreterNotFoundException)
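
Across these examples an ExecutionContext is obtained in one of two ways: built directly with the (user, noteId, defaultInterpreterGroup) constructor, or taken from the note and then adjusted, as Paragraph.isValidInterpreter does. A compressed sketch of both styles (not code from the project; the Note import path, the class name, and the sample values are assumptions based on the examples above):

import org.apache.zeppelin.interpreter.ExecutionContext;
import org.apache.zeppelin.notebook.Note;

public class ExecutionContextStylesSketch {

    // Style used by the integration tests: build the context from scratch.
    static ExecutionContext direct() {
        return new ExecutionContext("user1", "note1", "test");
    }

    // Style used by Paragraph.isValidInterpreter: reuse the note's context
    // and override the user and interpreter group id.
    static ExecutionContext fromNote(Note note, String user, String interpreterGroupId) {
        ExecutionContext context = note.getExecutionContext();
        context.setUser(user);
        context.setInterpreterGroupId(interpreterGroupId);
        return context;
    }
}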

Aggregations

ExecutionContext (org.apache.zeppelin.interpreter.ExecutionContext): 18
InterpreterContext (org.apache.zeppelin.interpreter.InterpreterContext): 13
Interpreter (org.apache.zeppelin.interpreter.Interpreter): 11
InterpreterResult (org.apache.zeppelin.interpreter.InterpreterResult): 11
InterpreterSetting (org.apache.zeppelin.interpreter.InterpreterSetting): 11
Test (org.junit.Test): 11
GetApplicationsRequest (org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest): 5
GetApplicationsResponse (org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse): 5
RemoteInterpreter (org.apache.zeppelin.interpreter.remote.RemoteInterpreter): 4
HashMap (java.util.HashMap): 2
Waiter (net.jodah.concurrentunit.Waiter): 2
Dependency (org.apache.zeppelin.dep.Dependency): 2
AbstractInterpreterTest (org.apache.zeppelin.interpreter.AbstractInterpreterTest): 2
InterpreterException (org.apache.zeppelin.interpreter.InterpreterException): 2
File (java.io.File): 1
FileReader (java.io.FileReader): 1
IOException (java.io.IOException): 1
EnumSet (java.util.EnumSet): 1
List (java.util.List): 1
Map (java.util.Map): 1
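
The aggregated types usually appear together in one pattern: an ExecutionContext selects which interpreter instance to use, an InterpreterContext describes the paragraph being run, and an InterpreterResult carries the outcome. A minimal end-to-end sketch of that pattern, assuming an already configured InterpreterFactory as in the tests above (class and method names here are illustrative, not from the project):

import org.apache.zeppelin.interpreter.ExecutionContext;
import org.apache.zeppelin.interpreter.Interpreter;
import org.apache.zeppelin.interpreter.InterpreterContext;
import org.apache.zeppelin.interpreter.InterpreterException;
import org.apache.zeppelin.interpreter.InterpreterFactory;
import org.apache.zeppelin.interpreter.InterpreterNotFoundException;
import org.apache.zeppelin.interpreter.InterpreterResult;

public class InterpretOnceSketch {

    static InterpreterResult interpretOnce(InterpreterFactory factory, String code)
            throws InterpreterNotFoundException, InterpreterException {
        // Pick the interpreter instance for this user, note and default interpreter group.
        Interpreter interpreter = factory.getInterpreter("sh", new ExecutionContext("user1", "note1", "sh"));
        // Describe the paragraph that is about to run.
        InterpreterContext context = InterpreterContext.builder()
                .setNoteId("note1")
                .setParagraphId("paragraph_1")
                .build();
        // Execute the snippet and return the result for the caller to inspect.
        return interpreter.interpret(code, context);
    }
}

Callers then check the outcome the same way the tests above do, for example assertEquals(InterpreterResult.Code.SUCCESS, result.code()) before reading result.message().get(0).getData().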