Example 21 with InterpreterResultMessage

Use of org.apache.zeppelin.interpreter.InterpreterResultMessage in project zeppelin by apache.

The class FlinkBatchSqlInterpreterTest, method testInsertInto.

@Test
public void testInsertInto() throws InterpreterException, IOException {
    hiveShell.execute("create table source_table (id int, name string)");
    hiveShell.execute("insert into source_table values(1, 'a'), (2, 'b')");
    File destDir = Files.createTempDirectory("flink_test").toFile();
    FileUtils.deleteDirectory(destDir);
    InterpreterResult result = sqlInterpreter.interpret(
        "CREATE TABLE sink_table (\n" +
        "id int,\n" +
        "name string" +
        ") WITH (\n" +
        "'format.field-delimiter'=',',\n" +
        "'connector.type'='filesystem',\n" +
        "'format.derive-schema'='true',\n" +
        "'connector.path'='" + destDir.getAbsolutePath() + "',\n" +
        "'format.type'='csv'\n" +
        ");", getInterpreterContext());
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    // insert into
    InterpreterContext context = getInterpreterContext();
    result = sqlInterpreter.interpret("insert into sink_table select * from source_table", context);
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    List<InterpreterResultMessage> resultMessages = context.out.toInterpreterResultMessage();
    assertEquals("Insertion successfully.\n", resultMessages.get(0).getData());
    // verify insert into via select from sink_table
    context = getInterpreterContext();
    result = sqlInterpreter.interpret("select * from sink_table", context);
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    resultMessages = context.out.toInterpreterResultMessage();
    assertEquals("id\tname\n1\ta\n2\tb\n", resultMessages.get(0).getData());
    // insert into again will fail
    context = getInterpreterContext();
    result = sqlInterpreter.interpret("insert into sink_table select * from source_table", context);
    assertEquals(InterpreterResult.Code.ERROR, result.code());
    resultMessages = context.out.toInterpreterResultMessage();
    assertTrue(resultMessages.get(0).getData(), resultMessages.get(0).getData().contains("already exists"));
    // insert overwrite into
    // context = getInterpreterContext();
    // result = sqlInterpreter.interpret(
    // "insert overwrite dest_table select id + 1, name from source_table", context);
    // assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    // resultMessages = context.out.toInterpreterResultMessage();
    // assertEquals("Insertion successfully.\n", resultMessages.get(0).getData());
    //
    // // verify insert into via select from the dest_table
    // context = getInterpreterContext();
    // result = sqlInterpreter.interpret(
    // "select * from dest_table", context);
    // assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    // resultMessages = context.out.toInterpreterResultMessage();
    // assertEquals("id\tname\n2\ta\n3\tb\n", resultMessages.get(0).getData());
    //
    // // define scala udf
    // result = flinkInterpreter.interpret(
    // "class AddOne extends ScalarFunction {\n" +
    // "  def eval(a: Int): Int = a + 1\n" +
    // "}", getInterpreterContext());
    // assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    //
    // result = flinkInterpreter.interpret("btenv.registerFunction(\"addOne\", new AddOne())",
    // getInterpreterContext());
    // assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    //
    // // insert into dest_table2 using udf
    // destDir = Files.createTempDirectory("flink_test").toFile();
    // FileUtils.deleteDirectory(destDir);
    // result = sqlInterpreter.interpret(
    // "CREATE TABLE dest_table2 (\n" +
    // "id int,\n" +
    // "name string" +
    // ") WITH (\n" +
    // "'format.field-delimiter'=',',\n" +
    // "'connector.type'='filesystem',\n" +
    // "'format.derive-schema'='true',\n" +
    // "'connector.path'='" + destDir.getAbsolutePath() + "',\n" +
    // "'format.type'='csv'\n" +
    // ");", getInterpreterContext());
    // assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    //
    // context = getInterpreterContext();
    // result = sqlInterpreter.interpret(
    // "insert into dest_table2 select addOne(id), name from source_table", context);
    // assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    // resultMessages = context.out.toInterpreterResultMessage();
    // assertEquals("Insertion successfully.\n", resultMessages.get(0).getData());
    //
    // // verify insert into via select from the dest table
    // context = getInterpreterContext();
    // result = sqlInterpreter.interpret(
    // "select * from dest_table2", context);
    // assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    // resultMessages = context.out.toInterpreterResultMessage();
    // assertEquals("id\tname\n2\ta\n3\tb\n", resultMessages.get(0).getData());
}
Also used: InterpreterResult (org.apache.zeppelin.interpreter.InterpreterResult), InterpreterContext (org.apache.zeppelin.interpreter.InterpreterContext), File (java.io.File), InterpreterResultMessage (org.apache.zeppelin.interpreter.InterpreterResultMessage), Test (org.junit.Test)
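
The pattern above is the core idiom throughout these examples: run a statement, then drain context.out into a list of InterpreterResultMessage and assert on the first entry. A minimal sketch of that idiom as a reusable helper, assuming the same test fixture (sqlInterpreter and getInterpreterContext() come from FlinkBatchSqlInterpreterTest; the helper itself is hypothetical):

private InterpreterResultMessage firstResultMessage(String sql) throws InterpreterException, IOException {
    // Use a fresh context so output from earlier paragraphs cannot leak in.
    InterpreterContext context = getInterpreterContext();
    InterpreterResult result = sqlInterpreter.interpret(sql, context);
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    // context.out buffers everything the interpreter wrote during interpret().
    List<InterpreterResultMessage> messages = context.out.toInterpreterResultMessage();
    assertFalse("expected at least one result message", messages.isEmpty());
    return messages.get(0);
}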

Example 22 with InterpreterResultMessage

Use of org.apache.zeppelin.interpreter.InterpreterResultMessage in project zeppelin by apache.

The class FlinkBatchSqlInterpreterTest, method testSetTableConfig.

@Test
public void testSetTableConfig() throws InterpreterException, IOException {
    hiveShell.execute("create table source_table (id int, name string)");
    hiveShell.execute("insert into source_table values(1, 'a'), (2, 'b')");
    File destDir = Files.createTempDirectory("flink_test").toFile();
    FileUtils.deleteDirectory(destDir);
    InterpreterResult result = sqlInterpreter.interpret(
        "CREATE TABLE sink_table (\n" +
        "id int,\n" +
        "name string" +
        ") WITH (\n" +
        "'format.field-delimiter'=',',\n" +
        "'connector.type'='filesystem',\n" +
        "'format.derive-schema'='true',\n" +
        "'connector.path'='" + destDir.getAbsolutePath() + "',\n" +
        "'format.type'='csv'\n" +
        ");", getInterpreterContext());
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    // set parallelism then insert into
    InterpreterContext context = getInterpreterContext();
    result = sqlInterpreter.interpret("set table.exec.resource.default-parallelism=10;" + "insert into sink_table select * from source_table", context);
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    List<InterpreterResultMessage> resultMessages = context.out.toInterpreterResultMessage();
    assertEquals("Insertion successfully.\n", resultMessages.get(0).getData());
    assertEquals(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM.defaultValue(), sqlInterpreter.tbenv.getConfig().getConfiguration().get(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM));
    // set then insert into
    destDir.delete();
    context = getInterpreterContext();
    result = sqlInterpreter.interpret("set table.optimizer.source.predicate-pushdown-enabled=false;" + "insert into sink_table select * from source_table", context);
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    resultMessages = context.out.toInterpreterResultMessage();
    assertEquals("Insertion successfully.\n", resultMessages.get(0).getData());
    assertEquals(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM.defaultValue(), sqlInterpreter.tbenv.getConfig().getConfiguration().get(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM));
    assertEquals(OptimizerConfigOptions.TABLE_OPTIMIZER_SOURCE_PREDICATE_PUSHDOWN_ENABLED.defaultValue(), sqlInterpreter.tbenv.getConfig().getConfiguration().get(OptimizerConfigOptions.TABLE_OPTIMIZER_SOURCE_PREDICATE_PUSHDOWN_ENABLED));
    // invalid config
    destDir.delete();
    context = getInterpreterContext();
    result = sqlInterpreter.interpret("set table.invalid_config=false;" + "insert into sink_table select * from source_table", context);
    assertEquals(InterpreterResult.Code.ERROR, result.code());
    resultMessages = context.out.toInterpreterResultMessage();
    assertTrue(resultMessages.get(0).getData(), resultMessages.get(0).getData().contains("table.invalid_config is not a valid table/sql config"));
}
Also used: InterpreterResult (org.apache.zeppelin.interpreter.InterpreterResult), InterpreterContext (org.apache.zeppelin.interpreter.InterpreterContext), File (java.io.File), InterpreterResultMessage (org.apache.zeppelin.interpreter.InterpreterResultMessage), Test (org.junit.Test)
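
Both config assertions above compare against defaultValue(), which is the point of the test: a "set key=value;" prefix applies only to the statement that carries it, so it must not leak into the shared tbenv. A minimal sketch of that no-leak check, assuming the same fixture and Flink's ExecutionConfigOptions (org.apache.flink.table.api.config.ExecutionConfigOptions):

// After a paragraph that issued `set table.exec.resource.default-parallelism=10;`,
// the shared TableEnvironment config should be back at the option's default.
Configuration conf = sqlInterpreter.tbenv.getConfig().getConfiguration();
assertEquals(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM.defaultValue(),
        conf.get(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM));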

Example 23 with InterpreterResultMessage

Use of org.apache.zeppelin.interpreter.InterpreterResultMessage in project zeppelin by apache.

The class FlinkBatchSqlInterpreterTest, method testSelect.

@Test
public void testSelect() throws InterpreterException, IOException {
    hiveShell.execute("create table source_table (id int, name string)");
    hiveShell.execute("insert into source_table values(1, 'a'), (2, 'b')");
    // verify select from
    InterpreterContext context = getInterpreterContext();
    InterpreterResult result = sqlInterpreter.interpret("show tables", context);
    List<InterpreterResultMessage> resultMessages = context.out.toInterpreterResultMessage();
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    assertEquals(1, resultMessages.size());
    assertEquals(InterpreterResult.Type.TABLE, resultMessages.get(0).getType());
    assertEquals(resultMessages.get(0).toString(), "table\nsource_table\n", resultMessages.get(0).getData());
    // verify select from
    context = getInterpreterContext();
    result = sqlInterpreter.interpret("select * from source_table", context);
    resultMessages = context.out.toInterpreterResultMessage();
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    assertEquals(1, resultMessages.size());
    assertEquals(InterpreterResult.Type.TABLE, resultMessages.get(0).getType());
    assertEquals("id\tname\n1\ta\n2\tb\n", resultMessages.get(0).getData());
    // z.show
    if (!flinkInterpreter.getFlinkVersion().isAfterFlink114()) {
        context = getInterpreterContext();
        result = flinkInterpreter.interpret("z.show(btenv.sqlQuery(\"select * from source_table\"))", context);
        resultMessages = context.out.toInterpreterResultMessage();
        assertEquals(context.out.toString(), InterpreterResult.Code.SUCCESS, result.code());
        assertEquals(1, resultMessages.size());
        assertEquals(InterpreterResult.Type.TABLE, resultMessages.get(0).getType());
        assertEquals("id\tname\n1\ta\n2\tb\n", resultMessages.get(0).getData());
    }
    // define scala udf
    result = flinkInterpreter.interpret(
        "class AddOne extends ScalarFunction {\n" +
        "  def eval(a: Int): Int = a + 1\n" +
        "}", getInterpreterContext());
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    result = flinkInterpreter.interpret("stenv.registerFunction(\"addOne\", new AddOne())", getInterpreterContext());
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    // select which use scala udf
    context = getInterpreterContext();
    result = sqlInterpreter.interpret("SELECT addOne(id) as add_one FROM source_table", context);
    assertEquals(context.out.toString(), InterpreterResult.Code.SUCCESS, result.code());
    resultMessages = context.out.toInterpreterResultMessage();
    assertEquals(1, resultMessages.size());
    assertEquals(InterpreterResult.Type.TABLE, resultMessages.get(0).getType());
    assertEquals("add_one\n2\n3\n", resultMessages.get(0).getData());
    // define python udf via PyFlinkInterpreter
    result = pyFlinkInterpreter.interpret(
        "class PythonUpper(ScalarFunction):\n" +
        "  def eval(self, s):\n" +
        "    return s.upper()", getInterpreterContext());
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    context = getInterpreterContext();
    result = pyFlinkInterpreter.interpret("st_env.register_function(\"python_upper\", " + "udf(PythonUpper(), DataTypes.STRING(), DataTypes.STRING()))", context);
    assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
    // resultMessages = context.out.toInterpreterResultMessage();
    // assertEquals(1, resultMessages.size());
    // assertEquals(InterpreterResult.Type.TABLE, resultMessages.get(0).getType());
    // assertEquals("add_one\n2\n3\n", resultMessages.get(0).getData());
    // select which use python udf
    context = getInterpreterContext();
    result = sqlInterpreter.interpret("SELECT python_upper(name) as name FROM source_table", context);
    assertEquals(context.out.toString(), InterpreterResult.Code.SUCCESS, result.code());
    resultMessages = context.out.toInterpreterResultMessage();
    assertEquals(1, resultMessages.size());
    assertEquals(InterpreterResult.Type.TABLE, resultMessages.get(0).getType());
    assertEquals("name\nA\nB\n", resultMessages.get(0).getData());
    // define python udf via IPyFlinkInterpreter
    result = iPyFlinkInterpreter.interpret(
        "class IPythonUpper(ScalarFunction):\n" +
        "  def eval(self, s):\n" +
        "    return s.upper()", getInterpreterContext());
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    result = iPyFlinkInterpreter.interpret("st_env.register_function(\"ipython_upper\", " + "udf(IPythonUpper(), DataTypes.STRING(), DataTypes.STRING()))", getInterpreterContext());
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    // select which use python udf
    context = getInterpreterContext();
    result = sqlInterpreter.interpret("SELECT ipython_upper(name) as name FROM source_table", context);
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    resultMessages = context.out.toInterpreterResultMessage();
    assertEquals(1, resultMessages.size());
    assertEquals(InterpreterResult.Type.TABLE, resultMessages.get(0).getType());
    assertEquals("name\nA\nB\n", resultMessages.get(0).getData());
    // after these select queries, `show tables` should still show only one source table,
    // other temporary tables should not be displayed.
    context = getInterpreterContext();
    result = sqlInterpreter.interpret("show tables", context);
    resultMessages = context.out.toInterpreterResultMessage();
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    assertEquals(1, resultMessages.size());
    assertEquals(InterpreterResult.Type.TABLE, resultMessages.get(0).getType());
    assertEquals(resultMessages.get(0).toString(), "table\nsource_table\n", resultMessages.get(0).getData());
}
Also used: InterpreterResult (org.apache.zeppelin.interpreter.InterpreterResult), InterpreterContext (org.apache.zeppelin.interpreter.InterpreterContext), InterpreterResultMessage (org.apache.zeppelin.interpreter.InterpreterResultMessage), Test (org.junit.Test)
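
The expected strings in this test expose the TABLE payload format: getData() returns a tab-separated header row followed by tab-separated data rows, each terminated by "\n". A hypothetical helper for row-level assertions (the name and shape are illustrative, not Zeppelin API; assumes java.util.ArrayList and java.util.List are imported):

// Split a TABLE-typed message into rows of cells, dropping the header row.
static List<String[]> tableRows(InterpreterResultMessage message) {
    assertEquals(InterpreterResult.Type.TABLE, message.getType());
    String[] lines = message.getData().split("\n");
    List<String[]> rows = new ArrayList<>();
    for (int i = 1; i < lines.length; i++) { // index 0 is the header row
        rows.add(lines[i].split("\t"));
    }
    return rows;
}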

Example 24 with InterpreterResultMessage

Use of org.apache.zeppelin.interpreter.InterpreterResultMessage in project zeppelin by apache.

The class FlinkInterpreterTest, method testZShow.

@Test
public void testZShow() throws InterpreterException, IOException {
    // show dataset
    InterpreterContext context = getInterpreterContext();
    InterpreterResult result = interpreter.interpret("val data = benv.fromElements((1, \"jeff\"), (2, \"andy\"), (3, \"james\"))", context);
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    context = getInterpreterContext();
    result = interpreter.interpret("z.show(data)", context);
    assertEquals(context.out.toString(), InterpreterResult.Code.SUCCESS, result.code());
    List<InterpreterResultMessage> resultMessages = context.out.toInterpreterResultMessage();
    if (interpreter.getFlinkVersion().isAfterFlink114()) {
        assertEquals(InterpreterResult.Type.TEXT, resultMessages.get(0).getType());
        assertEquals("z.show(DataSet) is not supported after Flink 1.14", resultMessages.get(0).getData());
    } else {
        assertEquals(InterpreterResult.Type.TABLE, resultMessages.get(0).getType());
        assertEquals("_1\t_2\n1\tjeff\n2\tandy\n3\tjames\n", resultMessages.get(0).getData());
    }
}
Also used: InterpreterResult (org.apache.zeppelin.interpreter.InterpreterResult), InterpreterContext (org.apache.zeppelin.interpreter.InterpreterContext), InterpreterResultMessage (org.apache.zeppelin.interpreter.InterpreterResultMessage), Test (org.junit.Test)
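
The branch on isAfterFlink114() reflects a behavior change: z.show on a DataSet was dropped after Flink 1.14, so the same call produces a plain TEXT notice instead of a TABLE. Both branches repeat the same type-then-data check, which a small hypothetical helper (not Zeppelin API) would compress:

static void assertMessage(InterpreterResultMessage message,
        InterpreterResult.Type expectedType, String expectedData) {
    assertEquals(expectedType, message.getType());
    assertEquals(expectedData, message.getData());
}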

Example 25 with InterpreterResultMessage

Use of org.apache.zeppelin.interpreter.InterpreterResultMessage in project zeppelin by apache.

The class FlinkInterpreterTest, method testCancelStreamSql.

@Test
public void testCancelStreamSql() throws IOException, InterpreterException, InterruptedException, TimeoutException {
    String initStreamScalaScript = FlinkStreamSqlInterpreterTest.getInitStreamScript(1000);
    InterpreterResult result = interpreter.interpret(initStreamScalaScript, getInterpreterContext());
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    final Waiter waiter = new Waiter();
    Thread thread = new Thread(() -> {
        try {
            InterpreterContext context = getInterpreterContext();
            InterpreterResult result2 = interpreter.interpret("val table = stenv.sqlQuery(\"select url, count(1) as pv from " + "log group by url\")\nz.show(table, streamType=\"update\")", context);
            LOGGER.info("---------------" + context.out.toString());
            LOGGER.info("---------------" + result2);
            waiter.assertTrue(context.out.toString().contains("Job was cancelled"));
            waiter.assertEquals(InterpreterResult.Code.ERROR, result2.code());
        } catch (Exception e) {
            e.printStackTrace();
            waiter.fail("Should not fail here");
        }
        waiter.resume();
    });
    thread.start();
    // the streaming job runs for longer than 20 seconds, see init_stream.scala;
    // sleep 20 seconds to make sure the job is started but not finished
    Thread.sleep(20 * 1000);
    InterpreterContext context = getInterpreterContext();
    interpreter.cancel(context);
    waiter.await(10 * 1000);
    // resume job
    result = interpreter.interpret("val table = stenv.sqlQuery(\"select url, count(1) as pv from " + "log group by url\")\nz.show(table, streamType=\"update\")", context);
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    List<InterpreterResultMessage> resultMessages = context.out.toInterpreterResultMessage();
    assertEquals(InterpreterResult.Type.TABLE, resultMessages.get(0).getType());
    TestCase.assertTrue(resultMessages.toString(), resultMessages.get(0).getData().contains("url\tpv\n"));
}
Also used: InterpreterResult (org.apache.zeppelin.interpreter.InterpreterResult), Waiter (net.jodah.concurrentunit.Waiter), InterpreterContext (org.apache.zeppelin.interpreter.InterpreterContext), InterpreterResultMessage (org.apache.zeppelin.interpreter.InterpreterResultMessage), TimeoutException (java.util.concurrent.TimeoutException), InterpreterException (org.apache.zeppelin.interpreter.InterpreterException), IOException (java.io.IOException), Test (org.junit.Test)
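
The Waiter here is net.jodah.concurrentunit's bridge for asserting off the main thread: failures recorded on the worker thread are rethrown by await(), which also enforces a timeout. A minimal sketch of the idiom in isolation, assuming only the concurrentunit dependency:

Waiter waiter = new Waiter();
new Thread(() -> {
    waiter.assertTrue(true); // assertion recorded on the worker thread
    waiter.resume();         // unblocks the await() below
}).start();
waiter.await(10 * 1000);     // rethrows worker-thread failures; 10s timeout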

Aggregations

InterpreterResultMessage (org.apache.zeppelin.interpreter.InterpreterResultMessage): 80
InterpreterResult (org.apache.zeppelin.interpreter.InterpreterResult): 78
Test (org.junit.Test): 60
InterpreterContext (org.apache.zeppelin.interpreter.InterpreterContext): 55
Properties (java.util.Properties): 17
InterpreterException (org.apache.zeppelin.interpreter.InterpreterException): 15
IOException (java.io.IOException): 13
File (java.io.File): 8
TimeoutException (java.util.concurrent.TimeoutException): 7
Waiter (net.jodah.concurrentunit.Waiter): 7
AuthenticationInfo (org.apache.zeppelin.user.AuthenticationInfo): 6
Test (org.junit.jupiter.api.Test): 6
UnirestException (com.mashape.unirest.http.exceptions.UnirestException): 3
HashMap (java.util.HashMap): 3
Matcher (java.util.regex.Matcher): 3
Pattern (java.util.regex.Pattern): 3
Map (java.util.Map): 2
AngularObjectRegistry (org.apache.zeppelin.display.AngularObjectRegistry): 2
CheckBox (org.apache.zeppelin.display.ui.CheckBox): 2
Select (org.apache.zeppelin.display.ui.Select): 2