Use of org.apache.zeppelin.interpreter.InterpreterResultMessage in the Apache Zeppelin project.
From the class FlinkInterpreterTest, method testResumeStreamSqlFromSavePoint:
// TODO(zjffdu) flaky test
// @Test
public void testResumeStreamSqlFromSavePoint() throws IOException, InterpreterException, InterruptedException, TimeoutException {
  // Start a long-running (60s, see init_stream.scala) streaming job.
  String initStreamScalaScript = FlinkStreamSqlInterpreterTest.getInitStreamScript(1000);
  InterpreterResult result = interpreter.interpret(initStreamScalaScript, getInterpreterContext());
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());

  File savePointDir = FileUtils.getTempDirectory();
  final Waiter waiter = new Waiter();
  Thread thread = new Thread(() -> {
    try {
      InterpreterContext context = getInterpreterContext();
      // Configure a savepoint dir so the cancel() below triggers a savepoint
      // that the follow-up interpret() can resume from.
      context.getLocalProperties().put("savePointDir", savePointDir.getAbsolutePath());
      context.getLocalProperties().put("parallelism", "1");
      context.getLocalProperties().put("maxParallelism", "10");
      InterpreterResult result2 = interpreter.interpret("val table = stenv.sqlQuery(\"select url, count(1) as pv from " + "log group by url\")\nz.show(table, streamType=\"update\")", context);
      System.out.println("------------" + context.out.toString());
      System.out.println("------------" + result2);
      waiter.assertTrue(context.out.toString().contains("url\tpv\n"));
      waiter.assertEquals(InterpreterResult.Code.SUCCESS, result2.code());
    } catch (Exception e) {
      e.printStackTrace();
      waiter.fail("Should not fail here");
    }
    waiter.resume();
  });
  thread.start();

  // the streaming job will run for 60 seconds. check init_stream.scala
  // sleep 20 seconds to make sure the job is started but not finished
  Thread.sleep(20 * 1000);

  InterpreterContext context = getInterpreterContext();
  context.getLocalProperties().put("savePointDir", savePointDir.getAbsolutePath());
  context.getLocalProperties().put("parallelism", "2");
  context.getLocalProperties().put("maxParallelism", "10");
  interpreter.cancel(context);
  waiter.await(20 * 1000);

  // resume job from savepoint
  result = interpreter.interpret("val table = stenv.sqlQuery(\"select url, count(1) as pv from " + "log group by url\")\nz.show(table, streamType=\"update\")", context);
  // BUG FIX: the previous code discarded the result of the resume interpret()
  // and re-asserted the stale result of the init script, so a failed resume
  // would have gone unnoticed. Assert the resumed job's own result instead.
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());
  List<InterpreterResultMessage> resultMessages = context.out.toInterpreterResultMessage();
  assertEquals(InterpreterResult.Type.TABLE, resultMessages.get(0).getType());
  TestCase.assertTrue(resultMessages.toString(), resultMessages.get(0).getData().contains("url\tpv\n"));
}
Use of org.apache.zeppelin.interpreter.InterpreterResultMessage in the Apache Zeppelin project.
From the class IPyFlinkInterpreterTest, method testUpdateStreamTableApi:
/**
 * Shared check for the Python Table API in update-stream mode: bootstrap the
 * test log stream via the Scala interpreter, run a group-by query through
 * z.show(..., stream_type='update'), and verify that a TABLE result containing
 * the url/pv header is emitted.
 */
public static void testUpdateStreamTableApi(Interpreter interpreter, Interpreter flinkScalaInterpreter) throws IOException, InterpreterException {
  // Create and populate the streaming source table.
  InterpreterContext initContext = createInterpreterContext();
  InterpreterResult initResult = flinkScalaInterpreter.interpret(FlinkStreamSqlInterpreterTest.getInitStreamScript(100), initContext);
  assertEquals(InterpreterResult.Code.SUCCESS, initResult.code());

  // Execute the update-mode query through the Python interpreter under test.
  InterpreterContext queryContext = createInterpreterContext();
  String code = "table = st_env.sql_query('select url, count(1) as pv from log group by url')\nz.show(table,stream_type='update')";
  InterpreterResult queryResult = interpreter.interpret(code, queryContext);
  assertEquals(queryContext.out.toString(), InterpreterResult.Code.SUCCESS, queryResult.code());

  List<InterpreterResultMessage> messages = queryContext.out.toInterpreterResultMessage();
  assertEquals(InterpreterResult.Type.TABLE, messages.get(0).getType());
  assertTrue(messages.toString(), messages.get(0).getData().contains("url\tpv\n"));
}
Use of org.apache.zeppelin.interpreter.InterpreterResultMessage in the Apache Zeppelin project.
From the class IPyFlinkInterpreterTest, method testSingleStreamTableApi:
/**
 * Shared check for the Python Table API in single-stream mode: bootstrap the
 * test log stream via the Scala interpreter, run an aggregate query through
 * z.show(..., stream_type='single') with a display template, and verify that
 * an ANGULAR result containing the rendered template text is emitted.
 */
public static void testSingleStreamTableApi(Interpreter interpreter, Interpreter flinkScalaInterpreter) throws IOException, InterpreterException {
  // Create and populate the streaming source table.
  InterpreterContext initContext = createInterpreterContext();
  InterpreterResult initResult = flinkScalaInterpreter.interpret(FlinkStreamSqlInterpreterTest.getInitStreamScript(100), initContext);
  assertEquals(InterpreterResult.Code.SUCCESS, initResult.code());

  // Execute the single-mode query through the Python interpreter under test.
  InterpreterContext queryContext = createInterpreterContext();
  String code = "table = st_env.sql_query('select max(rowtime), count(1) from log')\nz.show(table,stream_type='single',template = 'Total Count: {1} <br/> {0}')";
  InterpreterResult queryResult = interpreter.interpret(code, queryContext);
  assertEquals(queryContext.out.toString(), InterpreterResult.Code.SUCCESS, queryResult.code());

  List<InterpreterResultMessage> messages = queryContext.out.toInterpreterResultMessage();
  assertEquals(InterpreterResult.Type.ANGULAR, messages.get(0).getType());
  assertTrue(messages.toString(), messages.get(0).getData().contains("Total Count"));
}
Use of org.apache.zeppelin.interpreter.InterpreterResultMessage in the Apache Zeppelin project.
From the class FlinkBatchSqlInterpreterTest, method testMultipleInsertInto:
@Test
public void testMultipleInsertInto() throws InterpreterException, IOException {
  // Seed a Hive source table with two rows.
  hiveShell.execute("create table source_table (id int, name string)");
  hiveShell.execute("insert into source_table values(1, 'a'), (2, 'b')");

  // Create two filesystem-backed CSV sink tables, each writing into its own
  // temp directory (removed first so the connector starts from a clean path).
  File destDir = Files.createTempDirectory("flink_test").toFile();
  FileUtils.deleteDirectory(destDir);
  InterpreterResult result = sqlInterpreter.interpret("CREATE TABLE sink_table (\n" + "id int,\n" + "name string" + ") WITH (\n" + "'format.field-delimiter'=',',\n" + "'connector.type'='filesystem',\n" + "'format.derive-schema'='true',\n" + "'connector.path'='" + destDir.getAbsolutePath() + "',\n" + "'format.type'='csv'\n" + ");", getInterpreterContext());
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());

  File destDir2 = Files.createTempDirectory("flink_test").toFile();
  FileUtils.deleteDirectory(destDir2);
  result = sqlInterpreter.interpret("CREATE TABLE sink_table2 (\n" + "id int,\n" + "name string" + ") WITH (\n" + "'format.field-delimiter'=',',\n" + "'connector.type'='filesystem',\n" + "'format.derive-schema'='true',\n" + "'connector.path'='" + destDir2.getAbsolutePath() + "',\n" + "'format.type'='csv'\n" + ");", getInterpreterContext());
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());

  // insert into: two statements run as separate jobs -> two status lines.
  InterpreterContext context = getInterpreterContext();
  result = sqlInterpreter.interpret("insert into sink_table select * from source_table;insert into sink_table2 select * from source_table", context);
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());
  List<InterpreterResultMessage> resultMessages = context.out.toInterpreterResultMessage();
  assertEquals("Insertion successfully.\nInsertion successfully.\n", resultMessages.get(0).getData());

  // verify insert into via select from sink_table
  context = getInterpreterContext();
  result = sqlInterpreter.interpret("select * from sink_table", context);
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());
  resultMessages = context.out.toInterpreterResultMessage();
  assertEquals("id\tname\n1\ta\n2\tb\n", resultMessages.get(0).getData());

  context = getInterpreterContext();
  result = sqlInterpreter.interpret("select * from sink_table2", context);
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());
  resultMessages = context.out.toInterpreterResultMessage();
  assertEquals("id\tname\n1\ta\n2\tb\n", resultMessages.get(0).getData());

  // insert into (runAsOne): both statements submitted as one job -> one line.
  // BUG FIX: File.delete() cannot remove a non-empty directory, and the sinks
  // wrote CSV files into these dirs above, so the previous delete() calls
  // silently failed (return value was also ignored). Clear them recursively,
  // consistent with the deleteDirectory() calls used during setup.
  FileUtils.deleteDirectory(destDir);
  FileUtils.deleteDirectory(destDir2);
  context = getInterpreterContext();
  context.getLocalProperties().put("runAsOne", "true");
  result = sqlInterpreter.interpret("insert into sink_table select * from source_table;insert into sink_table2 select * from source_table", context);
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());
  resultMessages = context.out.toInterpreterResultMessage();
  assertEquals("Insertion successfully.\n", resultMessages.get(0).getData());

  // verify insert into via select from sink_table
  context = getInterpreterContext();
  result = sqlInterpreter.interpret("select * from sink_table", context);
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());
  resultMessages = context.out.toInterpreterResultMessage();
  assertEquals("id\tname\n1\ta\n2\tb\n", resultMessages.get(0).getData());

  context = getInterpreterContext();
  result = sqlInterpreter.interpret("select * from sink_table2", context);
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());
  resultMessages = context.out.toInterpreterResultMessage();
  assertEquals("id\tname\n1\ta\n2\tb\n", resultMessages.get(0).getData());
}
Use of org.apache.zeppelin.interpreter.InterpreterResultMessage in the Apache Zeppelin project.
From the class FlinkBatchSqlInterpreterTest, method testFunctionHintRowType:
@Test
public void testFunctionHintRowType() throws InterpreterException, IOException {
  // Register a Scala table function whose ROW return type is declared via
  // @FunctionHint, then query it from SQL.
  String udfScript =
      "import org.apache.flink.table.annotation.FunctionHint\n"
          + "import org.apache.flink.table.functions.TableFunction\n"
          + "import org.apache.flink.types.Row\n"
          + "import org.apache.flink.api.scala._\n"
          + "import org.apache.flink.table.annotation.DataTypeHint\n"
          + "\n"
          + "@FunctionHint(output = new DataTypeHint(\"ROW<sum STRING, result INT>\"))\n"
          + "class OverloadedFunction extends TableFunction[Row] {\n"
          + " def eval(a: Int, b: Int): Unit = {\n"
          + " collect(Row.of(\"Sum\", Int.box(a + b)))\n"
          + " }\n"
          + "}\n"
          + "\n"
          + "btenv.createTemporarySystemFunction(\"SumUdf\", new OverloadedFunction())";
  InterpreterContext defineContext = getInterpreterContext();
  InterpreterResult defineResult = flinkInterpreter.interpret(udfScript, defineContext);
  assertEquals(InterpreterResult.Code.SUCCESS, defineResult.code());

  // Invoke the UDF and check the TABLE output uses the hinted column names.
  InterpreterContext queryContext = getInterpreterContext();
  InterpreterResult queryResult = sqlInterpreter.interpret("select * FROM LATERAL TABLE(SumUdf(1,2));", queryContext);
  assertEquals(InterpreterResult.Code.SUCCESS, queryResult.code());
  List<InterpreterResultMessage> messages = queryContext.out.toInterpreterResultMessage();
  assertEquals(InterpreterResult.Type.TABLE, messages.get(0).getType());
  assertEquals("sum\tresult\nSum\t3\n", messages.get(0).getData());
}
Aggregations