Use of org.apache.zeppelin.interpreter.InterpreterResultMessage in project zeppelin by apache.
In class IPyFlinkInterpreterTest, method testCancelStreamSql:
public static void testCancelStreamSql(Interpreter interpreter, Interpreter flinkScalaInterpreter)
    throws IOException, InterpreterException, InterruptedException, TimeoutException {
  String initStreamScalaScript = FlinkStreamSqlInterpreterTest.getInitStreamScript(1000);
  InterpreterResult result = flinkScalaInterpreter.interpret(initStreamScalaScript,
      createInterpreterContext());
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());
  final Waiter waiter = new Waiter();
  Thread thread = new Thread(() -> {
    try {
      InterpreterContext context = createInterpreterContext();
      InterpreterResult result2 = interpreter.interpret(
          "table = st_env.sql_query('select url, count(1) as pv from log group by url')\n" +
          "z.show(table, stream_type='update')", context);
      LOGGER.info("---------------" + context.out.toString());
      LOGGER.info("---------------" + result2);
      // the cancelled job is expected to finish with an ERROR result
      waiter.assertEquals(InterpreterResult.Code.ERROR, result2.code());
    } catch (Exception e) {
      e.printStackTrace();
      waiter.fail("Should not fail here");
    }
    waiter.resume();
  });
  thread.start();
  // the streaming job will run for 20 seconds, see init_stream.scala;
  // sleep 20 seconds to make sure the job is started but not finished
  Thread.sleep(20 * 1000);
  InterpreterContext context = createInterpreterContext();
  interpreter.cancel(context);
  waiter.await(10 * 1000);
  // resume the job and verify the new run succeeds
  result = interpreter.interpret(
      "table = st_env.sql_query('select url, count(1) as pv from log group by url')\n" +
      "z.show(table, stream_type='update')", context);
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());
  List<InterpreterResultMessage> resultMessages = context.out.toInterpreterResultMessage();
  assertEquals(context.out.toString(), InterpreterResult.Type.TABLE, resultMessages.get(0).getType());
  TestCase.assertTrue(resultMessages.toString(), resultMessages.get(0).getData().contains("url\tpv\n"));
}
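These tests coordinate assertions between the worker thread and the main thread with the Waiter class, which in the Zeppelin tests comes from the ConcurrentUnit library (net.jodah.concurrentunit). A minimal sketch of the pattern, independent of Zeppelin and assuming that same dependency:

import net.jodah.concurrentunit.Waiter;

public class WaiterPatternSketch {
  public static void main(String[] args) throws Throwable {
    Waiter waiter = new Waiter();
    Thread worker = new Thread(() -> {
      // assertions made here are recorded by the Waiter and rethrown in await()
      waiter.assertEquals(4, 2 + 2);
      // signal the main thread that the worker is done
      waiter.resume();
    });
    worker.start();
    // block until resume() is called, or throw TimeoutException after 10 seconds
    waiter.await(10 * 1000);
  }
}

This is why the tests call waiter.resume() outside the try block: await() must be released whether the worker's assertions passed or failed, and any recorded failure surfaces in the main thread.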
Use of org.apache.zeppelin.interpreter.InterpreterResultMessage in project zeppelin by apache.
In class IPyFlinkInterpreterTest, method testAppendStreamTableApi:
public static void testAppendStreamTableApi(Interpreter interpreter, Interpreter flinkScalaInterpreter)
    throws IOException, InterpreterException {
  String initStreamScalaScript = FlinkStreamSqlInterpreterTest.getInitStreamScript(100);
  InterpreterContext context = createInterpreterContext();
  InterpreterResult result = flinkScalaInterpreter.interpret(initStreamScalaScript, context);
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());
  context = createInterpreterContext();
  String code = "table = st_env.sql_query(\"select TUMBLE_START(rowtime, INTERVAL '5' SECOND) as start_time, " +
      "url, count(1) as pv from log group by TUMBLE(rowtime, INTERVAL '5' SECOND), url\")\n" +
      "z.show(table,stream_type='append')";
  result = interpreter.interpret(code, context);
  assertEquals(context.out.toString(), InterpreterResult.Code.SUCCESS, result.code());
  List<InterpreterResultMessage> resultMessages = context.out.toInterpreterResultMessage();
  assertEquals(InterpreterResult.Type.TABLE, resultMessages.get(0).getType());
  assertTrue(resultMessages.toString(), resultMessages.get(0).getData().contains("url\tpv\n"));
}
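The TABLE payload carried by an InterpreterResultMessage is plain tab-separated text whose first row is the header (url\tpv above). A hypothetical helper for consuming that payload, not part of Zeppelin's API:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class TablePayloadSketch {
  // hypothetical helper: turn Zeppelin's TABLE payload into one map per data row
  static List<Map<String, String>> parseTable(String data) {
    String[] lines = data.split("\n");
    String[] header = lines[0].split("\t");
    List<Map<String, String>> rows = new ArrayList<>();
    for (int i = 1; i < lines.length; i++) {
      String[] cells = lines[i].split("\t");
      Map<String, String> row = new LinkedHashMap<>();
      for (int j = 0; j < header.length && j < cells.length; j++) {
        row.put(header[j], cells[j]);
      }
      rows.add(row);
    }
    return rows;
  }

  public static void main(String[] args) {
    // the shape asserted in the tests above: header row, then data rows
    System.out.println(parseTable("url\tpv\nhome\t3\nabout\t1"));
  }
}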
Use of org.apache.zeppelin.interpreter.InterpreterResultMessage in project zeppelin by apache.
In class IPyFlinkInterpreterTest, method testResumeStreamSqlFromSavePoint:
public static void testResumeStreamSqlFromSavePoint(Interpreter interpreter, Interpreter flinkScalaInterpreter)
    throws IOException, InterpreterException, InterruptedException, TimeoutException {
  String initStreamScalaScript = FlinkStreamSqlInterpreterTest.getInitStreamScript(1000);
  InterpreterResult result = flinkScalaInterpreter.interpret(initStreamScalaScript,
      createInterpreterContext());
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());
  File savePointDir = FileUtils.getTempDirectory();
  final Waiter waiter = new Waiter();
  Thread thread = new Thread(() -> {
    try {
      InterpreterContext context = createInterpreterContext();
      context.getLocalProperties().put("savePointDir", savePointDir.getAbsolutePath());
      context.getLocalProperties().put("parallelism", "1");
      context.getLocalProperties().put("maxParallelism", "10");
      InterpreterResult result2 = interpreter.interpret(
          "table = st_env.sql_query('select url, count(1) as pv from log group by url')\n" +
          "z.show(table, stream_type='update')", context);
      LOGGER.info("------------" + context.out.toString());
      LOGGER.info("------------" + result2);
      waiter.assertTrue(context.out.toString().contains("url\tpv\n"));
    } catch (Exception e) {
      e.printStackTrace();
      waiter.fail("Should not fail here");
    }
    waiter.resume();
  });
  thread.start();
  // the streaming job will run for 60 seconds, see init_stream.scala;
  // sleep 20 seconds to make sure the job is started but not finished
  Thread.sleep(20 * 1000);
  InterpreterContext context = createInterpreterContext();
  context.getLocalProperties().put("savePointDir", savePointDir.getAbsolutePath());
  context.getLocalProperties().put("parallelism", "2");
  context.getLocalProperties().put("maxParallelism", "10");
  interpreter.cancel(context);
  waiter.await(20 * 1000);
  // resume the job from the savepoint, this time with parallelism 2
  result = interpreter.interpret(
      "table = st_env.sql_query('select url, count(1) as pv from log group by url')\n" +
      "z.show(table, stream_type='update')", context);
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());
  List<InterpreterResultMessage> resultMessages = context.out.toInterpreterResultMessage();
  LOGGER.info("---------------" + context.out.toString());
  assertEquals(resultMessages.get(0).toString(), InterpreterResult.Type.TABLE, resultMessages.get(0).getType());
  TestCase.assertTrue(resultMessages.toString(), resultMessages.get(0).getData().contains("url\tpv\n"));
}
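The savepoint round trip is driven entirely by local properties on the InterpreterContext: savePointDir is read when the job is cancelled (to trigger a savepoint) and again when the next run starts (to restore from it). A sketch of building such a context, assuming Zeppelin's InterpreterContext.builder() API; the note and paragraph ids are placeholders, and the property keys are taken from the test above:

import org.apache.zeppelin.interpreter.InterpreterContext;

public class SavepointContextSketch {
  static InterpreterContext savepointContext(String savePointDir, int parallelism) {
    InterpreterContext context = InterpreterContext.builder()
        .setNoteId("note-1")            // placeholder id for illustration
        .setParagraphId("paragraph-1")  // placeholder id for illustration
        .build();
    // keys the Flink interpreter inspects, as used in the test above
    context.getLocalProperties().put("savePointDir", savePointDir);
    context.getLocalProperties().put("parallelism", String.valueOf(parallelism));
    context.getLocalProperties().put("maxParallelism", "10");
    return context;
  }
}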
Use of org.apache.zeppelin.interpreter.InterpreterResultMessage in project zeppelin by apache.
In class LivySparkSQLInterpreter, method interpret:
@Override
public InterpreterResult interpret(String line, InterpreterContext context) {
  try {
    if (StringUtils.isEmpty(line)) {
      return new InterpreterResult(InterpreterResult.Code.SUCCESS, "");
    }
    // use triple quotes so that we don't need to escape the user's SQL string
    String sqlQuery = null;
    if (isSpark2) {
      if (tableWithUTFCharacter()) {
        sqlQuery = "val df = spark.sql(\"\"\"" + line + "\"\"\")\n" +
            "for ( col <- df.columns ) {\n" +
            " print(col+\"\\t\")\n" +
            "}\n" +
            "println\n" +
            "df.toJSON.take(" + maxResult + ").foreach(println)";
      } else {
        sqlQuery = "spark.sql(\"\"\"" + line + "\"\"\").show(" + maxResult + ", " + truncate + ")";
      }
    } else {
      sqlQuery = "sqlContext.sql(\"\"\"" + line + "\"\"\").show(" + maxResult + ", " + truncate + ")";
    }
    InterpreterResult result = sparkInterpreter.interpret(sqlQuery, context);
    if (result.code() == InterpreterResult.Code.SUCCESS) {
      InterpreterResult result2 = new InterpreterResult(InterpreterResult.Code.SUCCESS);
      for (InterpreterResultMessage message : result.message()) {
        // convert TEXT output (assumed to be the raw SQL result) to TABLE type;
        // ideally livy itself would return table type in a future release
        if (message.getType() == InterpreterResult.Type.TEXT) {
          List<String> rows;
          if (tableWithUTFCharacter()) {
            rows = parseSQLJsonOutput(message.getData());
          } else {
            rows = parseSQLOutput(message.getData());
          }
          result2.add(InterpreterResult.Type.TABLE, StringUtils.join(rows, "\n"));
          if (rows.size() >= (maxResult + 1)) {
            result2.add(ResultMessages.getExceedsLimitRowsMessage(maxResult,
                ZEPPELIN_LIVY_SPARK_SQL_MAX_RESULT));
          }
        } else {
          result2.add(message.getType(), message.getData());
        }
      }
      return result2;
    } else {
      return result;
    }
  } catch (Exception e) {
    LOGGER.error("Exception in LivySparkSQLInterpreter while interpreting", e);
    return new InterpreterResult(InterpreterResult.Code.ERROR,
        InterpreterUtils.getMostRelevantMessage(e));
  }
}
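In the non-UTF path, parseSQLOutput has to turn the ASCII table printed by DataFrame.show() back into tab-separated rows for Zeppelin's TABLE display. A simplified sketch of that conversion, not the actual Livy implementation:

import java.util.ArrayList;
import java.util.List;

public class ShowOutputParserSketch {
  // simplified sketch: convert DataFrame.show() output such as
  //   +---+------+
  //   | id|  name|
  //   +---+------+
  //   |  1| alice|
  //   +---+------+
  // into tab-separated rows ("id\tname", "1\talice")
  static List<String> parseShowOutput(String text) {
    List<String> rows = new ArrayList<>();
    for (String line : text.split("\n")) {
      if (!line.startsWith("|")) {
        continue; // skip border lines (+---+) and footers like "only showing top 20 rows"
      }
      String body = line.substring(1, line.length() - 1); // strip the outer pipes
      String[] cells = body.split("\\|", -1);
      StringBuilder row = new StringBuilder();
      for (int i = 0; i < cells.length; i++) {
        if (i > 0) {
          row.append('\t');
        }
        row.append(cells[i].trim());
      }
      rows.add(row.toString());
    }
    return rows;
  }

  public static void main(String[] args) {
    String show = "+---+------+\n| id|  name|\n+---+------+\n|  1| alice|\n+---+------+";
    System.out.println(parseShowOutput(show)); // header row then data row, tab-separated
  }
}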
Use of org.apache.zeppelin.interpreter.InterpreterResultMessage in project zeppelin by apache.
In class JDBCInterpreterTest, method testSelectQuery:
@Test
public void testSelectQuery() throws IOException, InterpreterException {
  Properties properties = new Properties();
  properties.setProperty("common.max_count", "1000");
  properties.setProperty("common.max_retry", "3");
  properties.setProperty("default.driver", "org.h2.Driver");
  properties.setProperty("default.url", getJdbcConnection());
  properties.setProperty("default.user", "");
  properties.setProperty("default.password", "");
  JDBCInterpreter t = new JDBCInterpreter(properties);
  t.open();
  String sqlQuery = "select * from test_table WHERE ID in ('a', 'b'); ";
  // context is created via the test helper getInterpreterContext()
  InterpreterContext context = getInterpreterContext();
  InterpreterResult interpreterResult = t.interpret(sqlQuery, context);
  assertEquals(InterpreterResult.Code.SUCCESS, interpreterResult.code());
  List<InterpreterResultMessage> resultMessages = context.out.toInterpreterResultMessage();
  assertEquals(InterpreterResult.Type.TABLE, resultMessages.get(0).getType());
  assertEquals("ID\tNAME\na\ta_name\nb\tb_name\n", resultMessages.get(0).getData());
  // run the same query again with a paragraph-local limit of 1 row
  context = getInterpreterContext();
  context.getLocalProperties().put("limit", "1");
  interpreterResult = t.interpret(sqlQuery, context);
  resultMessages = context.out.toInterpreterResultMessage();
  assertEquals(InterpreterResult.Code.SUCCESS, interpreterResult.code());
  assertEquals(InterpreterResult.Type.TABLE, resultMessages.get(0).getType());
  assertEquals("ID\tNAME\na\ta_name\n", resultMessages.get(0).getData());
}
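The test assumes an H2 database reachable through getJdbcConnection() that already contains test_table with the two rows asserted above. A minimal sketch of that precondition; the in-memory URL and DDL here are illustrative, not taken from the test class:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class H2FixtureSketch {
  public static void main(String[] args) throws Exception {
    // illustrative in-memory URL; the real test builds its own via getJdbcConnection()
    try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:test;DB_CLOSE_DELAY=-1");
         Statement stmt = conn.createStatement()) {
      stmt.execute("CREATE TABLE test_table (id VARCHAR(255), name VARCHAR(255))");
      stmt.execute("INSERT INTO test_table VALUES ('a', 'a_name'), ('b', 'b_name')");
    }
  }
}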