Use of org.apache.zeppelin.interpreter.SingleRowInterpreterResult in project zeppelin by apache.
The class Spark2Shims, method showDataFrame.
@Override
public String showDataFrame(Object obj, int maxResult, InterpreterContext context) {
  if (obj instanceof Dataset) {
    Dataset<Row> df = ((Dataset) obj).toDF();
    String[] columns = df.columns();
    // A DDL statement yields an empty DataFrame (no columns).
    if (columns.length == 0) {
      return "";
    }
    // Fetch maxResult + 1 rows so that we can check whether the result is larger than
    // zeppelin.spark.maxResult.
    List<Row> rows = df.takeAsList(maxResult + 1);
    String template = context.getLocalProperties().get("template");
    if (!StringUtils.isBlank(template)) {
      if (rows.size() >= 1) {
        return new SingleRowInterpreterResult(sparkRowToList(rows.get(0)), template, context).toHtml();
      } else {
        return "";
      }
    }
    StringBuilder msg = new StringBuilder();
    msg.append("\n%table ");
    msg.append(StringUtils.join(TableDataUtils.normalizeColumns(columns), "\t"));
    msg.append("\n");
    boolean isLargerThanMaxResult = rows.size() > maxResult;
    if (isLargerThanMaxResult) {
      rows = rows.subList(0, maxResult);
    }
    for (Row row : rows) {
      for (int i = 0; i < row.size(); ++i) {
        msg.append(TableDataUtils.normalizeColumn(row.get(i)));
        if (i != row.size() - 1) {
          msg.append("\t");
        }
      }
      msg.append("\n");
    }
    if (isLargerThanMaxResult) {
      msg.append("\n");
      msg.append(ResultMessages.getExceedsLimitRowsMessage(maxResult, "zeppelin.spark.maxResult"));
    }
    // Append %text at the end, otherwise the following output would be put into the table as well.
    msg.append("\n%text ");
    return msg.toString();
  } else {
    return obj.toString();
  }
}
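Both shims delegate row conversion to a sparkRowToList helper that is not shown on this page. A minimal sketch of what such a helper could look like, assuming it simply copies the fields of a Spark Row into a java.util.List in column order (the method body here is an assumption, not the project's actual implementation):

import java.util.ArrayList;
import java.util.List;
import org.apache.spark.sql.Row;

// Hypothetical reconstruction: copies each field of a Spark Row into a List
// so that SingleRowInterpreterResult can substitute the values into the template.
private List<Object> sparkRowToList(Row row) {
  List<Object> list = new ArrayList<>();
  for (int i = 0; i < row.size(); i++) {
    list.add(row.get(i));   // field i of the row, in column order
  }
  return list;
}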
Use of org.apache.zeppelin.interpreter.SingleRowInterpreterResult in project zeppelin by apache.
The class Spark3Shims, method showDataFrame.
@Override
public String showDataFrame(Object obj, int maxResult, InterpreterContext context) {
  if (obj instanceof Dataset) {
    Dataset<Row> df = ((Dataset) obj).toDF();
    String[] columns = df.columns();
    // A DDL statement yields an empty DataFrame (no columns).
    if (columns.length == 0) {
      return "";
    }
    // Fetch maxResult + 1 rows so that we can check whether the result is larger than
    // zeppelin.spark.maxResult.
    List<Row> rows = df.takeAsList(maxResult + 1);
    String template = context.getLocalProperties().get("template");
    if (!StringUtils.isBlank(template)) {
      if (rows.size() >= 1) {
        return new SingleRowInterpreterResult(sparkRowToList(rows.get(0)), template, context).toHtml();
      } else {
        return "";
      }
    }
    StringBuilder msg = new StringBuilder();
    msg.append("%table ");
    msg.append(StringUtils.join(TableDataUtils.normalizeColumns(columns), "\t"));
    msg.append("\n");
    boolean isLargerThanMaxResult = rows.size() > maxResult;
    if (isLargerThanMaxResult) {
      rows = rows.subList(0, maxResult);
    }
    for (Row row : rows) {
      for (int i = 0; i < row.size(); ++i) {
        msg.append(TableDataUtils.normalizeColumn(row.get(i)));
        if (i != row.size() - 1) {
          msg.append("\t");
        }
      }
      msg.append("\n");
    }
    if (isLargerThanMaxResult) {
      msg.append("\n");
      msg.append(ResultMessages.getExceedsLimitRowsMessage(maxResult, "zeppelin.spark.maxResult"));
    }
    // Append %text at the end, otherwise the following output would be put into the table as well.
    msg.append("\n%text ");
    return msg.toString();
  } else {
    return obj.toString();
  }
}
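The Spark 3 variant is identical except that it omits the leading newline before %table. In both shims, the single-row path renders the first row through the paragraph's template via toHtml(), where values and template are the arguments passed to the SingleRowInterpreterResult constructor. A minimal sketch of how such a substitution might work, assuming positional {0}, {1}, ... placeholders (both the placeholder syntax and the method body are assumptions, not Zeppelin's confirmed behavior):

// Hypothetical sketch of a toHtml() that substitutes positional placeholders.
// Assumes the template refers to row fields as {0}, {1}, ... — an assumption,
// not necessarily Zeppelin's actual placeholder syntax.
public String toHtml() {
  String output = template;
  for (int i = 0; i < values.size(); i++) {
    output = output.replace("{" + i + "}", String.valueOf(values.get(i)));
  }
  return "%html\n" + output;
}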
Use of org.apache.zeppelin.interpreter.SingleRowInterpreterResult in project zeppelin by apache.
The class JDBCInterpreter, method executeSql.
/**
 * Execute the sql statement under this dbPrefix.
 *
 * @param dbPrefix prefix that identifies the database connection to use
 * @param sql the sql text to execute, possibly containing multiple statements
 * @param context the interpreter context of the current paragraph
 * @return the interpreter result of the execution
 * @throws InterpreterException
 */
private InterpreterResult executeSql(String dbPrefix, String sql, InterpreterContext context)
    throws InterpreterException {
  Connection connection = null;
  Statement statement;
  ResultSet resultSet = null;
  String paragraphId = context.getParagraphId();
  String user = getUser(context);
  try {
    connection = getConnection(dbPrefix, context);
  } catch (Exception e) {
    LOGGER.error("Fail to getConnection", e);
    try {
      closeDBPool(user, dbPrefix);
    } catch (SQLException e1) {
      LOGGER.error("Cannot close DBPool for user: " + user + ", dbPrefix: " + dbPrefix, e1);
    }
    if (e instanceof SQLException) {
      return new InterpreterResult(Code.ERROR, e.getMessage());
    } else {
      return new InterpreterResult(Code.ERROR, ExceptionUtils.getStackTrace(e));
    }
  }
  if (connection == null) {
    return new InterpreterResult(Code.ERROR, "Prefix not found.");
  }
  try {
    List<String> sqlArray = sqlSplitter.splitSql(sql);
    for (String sqlToExecute : sqlArray) {
      String sqlTrimmedLowerCase = sqlToExecute.trim().toLowerCase();
      if (sqlTrimmedLowerCase.startsWith("set ")
          || sqlTrimmedLowerCase.startsWith("list ")
          || sqlTrimmedLowerCase.startsWith("add ")
          || sqlTrimmedLowerCase.startsWith("delete ")) {
        // Some versions of hive don't work with a set statement preceded by an empty line,
        // so trim the statement first in this case.
        sqlToExecute = sqlToExecute.trim();
      }
      LOGGER.info("Execute sql: " + sqlToExecute);
      statement = connection.createStatement();
      if (statement == null) {
        return new InterpreterResult(Code.ERROR, "Prefix not found.");
      }
      // Fetch n + 1 rows in order to indicate that more rows are available (for large selects).
      statement.setFetchSize(context.getIntLocalProperty("limit", getMaxResult()));
      statement.setMaxRows(context.getIntLocalProperty("limit", maxRows));
      try {
        getJDBCConfiguration(user).saveStatement(paragraphId, statement);
        String statementPrecode =
            getProperty(String.format(STATEMENT_PRECODE_KEY_TEMPLATE, dbPrefix));
        if (StringUtils.isNotBlank(statementPrecode)) {
          statement.execute(statementPrecode);
        }
        // Start the hive monitor thread if this is a hive jdbc connection.
        String jdbcURL = getJDBCConfiguration(user).getPropertyMap(dbPrefix).getProperty(URL_KEY);
        if (jdbcURL != null && jdbcURL.startsWith("jdbc:hive2://")) {
          HiveUtils.startHiveMonitorThread(statement, context,
              Boolean.parseBoolean(getProperty("hive.log.display", "true")), this);
        }
        boolean isResultSetAvailable = statement.execute(sqlToExecute);
        getJDBCConfiguration(user).setConnectionInDBDriverPoolSuccessful(dbPrefix);
        if (isResultSetAvailable) {
          resultSet = statement.getResultSet();
          // Treat the command as DDL.
          if (isDDLCommand(statement.getUpdateCount(), resultSet.getMetaData().getColumnCount())) {
            context.out.write("%text Query executed successfully.\n");
          } else {
            String template = context.getLocalProperties().get("template");
            if (!StringUtils.isBlank(template)) {
              resultSet.next();   // advance the cursor to the first row
              SingleRowInterpreterResult singleRowResult =
                  new SingleRowInterpreterResult(getFirstRow(resultSet), template, context);
              if (isFirstRefreshMap.get(context.getParagraphId())) {
                context.out.write(singleRowResult.toAngular());
                context.out.write("\n%text ");
                context.out.flush();
                isFirstRefreshMap.put(context.getParagraphId(), false);
              }
              singleRowResult.pushAngularObjects();
            } else {
              String results =
                  getResults(resultSet, !containsIgnoreCase(sqlToExecute, EXPLAIN_PREDICATE));
              context.out.write(results);
              context.out.write("\n%text ");
              context.out.flush();
            }
          }
        } else {
          // The response contains either an update count or no results at all.
          int updateCount = statement.getUpdateCount();
          context.out.write("\n%text Query executed successfully. Affected rows: "
              + updateCount + "\n");
        }
      } finally {
        if (resultSet != null) {
          try {
            resultSet.close();
          } catch (SQLException e) {
            /* ignored */
          }
        }
        if (statement != null) {
          try {
            statement.close();
          } catch (SQLException e) {
            /* ignored */
          }
        }
      }
    }
  } catch (Throwable e) {
    LOGGER.error("Cannot run " + sql, e);
    if (e instanceof SQLException) {
      return new InterpreterResult(Code.ERROR, e.getMessage());
    } else {
      return new InterpreterResult(Code.ERROR, ExceptionUtils.getStackTrace(e));
    }
  } finally {
    // Commit in case the user ran an insert/update/upsert statement.
    if (connection != null) {
      try {
        if (!connection.getAutoCommit()) {
          connection.commit();
        }
        connection.close();
      } catch (SQLException e) {
        /* ignored */
      }
    }
    getJDBCConfiguration(user).removeStatement(paragraphId);
  }
  return new InterpreterResult(Code.SUCCESS);
}
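The getFirstRow helper is referenced above but not shown on this page; presumably it extracts the current row's columns into a list via the result set's metadata. A possible sketch, assuming the caller has already positioned the cursor with resultSet.next() (the method body is an assumption, not the project's actual implementation):

import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;

// Hypothetical reconstruction: reads the columns of the current row
// (the caller has already advanced the cursor with resultSet.next()).
private List<Object> getFirstRow(ResultSet rs) throws SQLException {
  ResultSetMetaData md = rs.getMetaData();
  List<Object> row = new ArrayList<>();
  for (int i = 1; i <= md.getColumnCount(); i++) {  // JDBC columns are 1-based
    row.add(rs.getObject(i));
  }
  return row;
}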
Use of org.apache.zeppelin.interpreter.SingleRowInterpreterResult in project zeppelin by apache.
The class SingleRowStreamSqlJob, method buildResult.
@Override
protected String buildResult() {
  SingleRowInterpreterResult singleRowResult =
      new SingleRowInterpreterResult(rowToList(latestRow), template, context);
  singleRowResult.pushAngularObjects();
  return singleRowResult.toAngular();
}
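Here rowToList converts the latest streaming row into a plain list for the template. A minimal sketch of such a conversion, assuming the row type is org.apache.flink.types.Row and the fields are copied by position (the method body is an assumption, not the project's actual implementation):

import java.util.ArrayList;
import java.util.List;
import org.apache.flink.types.Row;

// Hypothetical reconstruction: copies each field of a Flink Row into a List.
private List<Object> rowToList(Row row) {
  List<Object> list = new ArrayList<>();
  for (int i = 0; i < row.getArity(); i++) {
    list.add(row.getField(i));   // field i, in column order
  }
  return list;
}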
Use of org.apache.zeppelin.interpreter.SingleRowInterpreterResult in project zeppelin by apache.
The class SingleRowStreamSqlJob, method refresh.
@Override
protected void refresh(InterpreterContext context) throws Exception {
  if (latestRow == null) {
    LOGGER.warn("Skip RefreshTask as no data is available");
    return;
  }
  SingleRowInterpreterResult singleRowResult =
      new SingleRowInterpreterResult(rowToList(latestRow), template, context);
  if (isFirstRefresh) {
    context.out().clear(false);
    context.out.write(singleRowResult.toAngular());
    context.out.flush();
    // Checkpoint the html output, otherwise the frontend won't display the output
    // after recovery.
    context.getIntpEventClient().checkpointOutput(context.getNoteId(), context.getParagraphId());
    isFirstRefresh = false;
  }
  singleRowResult.pushAngularObjects();
}
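refresh is invoked repeatedly while the streaming query runs: only the first invocation writes the %angular skeleton, and every later one just pushes fresh values into the existing angular objects. A minimal illustration of how a caller might drive this method on a fixed interval (the scheduling code below is a sketch for illustration, not Zeppelin's actual refresh scheduler; context and LOGGER are assumed to be in scope):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Illustration only: call refresh(context) once per second until the job stops.
ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
scheduler.scheduleAtFixedRate(() -> {
  try {
    refresh(context);   // writes %angular once, then only pushes new values
  } catch (Exception e) {
    LOGGER.error("Fail to refresh", e);
  }
}, 0, 1, TimeUnit.SECONDS);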