Usage of org.apache.commons.io.output.StringBuilderWriter in the Apache Hive project.
From the class LineageLogger, the method run:
/**
 * Post-execution hook that serializes the query's lineage information to JSON
 * and emits it to the lineage log (or to the console when running in test mode).
 * Any failure here is logged and swallowed so the query itself is never affected.
 *
 * @param hookContext the post-execution hook context carrying the query plan,
 *                    lineage index, completed tasks, and user information
 */
@Override
public void run(HookContext hookContext) {
  assert (hookContext.getHookType() == HookType.POST_EXEC_HOOK);
  QueryPlan plan = hookContext.getQueryPlan();
  Index index = hookContext.getIndex();
  SessionState ss = SessionState.get();
  // Only emit lineage for real (non-EXPLAIN) queries of the tracked operation
  // types, and only when a session and lineage index are available.
  if (ss != null && index != null && OPERATION_NAMES.contains(plan.getOperationName()) && !plan.isExplain()) {
    try {
      StringBuilderWriter out = new StringBuilderWriter(1024);
      String queryStr = plan.getQueryStr().trim();
      HiveConf conf = ss.getConf();
      boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST);
      // try-with-resources guarantees the writer is closed even if a
      // serialization step throws; the outer catch already swallows failures.
      try (JsonWriter writer = new JsonWriter(out)) {
        writer.beginObject();
        writer.name("version").value(FORMAT_VERSION);
        if (!testMode) {
          // Don't emit user/timestamp info in test mode,
          // so that the test golden output file is fixed.
          long queryTime = plan.getQueryStartTime().longValue();
          if (queryTime == 0) {
            queryTime = System.currentTimeMillis();
          }
          long duration = System.currentTimeMillis() - queryTime;
          writer.name("user").value(hookContext.getUgi().getUserName());
          writer.name("timestamp").value(queryTime / 1000);
          writer.name("duration").value(duration);
          writer.name("jobIds");
          writer.beginArray();
          List<TaskRunner> tasks = hookContext.getCompleteTaskList();
          if (tasks != null && !tasks.isEmpty()) {
            for (TaskRunner task : tasks) {
              String jobId = task.getTask().getJobID();
              if (jobId != null) {
                writer.value(jobId);
              }
            }
          }
          writer.endArray();
        }
        writer.name("engine").value(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE));
        writer.name("database").value(ss.getCurrentDatabase());
        writer.name("hash").value(getQueryHash(queryStr));
        writer.name("queryText").value(queryStr);
        List<Edge> edges = getEdges(plan, index);
        Set<Vertex> vertices = getVertices(edges);
        writeEdges(writer, edges);
        writeVertices(writer, vertices);
        writer.endObject();
      }
      // Log the lineage info
      String lineage = out.toString();
      if (testMode) {
        // Log to console so the test golden files capture the output.
        log(lineage);
      } else {
        // In non-test mode, emit to a log file,
        // which can be different from the normal hive.log.
        // For example, using NoDeleteRollingFileAppender to
        // log to some file with different rolling policy.
        LOG.info(lineage);
      }
    } catch (Throwable t) {
      // Don't fail the query just because of any lineage issue.
      log("Failed to log lineage graph, query is not affected\n" + org.apache.hadoop.util.StringUtils.stringifyException(t));
    }
  }
}
Usage of org.apache.commons.io.output.StringBuilderWriter in the Fess project by CodeLibs.
From the class FailureUrlService, the method getStackTrace:
/**
 * Renders the given throwable's stack trace as a string, abbreviated
 * via the system helper so overly long traces are truncated.
 *
 * @param t the throwable whose stack trace should be captured
 * @return the (possibly abbreviated) stack trace text
 */
private String getStackTrace(final Throwable t) {
    final SystemHelper helper = ComponentUtil.getSystemHelper();
    // Print the full trace into an in-memory buffer (no file I/O involved).
    final StringBuilderWriter buffer = new StringBuilderWriter();
    t.printStackTrace(new PrintWriter(buffer, true));
    return helper.abbreviateLongText(buffer.toString());
}
Aggregations