Example usage of org.apache.hadoop.hive.ql.session.OperationLog in the Apache Hive project:
the append method of the LogDivertAppender class.
/**
 * Routes a Log4j event to the operation log of the thread that emitted it.
 *
 * <p>The event is first rendered through the parent appender's layout
 * ({@code super.append}) into the string buffer read by {@code getOutput()},
 * which is then cleared via {@code manager.reset()}. If the current thread has
 * no associated OperationLog, the rendered output is dropped.
 *
 * @param event the log event to divert
 */
@Override
public void append(LogEvent event) {
  super.append(event);
  final String rendered = getOutput();
  manager.reset();

  final OperationLog targetLog = operationManager.getOperationLogByThread();
  if (targetLog != null) {
    targetLog.writeOperationLog(rendered);
  } else {
    // No operation registered on this thread; the event cannot be attributed.
    LOG.debug(" ---+++=== Dropped log event from thread " + event.getThreadName());
  }
}
Example usage of org.apache.hadoop.hive.ql.session.OperationLog in the Apache Hive project:
the getOperationLogRowSet method of the OperationManager class.
/**
 * Fetches a batch of operation log lines for the given operation as a RowSet
 * with a single string column.
 *
 * <p>Returns an empty RowSet (with a warning) when operation logging is
 * disabled via {@code hive.server2.logging.operation.enabled}.
 *
 * @param opHandle    handle identifying the operation whose log is requested
 * @param orientation fetch orientation; FETCH_FIRST restarts from the beginning
 * @param maxRows     maximum number of log lines to return
 * @param hConf       configuration used to check whether operation logging is enabled
 * @return a RowSet containing up to maxRows log lines
 * @throws HiveSQLException if no OperationLog is associated with the handle,
 *                          or reading the log file fails
 */
public RowSet getOperationLogRowSet(OperationHandle opHandle, FetchOrientation orientation, long maxRows, HiveConf hConf) throws HiveSQLException {
  TableSchema tableSchema = new TableSchema(getLogSchema());
  RowSet rowSet = RowSetFactory.create(tableSchema, getOperation(opHandle).getProtocolVersion(), false);
  // Idiomatic boolean test instead of comparing against the literal "false".
  if (!hConf.getBoolVar(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED)) {
    LOG.warn("Try to get operation log when hive.server2.logging.operation.enabled is false, no log will be returned. ");
    return rowSet;
  }
  // get the OperationLog object from the operation
  OperationLog operationLog = getOperation(opHandle).getOperationLog();
  if (operationLog == null) {
    throw new HiveSQLException("Couldn't find log associated with operation handle: " + opHandle);
  }
  // read logs
  List<String> logs;
  try {
    logs = operationLog.readOperationLog(isFetchFirst(orientation), maxRows);
  } catch (SQLException e) {
    throw new HiveSQLException(e.getMessage(), e.getCause());
  }
  // convert logs to RowSet: one row per log line, single string column
  for (String log : logs) {
    rowSet.addRow(new String[] { log });
  }
  return rowSet;
}
Example usage of org.apache.hadoop.hive.ql.session.OperationLog in the Apache Hive project:
the createOperationLog method of the Operation class.
/**
 * Creates the per-operation log file (named after the handle identifier inside
 * the session's operation-log directory), wraps it in an OperationLog, and
 * registers that log on the current thread so log appenders can divert output
 * to it.
 *
 * <p>On any failure (directory missing and not recreatable, file not creatable
 * or not readable/writable, OperationLog construction failing) the method sets
 * {@code isOperationLogEnabled = false} and returns without throwing.
 */
protected void createOperationLog() {
  if (parentSession.isOperationLogEnabled()) {
    File operationLogFile = new File(parentSession.getOperationLogSessionDir(), opHandle.getHandleIdentifier().toString());
    isOperationLogEnabled = true;
    // create log file
    try {
      if (operationLogFile.exists()) {
        LOG.warn("The operation log file should not exist, but it is already there: " + operationLogFile.getAbsolutePath());
        // Check the delete() result instead of silently ignoring it; a stale
        // file that cannot be removed would otherwise be reused with no
        // diagnostic. createNewFile() below still handles the leftover file.
        if (!operationLogFile.delete()) {
          LOG.warn("Failed to delete the pre-existing operation log file: " + operationLogFile.getAbsolutePath());
        }
      }
      if (!operationLogFile.getParentFile().exists()) {
        LOG.warn("Operations log directory for this session does not exist, it could have been deleted " + "externally. Recreating the directory for future queries in this session but the older operation " + "logs for this session are no longer available");
        if (!operationLogFile.getParentFile().mkdir()) {
          LOG.warn("Log directory for this session could not be created, disabling " + "operation logs: " + operationLogFile.getParentFile().getAbsolutePath());
          isOperationLogEnabled = false;
          return;
        }
      }
      if (!operationLogFile.createNewFile()) {
        // If it can be read/written, keep its contents and use it.
        if (!operationLogFile.canRead() || !operationLogFile.canWrite()) {
          LOG.warn("The already existed operation log file cannot be recreated, " + "and it cannot be read or written: " + operationLogFile.getAbsolutePath());
          isOperationLogEnabled = false;
          return;
        }
      }
    } catch (Exception e) {
      LOG.warn("Unable to create operation log file: " + operationLogFile.getAbsolutePath(), e);
      isOperationLogEnabled = false;
      return;
    }
    // create OperationLog object with above log file
    try {
      operationLog = new OperationLog(opHandle.toString(), operationLogFile, parentSession.getHiveConf());
    } catch (FileNotFoundException e) {
      LOG.warn("Unable to instantiate OperationLog object for operation: " + opHandle, e);
      isOperationLogEnabled = false;
      return;
    }
    // register this operationLog to current thread so appenders can find it
    OperationLog.setCurrentOperationLog(operationLog);
  }
}
Example usage of org.apache.hadoop.hive.ql.session.OperationLog in the Apache Hive project:
the runInternal method of the HiveCommandOperation class.
@Override
public void runInternal() throws HiveSQLException {
setState(OperationState.RUNNING);
try {
String command = getStatement().trim();
String[] tokens = statement.split("\\s");
String commandArgs = command.substring(tokens[0].length()).trim();
CommandProcessorResponse response = commandProcessor.run(commandArgs);
int returnCode = response.getResponseCode();
if (returnCode != 0) {
throw toSQLException("Error while processing statement", response);
}
Schema schema = response.getSchema();
if (schema != null) {
setHasResultSet(true);
resultSchema = new TableSchema(schema);
} else {
setHasResultSet(false);
resultSchema = new TableSchema();
}
if (response.getConsoleMessages() != null) {
// Propagate processor messages (if any) to beeline or other client.
OperationLog ol = OperationLog.getCurrentOperationLog();
if (ol != null) {
for (String consoleMsg : response.getConsoleMessages()) {
ol.writeOperationLog(LoggingLevel.EXECUTION, consoleMsg + "\n");
}
}
}
} catch (HiveSQLException e) {
setState(OperationState.ERROR);
throw e;
} catch (Exception e) {
setState(OperationState.ERROR);
throw new HiveSQLException("Error running query: " + e.toString(), e);
}
setState(OperationState.FINISHED);
}
Example usage of org.apache.hadoop.hive.ql.session.OperationLog in the Apache Hive project:
the logTypeWarning method of the ColumnStatsSemanticAnalyzer class.
/**
 * Emits a warning that a non-primitive column type was supplied for column
 * statistics, writing it both to the console and — when an OperationLog is
 * registered on the current thread — to the operation log so clients such as
 * beeline can see it.
 *
 * @param colName name of the offending column
 * @param colType the non-primitive type that was passed
 */
private void logTypeWarning(String colName, String colType) {
  // Build the exact same message text as a single expression.
  final String message = "WARNING: "
      + "Only primitive type arguments are accepted but " + colType
      + " is passed for " + colName + ".";
  console.printInfo(message);

  // Propagate warning to beeline via operation log, if one is attached.
  final OperationLog currentLog = OperationLog.getCurrentOperationLog();
  if (currentLog != null) {
    currentLog.writeOperationLog(LoggingLevel.EXECUTION, message + "\n");
  }
}
Aggregations