Use of org.apache.logging.log4j.core.LoggerContext in project hive by apache.
The class HiveMetaStore, method main.
/**
 * @param args
 */
public static void main(String[] args) throws Throwable {
    final Configuration conf = MetastoreConf.newMetastoreConf();
    shutdownHookMgr = ShutdownHookManager.get();
    HiveMetastoreCli cli = new HiveMetastoreCli(conf);
    cli.parse(args);
    final boolean isCliVerbose = cli.isVerbose();
    // NOTE: It is critical to do this prior to initializing log4j, otherwise
    // any log specific settings via hiveconf will be ignored
    Properties hiveconf = cli.addHiveconfToSystemProperties();
    // before any of the other core hive classes are loaded
    try {
        // use Hive's default log4j configuration
        if (System.getProperty("log4j.configurationFile") == null) {
            LogUtils.initHiveLog4j(conf);
        } else {
            // reconfigure log4j after the settings passed via hiveconf have been written into System properties
            LoggerContext context = (LoggerContext) LogManager.getContext(false);
            context.reconfigure();
        }
    } catch (LogUtils.LogInitializationException e) {
        HMSHandler.LOG.warn(e.getMessage());
    }
    startupShutdownMessage(HiveMetaStore.class, args, LOG);
    try {
        String msg = "Starting hive metastore on port " + cli.port;
        HMSHandler.LOG.info(msg);
        if (cli.isVerbose()) {
            System.err.println(msg);
        }
        // set all properties specified on the command line
        for (Map.Entry<Object, Object> item : hiveconf.entrySet()) {
            conf.set((String) item.getKey(), (String) item.getValue());
        }
        // Add shutdown hook.
        shutdownHookMgr.addShutdownHook(() -> {
            String shutdownMsg = "Shutting down hive metastore.";
            HMSHandler.LOG.info(shutdownMsg);
            if (isCliVerbose) {
                System.err.println(shutdownMsg);
            }
            if (MetastoreConf.getBoolVar(conf, ConfVars.METRICS_ENABLED)) {
                try {
                    Metrics.shutdown();
                } catch (Exception e) {
                    LOG.error("error in Metrics deinit: " + e.getClass().getName() + " " + e.getMessage(), e);
                }
            }
            ThreadPool.shutdown();
        }, 10);
        // Start Metrics for Standalone (Remote) Mode
        if (MetastoreConf.getBoolVar(conf, ConfVars.METRICS_ENABLED)) {
            try {
                Metrics.initialize(conf);
            } catch (Exception e) {
                // log exception, but ignore inability to start
                LOG.error("error in Metrics init: " + e.getClass().getName() + " " + e.getMessage(), e);
            }
        }
        Lock startLock = new ReentrantLock();
        Condition startCondition = startLock.newCondition();
        AtomicBoolean startedServing = new AtomicBoolean();
        startMetaStoreThreads(conf, startLock, startCondition, startedServing);
        startMetaStore(cli.getPort(), HadoopThriftAuthBridge.getBridge(), conf, startLock, startCondition, startedServing);
    } catch (Throwable t) {
        // Catch the exception, log it and rethrow it.
        HMSHandler.LOG.error("Metastore Thrift Server threw an exception...", t);
        throw t;
    }
}
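
The Log4j 2 idiom worth isolating here is forcing an already-initialized logging context to re-read its configuration after system properties have changed. A minimal standalone sketch of just that pattern; the property name is illustrative and assumes the active configuration resolves it through a ${sys:...} lookup:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.core.LoggerContext;

public class ReconfigureAfterSystemProps {
    public static void main(String[] args) {
        // Pretend command-line overrides were just copied into System properties,
        // as cli.addHiveconfToSystemProperties() does above.
        System.setProperty("hive.log.level", "DEBUG"); // illustrative property name

        // getContext(false) returns the caller's current context without forcing
        // re-initialization; reconfigure() then re-reads the configuration so the
        // new system property values take effect.
        LoggerContext context = (LoggerContext) LogManager.getContext(false);
        context.reconfigure();
    }
}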
Use of org.apache.logging.log4j.core.LoggerContext in project hive by apache.
The class TestHive, method testMetaStoreApiTiming.
/**
 * Test logging of timing for metastore api calls
 *
 * @throws Throwable
 */
public void testMetaStoreApiTiming() throws Throwable {
    // Get the RootLogger which, if you don't have log4j2-test.properties defined, will only log ERRORs
    Logger logger = LogManager.getLogger("hive.ql.metadata.Hive");
    Level oldLevel = logger.getLevel();
    LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
    Configuration config = ctx.getConfiguration();
    LoggerConfig loggerConfig = config.getLoggerConfig(logger.getName());
    loggerConfig.setLevel(Level.DEBUG);
    ctx.updateLoggers();
    // Create a String Appender to capture log output
    StringAppender appender = StringAppender.createStringAppender("%m");
    appender.addToLogger(logger.getName(), Level.DEBUG);
    appender.start();
    try {
        hm.clearMetaCallTiming();
        hm.getAllDatabases();
        hm.dumpAndClearMetaCallTiming("test");
        String logStr = appender.getOutput();
        String expectedString = "getAllDatabases_()=";
        Assert.assertTrue(logStr + " should contain <" + expectedString, logStr.contains(expectedString));
        // reset the log buffer, verify new dump without any api call does not contain func
        appender.reset();
        hm.dumpAndClearMetaCallTiming("test");
        logStr = appender.getOutput();
        Assert.assertFalse(logStr + " should not contain <" + expectedString, logStr.contains(expectedString));
    } finally {
        loggerConfig.setLevel(oldLevel);
        ctx.updateLoggers();
        appender.removeFromLogger(logger.getName());
    }
}
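
StringAppender is a Hive test utility, not part of Log4j 2 itself. A comparable capture appender can be sketched on top of Log4j 2's AbstractAppender; this is a minimal illustration rather than Hive's implementation, and the five-argument constructor assumes Log4j 2.11 or later:

import java.io.Serializable;

import org.apache.logging.log4j.core.Layout;
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.appender.AbstractAppender;
import org.apache.logging.log4j.core.config.Property;

// Buffers formatted log events in memory so a test can assert on them.
public class CapturingAppender extends AbstractAppender {
    private final StringBuilder buffer = new StringBuilder();

    public CapturingAppender(String name, Layout<? extends Serializable> layout) {
        super(name, null, layout, true, Property.EMPTY_ARRAY);
    }

    @Override
    public synchronized void append(LogEvent event) {
        buffer.append(getLayout().toSerializable(event));
    }

    public synchronized String getOutput() {
        return buffer.toString();
    }

    public synchronized void reset() {
        buffer.setLength(0);
    }
}

Wiring it up mirrors the test above: add the started instance to the target LoggerConfig with addAppender, call updateLoggers(), and remove it again in a finally block so the test leaves the configuration as it found it.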
Use of org.apache.logging.log4j.core.LoggerContext in project hive by apache.
The class OperationManager, method initOperationLogCapture.
private void initOperationLogCapture(String loggingMode) {
    // Register another Appender (with the same layout) that talks to us.
    Appender ap = LogDivertAppender.createInstance(this, OperationLog.getLoggingLevel(loggingMode));
    LoggerContext context = (LoggerContext) LogManager.getContext(false);
    Configuration configuration = context.getConfiguration();
    LoggerConfig loggerConfig = configuration.getLoggerConfig(LoggerFactory.getLogger(getClass()).getName());
    loggerConfig.addAppender(ap, null, null);
    context.updateLoggers();
    ap.start();
}
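
A minimal sketch of the same attach step, together with the matching detach a caller eventually needs. ConsoleAppender stands in for Hive's LogDivertAppender, the logger name is hypothetical, and the builder method names assume a recent Log4j 2.x release:

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.core.Appender;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.appender.ConsoleAppender;
import org.apache.logging.log4j.core.config.Configuration;
import org.apache.logging.log4j.core.config.LoggerConfig;
import org.apache.logging.log4j.core.layout.PatternLayout;

public class RuntimeAppenderAttach {
    public static void main(String[] args) {
        LoggerContext context = (LoggerContext) LogManager.getContext(false);
        Configuration configuration = context.getConfiguration();

        Appender ap = ConsoleAppender.newBuilder()
                .setName("divert")
                .setLayout(PatternLayout.createDefaultLayout())
                .build();
        // Start the appender before events can be routed through it.
        ap.start();

        // getLoggerConfig returns the nearest configured ancestor when no exact
        // match exists, so the appender may receive events from a wider logger
        // subtree than the name suggests.
        LoggerConfig loggerConfig = configuration.getLoggerConfig("org.example.operation"); // hypothetical name
        loggerConfig.addAppender(ap, Level.INFO, null);
        context.updateLoggers();

        LogManager.getLogger("org.example.operation").info("now reaching the new appender");

        // Undo the capture when done.
        loggerConfig.removeAppender("divert");
        context.updateLoggers();
        ap.stop();
    }
}

The nearest-ancestor behavior of getLoggerConfig applies to the Hive snippet above as well: if no LoggerConfig is defined for the OperationManager class, the appender is attached to whichever ancestor config actually governs it.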
Use of org.apache.logging.log4j.core.LoggerContext in project storm by apache.
The class LogConfigManager, method getLoggerLevels.
public Map<String, Level> getLoggerLevels() {
    Configuration loggerConfig = ((LoggerContext) LogManager.getContext(false)).getConfiguration();
    Map<String, Level> logLevelMap = new HashMap<>();
    for (Map.Entry<String, LoggerConfig> entry : loggerConfig.getLoggers().entrySet()) {
        logLevelMap.put(entry.getKey(), entry.getValue().getLevel());
    }
    return logLevelMap;
}
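
A usage sketch; note that the root logger is keyed by LogManager.ROOT_LOGGER_NAME, which is the empty string. Instantiating LogConfigManager directly is an assumption made for illustration:

import java.util.Map;

import org.apache.logging.log4j.Level;

public class DumpLoggerLevels {
    public static void main(String[] args) {
        LogConfigManager manager = new LogConfigManager(); // assumes a usable no-arg constructor
        for (Map.Entry<String, Level> entry : manager.getLoggerLevels().entrySet()) {
            String name = entry.getKey().isEmpty() ? "ROOT" : entry.getKey();
            System.out.println(name + " -> " + entry.getValue());
        }
    }
}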
Use of org.apache.logging.log4j.core.LoggerContext in project storm by apache.
The class LogConfigManager, method processLogConfigChange.
public void processLogConfigChange(LogConfig logConfig) {
    if (null != logConfig) {
        LOG.debug("Processing received log config: {}", logConfig);
        TreeMap<String, LogLevel> loggers = new TreeMap<>(logConfig.get_named_logger_level());
        LoggerContext logContext = (LoggerContext) LogManager.getContext(false);
        Map<String, LogLevel> newLogConfigs = new HashMap<>();
        for (Map.Entry<String, LogLevel> entry : loggers.entrySet()) {
            String msgLoggerName = entry.getKey();
            msgLoggerName = ("ROOT".equalsIgnoreCase(msgLoggerName)) ? LogManager.ROOT_LOGGER_NAME : msgLoggerName;
            LogLevel loggerLevel = entry.getValue();
            // the new-timeouts map now contains logger => timeout
            if (loggerLevel.is_set_reset_log_level_timeout_epoch()) {
                LogLevel copy = new LogLevel(loggerLevel);
                if (originalLogLevels.containsKey(msgLoggerName)) {
                    copy.set_reset_log_level(originalLogLevels.get(msgLoggerName).name());
                } else {
                    copy.set_reset_log_level(Level.INFO.name());
                }
                newLogConfigs.put(msgLoggerName, copy);
            }
        }
        // Look for deleted log timeouts
        TreeMap<String, LogLevel> latestConf = latestLogConfig.get();
        if (latestConf != null) {
            for (String loggerName : latestConf.descendingKeySet()) {
                if (!newLogConfigs.containsKey(loggerName)) {
                    // if we had a timeout, but the timeout is no longer active
                    setLoggerLevel(logContext, loggerName, latestConf.get(loggerName).get_reset_log_level());
                }
            }
        }
        // the merged configs are only for the reset logic
        for (String loggerName : new TreeSet<>(logConfig.get_named_logger_level().keySet())) {
            LogLevel logLevel = logConfig.get_named_logger_level().get(loggerName);
            loggerName = ("ROOT".equalsIgnoreCase(loggerName)) ? LogManager.ROOT_LOGGER_NAME : loggerName;
            LogLevelAction action = logLevel.get_action();
            if (action == LogLevelAction.UPDATE) {
                setLoggerLevel(logContext, loggerName, logLevel.get_target_log_level());
            }
        }
        logContext.updateLoggers();
        latestLogConfig.set(new TreeMap<>(newLogConfigs));
        LOG.debug("New merged log config is {}", latestLogConfig.get());
    }
}
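
setLoggerLevel is called above but not shown in this excerpt. A minimal sketch of what such a helper might look like, assuming levels arrive as strings; Storm's actual implementation may differ in its details:

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.config.Configuration;
import org.apache.logging.log4j.core.config.LoggerConfig;

static void setLoggerLevel(LoggerContext logContext, String loggerName, String newLevelStr) {
    Level newLevel = Level.toLevel(newLevelStr, Level.INFO); // fall back to INFO on unparseable input
    Configuration configuration = logContext.getConfiguration();
    LoggerConfig loggerConfig = configuration.getLoggerConfig(loggerName);
    if (loggerConfig.getName().equals(loggerName)) {
        // an exact LoggerConfig exists for this name; adjust it in place
        loggerConfig.setLevel(newLevel);
    } else {
        // getLoggerConfig matched an ancestor; register a dedicated additive
        // config so only the requested logger changes
        configuration.addLogger(loggerName, new LoggerConfig(loggerName, newLevel, true));
    }
    // the caller invokes logContext.updateLoggers() afterwards, as processLogConfigChange does above
}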