use of ch.qos.logback.classic.spi.ILoggingEvent in project ha-bridge by bwssytems.
the class LoggingUtil method getLogFileInfos.
/**
 * Get the log file information for the root logger.
 *
 * @return List of LogFileInfo objects
 */
public static List<LogFileInfo> getLogFileInfos() {
    final LoggerContext lc = (LoggerContext) LoggerFactory.getILoggerFactory();
    final List<LogFileInfo> logFileInfos = new ArrayList<LogFileInfo>();
    final Logger logger = lc.getLogger(Logger.ROOT_LOGGER_NAME);
    final Iterator<Appender<ILoggingEvent>> it = logger.iteratorForAppenders();
    while (it.hasNext()) {
        final Appender<ILoggingEvent> appender = it.next();
        if (appender instanceof FileAppender) {
            final FileAppender<ILoggingEvent> fileAppender = (FileAppender<ILoggingEvent>) appender;
            final File logFile = new File(fileAppender.getFile());
            final LogFileInfo logFileInfo = new LogFileInfo();
            logFileInfo.setFileName(logFile.getName());
            logFileInfo.setFileLastChanged(new Date(logFile.lastModified()));
            logFileInfo.setFileSize(logFile.length());
            logFileInfos.add(logFileInfo);
        }
    }
    return logFileInfos;
}
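For context, LogFileInfo acts as a simple carrier bean here. A minimal sketch consistent with the setters used above (the actual ha-bridge class may hold additional fields) could look like this:

import java.util.Date;

// Minimal sketch of a LogFileInfo carrier bean, inferred from the setters used
// in getLogFileInfos(); the real ha-bridge class may differ.
public class LogFileInfo {
    private String fileName;
    private Date fileLastChanged;
    private long fileSize;

    public String getFileName() { return fileName; }
    public void setFileName(String fileName) { this.fileName = fileName; }

    public Date getFileLastChanged() { return fileLastChanged; }
    public void setFileLastChanged(Date fileLastChanged) { this.fileLastChanged = fileLastChanged; }

    public long getFileSize() { return fileSize; }
    public void setFileSize(long fileSize) { this.fileSize = fileSize; }
}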
use of ch.qos.logback.classic.spi.ILoggingEvent in project ha-bridge by bwssytems.
the class LoggingUtil method getLogFile.
/**
 * Get the log file.
 *
 * @param logFileName The name of the log file
 * @return The actual file
 */
public static File getLogFile(final String logFileName) {
    if (logFileName == null) {
        throw new IllegalArgumentException("logFileName cannot be null.");
    }
    final LoggerContext lc = (LoggerContext) LoggerFactory.getILoggerFactory();
    final Logger logger = lc.getLogger(Logger.ROOT_LOGGER_NAME);
    final Iterator<Appender<ILoggingEvent>> it = logger.iteratorForAppenders();
    while (it.hasNext()) {
        final Appender<ILoggingEvent> appender = it.next();
        if (appender instanceof FileAppender) {
            final FileAppender<ILoggingEvent> fileAppender = (FileAppender<ILoggingEvent>) appender;
            final File logFile = new File(fileAppender.getFile());
            if (logFile.getName().equalsIgnoreCase(logFileName)) {
                return logFile;
            }
        }
    }
    return null;
}
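A hypothetical caller could combine the two helpers above as follows; the file name and printed output are illustrative only, and the snippet assumes it lives alongside LoggingUtil and the LogFileInfo bean sketched earlier:

import java.io.File;

// Hypothetical usage of LoggingUtil.getLogFileInfos() and LoggingUtil.getLogFile();
// "ha-bridge.log" is an example file name, not taken from the project.
public class LogFileLister {
    public static void main(String[] args) {
        for (LogFileInfo info : LoggingUtil.getLogFileInfos()) {
            System.out.println(info.getFileName() + " (" + info.getFileSize() + " bytes)");
        }
        File logFile = LoggingUtil.getLogFile("ha-bridge.log");
        if (logFile != null) {
            System.out.println("Resolved: " + logFile.getAbsolutePath());
        }
    }
}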
use of ch.qos.logback.classic.spi.ILoggingEvent in project cdap by caskdata.
the class KafkaLogProcessorPipeline method processMessages.
/**
 * Process messages fetched from a given partition.
 */
private boolean processMessages(String topic, int partition, Future<Iterable<MessageAndOffset>> future) throws InterruptedException, KafkaException, IOException {
    Iterable<MessageAndOffset> messages;
    try {
        messages = future.get();
    } catch (ExecutionException e) {
        try {
            throw e.getCause();
        } catch (OffsetOutOfRangeException cause) {
            // This shouldn't happen under normal circumstances.
            // If it does, it is usually caused by a race between Kafka log rotation and the fetch here,
            // hence fetching from the beginning again should be fine.
            offsets.put(partition, getLastOffset(partition, kafka.api.OffsetRequest.EarliestTime()));
            return false;
        } catch (KafkaException | IOException cause) {
            throw cause;
        } catch (Throwable t) {
            // For other types of exceptions, just throw an IOException. It will be handled by the caller.
            throw new IOException(t);
        }
    }
    boolean processed = false;
    for (MessageAndOffset message : messages) {
        if (eventQueue.getEventSize() >= config.getMaxBufferSize()) {
            // Log a message. If this happens too often, it indicates that more memory is needed for log processing.
            OUTAGE_LOG.info("Maximum queue size {} reached for pipeline {}.", config.getMaxBufferSize(), name);
            // If nothing has been appended (due to error), break the loop so that no new event will be appended.
            // Since the offset is not updated, the same set of messages will be fetched again in the next iteration.
            int eventsAppended = appendEvents(System.currentTimeMillis(), true);
            if (eventsAppended <= 0) {
                break;
            }
            unSyncedEvents += eventsAppended;
        }
        try {
            metricsContext.increment("kafka.bytes.read", message.message().payloadSize());
            ILoggingEvent loggingEvent = serializer.fromBytes(message.message().payload());
            // Use the message payload size as the size estimate of the logging event.
            // Although it's not the same as the in-memory object size, it should differ by roughly a constant factor,
            // hence it is proportional to the actual object size.
            eventQueue.add(loggingEvent, loggingEvent.getTimeStamp(), message.message().payloadSize(), partition, new OffsetTime(message.nextOffset(), loggingEvent.getTimeStamp()));
        } catch (IOException e) {
            // This shouldn't happen. In case it happens (e.g. someone published some garbage), just skip the message.
            LOG.trace("Fail to decode logging event from {}:{} at offset {}. Skipping it.", topic, partition, message.offset(), e);
        }
        processed = true;
        offsets.put(partition, message.nextOffset());
    }
    return processed;
}
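OffsetTime is used above to pair a Kafka offset with the timestamp of the event read at that offset, so that appendEvents can later restore both into a checkpoint. A minimal sketch of such a value class, assuming only the two-argument constructor and the getters these snippets use (the real CDAP class may carry more state), is:

// Minimal sketch of an offset/event-time pair as used by the event queue above;
// the actual CDAP OffsetTime class may differ.
public final class OffsetTime {
    private final long offset;
    private final long eventTime;

    public OffsetTime(long offset, long eventTime) {
        this.offset = offset;
        this.eventTime = eventTime;
    }

    public long getOffset() { return offset; }
    public long getEventTime() { return eventTime; }
}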
use of ch.qos.logback.classic.spi.ILoggingEvent in project cdap by caskdata.
the class KafkaLogProcessorPipeline method appendEvents.
/**
 * Appends buffered events to the appender. If the {@code forced} parameter is {@code false}, only buffered events
 * that are older than the configured event delay are appended and removed from the buffer.
 * If {@code forced} is {@code true}, events are appended and removed until the buffered size drops to at most
 * {@code maxBufferSize * MIN_FREE_FACTOR}, regardless of the event time.
 *
 * @return number of events appended to the appender
 */
private int appendEvents(long currentTimeMillis, boolean forced) {
    long minEventTime = currentTimeMillis - config.getEventDelayMillis();
    long maxRetainSize = forced ? (long) (config.getMaxBufferSize() * MIN_FREE_FACTOR) : Long.MAX_VALUE;
    TimeEventQueue.EventIterator<ILoggingEvent, OffsetTime> iterator = eventQueue.iterator();
    int eventsAppended = 0;
    long minDelay = Long.MAX_VALUE;
    long maxDelay = -1;
    while (iterator.hasNext()) {
        ILoggingEvent event = iterator.next();
        // If the queue is within the retain size and the event is still within the buffering time,
        // there is no need to iterate anymore.
        if (eventQueue.getEventSize() <= maxRetainSize && event.getTimeStamp() >= minEventTime) {
            break;
        }
        // Update the delay metrics.
        long delay = System.currentTimeMillis() - event.getTimeStamp();
        minDelay = delay < minDelay ? delay : minDelay;
        maxDelay = delay > maxDelay ? delay : maxDelay;
        try {
            // Append the event if its level is enabled for the effective logger.
            ch.qos.logback.classic.Logger effectiveLogger = context.getEffectiveLogger(event.getLoggerName());
            if (event.getLevel().isGreaterOrEqual(effectiveLogger.getEffectiveLevel())) {
                effectiveLogger.callAppenders(event);
            }
        } catch (Exception e) {
            OUTAGE_LOG.warn("Failed to append log event in pipeline {}. Will be retried.", name, e);
            break;
        }
        // Update the Kafka offset before removing the current event.
        int partition = iterator.getPartition();
        MutableCheckpoint checkpoint = checkpoints.get(partition);
        // Get the smallest offset and corresponding timestamp from the event queue.
        OffsetTime offsetTime = eventQueue.getSmallestOffset(partition);
        if (checkpoint == null) {
            checkpoint = new MutableCheckpoint(offsetTime.getOffset(), offsetTime.getEventTime(), event.getTimeStamp());
            checkpoints.put(partition, checkpoint);
        } else {
            checkpoint.setNextOffset(offsetTime.getOffset()).setNextEvenTime(offsetTime.getEventTime()).setMaxEventTime(event.getTimeStamp());
        }
        iterator.remove();
        eventsAppended++;
    }
    // For each partition, if there are no more events in the event queue, update the checkpoint nextOffset.
    for (Int2LongMap.Entry entry : offsets.int2LongEntrySet()) {
        int partition = entry.getIntKey();
        if (eventQueue.isEmpty(partition)) {
            MutableCheckpoint checkpoint = checkpoints.get(partition);
            long offset = entry.getLongValue();
            // The queue is empty, so everything before the processed offset must have been written to the appender.
            if (checkpoint != null && offset > checkpoint.getNextOffset()) {
                checkpoint.setNextOffset(offset);
            }
        }
    }
    if (eventsAppended > 0) {
        // Events were appended from the iterator.
        metricsContext.gauge(Constants.Metrics.Name.Log.PROCESS_MIN_DELAY, minDelay);
        metricsContext.gauge(Constants.Metrics.Name.Log.PROCESS_MAX_DELAY, maxDelay);
        metricsContext.increment(Constants.Metrics.Name.Log.PROCESS_MESSAGES_COUNT, eventsAppended);
    }
    // Failure to flush is OK; it will be retried by the wrapped appender.
    try {
        metricsContext.gauge("event.queue.size.bytes", eventQueue.getEventSize());
        context.flush();
    } catch (IOException e) {
        OUTAGE_LOG.warn("Failed to flush in pipeline {}. Will be retried.", name, e);
    }
    return eventsAppended;
}
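The forced-flush threshold above reduces to a single comparison on the buffered size. A small standalone illustration of the same retain-size rule, with MIN_FREE_FACTOR assumed to be 0.5 purely for this example, is:

// Standalone illustration of the retain-size rule used in appendEvents.
// MIN_FREE_FACTOR = 0.5 is an assumption for this example only.
public class RetainSizeDemo {
    private static final double MIN_FREE_FACTOR = 0.5;

    static long maxRetainSize(long maxBufferSize, boolean forced) {
        // When forced, drain the queue down to at most maxBufferSize * MIN_FREE_FACTOR bytes;
        // otherwise there is no size-based limit and only event age triggers appends.
        return forced ? (long) (maxBufferSize * MIN_FREE_FACTOR) : Long.MAX_VALUE;
    }

    public static void main(String[] args) {
        System.out.println(maxRetainSize(1024 * 1024, false)); // Long.MAX_VALUE: no size limit
        System.out.println(maxRetainSize(1024 * 1024, true));  // 524288: half of a 1 MiB buffer
    }
}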
use of ch.qos.logback.classic.spi.ILoggingEvent in project cdap by caskdata.
the class DistributedLogFrameworkTest method publishLog.
/**
 * Publishes multiple log events.
 */
private void publishLog(String topic, LoggingContext context, Iterable<ILoggingEvent> events) {
    KafkaPublisher.Preparer preparer = KAFKA_TESTER.getKafkaClient().getPublisher(KafkaPublisher.Ack.LEADER_RECEIVED, Compression.NONE).prepare(topic);
    LoggingEventSerializer serializer = new LoggingEventSerializer();
    for (ILoggingEvent event : events) {
        preparer.add(ByteBuffer.wrap(serializer.toBytes(new LogMessage(event, context))), context.getLogPartition());
    }
    preparer.send();
}
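The events passed to publishLog are plain logback objects. A hedged sketch of how a test might construct them, using logback's LoggingEvent constructor with an arbitrary logger name and messages (not taken from the CDAP test itself), is:

import java.util.ArrayList;
import java.util.List;

import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.Logger;
import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.classic.spi.LoggingEvent;

// Sketch of building ILoggingEvent instances for a test; the logger name and
// messages are illustrative examples only.
public class LogEventFactory {
    public static List<ILoggingEvent> createEvents(int count) {
        LoggerContext loggerContext = new LoggerContext();
        Logger logger = loggerContext.getLogger("test.logger");
        List<ILoggingEvent> events = new ArrayList<>();
        for (int i = 0; i < count; i++) {
            // The timestamp is set by the LoggingEvent constructor itself.
            events.add(new LoggingEvent(Logger.class.getName(), logger, Level.INFO,
                                        "Test message " + i, null, null));
        }
        return events;
    }
}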