use of ch.qos.logback.classic.spi.ILoggingEvent in project drools-wb by kiegroup.
the class IndexDslInvalidDrlTest method testIndexDslInvalidDrl.
@Test
public void testIndexDslInvalidDrl() throws IOException, InterruptedException {
    // Setup logging
    final Logger root = (Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME);
    final Appender<ILoggingEvent> mockAppender = mock(Appender.class);
    when(mockAppender.getName()).thenReturn("MOCK");
    root.addAppender(mockAppender);
    // Add test files
    final Path path = basePath.resolve("bz1269366.dsl");
    final String dsl = loadText("bz1269366.dsl");
    ioService().write(path, dsl);
    // wait for events to be consumed from jgit -> (notify changes -> watcher -> index) -> lucene index
    Thread.sleep(5000);
    List<String> index = Arrays.asList(KObjectUtil.toKCluster(basePath.getFileSystem()).getClusterId());
    {
        final Query query = new SingleTermQueryBuilder(new ValueReferenceIndexTerm("org.drools.workbench.screens.dsltext.backend.server.indexing.classes.Applicant", ResourceType.JAVA)).build();
        searchFor(index, query, 0);
        verify(mockAppender).doAppend(argThat(new ArgumentMatcher<ILoggingEvent>() {

            @Override
            public boolean matches(final Object argument) {
                return ((ILoggingEvent) argument).getMessage().startsWith("Unable to parse DRL");
            }
        }));
    }
}
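With Mockito 2 or later (an assumption; the test above is written against the older raw-Object ArgumentMatcher API), the same verification can be expressed as a typed lambda. A minimal sketch:

// Sketch assuming Mockito 2+: ArgumentMatcher is generic, so the anonymous class
// above can be replaced with a lambda over ILoggingEvent.
verify(mockAppender).doAppend(argThat((ILoggingEvent event) ->
        event.getMessage().startsWith("Unable to parse DRL")));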
use of ch.qos.logback.classic.spi.ILoggingEvent in project drools-wb by kiegroup.
the class IndexRuleInvalidDrlTest method testIndexRuleInvalidDrl.
@Test
public void testIndexRuleInvalidDrl() throws IOException, InterruptedException {
    // Setup logging
    final Logger root = (Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME);
    final Appender<ILoggingEvent> mockAppender = mock(Appender.class);
    when(mockAppender.getName()).thenReturn("MOCK");
    root.addAppender(mockAppender);
    // Add test file
    final Path path = basePath.resolve("bz1269366.tdrl");
    final String drl = loadText("bz1269366.tdrl");
    ioService().write(path, drl);
    // wait for events to be consumed from jgit -> (notify changes -> watcher -> index) -> lucene index
    Thread.sleep(5000);
    List<String> index = Arrays.asList(KObjectUtil.toKCluster(basePath.getFileSystem()).getClusterId());
    {
        final Query query = new SingleTermQueryBuilder(new ValueResourceIndexTerm("*myRule", ResourceType.RULE, TermSearchType.WILDCARD)).build();
        searchFor(index, query, 0);
        verify(mockAppender).doAppend(argThat(new ArgumentMatcher<ILoggingEvent>() {

            @Override
            public boolean matches(final Object argument) {
                return ((ILoggingEvent) argument).getMessage().startsWith("Unable to parse DRL");
            }
        }));
    }
}
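An alternative to mocking the Appender in tests like the two above is logback's own ListAppender, which records every ILoggingEvent in memory. The following stand-alone sketch (not code from drools-wb, and assuming logback-classic is the bound SLF4J implementation) shows that approach:

import ch.qos.logback.classic.Logger;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.read.ListAppender;
import org.slf4j.LoggerFactory;

public class ListAppenderSketch {

    public static void main(String[] args) {
        // Attach an in-memory appender to the root logger.
        Logger root = (Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME);
        ListAppender<ILoggingEvent> listAppender = new ListAppender<>();
        listAppender.start();
        root.addAppender(listAppender);

        // Anything logged through SLF4J/logback is now captured in listAppender.list.
        LoggerFactory.getLogger(ListAppenderSketch.class).error("Unable to parse DRL: example");

        boolean found = listAppender.list.stream()
                .anyMatch(event -> event.getFormattedMessage().startsWith("Unable to parse DRL"));
        System.out.println("Matching log event captured: " + found);
    }
}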
use of ch.qos.logback.classic.spi.ILoggingEvent in project dubbo by alibaba.
the class LogbackContainer method doInitializer.
/**
 * Initializes logback.
 *
 * @param file       log file path
 * @param level      root logger level
 * @param maxHistory maximum number of archived log files to keep
 */
private void doInitializer(String file, String level, int maxHistory) {
    LoggerContext loggerContext = (LoggerContext) LoggerFactory.getILoggerFactory();
    Logger rootLogger = loggerContext.getLogger(Logger.ROOT_LOGGER_NAME);
    rootLogger.detachAndStopAllAppenders();
    // appender
    RollingFileAppender<ILoggingEvent> fileAppender = new RollingFileAppender<ILoggingEvent>();
    fileAppender.setContext(loggerContext);
    fileAppender.setName("application");
    fileAppender.setFile(file);
    fileAppender.setAppend(true);
    // policy
    TimeBasedRollingPolicy<ILoggingEvent> policy = new TimeBasedRollingPolicy<ILoggingEvent>();
    policy.setContext(loggerContext);
    policy.setMaxHistory(maxHistory);
    policy.setFileNamePattern(file + ".%d{yyyy-MM-dd}");
    policy.setParent(fileAppender);
    policy.start();
    fileAppender.setRollingPolicy(policy);
    // encoder
    PatternLayoutEncoder encoder = new PatternLayoutEncoder();
    encoder.setContext(loggerContext);
    encoder.setPattern("%date [%thread] %-5level %logger (%file:%line\\) - %msg%n");
    encoder.start();
    fileAppender.setEncoder(encoder);
    fileAppender.start();
    rootLogger.addAppender(fileAppender);
    rootLogger.setLevel(Level.toLevel(level));
    rootLogger.setAdditive(false);
}
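For comparison, a ConsoleAppender can be wired up programmatically in the same style. This is a minimal sketch under the assumption that logback-classic is on the classpath; it is not code from the dubbo project:

import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.Logger;
import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.classic.encoder.PatternLayoutEncoder;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.ConsoleAppender;
import org.slf4j.LoggerFactory;

public class ConsoleLogbackSketch {

    public static void main(String[] args) {
        LoggerContext loggerContext = (LoggerContext) LoggerFactory.getILoggerFactory();
        Logger rootLogger = loggerContext.getLogger(Logger.ROOT_LOGGER_NAME);
        rootLogger.detachAndStopAllAppenders();

        // Encoder: same pattern idea as above, minus the file/line escaping.
        PatternLayoutEncoder encoder = new PatternLayoutEncoder();
        encoder.setContext(loggerContext);
        encoder.setPattern("%date [%thread] %-5level %logger - %msg%n");
        encoder.start();

        // Console appender instead of a rolling file appender.
        ConsoleAppender<ILoggingEvent> consoleAppender = new ConsoleAppender<>();
        consoleAppender.setContext(loggerContext);
        consoleAppender.setName("console");
        consoleAppender.setEncoder(encoder);
        consoleAppender.start();

        rootLogger.addAppender(consoleAppender);
        rootLogger.setLevel(Level.INFO);

        LoggerFactory.getLogger(ConsoleLogbackSketch.class).info("logback configured programmatically");
    }
}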
use of ch.qos.logback.classic.spi.ILoggingEvent in project cdap by caskdata.
the class KafkaLogProcessorPipeline method processMessages.
/**
* Process messages fetched from a given partition.
*/
private boolean processMessages(String topic, int partition, Future<Iterable<MessageAndOffset>> future) throws InterruptedException, KafkaException, IOException {
    Iterable<MessageAndOffset> messages;
    try {
        messages = future.get();
    } catch (ExecutionException e) {
        try {
            throw e.getCause();
        } catch (OffsetOutOfRangeException cause) {
            // This shouldn't happen under normal circumstances.
            // If it does, it is usually caused by a race between Kafka log rotation and the fetch here,
            // hence simply fetching from the beginning again should be fine.
            offsets.put(partition, getLastOffset(partition, kafka.api.OffsetRequest.EarliestTime()));
            return false;
        } catch (KafkaException | IOException cause) {
            throw cause;
        } catch (Throwable t) {
            // For other types of exceptions, just throw an IOException. It will be handled by the caller.
            throw new IOException(t);
        }
    }
    boolean processed = false;
    for (MessageAndOffset message : messages) {
        if (eventQueue.getEventSize() >= config.getMaxBufferSize()) {
            // Log a message. If this happens too often, it indicates that more memory is needed for log processing.
            OUTAGE_LOG.info("Maximum queue size {} reached for pipeline {}.", config.getMaxBufferSize(), name);
            // If nothing has been appended (due to an error), break the loop so that no new event is appended.
            // Since the offset is not updated, the same set of messages will be fetched again in the next iteration.
            int eventsAppended = appendEvents(System.currentTimeMillis(), true);
            if (eventsAppended <= 0) {
                break;
            }
            unSyncedEvents += eventsAppended;
        }
        try {
            metricsContext.increment("kafka.bytes.read", message.message().payloadSize());
            ILoggingEvent loggingEvent = serializer.fromBytes(message.message().payload());
            // Use the message payload size as the size estimate of the logging event.
            // Although it's not the same as the in-memory object size, it should differ only by a constant factor,
            // hence it is proportional to the actual object size.
            eventQueue.add(loggingEvent, loggingEvent.getTimeStamp(), message.message().payloadSize(), partition, new OffsetTime(message.nextOffset(), loggingEvent.getTimeStamp()));
        } catch (IOException e) {
            // This shouldn't happen. In case it does (e.g. someone published garbage), just skip the message.
            LOG.trace("Fail to decode logging event from {}:{} at offset {}. Skipping it.", topic, partition, message.offset(), e);
        }
        processed = true;
        offsets.put(partition, message.nextOffset());
    }
    return processed;
}
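The catch block above uses the "throw e.getCause()" trick so that each cause type can be handled by its own catch clause. A more conventional, generic version of the same unwrap-and-rethrow idiom, independent of the CDAP and Kafka classes, looks like this sketch:

import java.io.IOException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

public final class FutureUnwrap {

    // Waits for the future and rethrows the cause as an IOException (or wraps it in one).
    static <T> T getUnwrapped(Future<T> future) throws IOException, InterruptedException {
        try {
            return future.get();
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            if (cause instanceof IOException) {
                // Known checked cause: rethrow as-is.
                throw (IOException) cause;
            }
            // Anything else is wrapped, mirroring the 'throw new IOException(t)' branch above.
            throw new IOException(cause);
        }
    }
}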
use of ch.qos.logback.classic.spi.ILoggingEvent in project cdap by caskdata.
the class KafkaLogProcessorPipeline method appendEvents.
/**
 * Appends buffered events to the appender. If the {@code forced} parameter is {@code false}, buffered events
 * that are older than the buffer milliseconds will be appended and removed from the buffer.
 * If {@code forced} is {@code true}, then at least {@code maxQueueSize * MIN_FREE_FACTOR} events will be appended
 * and removed, regardless of the event time.
 *
 * @return number of events appended to the appender
 */
private int appendEvents(long currentTimeMillis, boolean forced) {
    long minEventTime = currentTimeMillis - config.getEventDelayMillis();
    long maxRetainSize = forced ? (long) (config.getMaxBufferSize() * MIN_FREE_FACTOR) : Long.MAX_VALUE;
    TimeEventQueue.EventIterator<ILoggingEvent, OffsetTime> iterator = eventQueue.iterator();
    int eventsAppended = 0;
    long minDelay = Long.MAX_VALUE;
    long maxDelay = -1;
    while (iterator.hasNext()) {
        ILoggingEvent event = iterator.next();
        // If the queue is within the retain size and the event is still within the buffering time,
        // there is no need to iterate any further.
        if (eventQueue.getEventSize() <= maxRetainSize && event.getTimeStamp() >= minEventTime) {
            break;
        }
        // update delay
        long delay = System.currentTimeMillis() - event.getTimeStamp();
        minDelay = delay < minDelay ? delay : minDelay;
        maxDelay = delay > maxDelay ? delay : maxDelay;
        try {
            // Otherwise, append the event
            ch.qos.logback.classic.Logger effectiveLogger = context.getEffectiveLogger(event.getLoggerName());
            if (event.getLevel().isGreaterOrEqual(effectiveLogger.getEffectiveLevel())) {
                effectiveLogger.callAppenders(event);
            }
        } catch (Exception e) {
            OUTAGE_LOG.warn("Failed to append log event in pipeline {}. Will be retried.", name, e);
            break;
        }
        // Update the Kafka offset before removing the current event
        int partition = iterator.getPartition();
        MutableCheckpoint checkpoint = checkpoints.get(partition);
        // Get the smallest offset and corresponding timestamp from the event queue
        OffsetTime offsetTime = eventQueue.getSmallestOffset(partition);
        if (checkpoint == null) {
            checkpoint = new MutableCheckpoint(offsetTime.getOffset(), offsetTime.getEventTime(), event.getTimeStamp());
            checkpoints.put(partition, checkpoint);
        } else {
            checkpoint.setNextOffset(offsetTime.getOffset()).setNextEvenTime(offsetTime.getEventTime()).setMaxEventTime(event.getTimeStamp());
        }
        iterator.remove();
        eventsAppended++;
    }
    // For each partition, if there are no more events in the event queue, update the checkpoint nextOffset
    for (Int2LongMap.Entry entry : offsets.int2LongEntrySet()) {
        int partition = entry.getIntKey();
        if (eventQueue.isEmpty(partition)) {
            MutableCheckpoint checkpoint = checkpoints.get(partition);
            long offset = entry.getLongValue();
            // An empty queue means everything before the processed offset must have been written to the appender.
            if (checkpoint != null && offset > checkpoint.getNextOffset()) {
                checkpoint.setNextOffset(offset);
            }
        }
    }
    if (eventsAppended > 0) {
        // events were appended from the iterator
        metricsContext.gauge(Constants.Metrics.Name.Log.PROCESS_MIN_DELAY, minDelay);
        metricsContext.gauge(Constants.Metrics.Name.Log.PROCESS_MAX_DELAY, maxDelay);
        metricsContext.increment(Constants.Metrics.Name.Log.PROCESS_MESSAGES_COUNT, eventsAppended);
    }
    // Failure to flush is ok and it will be retried by the wrapped appender
    try {
        metricsContext.gauge("event.queue.size.bytes", eventQueue.getEventSize());
        context.flush();
    } catch (IOException e) {
        OUTAGE_LOG.warn("Failed to flush in pipeline {}. Will be retried.", name, e);
    }
    return eventsAppended;
}
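The level check and callAppenders dispatch above rely only on public logback API. The following stand-alone sketch (not CDAP code) builds a LoggingEvent by hand, much as a deserializer would after reading it from a queue, and replays it through a logger with the same effective-level filter:

import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.Logger;
import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.classic.spi.LoggingEvent;
import org.slf4j.LoggerFactory;

public class ReplayEventSketch {

    public static void main(String[] args) {
        LoggerContext context = (LoggerContext) LoggerFactory.getILoggerFactory();
        Logger logger = context.getLogger("replay.example");

        // Build a logging event manually instead of going through logger.warn(...).
        ILoggingEvent event = new LoggingEvent(
                Logger.class.getName(), logger, Level.WARN, "replayed message", null, null);

        // Same filtering as in appendEvents: only dispatch if the event level passes
        // the logger's effective level, then hand it to all attached appenders.
        if (event.getLevel().isGreaterOrEqual(logger.getEffectiveLevel())) {
            logger.callAppenders(event);
        }
    }
}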