
Example 56 with ILoggingEvent

use of ch.qos.logback.classic.spi.ILoggingEvent in project drools-wb by kiegroup.

Class IndexDslInvalidDrlTest, method testIndexDslInvalidDrl.

@Test
public void testIndexDslInvalidDrl() throws IOException, InterruptedException {
    // Setup logging
    final Logger root = (Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME);
    final Appender<ILoggingEvent> mockAppender = mock(Appender.class);
    when(mockAppender.getName()).thenReturn("MOCK");
    root.addAppender(mockAppender);
    // Add test files
    final Path path = basePath.resolve("bz1269366.dsl");
    final String dsl = loadText("bz1269366.dsl");
    ioService().write(path, dsl);
    // wait for events to be consumed from jgit -> (notify changes -> watcher -> index) -> lucene index
    Thread.sleep(5000);
    List<String> index = Arrays.asList(KObjectUtil.toKCluster(basePath.getFileSystem()).getClusterId());
    {
        final Query query = new SingleTermQueryBuilder(new ValueReferenceIndexTerm("org.drools.workbench.screens.dsltext.backend.server.indexing.classes.Applicant", ResourceType.JAVA)).build();
        searchFor(index, query, 0);
        verify(mockAppender).doAppend(argThat(new ArgumentMatcher<ILoggingEvent>() {

            @Override
            public boolean matches(final Object argument) {
                return ((ILoggingEvent) argument).getMessage().startsWith("Unable to parse DRL");
            }
        }));
    }
}
Also used : Path(org.uberfire.java.nio.file.Path) Query(org.apache.lucene.search.Query) SingleTermQueryBuilder(org.kie.workbench.common.services.refactoring.backend.server.query.builder.SingleTermQueryBuilder) ValueReferenceIndexTerm(org.kie.workbench.common.services.refactoring.model.index.terms.valueterms.ValueReferenceIndexTerm) Logger(ch.qos.logback.classic.Logger) ILoggingEvent(ch.qos.logback.classic.spi.ILoggingEvent) Test(org.junit.Test) BaseIndexingTest(org.kie.workbench.common.services.refactoring.backend.server.BaseIndexingTest)
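
A less verbose alternative to the anonymous ArgumentMatcher above is Mockito's ArgumentCaptor, which collects every ILoggingEvent delivered to the mocked appender so it can be inspected directly. A minimal sketch, not part of the original test; it assumes org.mockito.ArgumentCaptor, Mockito.atLeastOnce and org.junit.Assert.assertTrue are imported alongside the mocks already used:

// Capture all events appended to the mock and look for the expected message.
final ArgumentCaptor<ILoggingEvent> captor = ArgumentCaptor.forClass(ILoggingEvent.class);
verify(mockAppender, atLeastOnce()).doAppend(captor.capture());
final boolean parseErrorLogged = captor.getAllValues().stream()
        .anyMatch(event -> event.getMessage().startsWith("Unable to parse DRL"));
assertTrue("Expected an 'Unable to parse DRL' log event", parseErrorLogged);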

Example 57 with ILoggingEvent

use of ch.qos.logback.classic.spi.ILoggingEvent in project drools-wb by kiegroup.

Class IndexRuleInvalidDrlTest, method testIndexRuleInvalidDrl.

@Test
public void testIndexRuleInvalidDrl() throws IOException, InterruptedException {
    // Setup logging
    final Logger root = (Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME);
    final Appender<ILoggingEvent> mockAppender = mock(Appender.class);
    when(mockAppender.getName()).thenReturn("MOCK");
    root.addAppender(mockAppender);
    // Add test file
    final Path path = basePath.resolve("bz1269366.tdrl");
    final String drl = loadText("bz1269366.tdrl");
    ioService().write(path, drl);
    // wait for events to be consumed from jgit -> (notify changes -> watcher -> index) -> lucene index
    Thread.sleep(5000);
    List<String> index = Arrays.asList(KObjectUtil.toKCluster(basePath.getFileSystem()).getClusterId());
    {
        final Query query = new SingleTermQueryBuilder(new ValueResourceIndexTerm("*myRule", ResourceType.RULE, TermSearchType.WILDCARD)).build();
        searchFor(index, query, 0);
        verify(mockAppender).doAppend(argThat(new ArgumentMatcher<ILoggingEvent>() {

            @Override
            public boolean matches(final Object argument) {
                return ((ILoggingEvent) argument).getMessage().startsWith("Unable to parse DRL");
            }
        }));
    }
}
Also used : Path(org.uberfire.java.nio.file.Path) ValueResourceIndexTerm(org.kie.workbench.common.services.refactoring.model.index.terms.valueterms.ValueResourceIndexTerm) Query(org.apache.lucene.search.Query) SingleTermQueryBuilder(org.kie.workbench.common.services.refactoring.backend.server.query.builder.SingleTermQueryBuilder) Logger(ch.qos.logback.classic.Logger) ILoggingEvent(ch.qos.logback.classic.spi.ILoggingEvent) BaseIndexingTest(org.kie.workbench.common.services.refactoring.backend.server.BaseIndexingTest) Test(org.junit.Test)

Example 58 with ILoggingEvent

use of ch.qos.logback.classic.spi.ILoggingEvent in project dubbo by alibaba.

Class LogbackContainer, method doInitializer.

/**
 * Initializes logback programmatically with a rolling file appender on the root logger.
 *
 * @param file       path of the log file
 * @param level      root logger level, e.g. "INFO"
 * @param maxHistory maximum number of days of rolled-over log files to keep
 */
private void doInitializer(String file, String level, int maxHistory) {
    LoggerContext loggerContext = (LoggerContext) LoggerFactory.getILoggerFactory();
    Logger rootLogger = loggerContext.getLogger(Logger.ROOT_LOGGER_NAME);
    rootLogger.detachAndStopAllAppenders();
    // appender
    RollingFileAppender<ILoggingEvent> fileAppender = new RollingFileAppender<ILoggingEvent>();
    fileAppender.setContext(loggerContext);
    fileAppender.setName("application");
    fileAppender.setFile(file);
    fileAppender.setAppend(true);
    // policy
    TimeBasedRollingPolicy<ILoggingEvent> policy = new TimeBasedRollingPolicy<ILoggingEvent>();
    policy.setContext(loggerContext);
    policy.setMaxHistory(maxHistory);
    policy.setFileNamePattern(file + ".%d{yyyy-MM-dd}");
    policy.setParent(fileAppender);
    policy.start();
    fileAppender.setRollingPolicy(policy);
    // encoder
    PatternLayoutEncoder encoder = new PatternLayoutEncoder();
    encoder.setContext(loggerContext);
    encoder.setPattern("%date [%thread] %-5level %logger (%file:%line\\) - %msg%n");
    encoder.start();
    fileAppender.setEncoder(encoder);
    fileAppender.start();
    rootLogger.addAppender(fileAppender);
    rootLogger.setLevel(Level.toLevel(level));
    rootLogger.setAdditive(false);
}
Also used : PatternLayoutEncoder(ch.qos.logback.classic.encoder.PatternLayoutEncoder) RollingFileAppender(ch.qos.logback.core.rolling.RollingFileAppender) Logger(ch.qos.logback.classic.Logger) ILoggingEvent(ch.qos.logback.classic.spi.ILoggingEvent) LoggerContext(ch.qos.logback.classic.LoggerContext) TimeBasedRollingPolicy(ch.qos.logback.core.rolling.TimeBasedRollingPolicy)
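
Because the rolling file appender is attached to the root logger, ordinary SLF4J loggers obtained after this initialization inherit it and honor the configured threshold. A minimal usage sketch, assuming the initializer has already run with an INFO level; the logger name and messages below are illustrative:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// After the container has configured logback (e.g. level "INFO"), ordinary SLF4J usage
// flows through the root logger's rolling file appender set up above.
Logger log = LoggerFactory.getLogger("com.example.Demo"); // illustrative logger name
log.debug("dropped: below the configured INFO threshold");
log.info("written to the rolling file and archived daily with the .yyyy-MM-dd suffix");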

Example 59 with ILoggingEvent

use of ch.qos.logback.classic.spi.ILoggingEvent in project cdap by caskdata.

Class KafkaLogProcessorPipeline, method processMessages.

/**
   * Process messages fetched from a given partition.
   */
private boolean processMessages(String topic, int partition, Future<Iterable<MessageAndOffset>> future) throws InterruptedException, KafkaException, IOException {
    Iterable<MessageAndOffset> messages;
    try {
        messages = future.get();
    } catch (ExecutionException e) {
        try {
            throw e.getCause();
        } catch (OffsetOutOfRangeException cause) {
            // This shouldn't happen under normal circumstances.
            // If it does happen, it is usually caused by a race between Kafka log rotation and the fetching here,
            // hence just fetching from the beginning again should be fine.
            offsets.put(partition, getLastOffset(partition, kafka.api.OffsetRequest.EarliestTime()));
            return false;
        } catch (KafkaException | IOException cause) {
            throw cause;
        } catch (Throwable t) {
            // For other types of exceptions, just throw an IOException. It will be handled by the caller.
            throw new IOException(t);
        }
    }
    boolean processed = false;
    for (MessageAndOffset message : messages) {
        if (eventQueue.getEventSize() >= config.getMaxBufferSize()) {
            // Log a message. If this happens too often, it indicates that more memory is needed for the log processing
            OUTAGE_LOG.info("Maximum queue size {} reached for pipeline {}.", config.getMaxBufferSize(), name);
            // If nothing was appended (due to an error), we break the loop so that no new events are added to the
            // already-full queue. Since the offset is not updated, the same set of messages will be fetched again
            // in the next iteration.
            int eventsAppended = appendEvents(System.currentTimeMillis(), true);
            if (eventsAppended <= 0) {
                break;
            }
            unSyncedEvents += eventsAppended;
        }
        try {
            metricsContext.increment("kafka.bytes.read", message.message().payloadSize());
            ILoggingEvent loggingEvent = serializer.fromBytes(message.message().payload());
            // Use the message payload size as the size estimate of the logging event.
            // Although it's not the same as the in-memory object size, it should differ only by a roughly constant
            // factor, hence it is proportional to the actual object size.
            eventQueue.add(loggingEvent, loggingEvent.getTimeStamp(), message.message().payloadSize(), partition, new OffsetTime(message.nextOffset(), loggingEvent.getTimeStamp()));
        } catch (IOException e) {
            // This shouldn't happen. In case it happens (e.g. someone published some garbage), just skip the message.
            LOG.trace("Fail to decode logging event from {}:{} at offset {}. Skipping it.", topic, partition, message.offset(), e);
        }
        processed = true;
        offsets.put(partition, message.nextOffset());
    }
    return processed;
}
Also used : MessageAndOffset(kafka.message.MessageAndOffset) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) OffsetOutOfRangeException(org.apache.kafka.common.errors.OffsetOutOfRangeException) ILoggingEvent(ch.qos.logback.classic.spi.ILoggingEvent) Checkpoint(co.cask.cdap.logging.meta.Checkpoint)
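
The serializer used by the pipeline is CDAP's own, but the round trip it performs (bytes to ILoggingEvent, with the timestamp and message then read off the event) can be illustrated with logback's Serializable event view, LoggingEventVO. A minimal sketch under that assumption; it is not the serializer used in the code above, and the logger name is illustrative:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;

import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.Logger;
import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.classic.spi.LoggingEvent;
import ch.qos.logback.classic.spi.LoggingEventVO;

static void roundTripExample() throws Exception {
    LoggerContext context = new LoggerContext();
    Logger logger = context.getLogger("demo"); // illustrative logger name
    ILoggingEvent original =
            new LoggingEvent(Logger.class.getName(), logger, Level.INFO, "hello {}", null, new Object[] { "world" });

    // Serialize the event via its Serializable value object...
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    ObjectOutputStream out = new ObjectOutputStream(bos);
    out.writeObject(LoggingEventVO.build(original));
    out.close();

    // ...and restore it, reading back the fields the pipeline relies on.
    ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()));
    ILoggingEvent restored = (ILoggingEvent) in.readObject();
    in.close();
    System.out.println(restored.getTimeStamp() + " " + restored.getFormattedMessage());
}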

Example 60 with ILoggingEvent

use of ch.qos.logback.classic.spi.ILoggingEvent in project cdap by caskdata.

Class KafkaLogProcessorPipeline, method appendEvents.

/**
   * Appends buffered events to the appender. If the {@code forced} parameter is {@code false}, only buffered events
   * that are older than the configured event delay are appended and removed from the buffer.
   * If {@code forced} is {@code true}, events are appended and removed regardless of event time, at least until the
   * buffer shrinks to {@code maxBufferSize * MIN_FREE_FACTOR}.
   *
   * @return number of events appended to the appender
   */
private int appendEvents(long currentTimeMillis, boolean forced) {
    long minEventTime = currentTimeMillis - config.getEventDelayMillis();
    long maxRetainSize = forced ? (long) (config.getMaxBufferSize() * MIN_FREE_FACTOR) : Long.MAX_VALUE;
    TimeEventQueue.EventIterator<ILoggingEvent, OffsetTime> iterator = eventQueue.iterator();
    int eventsAppended = 0;
    long minDelay = Long.MAX_VALUE;
    long maxDelay = -1;
    while (iterator.hasNext()) {
        ILoggingEvent event = iterator.next();
        // If the queue is back under the retain size and the event is still within the buffering delay,
        // there is no need to iterate any further.
        if (eventQueue.getEventSize() <= maxRetainSize && event.getTimeStamp() >= minEventTime) {
            break;
        }
        // update delay
        long delay = System.currentTimeMillis() - event.getTimeStamp();
        minDelay = delay < minDelay ? delay : minDelay;
        maxDelay = delay > maxDelay ? delay : maxDelay;
        try {
            // Append the event through its logger's appenders if its level is enabled for that logger
            ch.qos.logback.classic.Logger effectiveLogger = context.getEffectiveLogger(event.getLoggerName());
            if (event.getLevel().isGreaterOrEqual(effectiveLogger.getEffectiveLevel())) {
                effectiveLogger.callAppenders(event);
            }
        } catch (Exception e) {
            OUTAGE_LOG.warn("Failed to append log event in pipeline {}. Will be retried.", name, e);
            break;
        }
        // Updates the Kafka offset before removing the current event
        int partition = iterator.getPartition();
        MutableCheckpoint checkpoint = checkpoints.get(partition);
        // Get the smallest offset and corresponding timestamp from the event queue
        OffsetTime offsetTime = eventQueue.getSmallestOffset(partition);
        if (checkpoint == null) {
            checkpoint = new MutableCheckpoint(offsetTime.getOffset(), offsetTime.getEventTime(), event.getTimeStamp());
            checkpoints.put(partition, checkpoint);
        } else {
            checkpoint.setNextOffset(offsetTime.getOffset()).setNextEvenTime(offsetTime.getEventTime()).setMaxEventTime(event.getTimeStamp());
        }
        iterator.remove();
        eventsAppended++;
    }
    // For each partition, if there is no more event in the event queue, update the checkpoint nextOffset
    for (Int2LongMap.Entry entry : offsets.int2LongEntrySet()) {
        int partition = entry.getIntKey();
        if (eventQueue.isEmpty(partition)) {
            MutableCheckpoint checkpoint = checkpoints.get(partition);
            long offset = entry.getLongValue();
            // Since the event queue is empty for this partition, everything up to the fetched offset must have been
            // written to the appender, so the checkpoint can be advanced.
            if (checkpoint != null && offset > checkpoint.getNextOffset()) {
                checkpoint.setNextOffset(offset);
            }
        }
    }
    if (eventsAppended > 0) {
        // events were appended from iterator
        metricsContext.gauge(Constants.Metrics.Name.Log.PROCESS_MIN_DELAY, minDelay);
        metricsContext.gauge(Constants.Metrics.Name.Log.PROCESS_MAX_DELAY, maxDelay);
        metricsContext.increment(Constants.Metrics.Name.Log.PROCESS_MESSAGES_COUNT, eventsAppended);
    }
    // Failure to flush is ok and it will be retried by the wrapped appender
    try {
        metricsContext.gauge("event.queue.size.bytes", eventQueue.getEventSize());
        context.flush();
    } catch (IOException e) {
        OUTAGE_LOG.warn("Failed to flush in pipeline {}. Will be retried.", name, e);
    }
    return eventsAppended;
}
Also used : IOException(java.io.IOException) ILoggingEvent(ch.qos.logback.classic.spi.ILoggingEvent) Checkpoint(co.cask.cdap.logging.meta.Checkpoint) NotLeaderForPartitionException(org.apache.kafka.common.errors.NotLeaderForPartitionException) KafkaException(org.apache.kafka.common.KafkaException) LeaderNotAvailableException(org.apache.kafka.common.errors.LeaderNotAvailableException) UnknownServerException(org.apache.kafka.common.errors.UnknownServerException) OffsetOutOfRangeException(org.apache.kafka.common.errors.OffsetOutOfRangeException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) UnknownTopicOrPartitionException(org.apache.kafka.common.errors.UnknownTopicOrPartitionException) Int2LongMap(it.unimi.dsi.fastutil.ints.Int2LongMap) TimeEventQueue(co.cask.cdap.logging.pipeline.TimeEventQueue)
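
The per-event dispatch inside the loop above (comparing the event level to the logger's effective level, then invoking the appenders directly) uses plain logback APIs and can be exercised in isolation. A minimal sketch with an illustrative logger and console appender, not the pipeline's own context:

import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.Logger;
import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.classic.encoder.PatternLayoutEncoder;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.classic.spi.LoggingEvent;
import ch.qos.logback.core.ConsoleAppender;

static void dispatchExample() {
    LoggerContext context = new LoggerContext();
    Logger logger = context.getLogger("pipeline.demo"); // illustrative logger name
    logger.setLevel(Level.INFO);

    PatternLayoutEncoder encoder = new PatternLayoutEncoder();
    encoder.setContext(context);
    encoder.setPattern("%-5level %logger - %msg%n");
    encoder.start();

    ConsoleAppender<ILoggingEvent> appender = new ConsoleAppender<ILoggingEvent>();
    appender.setContext(context);
    appender.setEncoder(encoder);
    appender.start();
    logger.addAppender(appender);

    ILoggingEvent debugEvent =
            new LoggingEvent(Logger.class.getName(), logger, Level.DEBUG, "filtered out", null, null);
    ILoggingEvent infoEvent =
            new LoggingEvent(Logger.class.getName(), logger, Level.INFO, "appended", null, null);

    // Same check as in appendEvents: only events at or above the effective level reach the appenders.
    for (ILoggingEvent event : new ILoggingEvent[] { debugEvent, infoEvent }) {
        if (event.getLevel().isGreaterOrEqual(logger.getEffectiveLevel())) {
            logger.callAppenders(event);
        }
    }
}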

Aggregations

ILoggingEvent (ch.qos.logback.classic.spi.ILoggingEvent): 121
Test (org.junit.Test): 53
Logger (ch.qos.logback.classic.Logger): 44
LoggerContext (ch.qos.logback.classic.LoggerContext): 41
PatternLayoutEncoder (ch.qos.logback.classic.encoder.PatternLayoutEncoder): 23
FileAppender (ch.qos.logback.core.FileAppender): 21
AsyncLoggingEventAppenderFactory (io.dropwizard.logging.async.AsyncLoggingEventAppenderFactory): 18
File (java.io.File): 18
DropwizardLayoutFactory (io.dropwizard.logging.layout.DropwizardLayoutFactory): 17
Appender (ch.qos.logback.core.Appender): 15
ConsoleAppender (ch.qos.logback.core.ConsoleAppender): 14
RollingFileAppender (ch.qos.logback.core.rolling.RollingFileAppender): 12
AsyncAppender (ch.qos.logback.classic.AsyncAppender): 11
NullLevelFilterFactory (io.dropwizard.logging.filter.NullLevelFilterFactory): 10
ArrayList (java.util.ArrayList): 10
Logger (org.slf4j.Logger): 10
TimeBasedRollingPolicy (ch.qos.logback.core.rolling.TimeBasedRollingPolicy): 7
LogMessage (co.cask.cdap.logging.appender.LogMessage): 7
IOException (java.io.IOException): 6
Query (org.apache.lucene.search.Query): 6