Example 1 with LoggingEvent

Use of org.apache.log4j.spi.LoggingEvent in the project hadoop by apache.

From the class TestReplicationPolicy, method testChooseTargetWithMoreThanAvailableNodes:

/**
   * In this test case we try to choose more targets than there are available
   * nodes and check the result.
   * @throws Exception
   */
@Test
public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
    // make data nodes 0 and 1 unqualified to be chosen: not enough disk space
    for (int i = 0; i < 2; i++) {
        updateHeartbeatWithUsage(dataNodes[i], 2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
    }
    final LogVerificationAppender appender = new LogVerificationAppender();
    final Logger logger = Logger.getRootLogger();
    logger.addAppender(appender);
    // try to choose dataNodes.length targets, which is more than the number
    // of actually available nodes
    DatanodeStorageInfo[] targets = chooseTarget(dataNodes.length);
    assertEquals(targets.length, dataNodes.length - 2);
    final List<LoggingEvent> log = appender.getLog();
    assertNotNull(log);
    assertFalse(log.size() == 0);
    final LoggingEvent lastLogEntry = log.get(log.size() - 1);
    assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
    // Supposed to place replicas on each node, but two data nodes are not
    // available for placing replicas, so here we expect a shortfall of 2
    assertTrue(((String) lastLogEntry.getMessage()).contains("in need of 2"));
    resetHeartbeatForStorages();
}
Also used : LoggingEvent(org.apache.log4j.spi.LoggingEvent) LogVerificationAppender(org.apache.hadoop.hdfs.LogVerificationAppender) Logger(org.apache.log4j.Logger) Test(org.junit.Test)
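The test captures warnings through Hadoop's LogVerificationAppender test helper. As a rough sketch (not Hadoop's actual implementation), an event-capturing appender built only on the stock log4j 1.x AppenderSkeleton API could look like this:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.spi.LoggingEvent;

// Hypothetical stand-in for LogVerificationAppender: collects every LoggingEvent
// routed to it so a test can later inspect levels and messages.
public class CapturingAppender extends AppenderSkeleton {

    private final List<LoggingEvent> events = new ArrayList<LoggingEvent>();

    @Override
    protected void append(LoggingEvent event) {
        events.add(event);
    }

    public List<LoggingEvent> getLog() {
        return Collections.unmodifiableList(events);
    }

    @Override
    public void close() {
        // nothing to release
    }

    @Override
    public boolean requiresLayout() {
        return false;
    }
}

Hadoop's real helper works along the same lines: the test attaches it to the root logger, runs the code under test, and then asserts on the captured events.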

Example 2 with LoggingEvent

Use of org.apache.log4j.spi.LoggingEvent in the project hadoop by apache.

From the class TestEditLog, method testReadActivelyUpdatedLog:

/**
   * Tests that reading an actively updated (in-progress) edit log does not
   * read past the last synced edit log op.
   * @throws Exception
   */
@Test
public void testReadActivelyUpdatedLog() throws Exception {
    final TestAppender appender = new TestAppender();
    LogManager.getRootLogger().addAppender(appender);
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    // Set single handler thread, so all transactions hit same thread-local ops.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY, 1);
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        FSImage fsimage = cluster.getNamesystem().getFSImage();
        StorageDirectory sd = fsimage.getStorage().getStorageDir(0);
        final DistributedFileSystem fileSys = cluster.getFileSystem();
        DFSInotifyEventInputStream events = fileSys.getInotifyEventStream();
        fileSys.mkdirs(new Path("/test"));
        fileSys.mkdirs(new Path("/test/dir1"));
        fileSys.delete(new Path("/test/dir1"), true);
        fsimage.getEditLog().logSync();
        fileSys.mkdirs(new Path("/test/dir2"));
        final File inProgressEdit = NNStorage.getInProgressEditsFile(sd, 1);
        assertTrue(inProgressEdit.exists());
        EditLogFileInputStream elis = new EditLogFileInputStream(inProgressEdit);
        FSEditLogOp op;
        long pos = 0;
        while (true) {
            op = elis.readOp();
            if (op != null && op.opCode != FSEditLogOpCodes.OP_INVALID) {
                pos = elis.getPosition();
            } else {
                break;
            }
        }
        elis.close();
        assertTrue(pos > 0);
        RandomAccessFile rwf = new RandomAccessFile(inProgressEdit, "rw");
        rwf.seek(pos);
        assertEquals(rwf.readByte(), (byte) -1);
        rwf.seek(pos + 1);
        rwf.writeByte(2);
        rwf.close();
        events.poll();
        String pattern = "Caught exception after reading (.*) ops";
        Pattern r = Pattern.compile(pattern);
        final List<LoggingEvent> log = appender.getLog();
        for (LoggingEvent event : log) {
            Matcher m = r.matcher(event.getRenderedMessage());
            if (m.find()) {
                fail("Should not try to read past latest syned edit log op");
            }
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
        LogManager.getRootLogger().removeAppender(appender);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Pattern(java.util.regex.Pattern) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Matcher(java.util.regex.Matcher) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) LoggingEvent(org.apache.log4j.spi.LoggingEvent) RandomAccessFile(java.io.RandomAccessFile) DFSInotifyEventInputStream(org.apache.hadoop.hdfs.DFSInotifyEventInputStream) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) Test(org.junit.Test)
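The final loop scans every captured LoggingEvent for a rendered message matching the regex. A small hypothetical helper (not part of Hadoop) that factors out that scan, using only the APIs already imported above:

import java.util.List;
import java.util.regex.Pattern;

import org.apache.log4j.spi.LoggingEvent;

final class LogAsserts {

    // Returns true if any captured event's rendered message matches the regex.
    static boolean anyMessageMatches(List<LoggingEvent> events, String regex) {
        Pattern pattern = Pattern.compile(regex);
        for (LoggingEvent event : events) {
            if (pattern.matcher(event.getRenderedMessage()).find()) {
                return true;
            }
        }
        return false;
    }

    private LogAsserts() {
    }
}

With such a helper, the test's closing loop collapses to a single assertFalse(LogAsserts.anyMessageMatches(appender.getLog(), pattern)).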

Example 3 with LoggingEvent

Use of org.apache.log4j.spi.LoggingEvent in the project hadoop by apache.

From the class TestTaskLogAppender, method testTaskLogAppender:

/**
 * Tests TaskLogAppender: configuration via system properties and appending a
 * LoggingEvent through a custom writer.
 */
@SuppressWarnings("deprecation")
@Test(timeout = 5000)
public void testTaskLogAppender() {
    TaskLogAppender appender = new TaskLogAppender();
    System.setProperty(TaskLogAppender.TASKID_PROPERTY, "attempt_01_02_m03_04_001");
    System.setProperty(TaskLogAppender.LOGSIZE_PROPERTY, "1003");
    appender.activateOptions();
    assertEquals(appender.getTaskId(), "attempt_01_02_m03_04_001");
    assertEquals(appender.getTotalLogFileSize(), 1000);
    assertEquals(appender.getIsCleanup(), false);
    // test writer   
    Writer writer = new StringWriter();
    appender.setWriter(writer);
    Layout layout = new PatternLayout("%-5p [%t]: %m%n");
    appender.setLayout(layout);
    Category logger = Logger.getLogger(getClass().getName());
    LoggingEvent event = new LoggingEvent("fqnOfCategoryClass", logger, Priority.INFO, "message", new Throwable());
    appender.append(event);
    appender.flush();
    appender.close();
    assertTrue(writer.toString().length() > 0);
    // test that the cleanup flag is not changed by activateOptions()
    appender = new TaskLogAppender();
    appender.setIsCleanup(true);
    appender.activateOptions();
    assertEquals(appender.getIsCleanup(), true);
}
Also used : LoggingEvent(org.apache.log4j.spi.LoggingEvent) Category(org.apache.log4j.Category) StringWriter(java.io.StringWriter) Layout(org.apache.log4j.Layout) PatternLayout(org.apache.log4j.PatternLayout) PatternLayout(org.apache.log4j.PatternLayout) StringWriter(java.io.StringWriter) Writer(java.io.Writer) Test(org.junit.Test)
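The construct-and-append pattern in this test does not depend on the Hadoop-specific TaskLogAppender. A minimal sketch of the same idea against a stock log4j WriterAppender (the class name and message are made up for illustration):

import java.io.StringWriter;

import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.Priority;
import org.apache.log4j.WriterAppender;
import org.apache.log4j.spi.LoggingEvent;

public class DirectAppendExample {
    public static void main(String[] args) {
        StringWriter out = new StringWriter();
        WriterAppender appender = new WriterAppender(new PatternLayout("%-5p [%t]: %m%n"), out);
        Logger logger = Logger.getLogger(DirectAppendExample.class);

        // Build a LoggingEvent by hand, exactly as TestTaskLogAppender does,
        // and push it through the appender via the public doAppend entry point.
        LoggingEvent event = new LoggingEvent("fqnOfCategoryClass", logger, Priority.INFO, "message", null);
        appender.doAppend(event);
        appender.close();

        // Prints something like: INFO  [main]: message
        System.out.print(out.toString());
    }
}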

Example 4 with LoggingEvent

Use of org.apache.log4j.spi.LoggingEvent in the project commons by twitter.

From the class JULBridgeHandler, method toLoggingEvent:

/**
   * Converts a JUL log record into a Log4J logging event.
   *
   * @param record the JUL log record to convert
   * @param logger the Log4J logger to use for the logging event
   * @param level the Log4J level to use for the logging event
   * @param useExtendedLocationInfo if false, do not try to get source file and line information
   * @return a Log4J logging event
   */
static LoggingEvent toLoggingEvent(LogRecord record, Logger logger, Level level, boolean useExtendedLocationInfo) {
    LocationInfo locationInfo = useExtendedLocationInfo ? new LocationInfo(new Throwable(), record.getSourceClassName()) : new LocationInfo("?", record.getSourceClassName(), record.getSourceMethodName(), "?");
    // Getting the thread name from the thread id is not straightforward, so use the id as the name.
    String threadName = String.valueOf(record.getThreadID());
    ThrowableInformation throwableInformation = record.getThrown() == null ? null : new ThrowableInformation(record.getThrown());
    return new LoggingEvent(record.getSourceClassName(), logger, record.getMillis(), level, formatMessage(record), threadName, throwableInformation, null, /* ndc */
    locationInfo, null);
}
Also used : LoggingEvent(org.apache.log4j.spi.LoggingEvent) ThrowableInformation(org.apache.log4j.spi.ThrowableInformation) LocationInfo(org.apache.log4j.spi.LocationInfo)
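A hedged usage sketch of the conversion (assuming the call is made from the same package, since toLoggingEvent is package-private, and with an illustrative logger name and message): build a JUL LogRecord the way java.util.logging would, convert it, and hand the event to the matching Log4J logger.

import java.util.logging.LogRecord;

import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.spi.LoggingEvent;

public class ToLoggingEventExample {
    public static void main(String[] args) {
        // Hypothetical JUL record; the names and message are illustrative only.
        LogRecord record = new LogRecord(java.util.logging.Level.WARNING, "disk almost full");
        record.setLoggerName("com.example.Storage");
        record.setSourceClassName("com.example.Storage");
        record.setSourceMethodName("checkCapacity");

        Logger log4jLogger = Logger.getLogger(record.getLoggerName());
        LoggingEvent event = JULBridgeHandler.toLoggingEvent(record, log4jLogger, Level.WARN, false);
        log4jLogger.callAppenders(event);
    }
}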

Example 5 with LoggingEvent

Use of org.apache.log4j.spi.LoggingEvent in the project commons by twitter.

From the class JULBridgeHandler, method publish:

/**
   * Publishes the log record to a Log4J logger of the same name.
   *
   * Before formatting the message, the level is converted, and the message is discarded if the
   * Log4J logger is not enabled for that level.
   *
   * @param record the record to publish
   */
@Override
public void publish(@Nullable LogRecord record) {
    // Silently ignore null records
    if (record == null) {
        return;
    }
    Logger log4jLogger = getLogger(record);
    Level log4jLevel = JULBridgeLevelConverter.toLog4jLevel(record.getLevel());
    if (log4jLogger.isEnabledFor(log4jLevel)) {
        LoggingEvent event = toLoggingEvent(record, log4jLogger, log4jLevel, useExtendedLocationInfo);
        log4jLogger.callAppenders(event);
    }
}
Also used : LoggingEvent(org.apache.log4j.spi.LoggingEvent) Level(org.apache.log4j.Level) Logger(org.apache.log4j.Logger)
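To route every java.util.logging call site through this handler, the bridge is typically attached to the JUL root logger. A rough sketch of that wiring (assuming JULBridgeHandler exposes a no-argument constructor):

import java.util.logging.Handler;
import java.util.logging.LogManager;
import java.util.logging.Logger;

public class InstallBridgeExample {
    public static void main(String[] args) {
        // The JUL root logger is registered under the empty string.
        Logger julRoot = LogManager.getLogManager().getLogger("");

        // Drop the default console handler so records are not printed twice.
        for (Handler existing : julRoot.getHandlers()) {
            julRoot.removeHandler(existing);
        }

        // Assumed no-arg constructor; every JUL record now reaches publish() above.
        julRoot.addHandler(new JULBridgeHandler());
    }
}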

Aggregations

LoggingEvent (org.apache.log4j.spi.LoggingEvent) 48
Test (org.junit.Test) 21
Logger (org.apache.log4j.Logger) 12
Pattern (java.util.regex.Pattern) 7
Matcher (java.util.regex.Matcher) 6
ByteArrayOutputStream (org.apache.commons.io.output.ByteArrayOutputStream) 6
AppenderSkeleton (org.apache.log4j.AppenderSkeleton) 3
LoggingException (com.axway.ats.log.autodb.exceptions.LoggingException) 2
AbstractLoggingEvent (com.axway.ats.log.autodb.model.AbstractLoggingEvent) 2
IOException (java.io.IOException) 2
Appender (org.apache.log4j.Appender) 2
Category (org.apache.log4j.Category) 2
Level (org.apache.log4j.Level) 2
Filter (org.apache.log4j.spi.Filter) 2
LocationInfo (org.apache.log4j.spi.LocationInfo) 2
InsertMessageEvent (com.axway.ats.log.autodb.events.InsertMessageEvent) 1
CacheableEvent (com.axway.ats.log.autodb.model.CacheableEvent) 1
SystemLogLevel (com.axway.ats.log.model.SystemLogLevel) 1
CurrentUser (com.google.gerrit.server.CurrentUser) 1
IdentifiedUser (com.google.gerrit.server.IdentifiedUser) 1