Example 6 with Logger

use of org.apache.log4j.Logger in project hadoop by apache.

the class TestAuditLogs method setupCluster.

@Before
public void setupCluster() throws Exception {
    // must configure prior to instantiating the namesystem because it
    // will reconfigure the logger if async is enabled
    configureAuditLogs();
    conf = new HdfsConfiguration();
    final long precision = 1L;
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY, useAsyncLog);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_ASYNC_LOGGING, useAsyncEdits);
    util = new DFSTestUtil.Builder().setName("TestAuditAllowed").setNumFiles(20).build();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fs = cluster.getFileSystem();
    util.createFiles(fs, fileName);
    // make sure the appender is what it's supposed to be
    Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
    @SuppressWarnings("unchecked") List<Appender> appenders = Collections.list(logger.getAllAppenders());
    assertEquals(1, appenders.size());
    assertEquals(useAsyncLog, appenders.get(0) instanceof AsyncAppender);
    fnames = util.getFileNames(fileName);
    util.waitReplication(fs, fileName, (short) 3);
    userGroupInfo = UserGroupInformation.createUserForTesting(username, groups);
}
Also used : Appender(org.apache.log4j.Appender) AsyncAppender(org.apache.log4j.AsyncAppender) RollingFileAppender(org.apache.log4j.RollingFileAppender) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Log4JLogger(org.apache.commons.logging.impl.Log4JLogger) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Logger(org.apache.log4j.Logger) Before(org.junit.Before)
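
The interesting step above is the unwrap-and-inspect: FSNamesystem.auditLog is a Commons Logging Log, and the test casts it to Log4JLogger to reach the underlying log4j Logger so it can assert which appenders are attached. Below is a minimal standalone sketch of that same check; the logger name "hypothetical.audit" and the class and method names are illustrative, not taken from Hadoop.

import java.util.Collections;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.log4j.Appender;
import org.apache.log4j.AsyncAppender;
import org.apache.log4j.Logger;

public class AppenderInspection {
    // Returns true if the given commons-logging Log is backed by log4j and has an AsyncAppender attached.
    static boolean hasAsyncAppender(Log log) {
        if (!(log instanceof Log4JLogger)) {
            return false;
        }
        Logger logger = ((Log4JLogger) log).getLogger();
        @SuppressWarnings("unchecked")
        List<Appender> appenders = Collections.list(logger.getAllAppenders());
        for (Appender appender : appenders) {
            if (appender instanceof AsyncAppender) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        // "hypothetical.audit" is an illustrative logger name, not one used by Hadoop.
        Log log = LogFactory.getLog("hypothetical.audit");
        System.out.println("async appender attached: " + hasAsyncAppender(log));
    }
}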

Example 7 with Logger

use of org.apache.log4j.Logger in project hadoop by apache.

the class TestAuditLogs method verifyAuditLogsRepeat.

// Ensure the audit log has exactly ndupe entries
private void verifyAuditLogsRepeat(boolean expectSuccess, int ndupe) throws IOException {
    // Turn off the logs
    Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
    logger.setLevel(Level.OFF);
    // Close the appenders and force all logs to be flushed
    Enumeration<?> appenders = logger.getAllAppenders();
    while (appenders.hasMoreElements()) {
        Appender appender = (Appender) appenders.nextElement();
        appender.close();
    }
    BufferedReader reader = new BufferedReader(new FileReader(auditLogFile));
    String line = null;
    boolean ret = true;
    try {
        for (int i = 0; i < ndupe; i++) {
            line = reader.readLine();
            assertNotNull(line);
            assertTrue("Expected audit event not found in audit log", auditPattern.matcher(line).matches());
            ret &= successPattern.matcher(line).matches();
        }
        assertNull("Unexpected event in audit log", reader.readLine());
        assertTrue("Expected success=" + expectSuccess, ret == expectSuccess);
    } finally {
        reader.close();
    }
}
Also used : Appender(org.apache.log4j.Appender) AsyncAppender(org.apache.log4j.AsyncAppender) RollingFileAppender(org.apache.log4j.RollingFileAppender) Log4JLogger(org.apache.commons.logging.impl.Log4JLogger) BufferedReader(java.io.BufferedReader) FileReader(java.io.FileReader) Logger(org.apache.log4j.Logger)
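
After the appenders are closed, the helper re-reads the audit file line by line and matches each line against precompiled regexes (auditPattern and successPattern are fields of the test, not shown in the snippet). The sketch below shows the same counting idea with try-with-resources; the "allowed=true" fragment is an assumption about what a successful HDFS audit line contains, not a constant taken from the test.

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.regex.Pattern;

public class AuditLogScan {
    // Assumed, simplified success marker; the real test uses its own successPattern field.
    private static final Pattern SUCCESS = Pattern.compile(".*allowed=true.*");

    // Counts lines in the given file that look like successful audit events.
    static int countSuccessfulEvents(String auditLogFile) throws IOException {
        int count = 0;
        try (BufferedReader reader = new BufferedReader(new FileReader(auditLogFile))) {
            String line;
            while ((line = reader.readLine()) != null) {
                if (SUCCESS.matcher(line).matches()) {
                    count++;
                }
            }
        }
        return count;
    }
}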

Example 8 with Logger

use of org.apache.log4j.Logger in project hadoop by apache.

the class TestAuditLogs method verifyAuditLogsCheckPattern.

// Ensure the audit log has exactly ndupe entries
private void verifyAuditLogsCheckPattern(boolean expectSuccess, int ndupe, Pattern pattern) throws IOException {
    // Turn off the logs
    Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
    logger.setLevel(Level.OFF);
    // Close the appenders and force all logs to be flushed
    Enumeration<?> appenders = logger.getAllAppenders();
    while (appenders.hasMoreElements()) {
        Appender appender = (Appender) appenders.nextElement();
        appender.close();
    }
    BufferedReader reader = new BufferedReader(new FileReader(auditLogFile));
    String line = null;
    boolean ret = true;
    boolean patternMatches = false;
    try {
        for (int i = 0; i < ndupe; i++) {
            line = reader.readLine();
            assertNotNull(line);
            patternMatches |= pattern.matcher(line).matches();
            ret &= successPattern.matcher(line).matches();
        }
        assertNull("Unexpected event in audit log", reader.readLine());
        assertTrue("Expected audit event not found in audit log", patternMatches);
        assertTrue("Expected success=" + expectSuccess, ret == expectSuccess);
    } finally {
        reader.close();
    }
}
Also used : Appender(org.apache.log4j.Appender) AsyncAppender(org.apache.log4j.AsyncAppender) RollingFileAppender(org.apache.log4j.RollingFileAppender) Log4JLogger(org.apache.commons.logging.impl.Log4JLogger) BufferedReader(java.io.BufferedReader) FileReader(java.io.FileReader) Logger(org.apache.log4j.Logger)
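
This variant takes an arbitrary Pattern, so a caller can check for one specific command rather than the generic audit format. The snippet below is a hedged, self-contained illustration of the kind of pattern such a caller might pass in; the cmd= fragments and the sample line are assumptions, not values from the test.

import java.util.regex.Pattern;

public class AuditPatternExamples {
    // Hypothetical patterns a caller of verifyAuditLogsCheckPattern might supply.
    static final Pattern GET_FILE_INFO = Pattern.compile(".*cmd=getfileinfo.*");
    static final Pattern OPEN = Pattern.compile(".*cmd=open.*");

    public static void main(String[] args) {
        // Made-up sample line, loosely modeled on an HDFS audit entry.
        String sampleLine = "allowed=true\tugi=alice\tcmd=open\tsrc=/data/file1";
        System.out.println(OPEN.matcher(sampleLine).matches());          // true
        System.out.println(GET_FILE_INFO.matcher(sampleLine).matches()); // false
    }
}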

Example 9 with Logger

use of org.apache.log4j.Logger in project hadoop by apache.

the class TestAuditLogs method configureAuditLogs.

private void configureAuditLogs() throws IOException {
    // Shutdown the LogManager to release all logger open file handles.
    // Unfortunately, Apache commons logging library does not provide
    // means to release underlying loggers. For additional info look up
    // commons library FAQ.
    LogManager.shutdown();
    File file = new File(auditLogFile);
    if (file.exists()) {
        assertTrue(file.delete());
    }
    Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
    // disable logging while the cluster startup preps files
    logger.setLevel(Level.OFF);
    PatternLayout layout = new PatternLayout("%m%n");
    RollingFileAppender appender = new RollingFileAppender(layout, auditLogFile);
    logger.addAppender(appender);
}
Also used : Log4JLogger(org.apache.commons.logging.impl.Log4JLogger) RollingFileAppender(org.apache.log4j.RollingFileAppender) PatternLayout(org.apache.log4j.PatternLayout) Logger(org.apache.log4j.Logger) File(java.io.File)
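
configureAuditLogs leaves the logger at Level.OFF so that cluster startup does not write into the freshly created audit file; a test then has to re-enable logging before exercising the filesystem. Below is a minimal sketch of that attach-silenced/enable pairing under the same RollingFileAppender setup; the class and method names are illustrative, not Hadoop's.

import java.io.IOException;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.RollingFileAppender;

public class AuditLoggerToggle {
    // Attaches a bare-message file appender to the given logger and silences it for setup.
    static void attachSilencedFileAppender(Logger logger, String logFile) throws IOException {
        logger.setLevel(Level.OFF);
        PatternLayout layout = new PatternLayout("%m%n");   // message plus newline, no timestamp or level
        RollingFileAppender appender = new RollingFileAppender(layout, logFile);
        logger.addAppender(appender);
    }

    // Re-enables the logger once setup noise is over and real events should be captured.
    static void enable(Logger logger) {
        logger.setLevel(Level.INFO);
    }
}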

Example 10 with Logger

use of org.apache.log4j.Logger in project hadoop by apache.

the class TestReplicationPolicy method testChooseTargetWithMoreThanAvailableNodes.

/**
   * In this test case we try to choose more targets than there are available
   * nodes and check the result.
   * @throws Exception
   */
@Test
public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
    // make datanodes 0 and 1 unqualified to be chosen: not enough disk space
    for (int i = 0; i < 2; i++) {
        updateHeartbeatWithUsage(dataNodes[i], 2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
    }
    final LogVerificationAppender appender = new LogVerificationAppender();
    final Logger logger = Logger.getRootLogger();
    logger.addAppender(appender);
    // try to choose dataNodes.length targets, which is more than the number of
    // nodes actually available
    DatanodeStorageInfo[] targets = chooseTarget(dataNodes.length);
    assertEquals(targets.length, dataNodes.length - 2);
    final List<LoggingEvent> log = appender.getLog();
    assertNotNull(log);
    assertFalse(log.size() == 0);
    final LoggingEvent lastLogEntry = log.get(log.size() - 1);
    assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
    // Replicas are supposed to be placed on each node, but two datanodes are not
    // available for placement, so we expect a shortfall of 2
    assertTrue(((String) lastLogEntry.getMessage()).contains("in need of 2"));
    resetHeartbeatForStorages();
}
Also used : LoggingEvent(org.apache.log4j.spi.LoggingEvent) LogVerificationAppender(org.apache.hadoop.hdfs.LogVerificationAppender) Logger(org.apache.log4j.Logger) Test(org.junit.Test)
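
LogVerificationAppender is Hadoop's own test helper; conceptually it is just an in-memory appender that records every LoggingEvent so assertions can be made on levels and messages afterwards. The sketch below shows that idea on top of log4j's AppenderSkeleton; the class and method names are illustrative and this is not Hadoop's actual implementation.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Logger;
import org.apache.log4j.spi.LoggingEvent;

// In-memory appender that captures events for later assertions (illustrative stand-in for LogVerificationAppender).
public class CapturingAppender extends AppenderSkeleton {
    private final List<LoggingEvent> events = new ArrayList<LoggingEvent>();

    @Override
    protected synchronized void append(LoggingEvent event) {
        events.add(event);
    }

    @Override
    public void close() {
        // nothing to release; captured events remain available for assertions
    }

    @Override
    public boolean requiresLayout() {
        // raw events are kept, so no layout is needed
        return false;
    }

    public synchronized List<LoggingEvent> getEvents() {
        return Collections.unmodifiableList(new ArrayList<LoggingEvent>(events));
    }

    public static void main(String[] args) {
        CapturingAppender appender = new CapturingAppender();
        Logger.getRootLogger().addAppender(appender);
        Logger.getRootLogger().warn("not enough replicas, in need of 2");
        System.out.println("captured events: " + appender.getEvents().size());
    }
}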

Aggregations

Logger (org.apache.log4j.Logger): 391 usages
Test (org.testng.annotations.Test): 100 usages
ArrayList (java.util.ArrayList): 86 usages
Test (org.junit.Test): 53 usages
InetSocketAddress (java.net.InetSocketAddress): 37 usages
ConditionCheck (com.linkedin.databus2.test.ConditionCheck): 36 usages
HashMap (java.util.HashMap): 35 usages
Checkpoint (com.linkedin.databus.core.Checkpoint): 34 usages
IOException (java.io.IOException): 32 usages
File (java.io.File): 30 usages
RegisterResponseEntry (com.linkedin.databus2.core.container.request.RegisterResponseEntry): 24 usages
List (java.util.List): 22 usages
IdNamePair (com.linkedin.databus.core.util.IdNamePair): 21 usages
DBUtil (ngse.org.DBUtil): 20 usages
Channel (org.jboss.netty.channel.Channel): 19 usages
DbusEventBuffer (com.linkedin.databus.core.DbusEventBuffer): 18 usages
SQLException (java.sql.SQLException): 18 usages
Vector (java.util.Vector): 18 usages
DbusEventGenerator (com.linkedin.databus.core.test.DbusEventGenerator): 17 usages
OutputStream (java.io.OutputStream): 17 usages