Example 16 with Log4JLogger

Use of org.apache.commons.logging.impl.Log4JLogger in project hadoop by apache.

From the class NavBlock, method render().

@Override
public void render(Block html) {
    boolean addErrorsAndWarningsLink = false;
    // Only offer the errors/warnings page when commons-logging is backed by
    // log4j and the warning/error metrics appender has been registered.
    Log log = LogFactory.getLog(NavBlock.class);
    if (log instanceof Log4JLogger) {
        Log4jWarningErrorMetricsAppender appender =
                Log4jWarningErrorMetricsAppender.findAppender();
        if (appender != null) {
            addErrorsAndWarningsLink = true;
        }
    }
    // Top-level navigation: cluster-wide pages.
    UL<DIV<Hamlet>> mainList = html.div("#nav").
        h3("Cluster").ul().
        li().a(url("cluster"), "About")._().
        li().a(url("nodes"), "Nodes")._().
        li().a(url("nodelabels"), "Node Labels")._();
    // "Applications" expands into one sub-item per YarnApplicationState.
    UL<LI<UL<DIV<Hamlet>>>> subAppsList =
        mainList.li().a(url("apps"), "Applications").ul();
    subAppsList.li()._();
    for (YarnApplicationState state : YarnApplicationState.values()) {
        subAppsList.li().a(url("apps", state.toString()), state.toString())._();
    }
    subAppsList._()._();
    // Close the cluster list and open the "Tools" section.
    UL<DIV<Hamlet>> tools = mainList.
        li().a(url("scheduler"), "Scheduler")._()._().
        h3("Tools").ul();
    tools.
        li().a("/conf", "Configuration")._().
        li().a("/logs", "Local logs")._().
        li().a("/stacks", "Server stacks")._().
        li().a("/jmx?qry=Hadoop:*", "Server metrics")._();
    if (addErrorsAndWarningsLink) {
        tools.li().a(url("errors-and-warnings"), "Errors/Warnings")._();
    }
    tools._()._();
}
Also used : DIV(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV) Log(org.apache.commons.logging.Log) Log4JLogger(org.apache.commons.logging.impl.Log4JLogger) Log4jWarningErrorMetricsAppender(org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender) YarnApplicationState(org.apache.hadoop.yarn.api.records.YarnApplicationState) LI(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.LI)
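
The instanceof test above is the standard way to reach the native log4j machinery from behind the commons-logging facade. For reference, a minimal standalone sketch of the same detection pattern, assuming commons-logging is bound to log4j 1.2; the class name AppenderProbe is illustrative, not from the Hadoop source.

import java.util.Enumeration;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.log4j.Appender;
import org.apache.log4j.Logger;

public class AppenderProbe {
    public static void main(String[] args) {
        // The concrete Log type depends on which backend commons-logging bound to.
        Log log = LogFactory.getLog(AppenderProbe.class);
        if (log instanceof Log4JLogger) {
            // Unwrap the native log4j Logger and list its appenders.
            Logger logger = ((Log4JLogger) log).getLogger();
            Enumeration<?> appenders = logger.getAllAppenders();
            while (appenders.hasMoreElements()) {
                Appender appender = (Appender) appenders.nextElement();
                System.out.println("appender: " + appender.getName());
            }
        }
    }
}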

Example 17 with Log4JLogger

Use of org.apache.commons.logging.impl.Log4JLogger in project hadoop by apache.

From the class BenchmarkThroughput, method run().

@Override
public int run(String[] args) throws IOException {
    // silence the minidfs cluster: raise everything under the "org" logger to WARN
    Log hadoopLog = LogFactory.getLog("org");
    if (hadoopLog instanceof Log4JLogger) {
        GenericTestUtils.setLogLevel(hadoopLog, Level.WARN);
    }
    int reps = 1;
    if (args.length == 1) {
        try {
            reps = Integer.parseInt(args[0]);
        } catch (NumberFormatException e) {
            printUsage();
            return -1;
        }
    } else if (args.length > 1) {
        printUsage();
        return -1;
    }
    Configuration conf = getConf();
    // the size of the file to write
    long SIZE = conf.getLong("dfsthroughput.file.size", 10L * 1024 * 1024 * 1024);
    BUFFER_SIZE = conf.getInt("dfsthroughput.buffer.size", 4 * 1024);
    String localDir = conf.get("mapred.temp.dir");
    if (localDir == null) {
        localDir = conf.get("hadoop.tmp.dir");
        conf.set("mapred.temp.dir", localDir);
    }
    dir = new LocalDirAllocator("mapred.temp.dir");
    System.setProperty("test.build.data", localDir);
    System.out.println("Local = " + localDir);
    ChecksumFileSystem checkedLocal = FileSystem.getLocal(conf);
    FileSystem rawLocal = checkedLocal.getRawFileSystem();
    for (int i = 0; i < reps; ++i) {
        writeAndReadLocalFile("local", conf, SIZE);
        writeAndReadFile(rawLocal, "raw", conf, SIZE);
        writeAndReadFile(checkedLocal, "checked", conf, SIZE);
    }
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).racks(new String[] { "/foo" }).build();
        cluster.waitActive();
        FileSystem dfs = cluster.getFileSystem();
        for (int i = 0; i < reps; ++i) {
            writeAndReadFile(dfs, "dfs", conf, SIZE);
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
            // clean up minidfs junk
            rawLocal.delete(new Path(localDir, "dfs"), true);
        }
    }
    return 0;
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) Log(org.apache.commons.logging.Log) Log4JLogger(org.apache.commons.logging.impl.Log4JLogger) FileSystem(org.apache.hadoop.fs.FileSystem) ChecksumFileSystem(org.apache.hadoop.fs.ChecksumFileSystem) LocalDirAllocator(org.apache.hadoop.fs.LocalDirAllocator)
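
The silencing step at the top of run() relies on the same unwrapping trick: the setLogLevel call only takes effect because the Log obtained for the "org" logger subtree is a Log4JLogger. The same effect with plain log4j 1.2 calls, as a hedged sketch when GenericTestUtils is not on the classpath; QuietLogs is an illustrative class name.

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.log4j.Level;

public class QuietLogs {
    /** Raise the threshold of an entire logger subtree, e.g. everything under "org". */
    static void quiet(String loggerName, Level level) {
        Log log = LogFactory.getLog(loggerName);
        if (log instanceof Log4JLogger) {
            ((Log4JLogger) log).getLogger().setLevel(level);
        }
    }

    public static void main(String[] args) {
        quiet("org", Level.WARN); // same effect as the setLogLevel call above
    }
}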

Example 18 with Log4JLogger

Use of org.apache.commons.logging.impl.Log4JLogger in project hadoop by apache.

From the class TestAuditLogs, method setupAuditLogs().

/** Sets up the log4j logger for audit logs. */
private void setupAuditLogs() throws IOException {
    // FSNamesystem.auditLog is a commons-logging Log; unwrap the log4j Logger behind it.
    Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
    // enable logging now that the test is ready to run
    logger.setLevel(Level.INFO);
}
Also used : Log4JLogger(org.apache.commons.logging.impl.Log4JLogger) Logger(org.apache.log4j.Logger)
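
In the full TestAuditLogs class this setup also redirects the audit logger to a fresh file that later assertions read back. A hedged sketch of that wiring with plain log4j 1.2; the helper name, pattern string, and file handling are illustrative, not the exact values from the Hadoop source.

import java.io.File;
import java.io.IOException;

import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.RollingFileAppender;

public class AuditLogSetup {
    static void setupAuditLog(Log4JLogger auditLog, File logFile) throws IOException {
        Logger logger = auditLog.getLogger();
        // Start from a clean slate, then attach a file appender the test controls.
        logger.removeAllAppenders();
        logger.setLevel(Level.INFO);
        logger.addAppender(
            new RollingFileAppender(new PatternLayout("%m%n"), logFile.getPath()));
    }
}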

Example 19 with Log4JLogger

Use of org.apache.commons.logging.impl.Log4JLogger in project hadoop by apache.

From the class TestFileSystemOperationsWithThreads, method setUp().

@Before
public void setUp() throws Exception {
    super.setUp();
    Configuration conf = fs.getConf();
    // By default enable parallel threads for rename and delete operations.
    // Also enable flat listing of blobs for these operations.
    conf.setInt(NativeAzureFileSystem.AZURE_RENAME_THREADS, renameThreads);
    conf.setInt(NativeAzureFileSystem.AZURE_DELETE_THREADS, deleteThreads);
    conf.setBoolean(AzureNativeFileSystemStore.KEY_ENABLE_FLAT_LISTING, true);
    URI uri = fs.getUri();
    fs.initialize(uri, conf);
    // Capture logs
    logs = LogCapturer.captureLogs(new Log4JLogger(Logger.getRootLogger()));
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) Log4JLogger(org.apache.commons.logging.impl.Log4JLogger) URI(java.net.URI) Before(org.junit.Before)
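
LogCapturer buffers whatever the wrapped root logger emits, so a test that used this setUp() would typically read the buffer and then detach the capturer. A short usage sketch in the same test-class context; the renamed paths and the asserted substring are placeholders, not values from the Hadoop source.

@Test
public void exampleAssertionOnCapturedLogs() throws Exception {
    // Hypothetical follow-up to setUp(): run an operation, then inspect the capture.
    try {
        fs.rename(new Path("/src"), new Path("/dst")); // any operation expected to log
        assertTrue(logs.getOutput().contains("rename")); // placeholder substring
    } finally {
        logs.stopCapturing(); // detach so later tests see a clean root logger
    }
}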

Example 20 with Log4JLogger

Use of org.apache.commons.logging.impl.Log4JLogger in project hadoop by apache.

From the class TestNativeAzureFileSystemClientLogging, method testLoggingDisabled().

@Test
public void testLoggingDisabled() throws Exception {
    LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger.getRootLogger()));
    // Update configuration based on the Test.
    updateFileSystemConfiguration(false);
    performWASBOperations();
    assertFalse(verifyStorageClientLogs(logs.getOutput(), TEMP_DIR));
}
Also used : Log4JLogger(org.apache.commons.logging.impl.Log4JLogger) LogCapturer(org.apache.hadoop.test.GenericTestUtils.LogCapturer) Test(org.junit.Test)
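
The enabled case would be the mirror image: switch client logging on and expect the storage-client lines to appear in the capture. A sketch of that counterpart, assuming the same private helpers (updateFileSystemConfiguration, performWASBOperations, verifyStorageClientLogs) and the TEMP_DIR constant used by testLoggingDisabled.

@Test
public void testLoggingEnabled() throws Exception {
    LogCapturer logs =
        LogCapturer.captureLogs(new Log4JLogger(Logger.getRootLogger()));
    // Flip the same switch the disabled test flips, but on.
    updateFileSystemConfiguration(true);
    performWASBOperations();
    assertTrue(verifyStorageClientLogs(logs.getOutput(), TEMP_DIR));
}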

Aggregations

Log4JLogger (org.apache.commons.logging.impl.Log4JLogger): 21 uses
Logger (org.apache.log4j.Logger): 10 uses
Log (org.apache.commons.logging.Log): 9 uses
Appender (org.apache.log4j.Appender): 8 uses
AsyncAppender (org.apache.log4j.AsyncAppender): 5 uses
RollingFileAppender (org.apache.log4j.RollingFileAppender): 5 uses
File (java.io.File): 4 uses
Log4jWarningErrorMetricsAppender (org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender): 4 uses
BufferedReader (java.io.BufferedReader): 3 uses
FileReader (java.io.FileReader): 3 uses
Hamlet (org.apache.hadoop.yarn.webapp.hamlet.Hamlet): 3 uses
Test (org.junit.Test): 3 uses
HashMap (java.util.HashMap): 2 uses
LogConfigurationException (org.apache.commons.logging.LogConfigurationException): 2 uses
Configuration (org.apache.hadoop.conf.Configuration): 2 uses
LogCapturer (org.apache.hadoop.test.GenericTestUtils.LogCapturer): 2 uses
PatternLayout (org.apache.log4j.PatternLayout): 2 uses
NCSARequestLog (org.eclipse.jetty.server.NCSARequestLog): 2 uses
RequestLog (org.eclipse.jetty.server.RequestLog): 2 uses
Before (org.junit.Before): 2 uses