Example 26 with Logger

use of org.apache.log4j.Logger in project hadoop by apache.

the class TestFifoScheduler method testFifoScheduling.

@Test(timeout = 60000)
public void testFifoScheduling() throws Exception {
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.DEBUG);
    MockRM rm = new MockRM(conf);
    rm.start();
    MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
    MockNM nm2 = rm.registerNode("127.0.0.2:5678", 4 * GB);
    RMApp app1 = rm.submitApp(2048);
    // kick the scheduling, 2 GB given to AM1, remaining 4 GB on nm1
    nm1.nodeHeartbeat(true);
    RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
    MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
    am1.registerAppAttempt();
    SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
    Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemorySize());
    RMApp app2 = rm.submitApp(2048);
    // kick the scheduling, 2 GB given to AM2, remaining 2 GB on nm2
    nm2.nodeHeartbeat(true);
    RMAppAttempt attempt2 = app2.getCurrentAppAttempt();
    MockAM am2 = rm.sendAMLaunched(attempt2.getAppAttemptId());
    am2.registerAppAttempt();
    SchedulerNodeReport report_nm2 = rm.getResourceScheduler().getNodeReport(nm2.getNodeId());
    Assert.assertEquals(2 * GB, report_nm2.getUsedResource().getMemorySize());
    // add request for containers
    am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, GB, 1, 1);
    // send the request
    AllocateResponse alloc1Response = am1.schedule();
    // add request for containers
    am2.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 3 * GB, 0, 1);
    // send the request
    AllocateResponse alloc2Response = am2.schedule();
    // kick the scheduler, 1 GB and 3 GB given to AM1 and AM2, remaining 0
    nm1.nodeHeartbeat(true);
    while (alloc1Response.getAllocatedContainers().size() < 1) {
        LOG.info("Waiting for containers to be created for app 1...");
        Thread.sleep(1000);
        alloc1Response = am1.schedule();
    }
    while (alloc2Response.getAllocatedContainers().size() < 1) {
        LOG.info("Waiting for containers to be created for app 2...");
        Thread.sleep(1000);
        alloc2Response = am2.schedule();
    }
    // kick the scheduler, nothing given, remaining 2 GB on nm2
    nm2.nodeHeartbeat(true);
    List<Container> allocated1 = alloc1Response.getAllocatedContainers();
    Assert.assertEquals(1, allocated1.size());
    Assert.assertEquals(1 * GB, allocated1.get(0).getResource().getMemorySize());
    Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());
    List<Container> allocated2 = alloc2Response.getAllocatedContainers();
    Assert.assertEquals(1, allocated2.size());
    Assert.assertEquals(3 * GB, allocated2.get(0).getResource().getMemorySize());
    Assert.assertEquals(nm1.getNodeId(), allocated2.get(0).getNodeId());
    report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
    report_nm2 = rm.getResourceScheduler().getNodeReport(nm2.getNodeId());
    Assert.assertEquals(0, report_nm1.getAvailableResource().getMemorySize());
    Assert.assertEquals(2 * GB, report_nm2.getAvailableResource().getMemorySize());
    Assert.assertEquals(6 * GB, report_nm1.getUsedResource().getMemorySize());
    Assert.assertEquals(2 * GB, report_nm2.getUsedResource().getMemorySize());
    Container c1 = allocated1.get(0);
    Assert.assertEquals(GB, c1.getResource().getMemorySize());
    ContainerStatus containerStatus = BuilderUtils.newContainerStatus(c1.getId(), ContainerState.COMPLETE, "", 0, c1.getResource());
    nm1.containerStatus(containerStatus);
    int waitCount = 0;
    while (attempt1.getJustFinishedContainers().size() < 1 && waitCount++ != 20) {
        LOG.info("Waiting for containers to be finished for app 1... Tried " + waitCount + " times already..");
        Thread.sleep(1000);
    }
    Assert.assertEquals(1, attempt1.getJustFinishedContainers().size());
    Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses().size());
    report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
    Assert.assertEquals(5 * GB, report_nm1.getUsedResource().getMemorySize());
    rm.stop();
}
Also used : AllocateResponse(org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse) RMApp(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) Container(org.apache.hadoop.yarn.api.records.Container) ContainerStatus(org.apache.hadoop.yarn.api.records.ContainerStatus) RMAppAttempt(org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt) MockNM(org.apache.hadoop.yarn.server.resourcemanager.MockNM) SchedulerNodeReport(org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport) MockAM(org.apache.hadoop.yarn.server.resourcemanager.MockAM) MockRM(org.apache.hadoop.yarn.server.resourcemanager.MockRM) Logger(org.apache.log4j.Logger) Test(org.junit.Test)

Example 27 with Logger

use of org.apache.log4j.Logger in project hadoop by apache.

the class TestFifoScheduler method testAllocateContainerOnNodeWithoutOffSwitchSpecified.

@Test(timeout = 60000)
public void testAllocateContainerOnNodeWithoutOffSwitchSpecified() throws Exception {
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.DEBUG);
    MockRM rm = new MockRM(conf);
    rm.start();
    MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
    RMApp app1 = rm.submitApp(2048);
    // kick the scheduling, 2 GB given to AM1, remaining 4 GB on nm1
    nm1.nodeHeartbeat(true);
    RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
    MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
    am1.registerAppAttempt();
    // add request for containers
    List<ResourceRequest> requests = new ArrayList<ResourceRequest>();
    requests.add(am1.createResourceReq("127.0.0.1", 1 * GB, 1, 1));
    requests.add(am1.createResourceReq("/default-rack", 1 * GB, 1, 1));
    // send the request
    am1.allocate(requests, null);
    try {
        // kick the schedule
        nm1.nodeHeartbeat(true);
    } catch (NullPointerException e) {
        Assert.fail("NPE when allocating container on node but " + "forget to set off-switch request should be handled");
    }
    rm.stop();
}
Also used : RMApp(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) RMAppAttempt(org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt) MockNM(org.apache.hadoop.yarn.server.resourcemanager.MockNM) ArrayList(java.util.ArrayList) MockAM(org.apache.hadoop.yarn.server.resourcemanager.MockAM) MockRM(org.apache.hadoop.yarn.server.resourcemanager.MockRM) UpdateNodeResourceRequest(org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest) ResourceRequest(org.apache.hadoop.yarn.api.records.ResourceRequest) Logger(org.apache.log4j.Logger) Test(org.junit.Test)
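
Both FifoScheduler tests above begin the same way: they fetch the log4j root logger and drop it to DEBUG before starting the MockRM, so every scheduler decision ends up in the test log. A minimal, standalone sketch of that pattern outside the Hadoop test harness (class and message names here are illustrative only, not from the Hadoop code base):

import org.apache.log4j.BasicConfigurator;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;

public class DebugRootLoggerSketch {
    public static void main(String[] args) {
        // Send log output to the console so the DEBUG messages are actually visible.
        BasicConfigurator.configure();
        // Same call the tests make before rm.start(): raise the whole hierarchy to DEBUG.
        Logger rootLogger = LogManager.getRootLogger();
        rootLogger.setLevel(Level.DEBUG);
        // Any logger in the hierarchy now emits DEBUG-level messages.
        Logger log = Logger.getLogger(DebugRootLoggerSketch.class);
        log.debug("scheduler internals would show up at this level");
    }
}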

Example 28 with Logger

use of org.apache.log4j.Logger in project hbase by apache.

the class MapreduceDependencyClasspathTool method main.

public static void main(String[] argv) throws Exception {
    // Silence the usual noise. This is probably fragile...
    Logger logger = Logger.getLogger("org.apache.hadoop.hbase");
    if (logger != null) {
        logger.setLevel(Level.WARN);
    }
    System.exit(ToolRunner.run(HBaseConfiguration.create(), new MapreduceDependencyClasspathTool(), argv));
}
Also used : Logger(org.apache.log4j.Logger)
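
Example 28 makes the opposite adjustment: rather than making logging more verbose, it raises the level on one package logger so routine HBase startup chatter stays out of the tool's classpath output. A minimal sketch of the same idea, using a hypothetical noisy package name rather than the HBase one:

import org.apache.log4j.BasicConfigurator;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;

public class SilencePackageSketch {
    public static void main(String[] args) {
        BasicConfigurator.configure();
        // Only WARN and above from this package (and its child loggers) will be logged.
        Logger noisy = Logger.getLogger("org.example.noisy");
        noisy.setLevel(Level.WARN);
        noisy.info("below WARN, suppressed");
        noisy.warn("at WARN, still printed");
        // Loggers outside the package keep the root level and are unaffected.
        Logger.getLogger("org.example.other").info("unrelated logger, still printed");
    }
}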

Example 29 with Logger

use of org.apache.log4j.Logger in project zookeeper by apache.

the class QuorumPeerMainTest method testBadPeerAddressInQuorum.

/**
     * Verify handling of bad quorum address
     */
@Test
public void testBadPeerAddressInQuorum() throws Exception {
    ClientBase.setupTestEnv();
    // setup the logger to capture all logs
    Layout layout = Logger.getRootLogger().getAppender("CONSOLE").getLayout();
    ByteArrayOutputStream os = new ByteArrayOutputStream();
    WriterAppender appender = new WriterAppender(layout, os);
    appender.setThreshold(Level.WARN);
    Logger qlogger = Logger.getLogger("org.apache.zookeeper.server.quorum");
    qlogger.addAppender(appender);
    try {
        final int CLIENT_PORT_QP1 = PortAssignment.unique();
        final int CLIENT_PORT_QP2 = PortAssignment.unique();
        String quorumCfgSection = "server.1=127.0.0.1:" + PortAssignment.unique() + ":" + PortAssignment.unique() + ";" + CLIENT_PORT_QP1 + "\nserver.2=fee.fii.foo.fum:" + PortAssignment.unique() + ":" + PortAssignment.unique() + ";" + CLIENT_PORT_QP2;
        MainThread q1 = new MainThread(1, CLIENT_PORT_QP1, quorumCfgSection);
        q1.start();
        boolean isup = ClientBase.waitForServerUp("127.0.0.1:" + CLIENT_PORT_QP1, 30000);
        Assert.assertFalse("Server never came up", isup);
        q1.shutdown();
        Assert.assertTrue("waiting for server 1 down", ClientBase.waitForServerDown("127.0.0.1:" + CLIENT_PORT_QP1, ClientBase.CONNECTION_TIMEOUT));
    } finally {
        qlogger.removeAppender(appender);
    }
    LineNumberReader r = new LineNumberReader(new StringReader(os.toString()));
    String line;
    boolean found = false;
    Pattern p = Pattern.compile(".*Cannot open channel to .* at election address .*");
    while ((line = r.readLine()) != null) {
        found = p.matcher(line).matches();
        if (found) {
            break;
        }
    }
    Assert.assertTrue("complains about host", found);
}
Also used : Pattern(java.util.regex.Pattern) Layout(org.apache.log4j.Layout) StringReader(java.io.StringReader) ByteArrayOutputStream(java.io.ByteArrayOutputStream) WriterAppender(org.apache.log4j.WriterAppender) Logger(org.apache.log4j.Logger) LineNumberReader(java.io.LineNumberReader) Test(org.junit.Test)
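
Example 29 shows a third pattern: attach a WriterAppender backed by a ByteArrayOutputStream to a package logger, run the scenario, detach the appender in a finally block, then scan the captured text for an expected message. The sketch below isolates just that capture-and-match technique; the logger name and the logged warning are placeholders rather than the ZooKeeper ones:

import java.io.ByteArrayOutputStream;
import java.io.LineNumberReader;
import java.io.StringReader;
import java.util.regex.Pattern;

import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.WriterAppender;

public class LogCaptureSketch {
    public static void main(String[] args) throws Exception {
        // Everything the target logger emits at WARN or above goes into this buffer.
        ByteArrayOutputStream os = new ByteArrayOutputStream();
        WriterAppender appender = new WriterAppender(new PatternLayout("%p - %m%n"), os);
        appender.setThreshold(Level.WARN);

        Logger target = Logger.getLogger("org.example.subsystem");
        target.addAppender(appender);
        try {
            // The code under test would run here; this warn() stands in for it.
            target.warn("Cannot open channel to 2 at election address example:1234");
        } finally {
            target.removeAppender(appender);
        }

        // Scan the captured output line by line for the expected complaint.
        LineNumberReader r = new LineNumberReader(new StringReader(os.toString()));
        Pattern p = Pattern.compile(".*Cannot open channel to .* at election address .*");
        boolean found = false;
        String line;
        while ((line = r.readLine()) != null) {
            if (p.matcher(line).matches()) {
                found = true;
                break;
            }
        }
        System.out.println("expected warning found: " + found);
    }
}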

Example 30 with Logger

use of org.apache.log4j.Logger in project zookeeper by apache.

the class ReadOnlyModeTest method testSeekForRwServer.

/**
     * Ensures that client seeks for r/w servers while it's connected to r/o
     * server.
     */
@SuppressWarnings("deprecation")
@Test(timeout = 90000)
public void testSeekForRwServer() throws Exception {
    // setup the logger to capture all logs
    Layout layout = Logger.getRootLogger().getAppender("CONSOLE").getLayout();
    ByteArrayOutputStream os = new ByteArrayOutputStream();
    WriterAppender appender = new WriterAppender(layout, os);
    appender.setImmediateFlush(true);
    appender.setThreshold(Level.INFO);
    Logger zlogger = Logger.getLogger("org.apache.zookeeper");
    zlogger.addAppender(appender);
    try {
        qu.shutdown(2);
        CountdownWatcher watcher = new CountdownWatcher();
        ZooKeeper zk = new ZooKeeper(qu.getConnString(), CONNECTION_TIMEOUT, watcher, true);
        watcher.waitForConnected(CONNECTION_TIMEOUT);
        // if we don't suspend a peer it will rejoin a quorum
        qu.getPeer(1).peer.suspend();
        // start two servers to form a quorum; client should detect this and
        // connect to one of them
        watcher.reset();
        qu.start(2);
        qu.start(3);
        ClientBase.waitForServerUp(qu.getConnString(), 2000);
        watcher.waitForConnected(CONNECTION_TIMEOUT);
        zk.create("/test", "test".getBytes(), ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        // resume poor fellow
        qu.getPeer(1).peer.resume();
    } finally {
        zlogger.removeAppender(appender);
    }
    os.close();
    LineNumberReader r = new LineNumberReader(new StringReader(os.toString()));
    String line;
    Pattern p = Pattern.compile(".*Majority server found.*");
    boolean found = false;
    while ((line = r.readLine()) != null) {
        if (p.matcher(line).matches()) {
            found = true;
            break;
        }
    }
    Assert.assertTrue("Majority server wasn't found while connected to r/o server", found);
}
Also used : Pattern(java.util.regex.Pattern) ZooKeeper(org.apache.zookeeper.ZooKeeper) Layout(org.apache.log4j.Layout) CountdownWatcher(org.apache.zookeeper.test.ClientBase.CountdownWatcher) StringReader(java.io.StringReader) ByteArrayOutputStream(java.io.ByteArrayOutputStream) WriterAppender(org.apache.log4j.WriterAppender) Logger(org.apache.log4j.Logger) LineNumberReader(java.io.LineNumberReader) Test(org.junit.Test)
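
Example 30 repeats the same attach / run / detach / scan sequence, adding setImmediateFlush(true) so each event reaches the stream before the assertion reads it. Since the sequence recurs, it can be factored into a small helper; the class below is a hypothetical refactoring of the pattern, not a utility from the ZooKeeper code base:

import java.io.ByteArrayOutputStream;
import java.util.regex.Pattern;

import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.WriterAppender;

/** Hypothetical helper: capture what a logger emits while a task runs. */
public final class CapturedLogs {

    /** Runs the task with a temporary appender attached and returns the captured text. */
    public static String capture(String loggerName, Level threshold, Runnable task) {
        ByteArrayOutputStream os = new ByteArrayOutputStream();
        WriterAppender appender = new WriterAppender(new PatternLayout("%p - %m%n"), os);
        appender.setThreshold(threshold);
        // Flush after every event so nothing is left buffered when the stream is read.
        appender.setImmediateFlush(true);

        Logger logger = Logger.getLogger(loggerName);
        logger.addAppender(appender);
        try {
            task.run();
        } finally {
            logger.removeAppender(appender);
        }
        return os.toString();
    }

    /** True if any captured line matches the given regular expression. */
    public static boolean anyLineMatches(String captured, String regex) {
        Pattern p = Pattern.compile(regex);
        for (String line : captured.split("\\r?\\n")) {
            if (p.matcher(line).matches()) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        String logs = capture("org.example.subsystem", Level.INFO,
                () -> Logger.getLogger("org.example.subsystem").info("Majority server found at ..."));
        System.out.println(anyLineMatches(logs, ".*Majority server found.*"));
    }
}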

Aggregations

Logger (org.apache.log4j.Logger): 391
Test (org.testng.annotations.Test): 100
ArrayList (java.util.ArrayList): 86
Test (org.junit.Test): 53
InetSocketAddress (java.net.InetSocketAddress): 37
ConditionCheck (com.linkedin.databus2.test.ConditionCheck): 36
HashMap (java.util.HashMap): 35
Checkpoint (com.linkedin.databus.core.Checkpoint): 34
IOException (java.io.IOException): 32
File (java.io.File): 30
RegisterResponseEntry (com.linkedin.databus2.core.container.request.RegisterResponseEntry): 24
List (java.util.List): 22
IdNamePair (com.linkedin.databus.core.util.IdNamePair): 21
DBUtil (ngse.org.DBUtil): 20
Channel (org.jboss.netty.channel.Channel): 19
DbusEventBuffer (com.linkedin.databus.core.DbusEventBuffer): 18
SQLException (java.sql.SQLException): 18
Vector (java.util.Vector): 18
DbusEventGenerator (com.linkedin.databus.core.test.DbusEventGenerator): 17
OutputStream (java.io.OutputStream): 17