
Example 16 with Logger

Use of org.apache.log4j.Logger in project hadoop by apache.

From class TestLeaderElectorService, method setUp:

@Before
public void setUp() throws Exception {
    // Quiet the test output: force the root logger to INFO.
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.INFO);
    conf = new Configuration();
    // Enable ResourceManager HA with the Curator-based leader elector.
    conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
    conf.setBoolean(YarnConfiguration.CURATOR_LEADER_ELECTOR, true);
    conf.set(YarnConfiguration.RM_CLUSTER_ID, "cluster1");
    conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
    // Give each RM id a concrete address for every HA service key.
    for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
        conf.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS);
        conf.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS);
    }
    // Spin up an in-process 3-node ZooKeeper ensemble for the elector.
    zkCluster = new TestingCluster(3);
    conf.set(YarnConfiguration.RM_ZK_ADDRESS, zkCluster.getConnectString());
    zkCluster.start();
}
Also used: TestingCluster (org.apache.curator.test.TestingCluster), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), Configuration (org.apache.hadoop.conf.Configuration), Logger (org.apache.log4j.Logger), Before (org.junit.Before)
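All three Hadoop setups in these examples start the same way: grab the log4j 1.x root logger and force a level before the test runs. A minimal, self-contained sketch of just that pattern (the class name is ours, for illustration):

import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.junit.Before;

public class RootLoggerLevelSetup {

    @Before
    public void setUp() {
        // LogManager.getRootLogger() returns the root of the log4j 1.x
        // logger hierarchy; setLevel() here affects every logger that
        // does not set a level of its own.
        Logger rootLogger = LogManager.getRootLogger();
        rootLogger.setLevel(Level.INFO);
    }
}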

Example 17 with Logger

Use of org.apache.log4j.Logger in project hadoop by apache.

From class TestApplicationCleanup, method setup:

@Before
public void setup() throws UnknownHostException {
    // Verbose test output: force the root logger to DEBUG.
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.DEBUG);
    conf = new YarnConfiguration();
    UserGroupInformation.setConfiguration(conf);
    // Enable RM recovery backed by the in-memory state store.
    conf.set(YarnConfiguration.RECOVERY_ENABLED, "true");
    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
    // The test relies on the AM being allowed more than one attempt.
    Assert.assertTrue(YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS > 1);
}
Also used: MemoryRMStateStore (org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), Logger (org.apache.log4j.Logger), Before (org.junit.Before)
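Because child loggers inherit the root level, setting the root to DEBUG (as this setup does) turns on isDebugEnabled() everywhere by default. A quick sketch demonstrating that inheritance (the logger name is illustrative):

import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;

public class LevelInheritanceDemo {
    public static void main(String[] args) {
        LogManager.getRootLogger().setLevel(Level.DEBUG);
        // This logger sets no level of its own, so it inherits DEBUG
        // from the root.
        Logger child = Logger.getLogger("org.example.some.pkg");
        System.out.println(child.isDebugEnabled());   // prints true
    }
}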

Example 18 with Logger

Use of org.apache.log4j.Logger in project hadoop by apache.

From class TestRMRestart, method setup:

@Before
public void setup() throws IOException {
    conf = getConf();
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.DEBUG);
    UserGroupInformation.setConfiguration(conf);
    // Recovery enabled, but without work-preserving restart, so the
    // restart path that does not keep containers alive is exercised.
    conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
    conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, false);
    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
    // Route timeline writes to the file-system writer for the test.
    conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS, FileSystemTimelineWriterImpl.class, TimelineWriter.class);
    rmAddr = new InetSocketAddress("localhost", 8032);
    Assert.assertTrue(YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS > 1);
}
Also used: MemoryRMStateStore (org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore), InetSocketAddress (java.net.InetSocketAddress), Logger (org.apache.log4j.Logger), Before (org.junit.Before)
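The setClass(...) call above stores a class under a configuration key and verifies it is assignable to the given interface; the matching getClass(...) reads it back. A sketch of that round-trip with Hadoop's Configuration (the key and classes are made up for illustration):

import org.apache.hadoop.conf.Configuration;

public class SetClassRoundTrip {
    interface Writer {}
    static class FileWriterImpl implements Writer {}

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // setClass rejects the call if FileWriterImpl did not implement Writer.
        conf.setClass("example.writer.class", FileWriterImpl.class, Writer.class);
        Class<? extends Writer> cls =
                conf.getClass("example.writer.class", null, Writer.class);
        System.out.println(cls.getName());
    }
}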

Example 19 with Logger

Use of org.apache.log4j.Logger in project hadoop by apache.

From class TestSignalContainer, method testSignalRequestDeliveryToNM:

@Test
public void testSignalRequestDeliveryToNM() throws Exception {
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.DEBUG);
    MockRM rm = new MockRM();
    rm.start();
    MockNM nm1 = rm.registerNode("h1:1234", 5000);
    RMApp app = rm.submitApp(2000);
    // kick the scheduling
    nm1.nodeHeartbeat(true);
    RMAppAttempt attempt = app.getCurrentAppAttempt();
    MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());
    am.registerAppAttempt();
    // request for containers
    final int request = 2;
    am.allocate("h1", 1000, request, new ArrayList<ContainerId>());
    // kick the scheduler
    nm1.nodeHeartbeat(true);
    List<Container> conts = null;
    int contReceived = 0;
    int waitCount = 0;
    while (contReceived < request && waitCount++ < 200) {
        LOG.info("Got " + contReceived + " containers. Waiting to get " + request);
        Thread.sleep(100);
        conts = am.allocate(new ArrayList<ResourceRequest>(), new ArrayList<ContainerId>()).getAllocatedContainers();
        contReceived += conts.size();
    }
    Assert.assertEquals(request, contReceived);
    for (Container container : conts) {
        rm.signalToContainer(container.getId(), SignalContainerCommand.OUTPUT_THREAD_DUMP);
    }
    NodeHeartbeatResponse resp;
    List<SignalContainerRequest> contsToSignal;
    int signaledConts = 0;
    waitCount = 0;
    while (signaledConts < request && waitCount++ < 200) {
        LOG.info("Waiting to get signalcontainer events.. signaledConts: " + signaledConts);
        resp = nm1.nodeHeartbeat(true);
        contsToSignal = resp.getContainersToSignalList();
        signaledConts += contsToSignal.size();
        Thread.sleep(100);
    }
    // Verify NM receives the expected number of signal container requests.
    Assert.assertEquals(request, signaledConts);
    am.unregisterAppAttempt();
    nm1.nodeHeartbeat(attempt.getAppAttemptId(), 1, ContainerState.COMPLETE);
    rm.waitForState(am.getApplicationAttemptId(), RMAppAttemptState.FINISHED);
    rm.stop();
}
Also used: RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp), RMAppAttempt (org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt), NodeHeartbeatResponse (org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse), SignalContainerRequest (org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest), Logger (org.apache.log4j.Logger), Container (org.apache.hadoop.yarn.api.records.Container), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), ResourceRequest (org.apache.hadoop.yarn.api.records.ResourceRequest), Test (org.junit.Test)
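Both waits in this test are the same bounded-polling shape: accumulate results until a target count is reached or a retry budget runs out, sleeping between attempts. Distilled into a plain-Java helper (the helper and its names are ours, not part of the Hadoop test):

import java.util.function.IntSupplier;

public class BoundedPoll {

    // Calls batch until the running total reaches target or maxAttempts
    // is exhausted, sleeping between calls; returns the final total.
    static int pollUntil(IntSupplier batch, int target, int maxAttempts,
                         long sleepMillis) throws InterruptedException {
        int total = 0;
        int attempts = 0;
        while (total < target && attempts++ < maxAttempts) {
            total += batch.getAsInt();
            Thread.sleep(sleepMillis);
        }
        return total;
    }
}

The test's 200-attempt, 100 ms loops give each wait roughly a 20-second ceiling before the assertion fails.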

Example 20 with Logger

Use of org.apache.log4j.Logger in project hbase by apache.

From class BackupDriver, method parseAndRun:

private int parseAndRun(String[] args) throws IOException {
    // Check if backup is enabled
    if (!BackupManager.isBackupEnabled(getConf())) {
        System.err.println(BackupRestoreConstants.ENABLE_BACKUP);
        return -1;
    }
    System.out.println(BackupRestoreConstants.VERIFY_BACKUP);
    String cmd = null;
    String[] remainArgs = null;
    if (args == null || args.length == 0) {
        printToolUsage();
        return -1;
    } else {
        cmd = args[0];
        remainArgs = new String[args.length - 1];
        if (args.length > 1) {
            System.arraycopy(args, 1, remainArgs, 0, args.length - 1);
        }
    }
    BackupCommand type = BackupCommand.HELP;
    if (BackupCommand.CREATE.name().equalsIgnoreCase(cmd)) {
        type = BackupCommand.CREATE;
    } else if (BackupCommand.HELP.name().equalsIgnoreCase(cmd)) {
        type = BackupCommand.HELP;
    } else if (BackupCommand.DELETE.name().equalsIgnoreCase(cmd)) {
        type = BackupCommand.DELETE;
    } else if (BackupCommand.DESCRIBE.name().equalsIgnoreCase(cmd)) {
        type = BackupCommand.DESCRIBE;
    } else if (BackupCommand.HISTORY.name().equalsIgnoreCase(cmd)) {
        type = BackupCommand.HISTORY;
    } else if (BackupCommand.PROGRESS.name().equalsIgnoreCase(cmd)) {
        type = BackupCommand.PROGRESS;
    } else if (BackupCommand.SET.name().equalsIgnoreCase(cmd)) {
        type = BackupCommand.SET;
    } else {
        System.out.println("Unsupported command for backup: " + cmd);
        printToolUsage();
        return -1;
    }
    // Enable debug logging for the backup client package when the debug
    // option was passed. Note: this.cmd is the parsed CommandLine field,
    // while the local String cmd above holds the subcommand name.
    Logger backupClientLogger = Logger.getLogger("org.apache.hadoop.hbase.backup");
    if (this.cmd.hasOption(OPTION_DEBUG)) {
        backupClientLogger.setLevel(Level.DEBUG);
    } else {
        backupClientLogger.setLevel(Level.INFO);
    }
    BackupCommands.Command command = BackupCommands.createCommand(getConf(), type, this.cmd);
    if (type == BackupCommand.CREATE && conf != null) {
        ((BackupCommands.CreateCommand) command).setConf(conf);
    }
    try {
        command.execute();
    } catch (IOException e) {
        if (e.getMessage().equals(BackupCommands.INCORRECT_USAGE)) {
            return -1;
        }
        throw e;
    }
    return 0;
}
Also used: BackupCommands (org.apache.hadoop.hbase.backup.impl.BackupCommands), BackupCommand (org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand), IOException (java.io.IOException), Logger (org.apache.log4j.Logger)
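The debug-flag handling near the end is the standard log4j 1.x way to raise verbosity for one package subtree without touching the rest of the hierarchy. In isolation (the package name is kept from the example; the flag parsing is simplified):

import org.apache.log4j.Level;
import org.apache.log4j.Logger;

public class PackageLevelToggle {
    public static void main(String[] args) {
        boolean debug = args.length > 0 && "-debug".equals(args[0]);
        // Logger.getLogger(String) creates or retrieves the logger for this
        // name; loggers below it in the hierarchy inherit the level set here.
        Logger backupLogger = Logger.getLogger("org.apache.hadoop.hbase.backup");
        backupLogger.setLevel(debug ? Level.DEBUG : Level.INFO);
    }
}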

Aggregations

Logger (org.apache.log4j.Logger): 391
Test (org.testng.annotations.Test): 100
ArrayList (java.util.ArrayList): 86
Test (org.junit.Test): 53
InetSocketAddress (java.net.InetSocketAddress): 37
ConditionCheck (com.linkedin.databus2.test.ConditionCheck): 36
HashMap (java.util.HashMap): 35
Checkpoint (com.linkedin.databus.core.Checkpoint): 34
IOException (java.io.IOException): 32
File (java.io.File): 30
RegisterResponseEntry (com.linkedin.databus2.core.container.request.RegisterResponseEntry): 24
List (java.util.List): 22
IdNamePair (com.linkedin.databus.core.util.IdNamePair): 21
DBUtil (ngse.org.DBUtil): 20
Channel (org.jboss.netty.channel.Channel): 19
DbusEventBuffer (com.linkedin.databus.core.DbusEventBuffer): 18
SQLException (java.sql.SQLException): 18
Vector (java.util.Vector): 18
DbusEventGenerator (com.linkedin.databus.core.test.DbusEventGenerator): 17
OutputStream (java.io.OutputStream): 17