Use of org.apache.log4j.Logger in project hadoop by apache.
The class TestLeaderElectorService, method setUp:
@Before
public void setUp() throws Exception {
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.INFO);
  conf = new Configuration();
  conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
  conf.setBoolean(YarnConfiguration.CURATOR_LEADER_ELECTOR, true);
  conf.set(YarnConfiguration.RM_CLUSTER_ID, "cluster1");
  conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
  for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
    conf.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS);
    conf.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS);
  }
  zkCluster = new TestingCluster(3);
  conf.set(YarnConfiguration.RM_ZK_ADDRESS, zkCluster.getConnectString());
  zkCluster.start();
}
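For reference, a minimal sketch of the log4j 1.x root-logger adjustment used above, stripped of the YARN- and ZooKeeper-specific configuration; the class name QuietLoggingTestBase is illustrative and not part of the Hadoop source.

import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.junit.Before;

public class QuietLoggingTestBase {
  @Before
  public void setUp() {
    // Raising the root logger to INFO hides DEBUG output from every category
    // that does not override the level itself.
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.INFO);
  }
}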
Use of org.apache.log4j.Logger in project hadoop by apache.
The class TestApplicationCleanup, method setup:
@Before
public void setup() throws UnknownHostException {
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  conf = new YarnConfiguration();
  UserGroupInformation.setConfiguration(conf);
  conf.set(YarnConfiguration.RECOVERY_ENABLED, "true");
  conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
  Assert.assertTrue(YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS > 1);
}
Use of org.apache.log4j.Logger in project hadoop by apache.
The class TestRMRestart, method setup:
@Before
public void setup() throws IOException {
  conf = getConf();
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  UserGroupInformation.setConfiguration(conf);
  conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
  conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, false);
  conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
  conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS, FileSystemTimelineWriterImpl.class, TimelineWriter.class);
  rmAddr = new InetSocketAddress("localhost", 8032);
  Assert.assertTrue(YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS > 1);
}
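Both setup methods above lower the root logger to DEBUG for the whole JVM and never restore it. A hedged sketch of one way to undo the change after each test, assuming log4j 1.x and JUnit 4; the class and method names are illustrative, not part of the Hadoop source.

import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.junit.After;
import org.junit.Before;

public class VerboseLoggingTestBase {
  private Level previousRootLevel;

  @Before
  public void enableDebugLogging() {
    previousRootLevel = LogManager.getRootLogger().getLevel();
    LogManager.getRootLogger().setLevel(Level.DEBUG);
  }

  @After
  public void restoreRootLevel() {
    // Put the root logger back so later tests are not flooded with DEBUG output.
    LogManager.getRootLogger().setLevel(previousRootLevel);
  }
}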
Use of org.apache.log4j.Logger in project hadoop by apache.
The class TestSignalContainer, method testSignalRequestDeliveryToNM:
@Test
public void testSignalRequestDeliveryToNM() throws Exception {
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  MockRM rm = new MockRM();
  rm.start();
  MockNM nm1 = rm.registerNode("h1:1234", 5000);
  RMApp app = rm.submitApp(2000);
  // kick the scheduling
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt = app.getCurrentAppAttempt();
  MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());
  am.registerAppAttempt();
  // request for containers
  final int request = 2;
  am.allocate("h1", 1000, request, new ArrayList<ContainerId>());
  // kick the scheduler
  nm1.nodeHeartbeat(true);
  List<Container> conts = null;
  int contReceived = 0;
  int waitCount = 0;
  while (contReceived < request && waitCount++ < 200) {
    LOG.info("Got " + contReceived + " containers. Waiting to get " + request);
    Thread.sleep(100);
    conts = am.allocate(new ArrayList<ResourceRequest>(), new ArrayList<ContainerId>()).getAllocatedContainers();
    contReceived += conts.size();
  }
  Assert.assertEquals(request, contReceived);
  for (Container container : conts) {
    rm.signalToContainer(container.getId(), SignalContainerCommand.OUTPUT_THREAD_DUMP);
  }
  NodeHeartbeatResponse resp;
  List<SignalContainerRequest> contsToSignal;
  int signaledConts = 0;
  waitCount = 0;
  while (signaledConts < request && waitCount++ < 200) {
    LOG.info("Waiting to get signalcontainer events.. signaledConts: " + signaledConts);
    resp = nm1.nodeHeartbeat(true);
    contsToSignal = resp.getContainersToSignalList();
    signaledConts += contsToSignal.size();
    Thread.sleep(100);
  }
  // Verify NM receives the expected number of signal container requests.
  Assert.assertEquals(request, signaledConts);
  am.unregisterAppAttempt();
  nm1.nodeHeartbeat(attempt.getAppAttemptId(), 1, ContainerState.COMPLETE);
  rm.waitForState(am.getApplicationAttemptId(), RMAppAttemptState.FINISHED);
  rm.stop();
}
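The test above relies twice on the same poll-until-condition idiom: sleep, re-check, and give up after a bounded number of attempts. Below is a minimal, self-contained sketch of that idiom using only the JDK; the class name WaitUtil and the parameter choices are illustrative, not part of the Hadoop test utilities.

import java.util.function.BooleanSupplier;

public final class WaitUtil {
  private WaitUtil() {
  }

  /**
   * Re-evaluates the condition every intervalMs milliseconds until it holds
   * or maxAttempts polls have been made. Returns the final state of the condition.
   */
  public static boolean waitFor(BooleanSupplier condition, long intervalMs, int maxAttempts)
      throws InterruptedException {
    int attempts = 0;
    while (!condition.getAsBoolean() && attempts++ < maxAttempts) {
      Thread.sleep(intervalMs);
    }
    return condition.getAsBoolean();
  }
}

With a helper like this, each of the two loops in the test collapses to a single call whose condition re-polls the mock AM or NM and compares the running count against request.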
Use of org.apache.log4j.Logger in project hbase by apache.
The class BackupDriver, method parseAndRun:
private int parseAndRun(String[] args) throws IOException {
  // Check if backup is enabled
  if (!BackupManager.isBackupEnabled(getConf())) {
    System.err.println(BackupRestoreConstants.ENABLE_BACKUP);
    return -1;
  }
  System.out.println(BackupRestoreConstants.VERIFY_BACKUP);
  String cmd = null;
  String[] remainArgs = null;
  if (args == null || args.length == 0) {
    printToolUsage();
    return -1;
  } else {
    cmd = args[0];
    remainArgs = new String[args.length - 1];
    if (args.length > 1) {
      System.arraycopy(args, 1, remainArgs, 0, args.length - 1);
    }
  }
  BackupCommand type = BackupCommand.HELP;
  if (BackupCommand.CREATE.name().equalsIgnoreCase(cmd)) {
    type = BackupCommand.CREATE;
  } else if (BackupCommand.HELP.name().equalsIgnoreCase(cmd)) {
    type = BackupCommand.HELP;
  } else if (BackupCommand.DELETE.name().equalsIgnoreCase(cmd)) {
    type = BackupCommand.DELETE;
  } else if (BackupCommand.DESCRIBE.name().equalsIgnoreCase(cmd)) {
    type = BackupCommand.DESCRIBE;
  } else if (BackupCommand.HISTORY.name().equalsIgnoreCase(cmd)) {
    type = BackupCommand.HISTORY;
  } else if (BackupCommand.PROGRESS.name().equalsIgnoreCase(cmd)) {
    type = BackupCommand.PROGRESS;
  } else if (BackupCommand.SET.name().equalsIgnoreCase(cmd)) {
    type = BackupCommand.SET;
  } else {
    System.out.println("Unsupported command for backup: " + cmd);
    printToolUsage();
    return -1;
  }
  // enable debug logging
  Logger backupClientLogger = Logger.getLogger("org.apache.hadoop.hbase.backup");
  if (this.cmd.hasOption(OPTION_DEBUG)) {
    backupClientLogger.setLevel(Level.DEBUG);
  } else {
    backupClientLogger.setLevel(Level.INFO);
  }
  BackupCommands.Command command = BackupCommands.createCommand(getConf(), type, this.cmd);
  if (type == BackupCommand.CREATE && conf != null) {
    ((BackupCommands.CreateCommand) command).setConf(conf);
  }
  try {
    command.execute();
  } catch (IOException e) {
    if (e.getMessage().equals(BackupCommands.INCORRECT_USAGE)) {
      return -1;
    }
    throw e;
  }
  return 0;
}
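Unlike the Hadoop tests above, this snippet adjusts a named per-package logger rather than the root logger, so only the backup client classes become verbose. A minimal sketch isolating that toggle from the command dispatch; it assumes log4j 1.x on the classpath, and the main method and the -d flag are illustrative rather than part of BackupDriver.

import org.apache.log4j.Level;
import org.apache.log4j.Logger;

public class DebugLoggingToggle {
  public static void main(String[] args) {
    boolean debug = args.length > 0 && "-d".equals(args[0]);
    // A named logger acts as a prefix: every class under this package inherits the level.
    Logger backupLogger = Logger.getLogger("org.apache.hadoop.hbase.backup");
    backupLogger.setLevel(debug ? Level.DEBUG : Level.INFO);
    backupLogger.info("Effective level: " + backupLogger.getEffectiveLevel());
  }
}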