Use of java.io.PrintWriter in project hadoop by apache.
The class TestHSWebApp, method testLogsView1.
@Test
public void testLogsView1() throws IOException {
  LOG.info("HsLogsPage");
  Injector injector = WebAppTests.testPage(AggregatedLogsPage.class,
      AppContext.class, new MockAppContext(0, 1, 1, 1));
  PrintWriter spyPw = WebAppTests.getPrintWriter(injector);
  verify(spyPw).write("Cannot get container logs without a ContainerId");
  verify(spyPw).write("Cannot get container logs without a NodeId");
  verify(spyPw).write("Cannot get container logs without an app owner");
}
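The verifications above hinge on a Mockito spy around the page's PrintWriter: WebAppTests renders the page and hands back the spied writer, and verify(..) fails the test unless the exact diagnostic string was written. A minimal, self-contained sketch of that verification pattern, assuming Mockito is on the classpath (the class name and the writer below are illustrative, not part of the Hadoop test harness):

import java.io.PrintWriter;
import java.io.StringWriter;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;

public class PrintWriterVerifySketch {
  public static void main(String[] args) {
    // spy wraps a real PrintWriter, so writes still happen but are recorded
    PrintWriter spyPw = spy(new PrintWriter(new StringWriter()));
    // stand-in for the code under test writing its diagnostic
    spyPw.write("Cannot get container logs without a ContainerId");
    // throws if that exact write(String) call was never made
    verify(spyPw).write("Cannot get container logs without a ContainerId");
  }
}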
Use of java.io.PrintWriter in project hadoop by apache.
The class TestNewCombinerGrouping, method testCombiner.
@Test
public void testCombiner() throws Exception {
  if (!new File(TEST_ROOT_DIR).mkdirs()) {
    throw new RuntimeException("Could not create test dir: " + TEST_ROOT_DIR);
  }
  File in = new File(TEST_ROOT_DIR, "input");
  if (!in.mkdirs()) {
    throw new RuntimeException("Could not create test dir: " + in);
  }
  File out = new File(TEST_ROOT_DIR, "output");
  PrintWriter pw = new PrintWriter(new FileWriter(new File(in, "data.txt")));
  pw.println("A|a,1");
  pw.println("A|b,2");
  pw.println("B|a,3");
  pw.println("B|b,4");
  pw.println("B|c,5");
  pw.close();
  JobConf conf = new JobConf();
  conf.set("mapreduce.framework.name", "local");
  Job job = new Job(conf);
  TextInputFormat.setInputPaths(job, new Path(in.getPath()));
  TextOutputFormat.setOutputPath(job, new Path(out.getPath()));
  job.setMapperClass(Map.class);
  job.setReducerClass(Reduce.class);
  job.setInputFormatClass(TextInputFormat.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(LongWritable.class);
  job.setOutputFormatClass(TextOutputFormat.class);
  job.setGroupingComparatorClass(GroupComparator.class);
  job.setCombinerKeyGroupingComparatorClass(GroupComparator.class);
  job.setCombinerClass(Combiner.class);
  job.getConfiguration().setInt("min.num.spills.for.combine", 0);
  job.submit();
  job.waitForCompletion(false);
  if (job.isSuccessful()) {
    Counters counters = job.getCounters();
    long combinerInputRecords = counters.findCounter(
        "org.apache.hadoop.mapreduce.TaskCounter", "COMBINE_INPUT_RECORDS").getValue();
    long combinerOutputRecords = counters.findCounter(
        "org.apache.hadoop.mapreduce.TaskCounter", "COMBINE_OUTPUT_RECORDS").getValue();
    Assert.assertTrue(combinerInputRecords > 0);
    Assert.assertTrue(combinerInputRecords > combinerOutputRecords);
    BufferedReader br = new BufferedReader(
        new FileReader(new File(out, "part-r-00000")));
    Set<String> output = new HashSet<String>();
    String line = br.readLine();
    Assert.assertNotNull(line);
    output.add(line.substring(0, 1) + line.substring(4, 5));
    line = br.readLine();
    Assert.assertNotNull(line);
    output.add(line.substring(0, 1) + line.substring(4, 5));
    line = br.readLine();
    Assert.assertNull(line);
    br.close();
    Set<String> expected = new HashSet<String>();
    expected.add("A2");
    expected.add("B5");
    Assert.assertEquals(expected, output);
  } else {
    Assert.fail("Job failed");
  }
}
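The Map, Reduce, Combiner, and GroupComparator inner classes the test wires in are not shown here. As a sketch of the technique being tested, a grouping comparator consistent with keys such as "A|a" would compare only the text before '|', so the combiner (via setCombinerKeyGroupingComparatorClass) and the reducer each see "A|a" and "A|b" as a single group. PrefixGroupComparator below is a hypothetical stand-in, not the actual test class, and assumes every key contains a '|':

import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;

public class PrefixGroupComparator extends WritableComparator {
  public PrefixGroupComparator() {
    // true => have WritableComparator instantiate Text keys for compare()
    super(Text.class, true);
  }

  @Override
  @SuppressWarnings("rawtypes")
  public int compare(WritableComparable a, WritableComparable b) {
    String ka = a.toString();
    String kb = b.toString();
    // group solely on the key prefix before '|'
    return ka.substring(0, ka.indexOf('|'))
        .compareTo(kb.substring(0, kb.indexOf('|')));
  }
}

With the five input records collapsing into just groups A and B, the combiner can pre-aggregate map output, which is what the assertion combinerInputRecords > combinerOutputRecords checks.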
Use of java.io.PrintWriter in project hadoop by apache.
The class TestDistributedShell, method testDSShellWithShellScript.
@Test
public void testDSShellWithShellScript() throws Exception {
  final File basedir = new File("target", TestDistributedShell.class.getName());
  final File tmpDir = new File(basedir, "tmpDir");
  tmpDir.mkdirs();
  final File customShellScript = new File(tmpDir, "custom_script.sh");
  if (customShellScript.exists()) {
    customShellScript.delete();
  }
  if (!customShellScript.createNewFile()) {
    Assert.fail("Can not create custom shell script file.");
  }
  PrintWriter fileWriter = new PrintWriter(customShellScript);
  // write the command the containers will execute
  fileWriter.write("echo testDSShellWithShellScript");
  fileWriter.close();
  System.out.println(customShellScript.getAbsolutePath());
  String[] args = {
    "--jar", APPMASTER_JAR,
    "--num_containers", "1",
    "--shell_script", customShellScript.getAbsolutePath(),
    "--master_memory", "512",
    "--master_vcores", "2",
    "--container_memory", "128",
    "--container_vcores", "1"
  };
  LOG.info("Initializing DS Client");
  final Client client = new Client(new Configuration(yarnCluster.getConfig()));
  boolean initSuccess = client.init(args);
  Assert.assertTrue(initSuccess);
  LOG.info("Running DS Client");
  boolean result = client.run();
  LOG.info("Client run completed. Result=" + result);
  List<String> expectedContent = new ArrayList<String>();
  expectedContent.add("testDSShellWithShellScript");
  verifyContainerLog(1, expectedContent, false, "");
}
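The script is written with a bare PrintWriter and an explicit close(). A sketch of the same step using try-with-resources, which flushes and closes the file even when the write throws (writeScript is a hypothetical helper, not code from the test):

import java.io.File;
import java.io.FileNotFoundException;
import java.io.PrintWriter;

public class ScriptWriterSketch {
  static void writeScript(File script) throws FileNotFoundException {
    // the writer is closed automatically, even on an exception
    try (PrintWriter fileWriter = new PrintWriter(script)) {
      fileWriter.write("echo testDSShellWithShellScript");
    }
  }
}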
Use of java.io.PrintWriter in project hadoop by apache.
The class TestDistributedShell, method testDSShellWithCustomLogPropertyFile.
@Test
public void testDSShellWithCustomLogPropertyFile() throws Exception {
  final File basedir = new File("target", TestDistributedShell.class.getName());
  final File tmpDir = new File(basedir, "tmpDir");
  tmpDir.mkdirs();
  final File customLogProperty = new File(tmpDir, "custom_log4j.properties");
  if (customLogProperty.exists()) {
    customLogProperty.delete();
  }
  if (!customLogProperty.createNewFile()) {
    Assert.fail("Can not create custom log4j property file.");
  }
  PrintWriter fileWriter = new PrintWriter(customLogProperty);
  // set the output to DEBUG level
  fileWriter.write("log4j.rootLogger=debug,stdout");
  fileWriter.close();
  String[] args = {
    "--jar", APPMASTER_JAR,
    "--num_containers", "3",
    "--shell_command", "echo",
    "--shell_args", "HADOOP",
    "--log_properties", customLogProperty.getAbsolutePath(),
    "--master_memory", "512",
    "--master_vcores", "2",
    "--container_memory", "128",
    "--container_vcores", "1"
  };
  // Before the DS run, the default log level is INFO
  final Log LOG_Client = LogFactory.getLog(Client.class);
  Assert.assertTrue(LOG_Client.isInfoEnabled());
  Assert.assertFalse(LOG_Client.isDebugEnabled());
  final Log LOG_AM = LogFactory.getLog(ApplicationMaster.class);
  Assert.assertTrue(LOG_AM.isInfoEnabled());
  Assert.assertFalse(LOG_AM.isDebugEnabled());
  LOG.info("Initializing DS Client");
  final Client client = new Client(new Configuration(yarnCluster.getConfig()));
  boolean initSuccess = client.init(args);
  Assert.assertTrue(initSuccess);
  LOG.info("Running DS Client");
  boolean result = client.run();
  LOG.info("Client run completed. Result=" + result);
  Assert.assertTrue(verifyContainerLog(3, null, true, "DEBUG") > 10);
  // After the DS run finishes, the log level should be DEBUG
  Assert.assertTrue(LOG_Client.isInfoEnabled());
  Assert.assertTrue(LOG_Client.isDebugEnabled());
  Assert.assertTrue(LOG_AM.isInfoEnabled());
  Assert.assertTrue(LOG_AM.isDebugEnabled());
}
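The property file the test writes is a single rootLogger line, which presumes an appender named "stdout" is supplied by the surrounding container log4j configuration. A hypothetical, self-contained variant of that writing step would declare the appender too (class and method names below are ours, not the test's):

import java.io.File;
import java.io.FileNotFoundException;
import java.io.PrintWriter;

public class Log4jPropertySketch {
  static void writeLogProperties(File customLogProperty) throws FileNotFoundException {
    try (PrintWriter fileWriter = new PrintWriter(customLogProperty)) {
      // root logger at DEBUG, routed to a console appender named "stdout"
      fileWriter.println("log4j.rootLogger=debug,stdout");
      fileWriter.println("log4j.appender.stdout=org.apache.log4j.ConsoleAppender");
      fileWriter.println("log4j.appender.stdout.layout=org.apache.log4j.SimpleLayout");
    }
  }
}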
Use of java.io.PrintWriter in project hadoop by apache.
The class SerializedExceptionPBImpl, method init.
public void init(Throwable t) {
  maybeInitBuilder();
  if (t == null) {
    return;
  }
  if (t.getCause() != null) {
    builder.setCause(new SerializedExceptionPBImpl(t.getCause()).getProto());
  }
  StringWriter sw = new StringWriter();
  PrintWriter pw = new PrintWriter(sw);
  t.printStackTrace(pw);
  pw.close();
  if (sw.toString() != null) {
    builder.setTrace(sw.toString());
  }
  if (t.getMessage() != null) {
    builder.setMessage(t.getMessage());
  }
  builder.setClassName(t.getClass().getCanonicalName());
}
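The StringWriter/PrintWriter pairing in init(..) is the standard Java idiom for capturing a stack trace as a String. Extracted here as a small helper sketch (the class name is ours, not Hadoop's); note that StringWriter.toString() never returns null, so the null check on the trace in init(..) is purely defensive:

import java.io.PrintWriter;
import java.io.StringWriter;

public final class StackTraces {
  private StackTraces() {
  }

  public static String asString(Throwable t) {
    StringWriter sw = new StringWriter();
    try (PrintWriter pw = new PrintWriter(sw)) {
      // printStackTrace renders the throwable and its cause chain into the buffer
      t.printStackTrace(pw);
    }
    return sw.toString();
  }
}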