Use of java.io.Writer in project hadoop by apache.
The class TestAggregatedLogsBlock, method writeLog:
private void writeLog(String fileName, String text) throws Exception {
  File f = new File(fileName);
  Writer writer = new FileWriter(f);
  writer.write(text);
  writer.flush();
  writer.close();
}
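A minimal try-with-resources sketch of the same write (the method name writeLogSafely is hypothetical, not from the Hadoop source): the Writer is closed even if write() throws, and close() already flushes, so the explicit flush() becomes unnecessary. It assumes imports of java.io.File, java.io.FileWriter, java.io.IOException, and java.io.Writer.

private void writeLogSafely(String fileName, String text) throws IOException {
  // try-with-resources guarantees close() runs, which also flushes.
  try (Writer writer = new FileWriter(new File(fileName))) {
    writer.write(text);
  }
}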
Use of java.io.Writer in project hadoop by apache.
The class TestContainerLogsUtils, method createContainerLogInLocalDir:
private static void createContainerLogInLocalDir(Path appLogsDir,
    ContainerId containerId, FileSystem fs, String fileName, String content)
    throws IOException {
  Path containerLogsDir = new Path(appLogsDir, containerId.toString());
  // Start from a clean per-container log directory.
  if (fs.exists(containerLogsDir)) {
    fs.delete(containerLogsDir, true);
  }
  assertTrue(fs.mkdirs(containerLogsDir));
  // FileWriter writes to the local disk, so this assumes fs is a local
  // filesystem (hence the "InLocalDir" in the method name).
  Writer writer = new FileWriter(new File(containerLogsDir.toString(), fileName));
  writer.write(content);
  writer.close();
}
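The method above mixes the Hadoop FileSystem API (exists, delete, mkdirs) with a plain java.io.FileWriter, which only works because the target is local. A hedged sketch of a filesystem-agnostic variant (the helper name createContainerLog is hypothetical, not from the Hadoop source; it assumes imports of java.io.OutputStreamWriter and java.nio.charset.StandardCharsets):

private static void createContainerLog(FileSystem fs, Path containerLogsDir,
    String fileName, String content) throws IOException {
  // fs.create returns an FSDataOutputStream, so the write goes through the
  // FileSystem abstraction instead of directly to the local disk.
  try (Writer writer = new OutputStreamWriter(
      fs.create(new Path(containerLogsDir, fileName)), StandardCharsets.UTF_8)) {
    writer.write(content);
  }
}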
Use of java.io.Writer in project hadoop by apache.
The class TestNMWebServer, method writeContainerLogs:
private void writeContainerLogs(Context nmContext, ContainerId containerId,
    LocalDirsHandlerService dirsHandler) throws IOException, YarnException {
  // The container log directory should be created before writing.
  File containerLogDir =
      ContainerLogsUtils.getContainerLogDirs(containerId, dirsHandler).get(0);
  containerLogDir.mkdirs();
  // Write one file per standard container log stream.
  for (String fileType : new String[] { "stdout", "stderr", "syslog" }) {
    Writer writer = new FileWriter(new File(containerLogDir, fileType));
    writer.write(containerId.toString() + "\n Hello " + fileType + "!");
    writer.close();
  }
}
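For small test fixtures like these, the loop body can be collapsed with java.nio. A minimal sketch, not from the Hadoop source (it assumes imports of java.nio.file.Files and java.nio.charset.StandardCharsets):

for (String fileType : new String[] { "stdout", "stderr", "syslog" }) {
  // Files.write opens, writes, and closes the file in a single call.
  Files.write(new File(containerLogDir, fileType).toPath(),
      (containerId.toString() + "\n Hello " + fileType + "!")
          .getBytes(StandardCharsets.UTF_8));
}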
Use of java.io.Writer in project hadoop by apache.
The class TestClusterMapReduceTestCase, method _testMapReduce:
public void _testMapReduce(boolean restart) throws Exception {
  // Write four input lines into the test input directory.
  OutputStream os = getFileSystem().create(new Path(getInputDir(), "text.txt"));
  Writer wr = new OutputStreamWriter(os);
  wr.write("hello1\n");
  wr.write("hello2\n");
  wr.write("hello3\n");
  wr.write("hello4\n");
  wr.close();
  // Optionally restart the mini cluster to verify the input survives it.
  if (restart) {
    stopCluster();
    startCluster(false, null);
  }
  // Configure an identity map/reduce job over the text input.
  JobConf conf = createJobConf();
  conf.setJobName("mr");
  conf.setInputFormat(TextInputFormat.class);
  conf.setMapOutputKeyClass(LongWritable.class);
  conf.setMapOutputValueClass(Text.class);
  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyClass(LongWritable.class);
  conf.setOutputValueClass(Text.class);
  conf.setMapperClass(org.apache.hadoop.mapred.lib.IdentityMapper.class);
  conf.setReducerClass(org.apache.hadoop.mapred.lib.IdentityReducer.class);
  FileInputFormat.setInputPaths(conf, getInputDir());
  FileOutputFormat.setOutputPath(conf, getOutputDir());
  JobClient.runJob(conf);
  // Read the job output back and check that all four lines came through.
  Path[] outputFiles = FileUtil.stat2Paths(getFileSystem().listStatus(
      getOutputDir(), new Utils.OutputFileUtils.OutputFilesFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    int counter = 0;
    while (line != null) {
      counter++;
      assertTrue(line.contains("hello"));
      line = reader.readLine();
    }
    reader.close();
    assertEquals(4, counter);
  }
}
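A hedged, more compact sketch of the verification loop, not from the Hadoop source: try-with-resources closes the BufferedReader and, through it, the wrapped InputStreamReader and the underlying FSDataInputStream, even if an assertion fails mid-loop.

int counter = 0;
try (BufferedReader reader = new BufferedReader(
    new InputStreamReader(getFileSystem().open(outputFiles[0])))) {
  // Count the output lines and check each one is an echoed input line.
  for (String line = reader.readLine(); line != null; line = reader.readLine()) {
    assertTrue(line.contains("hello"));
    counter++;
  }
}
assertEquals(4, counter);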
Use of java.io.Writer in project hadoop by apache.
The class TestMRCJCJobClient, method runJob:
private String runJob() throws Exception {
  // Write three input lines into the test input directory.
  OutputStream os = getFileSystem().create(new Path(getInputDir(), "text.txt"));
  Writer wr = new OutputStreamWriter(os);
  wr.write("hello1\n");
  wr.write("hello2\n");
  wr.write("hello3\n");
  wr.close();
  // Configure a high-priority identity map/reduce job.
  JobConf conf = createJobConf();
  conf.setJobName("mr");
  conf.setJobPriority(JobPriority.HIGH);
  conf.setInputFormat(TextInputFormat.class);
  conf.setMapOutputKeyClass(LongWritable.class);
  conf.setMapOutputValueClass(Text.class);
  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyClass(LongWritable.class);
  conf.setOutputValueClass(Text.class);
  conf.setMapperClass(org.apache.hadoop.mapred.lib.IdentityMapper.class);
  conf.setReducerClass(org.apache.hadoop.mapred.lib.IdentityReducer.class);
  FileInputFormat.setInputPaths(conf, getInputDir());
  FileOutputFormat.setOutputPath(conf, getOutputDir());
  // Run the job synchronously and return its job ID as a string.
  return JobClient.runJob(conf).getID().toString();
}
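Note that new OutputStreamWriter(os) with no charset argument uses the platform default encoding. A minimal sketch of the input-writing step with the charset pinned, not from the Hadoop source (it assumes an import of java.nio.charset.StandardCharsets):

// Pinning UTF-8 makes the test input byte-identical across machines,
// and try-with-resources closes the writer and the underlying stream.
try (Writer wr = new OutputStreamWriter(
    getFileSystem().create(new Path(getInputDir(), "text.txt")),
    StandardCharsets.UTF_8)) {
  wr.write("hello1\nhello2\nhello3\n");
}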