Use of java.io.OutputStreamWriter in project hadoop by apache.
In class ApplicationCLI, method listApplicationAttempts.
/**
 * Lists the application attempts matching the given application ID.
 *
 * @param applicationId the application whose attempts are listed
 * @throws YarnException
 * @throws IOException
 */
private void listApplicationAttempts(String applicationId)
    throws YarnException, IOException {
  PrintWriter writer = new PrintWriter(
      new OutputStreamWriter(sysout, Charset.forName("UTF-8")));
  List<ApplicationAttemptReport> appAttemptsReport = client
      .getApplicationAttempts(ApplicationId.fromString(applicationId));
  writer.println("Total number of application attempts :" + appAttemptsReport.size());
  writer.printf(APPLICATION_ATTEMPTS_PATTERN, "ApplicationAttempt-Id", "State",
      "AM-Container-Id", "Tracking-URL");
  for (ApplicationAttemptReport appAttemptReport : appAttemptsReport) {
    writer.printf(APPLICATION_ATTEMPTS_PATTERN,
        appAttemptReport.getApplicationAttemptId(),
        appAttemptReport.getYarnApplicationAttemptState(),
        appAttemptReport.getAMContainerId() == null ? "N/A"
            : appAttemptReport.getAMContainerId().toString(),
        appAttemptReport.getTrackingUrl());
  }
  writer.flush();
}
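As a point of reference, the pattern above, wrapping an output stream in an OutputStreamWriter with an explicit charset before handing it to a PrintWriter, can be reduced to a minimal, self-contained sketch. The class name and format string below are illustrative, not taken from Hadoop (APPLICATION_ATTEMPTS_PATTERN in the CLI is a similar printf-style format):

import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.nio.charset.StandardCharsets;

public class Utf8PrintDemo {
  public static void main(String[] args) {
    // Wrap System.out so all characters are encoded as UTF-8,
    // regardless of the JVM's default charset.
    PrintWriter writer = new PrintWriter(
        new OutputStreamWriter(System.out, StandardCharsets.UTF_8));
    writer.printf("%-30s%-20s%n", "ApplicationAttempt-Id", "State");
    // PrintWriter buffers; flush before the program exits.
    writer.flush();
  }
}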
Use of java.io.OutputStreamWriter in project hadoop by apache.
In class ApplicationCLI, method printApplicationAttemptReport.
/**
 * Prints the application attempt report for an application attempt id.
 *
 * @param applicationAttemptId the attempt to report on
 * @return exitCode
 * @throws YarnException
 * @throws IOException
 */
private int printApplicationAttemptReport(String applicationAttemptId)
    throws YarnException, IOException {
  ApplicationAttemptReport appAttemptReport = null;
  try {
    appAttemptReport = client.getApplicationAttemptReport(
        ApplicationAttemptId.fromString(applicationAttemptId));
  } catch (ApplicationNotFoundException e) {
    sysout.println("Application for AppAttempt with id '" + applicationAttemptId
        + "' doesn't exist in RM or Timeline Server.");
    return -1;
  } catch (ApplicationAttemptNotFoundException e) {
    sysout.println("Application Attempt with id '" + applicationAttemptId
        + "' doesn't exist in RM or Timeline Server.");
    return -1;
  }
  // Use PrintWriter.println, which uses the correct platform line ending.
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintWriter appAttemptReportStr = new PrintWriter(
      new OutputStreamWriter(baos, Charset.forName("UTF-8")));
  if (appAttemptReport != null) {
    appAttemptReportStr.println("Application Attempt Report : ");
    appAttemptReportStr.print("\tApplicationAttempt-Id : ");
    appAttemptReportStr.println(appAttemptReport.getApplicationAttemptId());
    appAttemptReportStr.print("\tState : ");
    appAttemptReportStr.println(appAttemptReport.getYarnApplicationAttemptState());
    appAttemptReportStr.print("\tAMContainer : ");
    appAttemptReportStr.println(appAttemptReport.getAMContainerId() == null ? "N/A"
        : appAttemptReport.getAMContainerId().toString());
    appAttemptReportStr.print("\tTracking-URL : ");
    appAttemptReportStr.println(appAttemptReport.getTrackingUrl());
    appAttemptReportStr.print("\tRPC Port : ");
    appAttemptReportStr.println(appAttemptReport.getRpcPort());
    appAttemptReportStr.print("\tAM Host : ");
    appAttemptReportStr.println(appAttemptReport.getHost());
    appAttemptReportStr.print("\tDiagnostics : ");
    appAttemptReportStr.print(appAttemptReport.getDiagnostics());
  } else {
    appAttemptReportStr.print("Application Attempt with id '" + applicationAttemptId
        + "' doesn't exist in Timeline Server.");
    appAttemptReportStr.close();
    sysout.println(baos.toString("UTF-8"));
    return -1;
  }
  appAttemptReportStr.close();
  sysout.println(baos.toString("UTF-8"));
  return 0;
}
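Both exit paths share the same idiom: the whole report is accumulated in a ByteArrayOutputStream and emitted through sysout in one call, with PrintWriter supplying the platform line endings. A minimal sketch of that buffering idiom, with illustrative names and hard-coded report values:

import java.io.ByteArrayOutputStream;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;

public class BufferedReportDemo {
  public static void main(String[] args) throws UnsupportedEncodingException {
    // Accumulate the report in memory so it can be written to the
    // real output in a single call.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintWriter report = new PrintWriter(
        new OutputStreamWriter(baos, StandardCharsets.UTF_8));
    report.println("Application Attempt Report : ");
    report.println("\tState : FINISHED");
    // close() flushes the PrintWriter into the byte buffer.
    report.close();
    System.out.println(baos.toString("UTF-8"));
  }
}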
Use of java.io.OutputStreamWriter in project hadoop by apache.
In class TestAggregatedLogFormat, method writeSrcFileAndALog.
private void writeSrcFileAndALog(Path srcFilePath, String fileName, final long length,
    Path remoteAppLogFile, Path srcFileRoot, ContainerId testContainerId)
    throws Exception {
  File dir = new File(srcFilePath.toString());
  if (!dir.exists()) {
    if (!dir.mkdirs()) {
      throw new IOException("Unable to create directory : " + dir);
    }
  }
  File outputFile = new File(new File(srcFilePath.toString()), fileName);
  FileOutputStream os = new FileOutputStream(outputFile);
  final OutputStreamWriter osw = new OutputStreamWriter(os, "UTF8");
  final int ch = filler;
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  LogWriter logWriter = new LogWriter(new Configuration(), remoteAppLogFile, ugi);
  LogKey logKey = new LogKey(testContainerId);
  LogValue logValue = spy(new LogValue(
      Collections.singletonList(srcFileRoot.toString()), testContainerId,
      ugi.getShortUserName()));
  final CountDownLatch latch = new CountDownLatch(1);
  Thread t = new Thread() {
    public void run() {
      try {
        for (int i = 0; i < length / 3; i++) {
          osw.write(ch);
        }
        latch.countDown();
        for (int i = 0; i < (2 * length) / 3; i++) {
          osw.write(ch);
        }
        osw.close();
      } catch (IOException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
      }
    }
  };
  t.start();
  // Wait until the osw is partially written;
  // aggregation starts once the osw has completed 1/3 of its work.
  latch.await();
  // Aggregate the logs.
  logWriter.append(logKey, logValue);
  logWriter.close();
}
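The coordination is the interesting part here: the writer thread counts down the latch after one third of the data, so log aggregation begins while the source file is still growing. A reduced, runnable sketch of that hand-off, with the Hadoop LogWriter replaced by a simple message (the file name and length are illustrative):

import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.CountDownLatch;

public class PartialWriteDemo {
  public static void main(String[] args) throws Exception {
    final long length = 300;
    final OutputStreamWriter osw = new OutputStreamWriter(
        new FileOutputStream("partial.txt"), StandardCharsets.UTF_8);
    final CountDownLatch latch = new CountDownLatch(1);
    Thread writer = new Thread(() -> {
      try {
        for (int i = 0; i < length / 3; i++) {
          osw.write('x');
        }
        latch.countDown(); // signal: one third written
        for (int i = 0; i < (2 * length) / 3; i++) {
          osw.write('x');
        }
        osw.close();
      } catch (IOException e) {
        e.printStackTrace();
      }
    });
    writer.start();
    latch.await(); // resume only after the partial write
    System.out.println("file is one-third written; safe to start reading");
    writer.join();
  }
}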
Use of java.io.OutputStreamWriter in project hadoop by apache.
In class TestAggregatedLogFormat, method getOutputStreamWriter.
private OutputStreamWriter getOutputStreamWriter(Path srcFilePath, String fileName)
    throws IOException, FileNotFoundException, UnsupportedEncodingException {
  File dir = new File(srcFilePath.toString());
  if (!dir.exists()) {
    if (!dir.mkdirs()) {
      throw new IOException("Unable to create directory : " + dir);
    }
  }
  File outputFile = new File(new File(srcFilePath.toString()), fileName);
  FileOutputStream os = new FileOutputStream(outputFile);
  OutputStreamWriter osw = new OutputStreamWriter(os, "UTF8");
  return osw;
}
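"UTF8" is accepted by the JDK as an alias for UTF-8, but the String-based OutputStreamWriter constructor still forces callers to declare UnsupportedEncodingException. A variant of the same helper using java.nio.charset.StandardCharsets avoids the checked exception; this is a sketch with illustrative names, not the Hadoop code:

import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;

public class WriterFactoryDemo {
  // Charset-based variant: no UnsupportedEncodingException to declare.
  static OutputStreamWriter openUtf8Writer(File dir, String fileName)
      throws FileNotFoundException {
    FileOutputStream os = new FileOutputStream(new File(dir, fileName));
    return new OutputStreamWriter(os, StandardCharsets.UTF_8);
  }

  public static void main(String[] args) throws Exception {
    try (OutputStreamWriter osw = openUtf8Writer(new File("."), "demo.txt")) {
      osw.write("hello\n");
    }
  }
}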
Use of java.io.OutputStreamWriter in project hadoop by apache.
In class TestClusterMapReduceTestCase, method _testMapReduce.
public void _testMapReduce(boolean restart) throws Exception {
  OutputStream os = getFileSystem().create(new Path(getInputDir(), "text.txt"));
  Writer wr = new OutputStreamWriter(os);
  wr.write("hello1\n");
  wr.write("hello2\n");
  wr.write("hello3\n");
  wr.write("hello4\n");
  wr.close();
  if (restart) {
    stopCluster();
    startCluster(false, null);
  }
  JobConf conf = createJobConf();
  conf.setJobName("mr");
  conf.setInputFormat(TextInputFormat.class);
  conf.setMapOutputKeyClass(LongWritable.class);
  conf.setMapOutputValueClass(Text.class);
  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyClass(LongWritable.class);
  conf.setOutputValueClass(Text.class);
  conf.setMapperClass(org.apache.hadoop.mapred.lib.IdentityMapper.class);
  conf.setReducerClass(org.apache.hadoop.mapred.lib.IdentityReducer.class);
  FileInputFormat.setInputPaths(conf, getInputDir());
  FileOutputFormat.setOutputPath(conf, getOutputDir());
  JobClient.runJob(conf);
  Path[] outputFiles = FileUtil.stat2Paths(getFileSystem().listStatus(
      getOutputDir(), new Utils.OutputFileUtils.OutputFilesFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    int counter = 0;
    while (line != null) {
      counter++;
      assertTrue(line.contains("hello"));
      line = reader.readLine();
    }
    reader.close();
    assertEquals(4, counter);
  }
}
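Note that the Writer above is built without an explicit charset, so the test input is encoded with the platform default. A self-contained sketch of just the file-writing step, with the HDFS getFileSystem().create(...) stream replaced by a local FileOutputStream for illustration and an explicit charset added for stable output:

import java.io.FileOutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;

public class InputFileDemo {
  public static void main(String[] args) throws Exception {
    // The Hadoop test writes to an HDFS stream; a local FileOutputStream
    // stands in for it here. Supplying a charset keeps the bytes
    // independent of the platform default encoding.
    try (Writer wr = new OutputStreamWriter(
        new FileOutputStream("text.txt"), StandardCharsets.UTF_8)) {
      for (int i = 1; i <= 4; i++) {
        wr.write("hello" + i + "\n");
      }
    }
  }
}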