use of java.io.BufferedWriter in project hadoop by apache.
the class TestCompressionEmulationUtils method testFileQueueDecompression.
/**
 * Test if {@link FileQueue} can identify compressed file and provide
 * readers to extract uncompressed data only if input-compression is enabled.
 */
@Test
public void testFileQueueDecompression() throws IOException {
  JobConf conf = new JobConf();
  FileSystem lfs = FileSystem.getLocal(conf);
  String inputLine = "Hi Hello!";
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf, true);
  org.apache.hadoop.mapred.FileOutputFormat.setCompressOutput(conf, true);
  org.apache.hadoop.mapred.FileOutputFormat.setOutputCompressorClass(conf, GzipCodec.class);
  // define the test's root temp directory
  Path rootTempDir = new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(lfs.getUri(), lfs.getWorkingDirectory());
  Path tempDir = new Path(rootTempDir, "TestFileQueueDecompression");
  lfs.delete(tempDir, true);
  // create a compressed file
  Path compressedFile = new Path(tempDir, "test");
  OutputStream out = CompressionEmulationUtil.getPossiblyCompressedOutputStream(compressedFile, conf);
  BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out));
  writer.write(inputLine);
  writer.close();
  compressedFile = compressedFile.suffix(".gz");
  // now read back the data from the compressed stream using FileQueue
  long fileSize = lfs.listStatus(compressedFile)[0].getLen();
  CombineFileSplit split = new CombineFileSplit(new Path[] { compressedFile }, new long[] { fileSize });
  FileQueue queue = new FileQueue(split, conf);
  byte[] bytes = new byte[inputLine.getBytes().length];
  queue.read(bytes);
  queue.close();
  String readLine = new String(bytes);
  assertEquals("Compression/Decompression error", inputLine, readLine);
}
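The test above relies on CompressionEmulationUtil and FileQueue to choose the codec and decompress on read. Outside Hadoop, the same BufferedWriter-over-a-compressed-stream pattern can be sketched with plain JDK gzip streams; the class name, temp file, and sample line below are illustrative only.

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

public class GzipRoundTripSketch {
  public static void main(String[] args) throws Exception {
    Path file = Files.createTempFile("test", ".gz");  // hypothetical temp file
    String inputLine = "Hi Hello!";
    // write text through a BufferedWriter layered on a gzip stream
    try (BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(
        new GZIPOutputStream(Files.newOutputStream(file)), StandardCharsets.UTF_8))) {
      writer.write(inputLine);
    }
    // read it back: the gzip stream transparently decompresses
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(
        new GZIPInputStream(Files.newInputStream(file)), StandardCharsets.UTF_8))) {
      System.out.println(inputLine.equals(reader.readLine()));  // expect true
    }
  }
}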
use of java.io.BufferedWriter in project hadoop by apache.
the class FileSystemTimelineWriterImpl method write.
private synchronized void write(String clusterId, String userId, String flowName, String flowVersion, long flowRun, String appId, TimelineEntity entity, TimelineWriteResponse response) throws IOException {
  PrintWriter out = null;
  try {
    String dir = mkdirs(outputRoot, ENTITIES_DIR, clusterId, userId, escape(flowName), escape(flowVersion), String.valueOf(flowRun), appId, entity.getType());
    String fileName = dir + entity.getId() + TIMELINE_SERVICE_STORAGE_EXTENSION;
    out = new PrintWriter(new BufferedWriter(new OutputStreamWriter(new FileOutputStream(fileName, true), "UTF-8")));
    out.println(TimelineUtils.dumpTimelineRecordtoJSON(entity));
    out.write("\n");
  } catch (IOException ioe) {
    TimelineWriteError error = new TimelineWriteError();
    error.setEntityId(entity.getId());
    error.setEntityType(entity.getType());
    /*
     * TODO: set an appropriate error code after PoC could possibly be:
     * error.setErrorCode(TimelineWriteError.IO_EXCEPTION);
     */
    response.addError(error);
  } finally {
    if (out != null) {
      out.close();
    }
  }
}
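The method above closes the PrintWriter in a finally block and converts any IOException into a TimelineWriteError. A minimal sketch of the same append-a-JSON-line-per-entity write using try-with-resources instead of the explicit finally; the class and method names are hypothetical, and the sketch simply propagates the IOException rather than recording an error.

import java.io.BufferedWriter;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.nio.charset.StandardCharsets;

public class AppendJsonLineSketch {
  // Append one JSON line to a per-entity file; fileName and json are supplied by the caller.
  static void appendLine(String fileName, String json) throws IOException {
    try (PrintWriter out = new PrintWriter(new BufferedWriter(new OutputStreamWriter(
        new FileOutputStream(fileName, true), StandardCharsets.UTF_8)))) {
      out.println(json);
    } // try-with-resources closes (and flushes) the writer chain
  }
}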
use of java.io.BufferedWriter in project hadoop by apache.
the class TestFileSystemTimelineReaderImpl method initializeDataDirectory.
public static void initializeDataDirectory(String rootDir) throws Exception {
  loadEntityData(rootDir);
  // Create app flow mapping file.
  CSVFormat format = CSVFormat.DEFAULT.withHeader("APP", "USER", "FLOW", "FLOWRUN");
  String appFlowMappingFile = rootDir + File.separator + "entities" + File.separator + "cluster1" + File.separator + FileSystemTimelineReaderImpl.APP_FLOW_MAPPING_FILE;
  try (PrintWriter out = new PrintWriter(new BufferedWriter(new FileWriter(appFlowMappingFile, true)));
      CSVPrinter printer = new CSVPrinter(out, format)) {
    printer.printRecord("app1", "user1", "flow1", 1);
    printer.printRecord("app2", "user1", "flow1,flow", 1);
    printer.close();
  }
  (new File(rootDir)).deleteOnExit();
}
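For completeness, a hedged sketch of reading that mapping back with Commons CSV, assuming the first row of the file is the header row written by the CSVPrinter above; the class, method name, and printed output are illustrative only.

import java.io.IOException;
import java.io.Reader;
import java.nio.file.Files;
import java.nio.file.Paths;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class ReadAppFlowMappingSketch {
  // Read back the app -> flow mapping; the file path is supplied by the caller.
  static void printMapping(String appFlowMappingFile) throws IOException {
    CSVFormat format = CSVFormat.DEFAULT
        .withHeader("APP", "USER", "FLOW", "FLOWRUN")
        .withSkipHeaderRecord();
    try (Reader in = Files.newBufferedReader(Paths.get(appFlowMappingFile));
        CSVParser parser = format.parse(in)) {
      for (CSVRecord record : parser) {
        System.out.println(record.get("APP") + " -> " + record.get("FLOW"));
      }
    }
  }
}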
use of java.io.BufferedWriter in project hadoop by apache.
the class TestProcfsBasedProcessTree method writeStatFiles.
/**
 * Write stat files under the specified pid directories with data setup in the
 * corresponding ProcessStatInfo objects
 *
 * @param procfsRootDir
 *          root directory of procfs file system
 * @param pids
 *          the PID directories under which to create the stat file
 * @param procs
 *          corresponding ProcessStatInfo objects whose data should be written
 *          to the stat files.
 * @param smaps
 *          corresponding ProcessTreeSmapMemInfo objects whose data should be
 *          written to the smaps files, or null if no smaps files are needed.
 * @throws IOException
 *           if stat files could not be written
 */
public static void writeStatFiles(File procfsRootDir, String[] pids, ProcessStatInfo[] procs, ProcessTreeSmapMemInfo[] smaps) throws IOException {
  for (int i = 0; i < pids.length; i++) {
    File statFile = new File(new File(procfsRootDir, pids[i]), ProcfsBasedProcessTree.PROCFS_STAT_FILE);
    BufferedWriter bw = null;
    try {
      FileWriter fw = new FileWriter(statFile);
      bw = new BufferedWriter(fw);
      bw.write(procs[i].getStatLine());
      LOG.info("wrote stat file for " + pids[i] + " with contents: " + procs[i].getStatLine());
    } finally {
      // not handling exception - will throw an error and fail the test.
      if (bw != null) {
        bw.close();
      }
    }
    if (smaps != null) {
      File smapFile = new File(new File(procfsRootDir, pids[i]), ProcfsBasedProcessTree.SMAPS);
      bw = null;
      try {
        FileWriter fw = new FileWriter(smapFile);
        bw = new BufferedWriter(fw);
        bw.write(smaps[i].toString());
        bw.flush();
        LOG.info("wrote smap file for " + pids[i] + " with contents: " + smaps[i].toString());
      } finally {
        // not handling exception - will throw an error and fail the test.
        if (bw != null) {
          bw.close();
        }
      }
    }
  }
}
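Since BufferedWriter is Closeable, the write-then-close-in-finally pattern above can also be expressed with try-with-resources. A minimal sketch of such a helper (hypothetical, not part of the Hadoop test):

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;

public class WriteStatFileSketch {
  // Write the given contents to a file, closing the writer even if the write fails.
  static void writeLine(File file, String contents) throws IOException {
    try (BufferedWriter bw = new BufferedWriter(new FileWriter(file))) {
      bw.write(contents);
    } // close() flushes the buffer, so no explicit flush() is needed
  }
}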
use of java.io.BufferedWriter in project hadoop by apache.
the class NativeAzureFileSystemBaseTest method writeString.
private void writeString(FSDataOutputStream outputStream, String value) throws IOException {
  BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(outputStream));
  writer.write(value);
  writer.close();
}
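Closing the BufferedWriter here also closes the wrapped FSDataOutputStream, and the OutputStreamWriter falls back to the platform default charset. A variant of the helper with an explicit charset and try-with-resources, assuming UTF-8 is acceptable for the test data (the wrapper class name is illustrative):

import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.fs.FSDataOutputStream;

class WriteStringSketch {
  // Write a string to the stream in UTF-8; closing the writer closes the stream as well.
  static void writeString(FSDataOutputStream outputStream, String value) throws IOException {
    try (BufferedWriter writer = new BufferedWriter(
        new OutputStreamWriter(outputStream, StandardCharsets.UTF_8))) {
      writer.write(value);
    }
  }
}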