
Example 56 with BufferedWriter

use of java.io.BufferedWriter in project hadoop by apache.

the class TestCompressionEmulationUtils method testFileQueueDecompression.

/**
   * Test that {@link FileQueue} can identify a compressed file and provide
   * readers that extract uncompressed data only if input compression is enabled.
   */
@Test
public void testFileQueueDecompression() throws IOException {
    JobConf conf = new JobConf();
    FileSystem lfs = FileSystem.getLocal(conf);
    String inputLine = "Hi Hello!";
    CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
    CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf, true);
    org.apache.hadoop.mapred.FileOutputFormat.setCompressOutput(conf, true);
    org.apache.hadoop.mapred.FileOutputFormat.setOutputCompressorClass(conf, GzipCodec.class);
    // define the test's root temp directory
    Path rootTempDir = new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(lfs.getUri(), lfs.getWorkingDirectory());
    Path tempDir = new Path(rootTempDir, "TestFileQueueDecompression");
    lfs.delete(tempDir, true);
    // create a compressed file
    Path compressedFile = new Path(tempDir, "test");
    OutputStream out = CompressionEmulationUtil.getPossiblyCompressedOutputStream(compressedFile, conf);
    BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out));
    writer.write(inputLine);
    writer.close();
    compressedFile = compressedFile.suffix(".gz");
    // now read back the data from the compressed stream using FileQueue
    long fileSize = lfs.listStatus(compressedFile)[0].getLen();
    CombineFileSplit split = new CombineFileSplit(new Path[] { compressedFile }, new long[] { fileSize });
    FileQueue queue = new FileQueue(split, conf);
    byte[] bytes = new byte[inputLine.getBytes().length];
    queue.read(bytes);
    queue.close();
    String readLine = new String(bytes);
    assertEquals("Compression/Decompression error", inputLine, readLine);
}
Also used : Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) DataOutputStream(java.io.DataOutputStream) OutputStream(java.io.OutputStream) OutputStreamWriter(java.io.OutputStreamWriter) CombineFileSplit(org.apache.hadoop.mapreduce.lib.input.CombineFileSplit) JobConf(org.apache.hadoop.mapred.JobConf) BufferedWriter(java.io.BufferedWriter) Test(org.junit.Test)
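
The same write-then-read-back round trip can be reproduced without the Gridmix helpers, which makes the pattern easier to see. A minimal sketch using plain java.util.zip instead of the Hadoop codec machinery (the file path below is illustrative, not taken from the test above):

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

public class GzipRoundTrip {
    public static void main(String[] args) throws IOException {
        String inputLine = "Hi Hello!";
        // illustrative path; the test above builds its path under test.build.data
        String path = "/tmp/test.gz";
        // write a compressed file through a BufferedWriter over a gzip stream
        try (BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(
                new GZIPOutputStream(new FileOutputStream(path)), StandardCharsets.UTF_8))) {
            writer.write(inputLine);
        }
        // read it back through the matching decompressing reader
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(
                new GZIPInputStream(new FileInputStream(path)), StandardCharsets.UTF_8))) {
            String readLine = reader.readLine();
            if (!inputLine.equals(readLine)) {
                throw new AssertionError("Compression/Decompression error");
            }
        }
    }
}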

Example 57 with BufferedWriter

use of java.io.BufferedWriter in project hadoop by apache.

the class FileSystemTimelineWriterImpl method write.

private synchronized void write(String clusterId, String userId, String flowName, String flowVersion, long flowRun, String appId, TimelineEntity entity, TimelineWriteResponse response) throws IOException {
    PrintWriter out = null;
    try {
        String dir = mkdirs(outputRoot, ENTITIES_DIR, clusterId, userId, escape(flowName), escape(flowVersion), String.valueOf(flowRun), appId, entity.getType());
        String fileName = dir + entity.getId() + TIMELINE_SERVICE_STORAGE_EXTENSION;
        out = new PrintWriter(new BufferedWriter(new OutputStreamWriter(new FileOutputStream(fileName, true), "UTF-8")));
        out.println(TimelineUtils.dumpTimelineRecordtoJSON(entity));
        out.write("\n");
    } catch (IOException ioe) {
        TimelineWriteError error = new TimelineWriteError();
        error.setEntityId(entity.getId());
        error.setEntityType(entity.getType());
        /*
         * TODO: set an appropriate error code after the PoC; it could
         * possibly be: error.setErrorCode(TimelineWriteError.IO_EXCEPTION);
         */
        response.addError(error);
    } finally {
        if (out != null) {
            out.close();
        }
    }
}
Also used : FileOutputStream(java.io.FileOutputStream) TimelineWriteError(org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse.TimelineWriteError) OutputStreamWriter(java.io.OutputStreamWriter) IOException(java.io.IOException) PrintWriter(java.io.PrintWriter) BufferedWriter(java.io.BufferedWriter)
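
The writer chain here (PrintWriter over BufferedWriter over OutputStreamWriter over FileOutputStream) is the standard way to get buffered, charset-aware line output to an append-mode file. A minimal sketch of the same chain using try-with-resources in place of the explicit finally block, and StandardCharsets.UTF_8 in place of the "UTF-8" string, which avoids the checked UnsupportedEncodingException (the class and method names are illustrative):

import java.io.BufferedWriter;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.nio.charset.StandardCharsets;

public class AppendJsonLine {
    static void appendLine(String fileName, String json) throws IOException {
        // the second FileOutputStream argument enables append mode,
        // matching the timeline writer above
        try (PrintWriter out = new PrintWriter(new BufferedWriter(new OutputStreamWriter(
                new FileOutputStream(fileName, true), StandardCharsets.UTF_8)))) {
            out.println(json);
        }
    }
}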

Example 58 with BufferedWriter

use of java.io.BufferedWriter in project hadoop by apache.

the class TestFileSystemTimelineReaderImpl method initializeDataDirectory.

public static void initializeDataDirectory(String rootDir) throws Exception {
    loadEntityData(rootDir);
    // Create app flow mapping file.
    CSVFormat format = CSVFormat.DEFAULT.withHeader("APP", "USER", "FLOW", "FLOWRUN");
    String appFlowMappingFile = rootDir + File.separator + "entities" + File.separator + "cluster1" + File.separator + FileSystemTimelineReaderImpl.APP_FLOW_MAPPING_FILE;
    try (PrintWriter out = new PrintWriter(new BufferedWriter(new FileWriter(appFlowMappingFile, true)));
        CSVPrinter printer = new CSVPrinter(out, format)) {
        printer.printRecord("app1", "user1", "flow1", 1);
        printer.printRecord("app2", "user1", "flow1,flow", 1);
        printer.close();
    }
    (new File(rootDir)).deleteOnExit();
}
Also used : CSVPrinter(org.apache.commons.csv.CSVPrinter) FileWriter(java.io.FileWriter) CSVFormat(org.apache.commons.csv.CSVFormat) File(java.io.File) PrintWriter(java.io.PrintWriter) BufferedWriter(java.io.BufferedWriter)
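
Note that the try-with-resources header already closes both the PrintWriter and the CSVPrinter, so the explicit printer.close() inside the block is redundant. A minimal sketch of the same Commons CSV pattern without it (the file path and class name are illustrative):

import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVPrinter;

public class AppFlowMappingWriter {
    static void writeMapping(String file) throws IOException {
        CSVFormat format = CSVFormat.DEFAULT.withHeader("APP", "USER", "FLOW", "FLOWRUN");
        try (PrintWriter out = new PrintWriter(new BufferedWriter(new FileWriter(file, true)));
             CSVPrinter printer = new CSVPrinter(out, format)) {
            // values containing the delimiter, such as "flow1,flow",
            // are quoted automatically by the printer
            printer.printRecord("app1", "user1", "flow1", 1);
            printer.printRecord("app2", "user1", "flow1,flow", 1);
        }
    }
}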

Example 59 with BufferedWriter

use of java.io.BufferedWriter in project hadoop by apache.

the class TestProcfsBasedProcessTree method writeStatFiles.

/**
   * Write stat files under the specified pid directories, with the data set up
   * in the corresponding ProcessStatInfo objects.
   *
   * @param procfsRootDir
   *          root directory of the procfs file system
   * @param pids
   *          the PID directories under which to create the stat files
   * @param procs
   *          corresponding ProcessStatInfo objects whose data should be written
   *          to the stat files
   * @param smaps
   *          corresponding ProcessTreeSmapMemInfo objects whose data should be
   *          written to the smaps files, or null to skip writing them
   * @throws IOException
   *           if the stat files could not be written
   */
public static void writeStatFiles(File procfsRootDir, String[] pids, ProcessStatInfo[] procs, ProcessTreeSmapMemInfo[] smaps) throws IOException {
    for (int i = 0; i < pids.length; i++) {
        File statFile = new File(new File(procfsRootDir, pids[i]), ProcfsBasedProcessTree.PROCFS_STAT_FILE);
        BufferedWriter bw = null;
        try {
            FileWriter fw = new FileWriter(statFile);
            bw = new BufferedWriter(fw);
            bw.write(procs[i].getStatLine());
            LOG.info("wrote stat file for " + pids[i] + " with contents: " + procs[i].getStatLine());
        } finally {
            // not handling exception - will throw an error and fail the test.
            if (bw != null) {
                bw.close();
            }
        }
        if (smaps != null) {
            File smapFile = new File(new File(procfsRootDir, pids[i]), ProcfsBasedProcessTree.SMAPS);
            bw = null;
            try {
                FileWriter fw = new FileWriter(smapFile);
                bw = new BufferedWriter(fw);
                bw.write(smaps[i].toString());
                bw.flush();
                LOG.info("wrote smap file for " + pids[i] + " with contents: " + smaps[i].toString());
            } finally {
                // not handling exception - will throw an error and fail the test.
                if (bw != null) {
                    bw.close();
                }
            }
        }
    }
}
Also used : FileWriter(java.io.FileWriter) File(java.io.File) BufferedWriter(java.io.BufferedWriter)
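
The null-check-in-finally pattern above predates try-with-resources; since both FileWriter and BufferedWriter are AutoCloseable, each of the two writes can be expressed more compactly. A minimal sketch of the shared write step (the class and method names are illustrative, not from the test above):

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;

public class ProcfsFixtureWriter {
    // write one file of content under a fake procfs pid directory
    static void writeFixture(File procfsRootDir, String pid, String fileName, String contents)
            throws IOException {
        File target = new File(new File(procfsRootDir, pid), fileName);
        try (BufferedWriter bw = new BufferedWriter(new FileWriter(target))) {
            // close() flushes the buffer, so no explicit flush() is needed
            bw.write(contents);
        }
    }
}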

Example 60 with BufferedWriter

use of java.io.BufferedWriter in project hadoop by apache.

the class NativeAzureFileSystemBaseTest method writeString.

private void writeString(FSDataOutputStream outputStream, String value) throws IOException {
    BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(outputStream));
    writer.write(value);
    writer.close();
}
Also used : OutputStreamWriter(java.io.OutputStreamWriter) BufferedWriter(java.io.BufferedWriter)
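
One caveat with this helper: closing the BufferedWriter also closes the wrapped FSDataOutputStream, so the caller cannot write to the stream afterwards, and the OutputStreamWriter uses the platform default charset. A minimal charset-explicit sketch of the same idea over a plain OutputStream (the class name is illustrative):

import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;

public class StreamStrings {
    // writes the value and closes the underlying stream
    static void writeString(OutputStream outputStream, String value) throws IOException {
        BufferedWriter writer = new BufferedWriter(
                new OutputStreamWriter(outputStream, StandardCharsets.UTF_8));
        writer.write(value);
        writer.close();
    }
}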

Aggregations

BufferedWriter (java.io.BufferedWriter)4214 FileWriter (java.io.FileWriter)2181 File (java.io.File)1879 IOException (java.io.IOException)1847 OutputStreamWriter (java.io.OutputStreamWriter)1344 BufferedReader (java.io.BufferedReader)747 FileOutputStream (java.io.FileOutputStream)656 ArrayList (java.util.ArrayList)386 FileReader (java.io.FileReader)376 InputStreamReader (java.io.InputStreamReader)349 PrintWriter (java.io.PrintWriter)324 Writer (java.io.Writer)324 Test (org.junit.Test)286 FileNotFoundException (java.io.FileNotFoundException)217 OutputStream (java.io.OutputStream)213 HashMap (java.util.HashMap)200 Path (java.nio.file.Path)177 InputStream (java.io.InputStream)171 FileInputStream (java.io.FileInputStream)158 StringWriter (java.io.StringWriter)143