Example 36 with Writer

use of java.io.Writer in project hadoop by apache.

the class CgroupsLCEResourcesHandler method updateCgroup.

private void updateCgroup(String controller, String groupName, String param, String value) throws IOException {
    String path = pathForCgroup(controller, groupName);
    param = controller + "." + param;
    if (LOG.isDebugEnabled()) {
        LOG.debug("updateCgroup: " + path + ": " + param + "=" + value);
    }
    PrintWriter pw = null;
    try {
        File file = new File(path + "/" + param);
        Writer w = new OutputStreamWriter(new FileOutputStream(file), "UTF-8");
        pw = new PrintWriter(w);
        pw.write(value);
    } catch (IOException e) {
        throw new IOException("Unable to set " + param + "=" + value + " for cgroup at: " + path, e);
    } finally {
        if (pw != null) {
            // PrintWriter never throws IOException; failures are only visible
            // through checkError(). Check once before close() to catch write
            // errors, and once more after close() to catch errors during close.
            boolean hasError = pw.checkError();
            pw.close();
            if (hasError) {
                throw new IOException("Unable to set " + param + "=" + value + " for cgroup at: " + path);
            }
            if (pw.checkError()) {
                throw new IOException("Error while closing cgroup file " + path);
            }
        }
    }
}
Also used: FileOutputStream(java.io.FileOutputStream) OutputStreamWriter(java.io.OutputStreamWriter) IOException(java.io.IOException) File(java.io.File) PrintWriter(java.io.PrintWriter) Writer(java.io.Writer)
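
Since Java 7 the same write can be expressed with try-with-resources and a charset constant. A minimal sketch of that variant, not the Hadoop code itself; writeCgroupParam is a hypothetical helper name:

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.nio.charset.StandardCharsets;

// Hypothetical helper: same write, with the PrintWriter closed on every path.
private void writeCgroupParam(String path, String param, String value) throws IOException {
    File file = new File(path, param);
    try (PrintWriter pw = new PrintWriter(
            new OutputStreamWriter(new FileOutputStream(file), StandardCharsets.UTF_8))) {
        pw.write(value);
        // PrintWriter never throws IOException; checkError() flushes the
        // stream and reports any failure, so write errors surface here.
        if (pw.checkError()) {
            throw new IOException("Unable to set " + param + "=" + value + " for cgroup at: " + path);
        }
    }
}

One caveat: an error raised during the implicit close() is still swallowed by PrintWriter, which is why the original checks checkError() a second time after close().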

Example 37 with Writer

use of java.io.Writer in project hadoop by apache.

the class TestMRKeyValueTextInputFormat method testFormat.

@Test
public void testFormat() throws Exception {
    Job job = Job.getInstance(new Configuration(defaultConf));
    Path file = new Path(workDir, "test.txt");
    int seed = new Random().nextInt();
    LOG.info("seed = " + seed);
    Random random = new Random(seed);
    localFs.delete(workDir, true);
    FileInputFormat.setInputPaths(job, workDir);
    final int MAX_LENGTH = 10000;
    // for a variety of lengths
    for (int length = 0; length < MAX_LENGTH; length += random.nextInt(MAX_LENGTH / 10) + 1) {
        LOG.debug("creating; entries = " + length);
        // create a file with length entries
        Writer writer = new OutputStreamWriter(localFs.create(file));
        try {
            for (int i = 0; i < length; i++) {
                writer.write(Integer.toString(i * 2));
                writer.write("\t");
                writer.write(Integer.toString(i));
                writer.write("\n");
            }
        } finally {
            writer.close();
        }
        // try splitting the file in a variety of sizes
        KeyValueTextInputFormat format = new KeyValueTextInputFormat();
        for (int i = 0; i < 3; i++) {
            // numSplits is only logged; the new-API getSplits(job) derives the
            // split count from size limits rather than from a requested count.
            int numSplits = random.nextInt(MAX_LENGTH / 20) + 1;
            LOG.debug("splitting: requesting = " + numSplits);
            List<InputSplit> splits = format.getSplits(job);
            LOG.debug("splitting: got =        " + splits.size());
            // check each split
            BitSet bits = new BitSet(length);
            for (int j = 0; j < splits.size(); j++) {
                LOG.debug("split[" + j + "]= " + splits.get(j));
                TaskAttemptContext context = MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
                RecordReader<Text, Text> reader = format.createRecordReader(splits.get(j), context);
                Class<?> clazz = reader.getClass();
                assertEquals("reader class is KeyValueLineRecordReader.", KeyValueLineRecordReader.class, clazz);
                MapContext<Text, Text, Text, Text> mcontext = new MapContextImpl<Text, Text, Text, Text>(job.getConfiguration(), context.getTaskAttemptID(), reader, null, null, MapReduceTestUtil.createDummyReporter(), splits.get(j));
                reader.initialize(splits.get(j), mcontext);
                Text key = null;
                Text value = null;
                try {
                    int count = 0;
                    while (reader.nextKeyValue()) {
                        key = reader.getCurrentKey();
                        clazz = key.getClass();
                        assertEquals("Key class is Text.", Text.class, clazz);
                        value = reader.getCurrentValue();
                        clazz = value.getClass();
                        assertEquals("Value class is Text.", Text.class, clazz);
                        final int k = Integer.parseInt(key.toString());
                        final int v = Integer.parseInt(value.toString());
                        assertEquals("Bad key", 0, k % 2);
                        assertEquals("Mismatched key/value", k / 2, v);
                        LOG.debug("read " + v);
                        assertFalse("Key in multiple partitions.", bits.get(v));
                        bits.set(v);
                        count++;
                    }
                    LOG.debug("splits[" + j + "]=" + splits.get(j) + " count=" + count);
                } finally {
                    reader.close();
                }
            }
            assertEquals("Some keys in no partition.", length, bits.cardinality());
        }
    }
}
Also used: Configuration(org.apache.hadoop.conf.Configuration) MapContextImpl(org.apache.hadoop.mapreduce.task.MapContextImpl) BitSet(java.util.BitSet) TaskAttemptContext(org.apache.hadoop.mapreduce.TaskAttemptContext) Random(java.util.Random) OutputStreamWriter(java.io.OutputStreamWriter) Job(org.apache.hadoop.mapreduce.Job) InputSplit(org.apache.hadoop.mapreduce.InputSplit) Writer(java.io.Writer) Test(org.junit.Test)
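
KeyValueLineRecordReader splits each line at the first occurrence of a separator byte, a tab by default, which is why the test writes key, tab, value per line. The separator is configurable; a minimal sketch, assuming the Hadoop 2.x property name:

// Split input lines at the first ',' instead of the default tab.
Configuration conf = new Configuration();
conf.set("mapreduce.input.keyvaluelinerecordreader.key.value.separator", ",");
Job job = Job.getInstance(conf);
job.setInputFormatClass(KeyValueTextInputFormat.class);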

Example 38 with Writer

use of java.io.Writer in project hadoop by apache.

the class TestMRKeyValueTextInputFormat method testSplitableCodecs.

@Test
public void testSplitableCodecs() throws Exception {
    final Job job = Job.getInstance(defaultConf);
    final Configuration conf = job.getConfiguration();
    // Create the codec
    CompressionCodec codec = null;
    try {
        codec = (CompressionCodec) ReflectionUtils.newInstance(conf.getClassByName("org.apache.hadoop.io.compress.BZip2Codec"), conf);
    } catch (ClassNotFoundException cnfe) {
        throw new IOException("Illegal codec!");
    }
    Path file = new Path(workDir, "test" + codec.getDefaultExtension());
    int seed = new Random().nextInt();
    LOG.info("seed = " + seed);
    Random random = new Random(seed);
    localFs.delete(workDir, true);
    FileInputFormat.setInputPaths(job, workDir);
    final int MAX_LENGTH = 500000;
    FileInputFormat.setMaxInputSplitSize(job, MAX_LENGTH / 20);
    // for a variety of lengths
    for (int length = 0; length < MAX_LENGTH; length += random.nextInt(MAX_LENGTH / 4) + 1) {
        LOG.info("creating; entries = " + length);
        // create a file with length entries
        Writer writer = new OutputStreamWriter(codec.createOutputStream(localFs.create(file)));
        try {
            for (int i = 0; i < length; i++) {
                writer.write(Integer.toString(i * 2));
                writer.write("\t");
                writer.write(Integer.toString(i));
                writer.write("\n");
            }
        } finally {
            writer.close();
        }
        // try splitting the file in a variety of sizes
        KeyValueTextInputFormat format = new KeyValueTextInputFormat();
        assertTrue("KVTIF claims not splittable", format.isSplitable(job, file));
        for (int i = 0; i < 3; i++) {
            int numSplits = random.nextInt(MAX_LENGTH / 2000) + 1;
            LOG.info("splitting: requesting = " + numSplits);
            List<InputSplit> splits = format.getSplits(job);
            LOG.info("splitting: got =        " + splits.size());
            // check each split
            BitSet bits = new BitSet(length);
            for (int j = 0; j < splits.size(); j++) {
                LOG.debug("split[" + j + "]= " + splits.get(j));
                TaskAttemptContext context = MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
                RecordReader<Text, Text> reader = format.createRecordReader(splits.get(j), context);
                Class<?> clazz = reader.getClass();
                MapContext<Text, Text, Text, Text> mcontext = new MapContextImpl<Text, Text, Text, Text>(job.getConfiguration(), context.getTaskAttemptID(), reader, null, null, MapReduceTestUtil.createDummyReporter(), splits.get(j));
                reader.initialize(splits.get(j), mcontext);
                Text key = null;
                Text value = null;
                try {
                    int count = 0;
                    while (reader.nextKeyValue()) {
                        key = reader.getCurrentKey();
                        value = reader.getCurrentValue();
                        final int k = Integer.parseInt(key.toString());
                        final int v = Integer.parseInt(value.toString());
                        assertEquals("Bad key", 0, k % 2);
                        assertEquals("Mismatched key/value", k / 2, v);
                        LOG.debug("read " + k + "," + v);
                        assertFalse(k + "," + v + " in multiple partitions.", bits.get(v));
                        bits.set(v);
                        count++;
                    }
                    if (count > 0) {
                        LOG.info("splits[" + j + "]=" + splits.get(j) + " count=" + count);
                    } else {
                        LOG.debug("splits[" + j + "]=" + splits.get(j) + " count=" + count);
                    }
                } finally {
                    reader.close();
                }
            }
            assertEquals("Some keys in no partition.", length, bits.cardinality());
        }
    }
}
Also used: Configuration(org.apache.hadoop.conf.Configuration) MapContextImpl(org.apache.hadoop.mapreduce.task.MapContextImpl) BitSet(java.util.BitSet) TaskAttemptContext(org.apache.hadoop.mapreduce.TaskAttemptContext) IOException(java.io.IOException) Random(java.util.Random) OutputStreamWriter(java.io.OutputStreamWriter) Job(org.apache.hadoop.mapreduce.Job) InputSplit(org.apache.hadoop.mapreduce.InputSplit) Writer(java.io.Writer) Test(org.junit.Test)
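
Loading the codec by class name through reflection works, but Hadoop also provides CompressionCodecFactory, which resolves a codec from a file's extension. A minimal sketch of that alternative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;

Configuration conf = new Configuration();
CompressionCodecFactory factory = new CompressionCodecFactory(conf);
// Matches org.apache.hadoop.io.compress.BZip2Codec by the ".bz2" suffix;
// getCodec returns null when no registered codec claims the extension.
CompressionCodec codec = factory.getCodec(new Path("test.bz2"));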

Example 39 with Writer

use of java.io.Writer in project hadoop by apache.

the class TestFixedLengthInputFormat method createFile.

// Create a file containing fixed length records with random data
private ArrayList<String> createFile(Path targetFile, CompressionCodec codec, int recordLen, int numRecords) throws IOException {
    ArrayList<String> recordList = new ArrayList<String>(numRecords);
    OutputStream ostream = localFs.create(targetFile);
    if (codec != null) {
        ostream = codec.createOutputStream(ostream);
    }
    Writer writer = new OutputStreamWriter(ostream);
    try {
        StringBuffer sb = new StringBuffer();
        for (int i = 0; i < numRecords; i++) {
            for (int j = 0; j < recordLen; j++) {
                sb.append(chars[charRand.nextInt(chars.length)]);
            }
            String recordData = sb.toString();
            recordList.add(recordData);
            writer.write(recordData);
            sb.setLength(0);
        }
    } finally {
        writer.close();
    }
    return recordList;
}
Also used: OutputStream(java.io.OutputStream) ArrayList(java.util.ArrayList) OutputStreamWriter(java.io.OutputStreamWriter) Writer(java.io.Writer)
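
Because fixed-length records carry no delimiters, a reader can only reconstruct them if it is told the exact record length up front. A minimal sketch of the matching job configuration; the length 10 is an arbitrary example:

Configuration conf = new Configuration();
// Must equal the recordLen the file was written with.
FixedLengthInputFormat.setRecordLength(conf, 10);
Job job = Job.getInstance(conf);
job.setInputFormatClass(FixedLengthInputFormat.class);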

Example 40 with Writer

use of java.io.Writer in project hadoop by apache.

the class TestLineRecordReaderJobs method createInputFile.

/**
   * Writes the input test file.
   *
   * @param conf the configuration used to resolve the local file system
   * @throws IOException if the file cannot be created or written
   */
public void createInputFile(Configuration conf) throws IOException {
    FileSystem localFs = FileSystem.getLocal(conf);
    Path file = new Path(inputDir, "test.txt");
    Writer writer = new OutputStreamWriter(localFs.create(file));
    writer.write("abc\ndef\t\nghi\njkl");
    writer.close();
}
Also used: Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) OutputStreamWriter(java.io.OutputStreamWriter) Writer(java.io.Writer)
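
Note that if write() throws here, the Writer is never closed. A try-with-resources variant closes it on every path; a sketch, not the test's actual code:

public void createInputFile(Configuration conf) throws IOException {
    FileSystem localFs = FileSystem.getLocal(conf);
    Path file = new Path(inputDir, "test.txt");
    try (Writer writer = new OutputStreamWriter(localFs.create(file))) {
        writer.write("abc\ndef\t\nghi\njkl");
    }
}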

Aggregations

Writer (java.io.Writer): 1259
OutputStreamWriter (java.io.OutputStreamWriter): 512
IOException (java.io.IOException): 414
StringWriter (java.io.StringWriter): 300
File (java.io.File): 269
FileOutputStream (java.io.FileOutputStream): 196
BufferedWriter (java.io.BufferedWriter): 178
FileWriter (java.io.FileWriter): 174
PrintWriter (java.io.PrintWriter): 159
OutputStream (java.io.OutputStream): 120
Test (org.junit.Test): 109
InputStreamReader (java.io.InputStreamReader): 71
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 64
BufferedReader (java.io.BufferedReader): 62
Reader (java.io.Reader): 62
HashMap (java.util.HashMap): 59
Map (java.util.Map): 59
ArrayList (java.util.ArrayList): 58
InputStream (java.io.InputStream): 54
Properties (java.util.Properties): 39