Use of java.io.Writer in project hadoop by apache.
The class CgroupsLCEResourcesHandler, method updateCgroup:
private void updateCgroup(String controller, String groupName, String param,
    String value) throws IOException {
  String path = pathForCgroup(controller, groupName);
  param = controller + "." + param;
  if (LOG.isDebugEnabled()) {
    LOG.debug("updateCgroup: " + path + ": " + param + "=" + value);
  }
  PrintWriter pw = null;
  try {
    File file = new File(path + "/" + param);
    Writer w = new OutputStreamWriter(new FileOutputStream(file), "UTF-8");
    pw = new PrintWriter(w);
    pw.write(value);
  } catch (IOException e) {
    throw new IOException("Unable to set " + param + "=" + value
        + " for cgroup at: " + path, e);
  } finally {
    if (pw != null) {
      // PrintWriter swallows IOExceptions, so poll its error flag before
      // close() and again afterwards to catch a failure in close() itself.
      boolean hasError = pw.checkError();
      pw.close();
      if (hasError) {
        throw new IOException("Unable to set " + param + "=" + value
            + " for cgroup at: " + path);
      }
      if (pw.checkError()) {
        throw new IOException("Error while closing cgroup file " + path);
      }
    }
  }
}
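The error-flag dance above is the price of PrintWriter's never-throwing API. Below is a minimal alternative sketch using java.nio.file.Files, which raises IOException directly; writeCgroupParam is a hypothetical helper for illustration, not part of the Hadoop handler:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

// Hypothetical sketch: write one cgroup parameter file and let the
// IOException propagate instead of polling an error flag.
static void writeCgroupParam(String cgroupPath, String param, String value)
    throws IOException {
  Path file = Paths.get(cgroupPath, param);  // e.g. <cgroup>/cpu.shares
  try {
    Files.write(file, value.getBytes(StandardCharsets.UTF_8));
  } catch (IOException e) {
    throw new IOException("Unable to set " + param + "=" + value
        + " for cgroup at: " + cgroupPath, e);
  }
}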
Use of java.io.Writer in project hadoop by apache.
The class TestMRKeyValueTextInputFormat, method testFormat:
@Test
public void testFormat() throws Exception {
  Job job = Job.getInstance(new Configuration(defaultConf));
  Path file = new Path(workDir, "test.txt");
  int seed = new Random().nextInt();
  LOG.info("seed = " + seed);
  Random random = new Random(seed);
  localFs.delete(workDir, true);
  FileInputFormat.setInputPaths(job, workDir);
  final int MAX_LENGTH = 10000;
  // for a variety of lengths
  for (int length = 0; length < MAX_LENGTH;
       length += random.nextInt(MAX_LENGTH / 10) + 1) {
    LOG.debug("creating; entries = " + length);
    // create a file with length entries
    Writer writer = new OutputStreamWriter(localFs.create(file));
    try {
      for (int i = 0; i < length; i++) {
        writer.write(Integer.toString(i * 2));
        writer.write("\t");
        writer.write(Integer.toString(i));
        writer.write("\n");
      }
    } finally {
      writer.close();
    }
    // try splitting the file in a variety of sizes
    KeyValueTextInputFormat format = new KeyValueTextInputFormat();
    for (int i = 0; i < 3; i++) {
      int numSplits = random.nextInt(MAX_LENGTH / 20) + 1;
      LOG.debug("splitting: requesting = " + numSplits);
      List<InputSplit> splits = format.getSplits(job);
      LOG.debug("splitting: got = " + splits.size());
      // check each split
      BitSet bits = new BitSet(length);
      for (int j = 0; j < splits.size(); j++) {
        LOG.debug("split[" + j + "]= " + splits.get(j));
        TaskAttemptContext context =
            MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
        RecordReader<Text, Text> reader =
            format.createRecordReader(splits.get(j), context);
        Class<?> clazz = reader.getClass();
        assertEquals("reader class is KeyValueLineRecordReader.",
            KeyValueLineRecordReader.class, clazz);
        MapContext<Text, Text, Text, Text> mcontext =
            new MapContextImpl<Text, Text, Text, Text>(job.getConfiguration(),
                context.getTaskAttemptID(), reader, null, null,
                MapReduceTestUtil.createDummyReporter(), splits.get(j));
        reader.initialize(splits.get(j), mcontext);
        Text key = null;
        Text value = null;
        try {
          int count = 0;
          while (reader.nextKeyValue()) {
            key = reader.getCurrentKey();
            clazz = key.getClass();
            assertEquals("Key class is Text.", Text.class, clazz);
            value = reader.getCurrentValue();
            clazz = value.getClass();
            assertEquals("Value class is Text.", Text.class, clazz);
            final int k = Integer.parseInt(key.toString());
            final int v = Integer.parseInt(value.toString());
            assertEquals("Bad key", 0, k % 2);
            assertEquals("Mismatched key/value", k / 2, v);
            LOG.debug("read " + v);
            assertFalse("Key in multiple partitions.", bits.get(v));
            bits.set(v);
            count++;
          }
          LOG.debug("splits[" + j + "]=" + splits.get(j) + " count=" + count);
        } finally {
          reader.close();
        }
      }
      assertEquals("Some keys in no partition.", length, bits.cardinality());
    }
  }
}
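Each record above is written as "evenKey<TAB>value" because KeyValueTextInputFormat breaks every line at the first separator byte, a tab by default, which is exactly what the k % 2 and k / 2 assertions verify. The separator is configurable; a short sketch, assuming the standard KeyValueLineRecordReader configuration key (the class name below is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class SeparatorSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Split each input line at the first ':' instead of the default tab.
    conf.set("mapreduce.input.keyvaluelinerecordreader.key.value.separator", ":");
    Job job = Job.getInstance(conf);
    // ... set KeyValueTextInputFormat as the job's input format ...
  }
}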
Use of java.io.Writer in project hadoop by apache.
The class TestMRKeyValueTextInputFormat, method testSplitableCodecs:
@Test
public void testSplitableCodecs() throws Exception {
  final Job job = Job.getInstance(defaultConf);
  final Configuration conf = job.getConfiguration();
  // Create the codec
  CompressionCodec codec = null;
  try {
    codec = (CompressionCodec) ReflectionUtils.newInstance(
        conf.getClassByName("org.apache.hadoop.io.compress.BZip2Codec"), conf);
  } catch (ClassNotFoundException cnfe) {
    throw new IOException("Illegal codec!");
  }
  Path file = new Path(workDir, "test" + codec.getDefaultExtension());
  int seed = new Random().nextInt();
  LOG.info("seed = " + seed);
  Random random = new Random(seed);
  localFs.delete(workDir, true);
  FileInputFormat.setInputPaths(job, workDir);
  final int MAX_LENGTH = 500000;
  FileInputFormat.setMaxInputSplitSize(job, MAX_LENGTH / 20);
  // for a variety of lengths
  for (int length = 0; length < MAX_LENGTH;
       length += random.nextInt(MAX_LENGTH / 4) + 1) {
    LOG.info("creating; entries = " + length);
    // create a file with length entries
    Writer writer = new OutputStreamWriter(
        codec.createOutputStream(localFs.create(file)));
    try {
      for (int i = 0; i < length; i++) {
        writer.write(Integer.toString(i * 2));
        writer.write("\t");
        writer.write(Integer.toString(i));
        writer.write("\n");
      }
    } finally {
      writer.close();
    }
    // try splitting the file in a variety of sizes
    KeyValueTextInputFormat format = new KeyValueTextInputFormat();
    assertTrue("KVTIF claims not splittable", format.isSplitable(job, file));
    for (int i = 0; i < 3; i++) {
      int numSplits = random.nextInt(MAX_LENGTH / 2000) + 1;
      LOG.info("splitting: requesting = " + numSplits);
      List<InputSplit> splits = format.getSplits(job);
      LOG.info("splitting: got = " + splits.size());
      // check each split
      BitSet bits = new BitSet(length);
      for (int j = 0; j < splits.size(); j++) {
        LOG.debug("split[" + j + "]= " + splits.get(j));
        TaskAttemptContext context =
            MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
        RecordReader<Text, Text> reader =
            format.createRecordReader(splits.get(j), context);
        Class<?> clazz = reader.getClass();
        MapContext<Text, Text, Text, Text> mcontext =
            new MapContextImpl<Text, Text, Text, Text>(job.getConfiguration(),
                context.getTaskAttemptID(), reader, null, null,
                MapReduceTestUtil.createDummyReporter(), splits.get(j));
        reader.initialize(splits.get(j), mcontext);
        Text key = null;
        Text value = null;
        try {
          int count = 0;
          while (reader.nextKeyValue()) {
            key = reader.getCurrentKey();
            value = reader.getCurrentValue();
            final int k = Integer.parseInt(key.toString());
            final int v = Integer.parseInt(value.toString());
            assertEquals("Bad key", 0, k % 2);
            assertEquals("Mismatched key/value", k / 2, v);
            LOG.debug("read " + k + "," + v);
            assertFalse(k + "," + v + " in multiple partitions.", bits.get(v));
            bits.set(v);
            count++;
          }
          if (count > 0) {
            LOG.info("splits[" + j + "]=" + splits.get(j) + " count=" + count);
          } else {
            LOG.debug("splits[" + j + "]=" + splits.get(j) + " count=" + count);
          }
        } finally {
          reader.close();
        }
      }
      assertEquals("Some keys in no partition.", length, bits.cardinality());
    }
  }
}
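The choice of BZip2Codec matters here: bzip2 is block-oriented, so a compressed file can be split and each split decompressed independently, whereas a gzip file would come back as a single unsplittable split. The setMaxInputSplitSize call is what forces many splits; a minimal sketch of those knobs, with hypothetical sizes and class name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

public class SplitSizeSketch {
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration());
    // Hypothetical bounds: every split between 1 byte and 25 KB, so even a
    // modest input file is carved into many splits.
    FileInputFormat.setMinInputSplitSize(job, 1);
    FileInputFormat.setMaxInputSplitSize(job, 25 * 1024);
  }
}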
Use of java.io.Writer in project hadoop by apache.
The class TestFixedLengthInputFormat, method createFile:
// Create a file containing fixed length records with random data
private ArrayList<String> createFile(Path targetFile, CompressionCodec codec,
    int recordLen, int numRecords) throws IOException {
  ArrayList<String> recordList = new ArrayList<String>(numRecords);
  OutputStream ostream = localFs.create(targetFile);
  if (codec != null) {
    ostream = codec.createOutputStream(ostream);
  }
  Writer writer = new OutputStreamWriter(ostream);
  try {
    StringBuffer sb = new StringBuffer();
    for (int i = 0; i < numRecords; i++) {
      for (int j = 0; j < recordLen; j++) {
        sb.append(chars[charRand.nextInt(chars.length)]);
      }
      String recordData = sb.toString();
      recordList.add(recordData);
      writer.write(recordData);
      sb.setLength(0);
    }
  } finally {
    writer.close();
  }
  return recordList;
}
Use of java.io.Writer in project hadoop by apache.
The class TestLineRecordReaderJobs, method createInputFile:
/**
 * Writes the input test file.
 *
 * @param conf the configuration used to obtain the local file system
 * @throws IOException if the file cannot be created or written
 */
public void createInputFile(Configuration conf) throws IOException {
  FileSystem localFs = FileSystem.getLocal(conf);
  Path file = new Path(inputDir, "test.txt");
  Writer writer = new OutputStreamWriter(localFs.create(file));
  writer.write("abc\ndef\t\nghi\njkl");
  writer.close();
}