Use of org.apache.hadoop.io.LongWritable in project hadoop by apache.
The class DistributedFSCheck, method listSubtree:
private void listSubtree(FileStatus rootStatus,
                         SequenceFile.Writer writer) throws IOException {
  Path rootFile = rootStatus.getPath();
  if (rootStatus.isFile()) {
    nrFiles++;
    // For a regular file generate <fName,offset> pairs
    long blockSize = fs.getDefaultBlockSize(rootFile);
    long fileLength = rootStatus.getLen();
    for (long offset = 0; offset < fileLength; offset += blockSize)
      writer.append(new Text(rootFile.toString()), new LongWritable(offset));
    return;
  }
  FileStatus[] children = null;
  try {
    children = fs.listStatus(rootFile);
  } catch (FileNotFoundException fnfe) {
    throw new IOException("Could not get listing for " + rootFile);
  }
  for (int i = 0; i < children.length; i++)
    listSubtree(children[i], writer);
}
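For reference, a control file produced by listSubtree can be read back with SequenceFile.Reader, iterating the same Text/LongWritable pairs. The sketch below is illustrative only and is not part of DistributedFSCheck; the path "/tmp/control.seq" and the default Configuration are placeholders.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class ControlFileDump {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Placeholder path; point this at a control file written by listSubtree.
    Path controlFile = new Path("/tmp/control.seq");
    SequenceFile.Reader reader = new SequenceFile.Reader(fs, controlFile, conf);
    try {
      Text fileName = new Text();
      LongWritable offset = new LongWritable();
      // Each record is a <fileName, blockOffset> pair, mirroring what listSubtree appends.
      while (reader.next(fileName, offset)) {
        System.out.println(fileName + " @ " + offset.get());
      }
    } finally {
      reader.close();
    }
  }
}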
Use of org.apache.hadoop.io.LongWritable in project hadoop by apache.
The class TestDFSIO, method createControlFile:
@SuppressWarnings("deprecation")
private void createControlFile(FileSystem fs,
                               long nrBytes, // in bytes
                               int nrFiles) throws IOException {
  LOG.info("creating control file: " + nrBytes + " bytes, " + nrFiles + " files");
  Path controlDir = getControlDir(config);
  fs.delete(controlDir, true);
  for (int i = 0; i < nrFiles; i++) {
    String name = getFileName(i);
    Path controlFile = new Path(controlDir, "in_file_" + name);
    SequenceFile.Writer writer = null;
    try {
      writer = SequenceFile.createWriter(fs, config, controlFile,
                                         Text.class, LongWritable.class,
                                         CompressionType.NONE);
      writer.append(new Text(name), new LongWritable(nrBytes));
    } catch (Exception e) {
      throw new IOException(e.getLocalizedMessage());
    } finally {
      if (writer != null)
        writer.close();
      writer = null;
    }
  }
  LOG.info("created control files for: " + nrFiles + " files");
}
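The createWriter overload used above is deprecated, which is why the method carries @SuppressWarnings("deprecation"). A non-deprecated equivalent takes SequenceFile.Writer.Option arguments. The sketch below is a standalone illustration of that variant; the path, file name, and byte count are arbitrary placeholders rather than values taken from TestDFSIO's configuration.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.Text;

public class ControlFileWriteSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Placeholder path; TestDFSIO derives the real one from getControlDir(config).
    Path controlFile = new Path("/tmp/in_file_test_io_0");
    SequenceFile.Writer writer = SequenceFile.createWriter(conf,
        SequenceFile.Writer.file(controlFile),
        SequenceFile.Writer.keyClass(Text.class),
        SequenceFile.Writer.valueClass(LongWritable.class),
        SequenceFile.Writer.compression(CompressionType.NONE));
    try {
      // One record per task: the file name keyed to the number of bytes it should process.
      writer.append(new Text("test_io_0"), new LongWritable(10L * 1024 * 1024));
    } finally {
      writer.close();
    }
  }
}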
Use of org.apache.hadoop.io.LongWritable in project hadoop by apache.
The class TestSequenceFileAsTextInputFormat, method testFormat:
@Test
public void testFormat() throws Exception {
  JobConf job = new JobConf(conf);
  FileSystem fs = FileSystem.getLocal(conf);
  Path dir = new Path(System.getProperty("test.build.data", ".") + "/mapred");
  Path file = new Path(dir, "test.seq");
  Reporter reporter = Reporter.NULL;
  int seed = new Random().nextInt();
  //LOG.info("seed = " + seed);
  Random random = new Random(seed);
  fs.delete(dir, true);
  FileInputFormat.setInputPaths(job, dir);
  // for a variety of lengths
  for (int length = 0; length < MAX_LENGTH;
       length += random.nextInt(MAX_LENGTH / 10) + 1) {
    //LOG.info("creating; entries = " + length);
    // create a file with length entries
    SequenceFile.Writer writer =
      SequenceFile.createWriter(fs, conf, file, IntWritable.class, LongWritable.class);
    try {
      for (int i = 0; i < length; i++) {
        IntWritable key = new IntWritable(i);
        LongWritable value = new LongWritable(10 * i);
        writer.append(key, value);
      }
    } finally {
      writer.close();
    }
    // try splitting the file in a variety of sizes
    InputFormat<Text, Text> format = new SequenceFileAsTextInputFormat();
    for (int i = 0; i < 3; i++) {
      int numSplits =
        random.nextInt(MAX_LENGTH / (SequenceFile.SYNC_INTERVAL / 20)) + 1;
      //LOG.info("splitting: requesting = " + numSplits);
      InputSplit[] splits = format.getSplits(job, numSplits);
      //LOG.info("splitting: got = " + splits.length);
      // check each split
      BitSet bits = new BitSet(length);
      for (int j = 0; j < splits.length; j++) {
        RecordReader<Text, Text> reader =
          format.getRecordReader(splits[j], job, reporter);
        Class readerClass = reader.getClass();
        assertEquals("reader class is SequenceFileAsTextRecordReader.",
                     SequenceFileAsTextRecordReader.class, readerClass);
        Text value = reader.createValue();
        Text key = reader.createKey();
        try {
          int count = 0;
          while (reader.next(key, value)) {
            // if (bits.get(key.get())) {
            //   LOG.info("splits["+j+"]="+splits[j]+" : " + key.get());
            //   LOG.info("@"+reader.getPos());
            // }
            int keyInt = Integer.parseInt(key.toString());
            assertFalse("Key in multiple partitions.", bits.get(keyInt));
            bits.set(keyInt);
            count++;
          }
          //LOG.info("splits["+j+"]="+splits[j]+" count=" + count);
        } finally {
          reader.close();
        }
      }
      assertEquals("Some keys in no partition.", length, bits.cardinality());
    }
  }
}
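The Integer.parseInt call in the loop above works because SequenceFileAsTextRecordReader hands keys and values back as Text built from the underlying Writables' toString() output. A minimal standalone illustration of that conversion, using the same IntWritable/LongWritable types the test writes (the concrete values 7 and 70 are arbitrary):

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;

public class ToTextSketch {
  public static void main(String[] args) {
    IntWritable key = new IntWritable(7);
    LongWritable value = new LongWritable(70L);
    // The record reader presents Text built from toString(), e.g. "7" and "70".
    Text textKey = new Text(key.toString());
    Text textValue = new Text(value.toString());
    // This is why the test can recover the original integer key from the Text key.
    int recovered = Integer.parseInt(textKey.toString());
    System.out.println(textKey + " -> " + recovered + ", " + textValue);
  }
}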
Use of org.apache.hadoop.io.LongWritable in project hadoop by apache.
The class TestTupleWritable, method makeRandomWritables:
private Writable[] makeRandomWritables() {
  Random r = new Random();
  Writable[] writs = {
    new BooleanWritable(r.nextBoolean()),
    new FloatWritable(r.nextFloat()),
    new FloatWritable(r.nextFloat()),
    new IntWritable(r.nextInt()),
    new LongWritable(r.nextLong()),
    new BytesWritable("dingo".getBytes()),
    new LongWritable(r.nextLong()),
    new IntWritable(r.nextInt()),
    new BytesWritable("yak".getBytes()),
    new IntWritable(r.nextInt())
  };
  return writs;
}
Use of org.apache.hadoop.io.LongWritable in project hadoop by apache.
The class TestTupleWritable, method testNestedIterable:
@Test
public void testNestedIterable() throws Exception {
  Random r = new Random();
  Writable[] writs = {
    new BooleanWritable(r.nextBoolean()),
    new FloatWritable(r.nextFloat()),
    new FloatWritable(r.nextFloat()),
    new IntWritable(r.nextInt()),
    new LongWritable(r.nextLong()),
    new BytesWritable("dingo".getBytes()),
    new LongWritable(r.nextLong()),
    new IntWritable(r.nextInt()),
    new BytesWritable("yak".getBytes()),
    new IntWritable(r.nextInt())
  };
  TupleWritable sTuple = makeTuple(writs);
  assertTrue("Bad count", writs.length == verifIter(writs, sTuple, 0));
}
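Both snippets above wrap a heterogeneous Writable[] (including two LongWritable entries) in a TupleWritable. As a standalone illustration of positional access on such a tuple, the sketch below assumes the org.apache.hadoop.mapred.join.TupleWritable variant (a mapreduce.lib.join variant with the same accessors also exists); it is not part of TestTupleWritable, and the sample values are arbitrary.

import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.join.TupleWritable;

public class TupleAccessSketch {
  public static void main(String[] args) {
    Writable[] writs = {
      new BooleanWritable(true),
      new IntWritable(42),
      new LongWritable(123456789L)
    };
    TupleWritable tuple = new TupleWritable(writs);
    // size() reports the number of slots; get(i) returns the Writable at position i.
    for (int i = 0; i < tuple.size(); i++) {
      System.out.println(i + ": " + tuple.get(i));
    }
  }
}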