Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestVLong, method testVLongByte: round-trips every byte-sized value through Utils.writeVLong/readVLong and checks the encoded file size.
@Test
public void testVLongByte() throws IOException {
  FSDataOutputStream out = fs.create(path);
  for (int i = Byte.MIN_VALUE; i <= Byte.MAX_VALUE; ++i) {
    Utils.writeVLong(out, i);
  }
  out.close();
  Assert.assertEquals("Incorrect encoded size", (1 << Byte.SIZE) + 96,
      fs.getFileStatus(path).getLen());
  FSDataInputStream in = fs.open(path);
  for (int i = Byte.MIN_VALUE; i <= Byte.MAX_VALUE; ++i) {
    long n = Utils.readVLong(in);
    Assert.assertEquals(n, i);
  }
  in.close();
  fs.delete(path, false);
}
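The snippet references fs, path, and conf fields that are initialized elsewhere in the test class. A minimal sketch of such a fixture, assuming a local FileSystem and a scratch location (field names mirror the snippet; the directory and setup details are illustrative, not the project's exact code):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Before;

// Hypothetical test fixture for the VLong round-trip tests above.
private Configuration conf;
private FileSystem fs;
private Path path;

@Before
public void setUp() throws IOException {
  conf = new Configuration();
  fs = FileSystem.getLocal(conf);  // assumption: run against the local file system
  path = new Path(System.getProperty("test.build.data", "/tmp"), "TestVLong.out");
}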
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestVLong, method writeAndVerify: writes the full short range shifted left by a given amount, verifies each value on read-back, and returns the encoded file length.
private long writeAndVerify(int shift) throws IOException {
  FSDataOutputStream out = fs.create(path);
  for (int i = Short.MIN_VALUE; i <= Short.MAX_VALUE; ++i) {
    Utils.writeVLong(out, ((long) i) << shift);
  }
  out.close();
  FSDataInputStream in = fs.open(path);
  for (int i = Short.MIN_VALUE; i <= Short.MAX_VALUE; ++i) {
    long n = Utils.readVLong(in);
    Assert.assertEquals(n, ((long) i) << shift);
  }
  in.close();
  long ret = fs.getFileStatus(path).getLen();
  fs.delete(path, false);
  return ret;
}
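The returned length lets callers check how the variable-length encoding grows as value magnitude increases. A minimal sketch of such a caller, assuming it sits in the same test class (the test name and the monotonicity check are illustrative, not the project's actual size assertions):

@Test
public void testEncodedSizeGrowsWithShift() throws IOException {
  long prev = writeAndVerify(0);
  for (int shift = 8; shift <= 32; shift += 8) {
    long len = writeAndVerify(shift);
    // Larger magnitudes need more bytes per VLong, so the total should not shrink.
    Assert.assertTrue("Encoded size should not decrease", len >= prev);
    prev = len;
  }
}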
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestTFileSeek, method seekTFile: opens a TFile through an FSDataInputStream and benchmarks random lowerBound seeks against sampled keys.
public void seekTFile() throws IOException {
  int miss = 0;
  long totalBytes = 0;
  FSDataInputStream fsdis = fs.open(path);
  Reader reader = new Reader(fsdis, fs.getFileStatus(path).getLen(), conf);
  KeySampler kSampler =
      new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(), keyLenGen);
  Scanner scanner = reader.createScanner();
  BytesWritable key = new BytesWritable();
  BytesWritable val = new BytesWritable();
  timer.reset();
  timer.start();
  for (int i = 0; i < options.seekCount; ++i) {
    kSampler.next(key);
    scanner.lowerBound(key.getBytes(), 0, key.getLength());
    if (!scanner.atEnd()) {
      scanner.entry().get(key, val);
      totalBytes += key.getLength();
      totalBytes += val.getLength();
    } else {
      ++miss;
    }
  }
  timer.stop();
  // duration in microseconds (us).
  double duration = (double) timer.read() / 1000;
  System.out.printf(
      "time: %s...avg seek: %s...%d hit...%d miss...avg I/O size: %.2fKB\n",
      timer.toString(),
      NanoTimer.nanoTimeToString(timer.read() / options.seekCount),
      options.seekCount - miss, miss,
      (double) totalBytes / 1024 / (options.seekCount - miss));
}
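The benchmark relies on test-local helpers (KeySampler, NanoTimer, the options holder) that are not shown. The core seek pattern is just a TFile.Reader plus Scanner.lowerBound; a minimal standalone sketch, assuming a sorted TFile already exists at the path passed on the command line (the probe key is illustrative):

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.file.tfile.TFile;

public class TFileSeekSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path tfilePath = new Path(args[0]);  // assumption: path to an existing sorted TFile
    FileSystem fs = tfilePath.getFileSystem(conf);
    try (FSDataInputStream fsdis = fs.open(tfilePath)) {
      TFile.Reader reader =
          new TFile.Reader(fsdis, fs.getFileStatus(tfilePath).getLen(), conf);
      TFile.Reader.Scanner scanner = reader.createScanner();
      byte[] probe = "some-key".getBytes(StandardCharsets.UTF_8);  // illustrative key
      // Position the scanner at the first entry whose key is >= the probe key.
      scanner.lowerBound(probe, 0, probe.length);
      if (!scanner.atEnd()) {
        BytesWritable key = new BytesWritable();
        BytesWritable val = new BytesWritable();
        scanner.entry().get(key, val);
        System.out.println("hit: " + key.getLength() + " key bytes, "
            + val.getLength() + " value bytes");
      } else {
        System.out.println("miss: probe is past the last key");
      }
      scanner.close();
      reader.close();
    }
  }
}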
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestTFile, method unsortedWithSomeCodec: writes an unsorted TFile with the given codec, then reads every record back through an FSDataInputStream.
// unsorted with some codec
void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(uTfile);
  Reader reader =
      new Reader(fs.open(uTfile), fs.getFileStatus(uTfile).getLen(), conf);
  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}
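A method like this is typically invoked once per codec name that TFile accepts ("none" and "gz" are standard TFile codec strings). A minimal sketch of such callers, assuming they live in the same test class (the test names are illustrative):

@Test
public void testUnsortedNoneCodec() throws IOException {
  unsortedWithSomeCodec("none");  // no compression
}

@Test
public void testUnsortedGzCodec() throws IOException {
  unsortedWithSomeCodec("gz");    // gzip compression; requires the gz codec to be available
}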
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestTFile, method testMetaBlocks: writes a TFile containing meta blocks and reads them back through an FSDataInputStream.
// test meta blocks for tfiles
@Test
public void testMetaBlocks() throws IOException {
  Path mFile = new Path(ROOT, "meta.tfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  Writer writer = new Writer(fout, minBlockSize, "none", null, conf);
  someTestingWithMetaBlock(writer, "none");
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = new Reader(fin, fs.getFileStatus(mFile).getLen(), conf);
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
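The helpers someTestingWithMetaBlock and someReadingWithMetaBlock are defined elsewhere in TestTFile. The underlying API is TFile.Writer.prepareMetaBlock and TFile.Reader.getMetaBlock; a minimal sketch of a meta-block write/read round trip (the block name and payload are illustrative, not the project's helpers):

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.file.tfile.TFile;

// Write a named meta block. Note: TFile allows no further key/value
// insertion after a meta block has been added.
static void writeMetaBlock(TFile.Writer writer) throws IOException {
  DataOutputStream dos = writer.prepareMetaBlock("testMeta");  // illustrative name
  try {
    dos.writeUTF("this is a test meta block");                 // illustrative payload
  } finally {
    dos.close();
  }
}

// Read the same meta block back by name.
static void readMetaBlock(TFile.Reader reader) throws IOException {
  DataInputStream dis = reader.getMetaBlock("testMeta");
  try {
    System.out.println(dis.readUTF());
  } finally {
    dis.close();
  }
}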