Use of org.apache.hadoop.hbase.io.hfile.HFile.Writer in the Apache HBase project (hbase by apache).
The example below is the createTFile method of the TestHFileSeek class.
private void createTFile() throws IOException {
  long totalBytes = 0;
  FSDataOutputStream fout = createFSOutput(path, fs);
  try {
    // Configure block size and compression for the new HFile.
    HFileContext context = new HFileContextBuilder()
        .withBlockSize(options.minBlockSize)
        .withCompression(HFileWriterImpl.compressionByName(options.compress))
        .build();
    Writer writer = HFile.getWriterFactoryNoCache(conf)
        .withOutputStream(fout)
        .withFileContext(context)
        .withComparator(CellComparator.COMPARATOR)
        .create();
    try {
      BytesWritable key = new BytesWritable();
      BytesWritable val = new BytesWritable();
      timer.start();
      for (long i = 0; true; ++i) {
        if (i % 1000 == 0) {
          // Check the file size every 1000 rows; stop once the target size is reached.
          if (fs.getFileStatus(path).getLen() >= options.fileSize) {
            break;
          }
        }
        // Generate the next key/value pair and copy it into fresh byte arrays.
        kvGen.next(key, val, false);
        byte[] k = new byte[key.getLength()];
        System.arraycopy(key.getBytes(), 0, k, 0, key.getLength());
        byte[] v = new byte[val.getLength()];
        System.arraycopy(val.getBytes(), 0, v, 0, val.getLength());
        KeyValue kv = new KeyValue(k, CF, QUAL, v);
        writer.append(kv);
        totalBytes += kv.getKeyLength();
        totalBytes += kv.getValueLength();
      }
      timer.stop();
    } finally {
      writer.close();
    }
  } finally {
    fout.close();
  }
  // Elapsed time in microseconds.
  double duration = (double) timer.read() / 1000;
  long fsize = fs.getFileStatus(path).getLen();
  System.out.printf("time: %s...uncompressed: %.2fMB...raw thrpt: %.2fMB/s\n",
      timer.toString(), (double) totalBytes / 1024 / 1024, totalBytes / duration);
  System.out.printf("time: %s...file size: %.2fMB...disk thrpt: %.2fMB/s\n",
      timer.toString(), (double) fsize / 1024 / 1024, fsize / duration);
}
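For context, an HFile written this way can be scanned back through the HFile reader API. The sketch below is not part of TestHFileSeek; it is a minimal illustration assuming an HBase version in which the four-argument HFile.createReader(fs, path, cacheConf, conf), Reader.loadFileInfo(), and HFileScanner.getCell() are available (these signatures have changed across HBase releases), and the helper name scanTFile is hypothetical.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;

// Hypothetical helper: sequentially scan the HFile at 'path' and count its cells.
private long scanTFile(Configuration conf, FileSystem fs, Path path) throws IOException {
  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
  long cells = 0;
  try {
    reader.loadFileInfo();
    // cacheBlocks=false, pread=false: plain sequential scan without block caching.
    HFileScanner scanner = reader.getScanner(false, false);
    if (scanner.seekTo()) {            // position at the first cell
      do {
        Cell cell = scanner.getCell(); // current key/value pair
        cells++;
      } while (scanner.next());        // advance until end of file
    }
  } finally {
    reader.close();
  }
  return cells;
}

Passing (false, false) to getScanner disables block caching and positional reads, which matches a one-pass sequential scan; the seek benchmark in the same test class instead reuses the scanner with seekTo(cell) calls.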