
Example 6 with Writer

Use of org.apache.hadoop.hbase.io.hfile.HFile.Writer in project hbase by apache.

The createTFile method of the TestHFileSeek class writes synthetic key/value pairs until the output file reaches options.fileSize, then reports raw and on-disk write throughput:

private void createTFile() throws IOException {
    long totalBytes = 0;
    FSDataOutputStream fout = createFSOutput(path, fs);
    try {
        HFileContext context = new HFileContextBuilder()
            .withBlockSize(options.minBlockSize)
            .withCompression(HFileWriterImpl.compressionByName(options.compress))
            .build();
        Writer writer = HFile.getWriterFactoryNoCache(conf)
            .withOutputStream(fout)
            .withFileContext(context)
            .withComparator(CellComparator.COMPARATOR)
            .create();
        try {
            BytesWritable key = new BytesWritable();
            BytesWritable val = new BytesWritable();
            timer.start();
            for (long i = 0; true; ++i) {
                if (i % 1000 == 0) {
                    // Check the file size every 1000 rows; stop once the target size is reached.
                    if (fs.getFileStatus(path).getLen() >= options.fileSize) {
                        break;
                    }
                }
                kvGen.next(key, val, false);
                // Copy the key and value bytes out of the reusable BytesWritable buffers.
                byte[] k = new byte[key.getLength()];
                System.arraycopy(key.getBytes(), 0, k, 0, key.getLength());
                byte[] v = new byte[val.getLength()];
                System.arraycopy(val.getBytes(), 0, v, 0, val.getLength());
                KeyValue kv = new KeyValue(k, CF, QUAL, v);
                writer.append(kv);
                totalBytes += kv.getKeyLength();
                totalBytes += kv.getValueLength();
            }
            timer.stop();
        } finally {
            writer.close();
        }
    } finally {
        fout.close();
    }
    // timer.read() returns nanoseconds; divide by 1000 to get microseconds.
    double duration = (double) timer.read() / 1000;
    long fsize = fs.getFileStatus(path).getLen();
    System.out.printf("time: %s...uncompressed: %.2fMB...raw thrpt: %.2fMB/s\n", timer.toString(), (double) totalBytes / 1024 / 1024, totalBytes / duration);
    System.out.printf("time: %s...file size: %.2fMB...disk thrpt: %.2fMB/s\n", timer.toString(), (double) fsize / 1024 / 1024, fsize / duration);
}
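
The aggregations below show that Writer is most often paired with HFile.Reader. As a companion to createTFile, here is a minimal sketch of scanning the written file back; it is not code from the hbase project. It assumes an HBase version contemporary with the snippet above, where HFile.createReader(fs, path, cacheConf, conf) and HFileScanner.getCell() are available (the createReader signature has changed across HBase releases, and older ones expose getKeyValue() instead of getCell()). The conf, fs, and path names stand in for the same fields createTFile uses, and the countCells helper itself is hypothetical.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;

// Hypothetical helper, not part of TestHFileSeek: scans every cell in an
// HFile and returns the cell count.
private long countCells(Configuration conf, FileSystem fs, Path path) throws IOException {
    // The four-argument createReader form is assumed here; some HBase
    // releases use a different signature.
    Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
    try {
        // cacheBlocks=false, pread=false: a one-shot sequential scan.
        HFileScanner scanner = reader.getScanner(false, false);
        long cells = 0;
        long valueBytes = 0;
        // seekTo() positions the scanner at the first cell and returns
        // false if the file is empty.
        if (scanner.seekTo()) {
            do {
                Cell c = scanner.getCell();
                cells++;
                valueBytes += c.getValueLength();
            } while (scanner.next());
        }
        System.out.printf("cells: %d...value bytes: %d%n", cells, valueBytes);
        return cells;
    } finally {
        reader.close();
    }
}

Calling countCells(conf, fs, path) right after createTFile() would confirm the file round-trips; on older HBase versions a reader.loadFileInfo() call may be needed before scanning.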
Also used: KeyValue (org.apache.hadoop.hbase.KeyValue), BytesWritable (org.apache.hadoop.io.BytesWritable), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Writer (org.apache.hadoop.hbase.io.hfile.HFile.Writer)

Aggregations

Writer (org.apache.hadoop.hbase.io.hfile.HFile.Writer): 6 uses
Path (org.apache.hadoop.fs.Path): 5 uses
Reader (org.apache.hadoop.hbase.io.hfile.HFile.Reader): 5 uses
StoreFileWriter (org.apache.hadoop.hbase.regionserver.StoreFileWriter): 5 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 4 uses
Test (org.junit.Test): 3 uses
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 2 uses
KeyValue (org.apache.hadoop.hbase.KeyValue): 2 uses
ByteBuffer (java.nio.ByteBuffer): 1 use
Compression (org.apache.hadoop.hbase.io.compress.Compression): 1 use
BytesWritable (org.apache.hadoop.io.BytesWritable): 1 use