
Example 11 with ReaderContextBuilder

Use of org.apache.hadoop.hbase.io.hfile.ReaderContextBuilder in project hbase by apache.

In class TestHStoreFile, method bloomWriteRead.

private void bloomWriteRead(StoreFileWriter writer, FileSystem fs) throws Exception {
    float err = conf.getFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, 0);
    Path f = writer.getPath();
    long now = EnvironmentEdgeManager.currentTime();
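    // Write only the even-numbered rows; the odd rows queried below are absent
    // and should be rejected by the bloom filter.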
    for (int i = 0; i < 2000; i += 2) {
        String row = String.format(localFormatter, i);
        KeyValue kv = new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), Bytes.toBytes("col"), now, Bytes.toBytes("value"));
        writer.append(kv);
    }
    writer.close();
    ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, f).build();
    HFileInfo fileInfo = new HFileInfo(context, conf);
    StoreFileReader reader = new StoreFileReader(context, fileInfo, cacheConf, new AtomicInteger(0), conf);
    fileInfo.initMetaAndIndex(reader.getHFileReader());
    reader.loadFileInfo();
    reader.loadBloomfilter();
    StoreFileScanner scanner = getStoreFileScanner(reader, false, false);
    // check false positives rate
    int falsePos = 0;
    int falseNeg = 0;
    for (int i = 0; i < 2000; i++) {
        String row = String.format(localFormatter, i);
        TreeSet<byte[]> columns = new TreeSet<>(Bytes.BYTES_COMPARATOR);
        columns.add(Bytes.toBytes("family:col"));
        Scan scan = new Scan().withStartRow(Bytes.toBytes(row)).withStopRow(Bytes.toBytes(row), true);
        scan.addColumn(Bytes.toBytes("family"), Bytes.toBytes("family:col"));
        HStore store = mock(HStore.class);
        when(store.getColumnFamilyDescriptor()).thenReturn(ColumnFamilyDescriptorBuilder.of("family"));
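        // For this single-row scan, shouldUseScanner is effectively a bloom-filter
        // membership query against the store file.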
        boolean exists = scanner.shouldUseScanner(scan, store, Long.MIN_VALUE);
        if (i % 2 == 0) {
            if (!exists) {
                falseNeg++;
            }
        } else {
            if (exists) {
                falsePos++;
            }
        }
    }
    // evict because we are about to delete the file
    reader.close(true);
    fs.delete(f, true);
    assertEquals("False negatives: " + falseNeg, 0, falseNeg);
    int maxFalsePos = (int) (2 * 2000 * err);
    assertTrue("Too many false positives: " + falsePos + " (err=" + err + ", expected no more than " + maxFalsePos + ")", falsePos <= maxFalsePos);
}
Also used: Path (org.apache.hadoop.fs.Path), KeyValue (org.apache.hadoop.hbase.KeyValue), HFileInfo (org.apache.hadoop.hbase.io.hfile.HFileInfo), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), TreeSet (java.util.TreeSet), ReaderContext (org.apache.hadoop.hbase.io.hfile.ReaderContext), ReaderContextBuilder (org.apache.hadoop.hbase.io.hfile.ReaderContextBuilder), Scan (org.apache.hadoop.hbase.client.Scan)
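
Every example in this set opens a reader through the same sequence: build a ReaderContext, parse an HFileInfo, construct the reader, call initMetaAndIndex, then load the file info (and, where needed, the bloom filter). Below is a minimal sketch of that sequence as a standalone helper; the method name openStoreFileReader is hypothetical, and the arguments are assumed to be supplied by the caller exactly as in bloomWriteRead above.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileInfo;
import org.apache.hadoop.hbase.io.hfile.ReaderContext;
import org.apache.hadoop.hbase.io.hfile.ReaderContextBuilder;
import org.apache.hadoop.hbase.regionserver.StoreFileReader;

private static StoreFileReader openStoreFileReader(FileSystem fs, Path path, CacheConfig cacheConf, Configuration conf) throws IOException {
    // Describe where the file lives and how it will be read.
    ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, path).build();
    // Parse the HFile trailer and file-info metadata.
    HFileInfo fileInfo = new HFileInfo(context, conf);
    StoreFileReader reader = new StoreFileReader(context, fileInfo, cacheConf, new AtomicInteger(0), conf);
    // Required after construction: initializes the meta and block indexes on the reader.
    fileInfo.initMetaAndIndex(reader.getHFileReader());
    reader.loadFileInfo();
    // Only needed when the caller wants bloom-filter checks, as in the test above.
    reader.loadBloomfilter();
    return reader;
}

As in the test, the reader should be closed with reader.close(true) to evict its cached blocks before the underlying file is deleted.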

Example 12 with ReaderContextBuilder

Use of org.apache.hadoop.hbase.io.hfile.ReaderContextBuilder in project hbase by apache.

In class TestHalfStoreFileReader, method doTestOfScanAndReseek.

private void doTestOfScanAndReseek(Path p, FileSystem fs, Reference bottom, CacheConfig cacheConf) throws IOException {
    ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, p).build();
    HFileInfo fileInfo = new HFileInfo(context, TEST_UTIL.getConfiguration());
    final HalfStoreFileReader halfreader = new HalfStoreFileReader(context, fileInfo, cacheConf, bottom, new AtomicInteger(0), TEST_UTIL.getConfiguration());
    fileInfo.initMetaAndIndex(halfreader.getHFileReader());
    halfreader.loadFileInfo();
    final HFileScanner scanner = halfreader.getScanner(false, false);
    scanner.seekTo();
    Cell curr;
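    // getLastOnCol (a test helper) builds a fake key sorting after every real cell on the
    // current row/column, so each reseekTo is expected to report an inexact seek (ret > 0).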
    do {
        curr = scanner.getCell();
        KeyValue reseekKv = getLastOnCol(curr);
        int ret = scanner.reseekTo(reseekKv);
        assertTrue("reseek to returned: " + ret, ret > 0);
        // System.out.println(curr + ": " + ret);
    } while (scanner.next());
    int ret = scanner.reseekTo(getLastOnCol(curr));
    // System.out.println("Last reseek: " + ret);
    assertTrue(ret > 0);
    halfreader.close(true);
}
Also used: KeyValue (org.apache.hadoop.hbase.KeyValue), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), ReaderContext (org.apache.hadoop.hbase.io.hfile.ReaderContext), ReaderContextBuilder (org.apache.hadoop.hbase.io.hfile.ReaderContextBuilder), HFileScanner (org.apache.hadoop.hbase.io.hfile.HFileScanner), Cell (org.apache.hadoop.hbase.Cell), HFileInfo (org.apache.hadoop.hbase.io.hfile.HFileInfo)
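
HalfStoreFileReader follows the same construction sequence, with one extra argument: a Reference selecting which half of the referenced file the reader exposes (HBase writes such reference files when a region splits). A minimal sketch, assuming the factory methods Reference.createTopReference and Reference.createBottomReference from org.apache.hadoop.hbase.io.Reference; openHalfReader is a hypothetical helper.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.HalfStoreFileReader;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileInfo;
import org.apache.hadoop.hbase.io.hfile.ReaderContext;
import org.apache.hadoop.hbase.io.hfile.ReaderContextBuilder;

private static HalfStoreFileReader openHalfReader(FileSystem fs, Path p, Reference half, CacheConfig cacheConf, Configuration conf) throws IOException {
    ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, p).build();
    HFileInfo fileInfo = new HFileInfo(context, conf);
    // The Reference decides whether cells below (bottom) or at/above (top) the
    // split row of the referenced file are visible through this reader.
    HalfStoreFileReader reader = new HalfStoreFileReader(context, fileInfo, cacheConf, half, new AtomicInteger(0), conf);
    fileInfo.initMetaAndIndex(reader.getHFileReader());
    reader.loadFileInfo();
    return reader;
}

A bottom half would then be opened as openHalfReader(fs, p, Reference.createBottomReference(splitRow), cacheConf, conf), matching the bottom Reference passed into doTestOfScanAndReseek.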

Aggregations

ReaderContextBuilder (org.apache.hadoop.hbase.io.hfile.ReaderContextBuilder): 12 uses
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 11 uses
HFileInfo (org.apache.hadoop.hbase.io.hfile.HFileInfo): 11 uses
ReaderContext (org.apache.hadoop.hbase.io.hfile.ReaderContext): 11 uses
Path (org.apache.hadoop.fs.Path): 9 uses
Test (org.junit.Test): 7 uses
KeyValue (org.apache.hadoop.hbase.KeyValue): 6 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 5 uses
Scan (org.apache.hadoop.hbase.client.Scan): 5 uses
HFileContext (org.apache.hadoop.hbase.io.hfile.HFileContext): 5 uses
HFileContextBuilder (org.apache.hadoop.hbase.io.hfile.HFileContextBuilder): 5 uses
HFileScanner (org.apache.hadoop.hbase.io.hfile.HFileScanner): 3 uses
TreeSet (java.util.TreeSet): 2 uses
Cell (org.apache.hadoop.hbase.Cell): 2 uses
Get (org.apache.hadoop.hbase.client.Get): 2 uses
FSDataInputStreamWrapper (org.apache.hadoop.hbase.io.FSDataInputStreamWrapper): 2 uses
FileNotFoundException (java.io.FileNotFoundException): 1 use
IOException (java.io.IOException): 1 use
InterruptedIOException (java.io.InterruptedIOException): 1 use
HashMap (java.util.HashMap): 1 use
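
FSDataInputStreamWrapper appears in this list because a ReaderContext can also be assembled from an already-open stream instead of a FileSystem/Path pair. A hedged sketch of that variant follows; the builder methods withInputStreamWrapper, withFileSize, withFilePath, and withFileSystem are assumed from the HBase ReaderContextBuilder API and do not appear in the two examples above.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
import org.apache.hadoop.hbase.io.hfile.ReaderContext;
import org.apache.hadoop.hbase.io.hfile.ReaderContextBuilder;

private static ReaderContext contextFromStream(FileSystem fs, Path path) throws IOException {
    // Wrap the open input stream; HBase uses this wrapper to manage the
    // checksum-verifying and plain streams for the same file.
    FSDataInputStreamWrapper in = new FSDataInputStreamWrapper(fs, path);
    // The file size must be supplied explicitly when no Path/FileSystem lookup is implied.
    long fileSize = fs.getFileStatus(path).getLen();
    return new ReaderContextBuilder()
        .withInputStreamWrapper(in)
        .withFileSize(fileSize)
        .withFilePath(path)
        .withFileSystem(fs)
        .build();
}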