Example 51 with CacheConfig

Use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.

From class TestCacheOnWriteInSchema, method readStoreFile:

private void readStoreFile(Path path) throws IOException {
    CacheConfig cacheConf = store.getCacheConfig();
    BlockCache cache = cacheConf.getBlockCache();
    StoreFile sf = new StoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL);
    HFile.Reader reader = sf.createReader().getHFileReader();
    try {
        // Open a scanner with (on read) caching disabled
        HFileScanner scanner = reader.getScanner(false, false);
        assertTrue(testDescription, scanner.seekTo());
        // Cribbed from io.hfile.TestCacheOnWrite
        long offset = 0;
        while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
            // Flags: don't cache the block, use pread, this is not a compaction.
            // Also, pass null for expected block type to avoid checking it.
            HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null, DataBlockEncoding.NONE);
            BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset);
            boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
            boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
            if (shouldBeCached != isCached) {
                throw new AssertionError("shouldBeCached: " + shouldBeCached + "\n" + "isCached: " + isCached + "\n" + "Test description: " + testDescription + "\n" + "block: " + block + "\n" + "blockCacheKey: " + blockCacheKey);
            }
            offset += block.getOnDiskSizeWithHeader();
        }
    } finally {
        reader.close();
    }
}
Also used: HFileBlock (org.apache.hadoop.hbase.io.hfile.HFileBlock), BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache), HFileScanner (org.apache.hadoop.hbase.io.hfile.HFileScanner), HFile (org.apache.hadoop.hbase.io.hfile.HFile), CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig), BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey)
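
The assertion above compares what actually landed in the BlockCache against what the cache-on-write settings promise for each block type. As a minimal sketch (method names as in the HBase version these examples target; treat them as assumptions for other versions), the per-category flags can be read straight off the CacheConfig:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;

public class CacheOnWriteFlags {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Enable caching of data blocks as they are written out.
        conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
        CacheConfig cacheConf = new CacheConfig(conf);
        // One flag per block category the test distinguishes.
        System.out.println("data on write:   " + cacheConf.shouldCacheDataOnWrite());
        System.out.println("index on write:  " + cacheConf.shouldCacheIndexesOnWrite());
        System.out.println("blooms on write: " + cacheConf.shouldCacheBloomsOnWrite());
    }
}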

Example 52 with CacheConfig

Use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.

From class TestCompoundBloomFilter, method writeStoreFile:

private Path writeStoreFile(int t, BloomType bt, List<KeyValue> kvs) throws IOException {
    conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE, BLOOM_BLOCK_SIZES[t]);
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
    cacheConf = new CacheConfig(conf);
    HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCK_SIZES[t]).build();
    StoreFileWriter w = new StoreFileWriter.Builder(conf, cacheConf, fs).withOutputDir(TEST_UTIL.getDataTestDir()).withBloomType(bt).withFileContext(meta).build();
    assertTrue(w.hasGeneralBloom());
    assertTrue(w.getGeneralBloomWriter() instanceof CompoundBloomFilterWriter);
    CompoundBloomFilterWriter cbbf = (CompoundBloomFilterWriter) w.getGeneralBloomWriter();
    int keyCount = 0;
    KeyValue prev = null;
    LOG.debug("Total keys/values to insert: " + kvs.size());
    for (KeyValue kv : kvs) {
        w.append(kv);
        // Validate the key count in the Bloom filter.
        boolean newKey = true;
        if (prev != null) {
            newKey = !(bt == BloomType.ROW ? CellUtil.matchingRows(kv, prev) : CellUtil.matchingRowColumn(kv, prev));
        }
        if (newKey)
            ++keyCount;
        assertEquals(keyCount, cbbf.getKeyCount());
        prev = kv;
    }
    w.close();
    return w.getPath();
}
Also used: CompoundBloomFilterWriter (org.apache.hadoop.hbase.io.hfile.CompoundBloomFilterWriter), KeyValue (org.apache.hadoop.hbase.KeyValue), HFileContextBuilder (org.apache.hadoop.hbase.io.hfile.HFileContextBuilder), CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig), HFileContext (org.apache.hadoop.hbase.io.hfile.HFileContext)
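
The running key-count assertion works because kvs arrive sorted and a Bloom filter stores one entry per distinct key: a cell contributes a new Bloom key only when it differs from its predecessor under the Bloom type's notion of equality (row for ROW, row plus column for ROWCOL). A self-contained illustration of that dedup rule:

import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class BloomKeyDedup {
    public static void main(String[] args) {
        KeyValue a = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
            Bytes.toBytes("q1"), Bytes.toBytes("v"));
        KeyValue b = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
            Bytes.toBytes("q2"), Bytes.toBytes("v"));
        // Same row, different qualifier: one ROW Bloom key, two ROWCOL keys.
        System.out.println("ROW sees a new key:    " + !CellUtil.matchingRows(a, b));
        System.out.println("ROWCOL sees a new key: " + !CellUtil.matchingRowColumn(a, b));
    }
}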

Example 53 with CacheConfig

Use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.

From class DataBlockEncodingTool, method testCodecs:

/**
   * Tests a data block encoder on the given HFile and prints the results to the console.
   * @param kvLimit the maximum number of KeyValues to analyze.
   * @param hfilePath an HFile path on the file system.
   * @param compressionName the compression algorithm used for comparison.
   * @param doBenchmark whether to run performance benchmarks.
   * @param doVerify whether to verify correctness.
   * @throws IOException when hfilePath is incorrect.
   */
public static void testCodecs(Configuration conf, int kvLimit, String hfilePath, String compressionName, boolean doBenchmark, boolean doVerify) throws IOException {
    // create environment
    Path path = new Path(hfilePath);
    CacheConfig cacheConf = new CacheConfig(conf);
    FileSystem fs = FileSystem.get(conf);
    StoreFile hsf = new StoreFile(fs, path, conf, cacheConf, BloomType.NONE);
    StoreFileReader reader = hsf.createReader();
    reader.loadFileInfo();
    KeyValueScanner scanner = reader.getStoreFileScanner(true, true, false, 0, 0, false);
    // run the utilities
    DataBlockEncodingTool comp = new DataBlockEncodingTool(compressionName);
    int majorVersion = reader.getHFileVersion();
    comp.useHBaseChecksum = majorVersion > 2 || (majorVersion == 2 && reader.getHFileMinorVersion() >= HFileReaderImpl.MINOR_VERSION_WITH_CHECKSUM);
    comp.checkStatistics(scanner, kvLimit);
    if (doVerify) {
        comp.verifyCodecs(scanner, kvLimit);
    }
    if (doBenchmark) {
        comp.benchmarkCodecs();
    }
    comp.displayStatistics();
    // cleanup
    scanner.close();
    reader.close(cacheConf.shouldEvictOnClose());
}
Also used: Path (org.apache.hadoop.fs.Path), FileSystem (org.apache.hadoop.fs.FileSystem), CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig)
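
One detail worth copying from this utility: whether a file's cached blocks are evicted when its reader closes is a CacheConfig decision, not the reader's. A hedged sketch of that open/close discipline, assuming the same pre-2.0-style StoreFile API and import locations as the example above:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFileReader;

public class ReaderCloseDiscipline {
    static void readAndClose(Configuration conf, Path path) throws IOException {
        CacheConfig cacheConf = new CacheConfig(conf);
        FileSystem fs = FileSystem.get(conf);
        StoreFile hsf = new StoreFile(fs, path, conf, cacheConf, BloomType.NONE);
        StoreFileReader reader = hsf.createReader();
        try {
            reader.loadFileInfo();
            // ... scan, verify, benchmark ...
        } finally {
            // Evict this file's cached blocks only if the config asks for it.
            reader.close(cacheConf.shouldEvictOnClose());
        }
    }
}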

Example 54 with CacheConfig

Use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.

From class TestBlocksScanned, method _testBlocksScanned:

private void _testBlocksScanned(HTableDescriptor table) throws Exception {
    Region r = createNewHRegion(table, START_KEY, END_KEY, TEST_UTIL.getConfiguration());
    addContent(r, FAMILY, COL);
    r.flush(true);
    CacheStats stats = new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache().getStats();
    long before = stats.getHitCount() + stats.getMissCount();
    // Do simple test of getting one row only first.
    Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
    scan.addColumn(FAMILY, COL);
    scan.setMaxVersions(1);
    InternalScanner s = r.getScanner(scan);
    List<Cell> results = new ArrayList<>();
    while (s.next(results)) {
        // Drain the scanner; cells accumulate in results across calls.
    }
    s.close();
    int expectResultSize = 'z' - 'a';
    assertEquals(expectResultSize, results.size());
    int kvPerBlock = (int) Math.ceil(BLOCK_SIZE / (double) KeyValueUtil.ensureKeyValue(results.get(0)).getLength());
    Assert.assertEquals(2, kvPerBlock);
    long expectDataBlockRead = (long) Math.ceil(expectResultSize / (double) kvPerBlock);
    long expectIndexBlockRead = expectDataBlockRead;
    assertEquals(expectIndexBlockRead + expectDataBlockRead, stats.getHitCount() + stats.getMissCount() - before);
}
Also used: ArrayList (java.util.ArrayList), CacheStats (org.apache.hadoop.hbase.io.hfile.CacheStats), Scan (org.apache.hadoop.hbase.client.Scan), CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig), Cell (org.apache.hadoop.hbase.Cell)
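
The hit-plus-miss delta is a reliable block-read counter because every block request is resolved either from the cache (a hit) or from disk (a miss), bumping exactly one of the two counters. A minimal sketch of sampling those stats around a workload:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.CacheStats;

public class BlockReadDelta {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        CacheStats stats = new CacheConfig(conf).getBlockCache().getStats();
        long before = stats.getHitCount() + stats.getMissCount();
        // ... run a scan against a region backed by this block cache ...
        long blocksRead = stats.getHitCount() + stats.getMissCount() - before;
        System.out.println("blocks touched (cache hits + disk reads): " + blocksRead);
    }
}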

Example 55 with CacheConfig

Use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.

From class TestFSErrorsExposed, method testHFileScannerThrowsErrors:

/**
   * Injects errors into the pread calls of an on-disk file, and makes
   * sure those bubble up to the HFile scanner
   */
@Test
public void testHFileScannerThrowsErrors() throws IOException {
    Path hfilePath = new Path(new Path(util.getDataTestDir("internalScannerExposesErrors"), "regionname"), "familyname");
    HFileSystem hfs = (HFileSystem) util.getTestFileSystem();
    FaultyFileSystem faultyfs = new FaultyFileSystem(hfs.getBackingFs());
    FileSystem fs = new HFileSystem(faultyfs);
    CacheConfig cacheConf = new CacheConfig(util.getConfiguration());
    HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build();
    StoreFileWriter writer = new StoreFileWriter.Builder(util.getConfiguration(), cacheConf, hfs).withOutputDir(hfilePath).withFileContext(meta).build();
    TestStoreFile.writeStoreFile(writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));
    StoreFile sf = new StoreFile(fs, writer.getPath(), util.getConfiguration(), cacheConf, BloomType.NONE);
    StoreFileReader reader = sf.createReader();
    HFileScanner scanner = reader.getScanner(false, true);
    FaultyInputStream inStream = faultyfs.inStreams.get(0).get();
    assertNotNull(inStream);
    scanner.seekTo();
    // Do at least one successful read
    assertTrue(scanner.next());
    faultyfs.startFaults();
    try {
        int scanned = 0;
        while (scanner.next()) {
            scanned++;
        }
        fail("Scanner didn't throw after faults injected");
    } catch (IOException ioe) {
        LOG.info("Got expected exception", ioe);
        assertTrue(ioe.getMessage().contains("Fault"));
    }
    // end of test so evictOnClose
    reader.close(true);
}
Also used: Path (org.apache.hadoop.fs.Path), HFileScanner (org.apache.hadoop.hbase.io.hfile.HFileScanner), HFileContextBuilder (org.apache.hadoop.hbase.io.hfile.HFileContextBuilder), IOException (java.io.IOException), HFileContext (org.apache.hadoop.hbase.io.hfile.HFileContext), FileSystem (org.apache.hadoop.fs.FileSystem), FilterFileSystem (org.apache.hadoop.fs.FilterFileSystem), HFileSystem (org.apache.hadoop.hbase.fs.HFileSystem), CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig), Test (org.junit.Test)
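
FaultyFileSystem and FaultyInputStream are helpers local to this test, not HBase APIs. The underlying pattern, wrapping the backing filesystem so reads can be made to fail on demand, can be sketched with stock Hadoop classes. The class below is illustrative only, and unlike the test's helper it fails at open time rather than injecting faults into already-open pread streams:

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative fault-injecting wrapper; the test's FaultyFileSystem is more elaborate.
class SimpleFaultyFileSystem extends FilterFileSystem {
    final AtomicBoolean faultsOn = new AtomicBoolean(false);

    SimpleFaultyFileSystem(FileSystem backing) {
        super(backing);
    }

    @Override
    public FSDataInputStream open(Path p, int bufferSize) throws IOException {
        if (faultsOn.get()) {
            // Simulate an I/O failure; callers should see this bubble up.
            throw new IOException("Fault injected on open of " + p);
        }
        // Delegate to the wrapped filesystem (the protected 'fs' field).
        return fs.open(p, bufferSize);
    }
}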

Aggregations

CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig): 63 uses
Path (org.apache.hadoop.fs.Path): 28 uses
Test (org.junit.Test): 26 uses
Configuration (org.apache.hadoop.conf.Configuration): 21 uses
HFile (org.apache.hadoop.hbase.io.hfile.HFile): 21 uses
HFileContext (org.apache.hadoop.hbase.io.hfile.HFileContext): 21 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 20 uses
HFileContextBuilder (org.apache.hadoop.hbase.io.hfile.HFileContextBuilder): 20 uses
BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache): 15 uses
KeyValue (org.apache.hadoop.hbase.KeyValue): 14 uses
TableName (org.apache.hadoop.hbase.TableName): 14 uses
Region (org.apache.hadoop.hbase.regionserver.Region): 13 uses
Store (org.apache.hadoop.hbase.regionserver.Store): 13 uses
Cell (org.apache.hadoop.hbase.Cell): 10 uses
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 10 uses
CombinedBlockCache (org.apache.hadoop.hbase.io.hfile.CombinedBlockCache): 10 uses
IOException (java.io.IOException): 9 uses
CountDownLatch (java.util.concurrent.CountDownLatch): 8 uses
FileStatus (org.apache.hadoop.fs.FileStatus): 8 uses
HFileScanner (org.apache.hadoop.hbase.io.hfile.HFileScanner): 8 uses