
Example 1 with Cacheable

Use of org.apache.hadoop.hbase.io.hfile.Cacheable in project hbase by apache.

From the class TestBucketWriterThread, method testTooBigEntry:

/**
   * Pass through a too big entry and ensure it is cleared from queues and ramCache.
   * Manually run the WriterThread.
   * @throws InterruptedException if interrupted while draining the write queue
   */
@Test
public void testTooBigEntry() throws InterruptedException {
    Cacheable tooBigCacheable = Mockito.mock(Cacheable.class);
    Mockito.when(tooBigCacheable.getSerializedLength()).thenReturn(Integer.MAX_VALUE);
    this.bc.cacheBlock(this.plainKey, tooBigCacheable);
    doDrainOfOneEntry(this.bc, this.wt, this.q);
}
Also used: Cacheable (org.apache.hadoop.hbase.io.hfile.Cacheable), Test (org.junit.Test)
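
The test relies on fixture fields (bc, wt, q, plainKey) and the helper doDrainOfOneEntry defined elsewhere in TestBucketWriterThread. As a sketch of what one might assert after the drain, assuming JUnit 4 static imports (org.junit.Assert.assertEquals, org.junit.Assert.assertNull): the real test verifies internal state directly, so these checks are illustrative, not the project's own.

// Hypothetical post-drain checks, appended inside testTooBigEntry.
// The oversized entry should have been rejected: nothing remains in
// the write queue, and a lookup for the key now misses.
assertEquals(0, this.q.size());
assertNull(this.bc.getBlock(this.plainKey, true, false, false));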

Example 2 with Cacheable

Use of org.apache.hadoop.hbase.io.hfile.Cacheable in project hbase by apache.

From the class BucketCache, method getBlock:

/**
   * Get the buffer of the block with the specified key.
   * @param key block's cache key
   * @param caching true if the caller caches blocks on cache misses
   * @param repeat Whether this is a repeat lookup for the same block
   * @param updateCacheMetrics Whether we should update cache metrics or not
   * @return buffer of specified cache key, or null if not in cache
   */
@Override
public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat, boolean updateCacheMetrics) {
    if (!cacheEnabled) {
        return null;
    }
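    // First consult the RAM cache, which holds blocks that have been
    // cached but not yet persisted to the IOEngine by a WriterThread.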
    RAMQueueEntry re = ramCache.get(key);
    if (re != null) {
        if (updateCacheMetrics) {
            cacheStats.hit(caching, key.isPrimary(), key.getBlockType());
        }
        re.access(accessCount.incrementAndGet());
        return re.getData();
    }
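    // Not in RAM: check the backing map of entries already written to
    // the IOEngine (offheap memory, file, etc.).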
    BucketEntry bucketEntry = backingMap.get(key);
    if (bucketEntry != null) {
        long start = System.nanoTime();
        ReentrantReadWriteLock lock = offsetLock.getLock(bucketEntry.offset());
        try {
            lock.readLock().lock();
            // Re-check that this entry is still mapped to the key: the block may
            // have been evicted (and its offset reused) before we took the read lock.
            if (bucketEntry.equals(backingMap.get(key))) {
                // TODO : change this area - should be removed after server cells and
                // HBASE-12295 are available
                int len = bucketEntry.getLength();
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Read offset=" + bucketEntry.offset() + ", len=" + len);
                }
                Cacheable cachedBlock = ioEngine.read(bucketEntry.offset(), len, bucketEntry.deserializerReference(this.deserialiserMap));
                long timeTaken = System.nanoTime() - start;
                if (updateCacheMetrics) {
                    cacheStats.hit(caching, key.isPrimary(), key.getBlockType());
                    cacheStats.ioHit(timeTaken);
                }
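                // Shared-memory blocks reference the IOEngine's own buffers, so take
                // a reference to keep the entry from being evicted while in use.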
                if (cachedBlock.getMemoryType() == MemoryType.SHARED) {
                    bucketEntry.refCount.incrementAndGet();
                }
                bucketEntry.access(accessCount.incrementAndGet());
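                // A successful read ends any in-progress IO-error window.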
                if (this.ioErrorStartTime > 0) {
                    ioErrorStartTime = -1;
                }
                return cachedBlock;
            }
        } catch (IOException ioex) {
            LOG.error("Failed reading block " + key + " from bucket cache", ioex);
            checkIOErrorIsTolerated();
        } finally {
            lock.readLock().unlock();
        }
    }
    if (!repeat && updateCacheMetrics) {
        cacheStats.miss(caching, key.isPrimary(), key.getBlockType());
    }
    return null;
}
Also used: Cacheable (org.apache.hadoop.hbase.io.hfile.Cacheable), IOException (java.io.IOException), ReentrantReadWriteLock (java.util.concurrent.locks.ReentrantReadWriteLock)
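
Because getBlock bumps a reference count for MemoryType.SHARED blocks, callers are expected to release the block once done with it. Below is a minimal sketch of that caller-side protocol; the cache instance, HFile name, and offset are hypothetical stand-ins, and returnBlock is assumed to be the companion BlockCache method that decrements the count in this HBase version.

import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.Cacheable;
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;

public class BucketCacheReadExample {
    // `cache` stands in for an already-configured BucketCache; the
    // HFile name and offset are illustrative only.
    static void readAndRelease(BucketCache cache) {
        BlockCacheKey key = new BlockCacheKey("example-hfile", 0L);
        Cacheable block = cache.getBlock(key, true, false, true);
        if (block == null) {
            return; // miss in both ramCache and backingMap
        }
        try {
            // ... read from the block's buffer ...
        } finally {
            // For MemoryType.SHARED blocks, getBlock incremented the bucket
            // entry's refCount; returnBlock (assumed companion API) releases
            // that reference so the entry becomes evictable again.
            cache.returnBlock(key, block);
        }
    }
}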

Aggregations

Cacheable (org.apache.hadoop.hbase.io.hfile.Cacheable): 2 uses
IOException (java.io.IOException): 1 use
ReentrantReadWriteLock (java.util.concurrent.locks.ReentrantReadWriteLock): 1 use
Test (org.junit.Test): 1 use