Use of java.util.concurrent.locks.ReentrantReadWriteLock in project hbase by apache.
The class BucketCache, method getBlock.
/**
* Get the buffer of the block with the specified key.
* @param key block's cache key
* @param caching true if the caller caches blocks on cache misses
* @param repeat Whether this is a repeat lookup for the same block
* @param updateCacheMetrics Whether we should update cache metrics or not
* @return buffer of specified cache key, or null if not in cache
*/
@Override
public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat, boolean updateCacheMetrics) {
  if (!cacheEnabled) {
    return null;
  }
  RAMQueueEntry re = ramCache.get(key);
  if (re != null) {
    if (updateCacheMetrics) {
      cacheStats.hit(caching, key.isPrimary(), key.getBlockType());
    }
    re.access(accessCount.incrementAndGet());
    return re.getData();
  }
  BucketEntry bucketEntry = backingMap.get(key);
  if (bucketEntry != null) {
    long start = System.nanoTime();
    ReentrantReadWriteLock lock = offsetLock.getLock(bucketEntry.offset());
    try {
      lock.readLock().lock();
      // Re-check under the offset lock that the entry is still the one we looked up;
      // the bucket offset may have been freed and reused in the meantime, so the
      // mapping can only be trusted once the lock is held.
      if (bucketEntry.equals(backingMap.get(key))) {
        // TODO : change this area - should be removed after server cells and
        // 12295 are available
        int len = bucketEntry.getLength();
        if (LOG.isTraceEnabled()) {
          LOG.trace("Read offset=" + bucketEntry.offset() + ", len=" + len);
        }
        Cacheable cachedBlock = ioEngine.read(bucketEntry.offset(), len,
            bucketEntry.deserializerReference(this.deserialiserMap));
        long timeTaken = System.nanoTime() - start;
        if (updateCacheMetrics) {
          cacheStats.hit(caching, key.isPrimary(), key.getBlockType());
          cacheStats.ioHit(timeTaken);
        }
        if (cachedBlock.getMemoryType() == MemoryType.SHARED) {
          bucketEntry.refCount.incrementAndGet();
        }
        bucketEntry.access(accessCount.incrementAndGet());
        if (this.ioErrorStartTime > 0) {
          ioErrorStartTime = -1;
        }
        return cachedBlock;
      }
    } catch (IOException ioex) {
      LOG.error("Failed reading block " + key + " from bucket cache", ioex);
      checkIOErrorIsTolerated();
    } finally {
      lock.readLock().unlock();
    }
  }
  if (!repeat && updateCacheMetrics) {
    cacheStats.miss(caching, key.isPrimary(), key.getBlockType());
  }
  return null;
}
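The pattern worth noting here is the double lookup: the entry is fetched from backingMap once without locking, and then re-validated after the read lock for its offset has been acquired, because eviction may have freed and reused that offset in between. Below is a minimal, self-contained sketch of the same pattern; the names (OffsetStripedCache, Entry) are entirely hypothetical and not HBase's actual classes.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Hypothetical sketch of "look up, lock by offset, re-validate"; not HBase code.
class OffsetStripedCache<K, V> {

  static final class Entry<V> {
    final long offset;
    final V value;
    Entry(long offset, V value) { this.offset = offset; this.value = value; }
  }

  private final ConcurrentHashMap<K, Entry<V>> backingMap = new ConcurrentHashMap<>();
  private final ConcurrentHashMap<Long, ReentrantReadWriteLock> offsetLocks =
      new ConcurrentHashMap<>();

  V get(K key) {
    Entry<V> entry = backingMap.get(key);
    if (entry == null) {
      return null;
    }
    ReentrantReadWriteLock lock =
        offsetLocks.computeIfAbsent(entry.offset, o -> new ReentrantReadWriteLock());
    lock.readLock().lock();
    try {
      // Re-validate: only trust the mapping if it is still this exact entry,
      // since the offset could have been evicted and reused before we locked.
      if (entry.equals(backingMap.get(key))) {
        return entry.value;
      }
      return null;
    } finally {
      lock.readLock().unlock();
    }
  }
}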
Use of java.util.concurrent.locks.ReentrantReadWriteLock in project hbase by apache.
The class BucketCache, method evictBlock.
public boolean evictBlock(BlockCacheKey cacheKey, boolean deletedBlock) {
  if (!cacheEnabled) {
    return false;
  }
  RAMQueueEntry removedBlock = checkRamCache(cacheKey);
  BucketEntry bucketEntry = backingMap.get(cacheKey);
  if (bucketEntry == null) {
    if (removedBlock != null) {
      cacheStats.evicted(0, cacheKey.isPrimary());
      return true;
    } else {
      return false;
    }
  }
  ReentrantReadWriteLock lock = offsetLock.getLock(bucketEntry.offset());
  try {
    lock.writeLock().lock();
    int refCount = bucketEntry.refCount.get();
    if (refCount == 0) {
      if (backingMap.remove(cacheKey, bucketEntry)) {
        blockEvicted(cacheKey, bucketEntry, removedBlock == null);
      } else {
        return false;
      }
    } else {
      if (!deletedBlock) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("This block " + cacheKey + " is still referred by " + refCount
              + " readers. Can not be freed now");
        }
        return false;
      } else {
        if (LOG.isDebugEnabled()) {
          LOG.debug("This block " + cacheKey + " is still referred by " + refCount
              + " readers. Can not be freed now. Hence will mark this"
              + " for evicting at a later point");
        }
        bucketEntry.markedForEvict = true;
      }
    }
  } finally {
    lock.writeLock().unlock();
  }
  cacheStats.evicted(bucketEntry.getCachedTime(), cacheKey.isPrimary());
  return true;
}
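Two details carry the eviction logic: the refCount gate, which defers freeing blocks that readers still hold (markedForEvict), and backingMap.remove(cacheKey, bucketEntry), which removes the mapping only if the key is still bound to that exact entry. The two-argument remove is standard java.util.concurrent.ConcurrentMap behavior; here is a tiny standalone demonstration of its compare-and-remove semantics (the map contents are made up).

import java.util.concurrent.ConcurrentHashMap;

public class RemoveIfSameDemo {
  public static void main(String[] args) {
    ConcurrentHashMap<String, String> map = new ConcurrentHashMap<>();
    map.put("block-1", "entry-A");

    // false: "block-1" is currently mapped to "entry-A", not "entry-B";
    // this is how a racing replacement makes a stale eviction fail safely.
    System.out.println(map.remove("block-1", "entry-B"));

    // true: the mapping is still the one we observed, so it is removed.
    System.out.println(map.remove("block-1", "entry-A"));
  }
}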
Use of java.util.concurrent.locks.ReentrantReadWriteLock in project hadoop by apache.
The class TestProportionalCapacityPreemptionPolicy, method mockParentQueue.
ParentQueue mockParentQueue(ParentQueue p, int subqueues, Deque<ParentQueue> pqs) {
  ParentQueue pq = mock(ParentQueue.class);
  List<CSQueue> cqs = new ArrayList<CSQueue>();
  when(pq.getChildQueues()).thenReturn(cqs);
  ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  when(pq.getReadLock()).thenReturn(lock.readLock());
  // Ordering policy
  QueueOrderingPolicy policy = mock(QueueOrderingPolicy.class);
  when(policy.getConfigName())
      .thenReturn(CapacitySchedulerConfiguration.QUEUE_PRIORITY_UTILIZATION_ORDERING_POLICY);
  when(pq.getQueueOrderingPolicy()).thenReturn(policy);
  when(pq.getPriority()).thenReturn(Priority.newInstance(0));
  for (int i = 0; i < subqueues; ++i) {
    pqs.add(pq);
  }
  if (p != null) {
    p.getChildQueues().add(pq);
  }
  return pq;
}
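The point of stubbing getReadLock() with a real ReentrantReadWriteLock's readLock() is that any scheduler code exercised by the test can lock and unlock the mocked queue normally; Mockito's default answer for an unstubbed method would be null, and calling lock() on it would throw a NullPointerException. A hypothetical consumer illustrating why the stub must return a real lock (this method is an illustration, not part of the actual test or scheduler code):

// Hypothetical code under test: snapshots the children under the queue's read lock.
static List<CSQueue> snapshotChildQueues(ParentQueue queue) {
  queue.getReadLock().lock();
  try {
    return new ArrayList<>(queue.getChildQueues());
  } finally {
    queue.getReadLock().unlock();
  }
}

Because thenReturn(lock.readLock()) always yields the same ReadLock instance, the lock() and unlock() calls above pair up correctly even though getReadLock() is invoked twice.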
Use of java.util.concurrent.locks.ReentrantReadWriteLock in project hive by apache.
The class QueryTracker, method getDagLock.
private ReadWriteLock getDagLock(QueryIdentifier queryIdentifier) {
  lock.lock();
  try {
    ReadWriteLock dagLock = dagSpecificLocks.get(queryIdentifier);
    if (dagLock == null) {
      dagLock = new ReentrantReadWriteLock();
      dagSpecificLocks.put(queryIdentifier, dagLock);
    }
    return dagLock;
  } finally {
    lock.unlock();
  }
}
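On Java 8+, this lock-guarded get-or-create collapses into a single atomic call if the map is a ConcurrentHashMap. A sketch of the equivalent; the snippet above does not show the field declarations, so the types here are assumptions:

private final ConcurrentMap<QueryIdentifier, ReadWriteLock> dagSpecificLocks =
    new ConcurrentHashMap<>();

private ReadWriteLock getDagLock(QueryIdentifier queryIdentifier) {
  // computeIfAbsent is atomic: at most one ReentrantReadWriteLock is created per query,
  // with no separate mutex around the lookup.
  return dagSpecificLocks.computeIfAbsent(queryIdentifier, k -> new ReentrantReadWriteLock());
}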
Use of java.util.concurrent.locks.ReentrantReadWriteLock in project gocd by gocd.
The class DynamicReadWriteLock, method getLock.
private ReadWriteLock getLock(String key) {
  synchronized (key.intern()) {
    if (locks.containsKey(key)) {
      return locks.get(key);
    }
    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    locks.put(key, lock);
    return lock;
  }
}
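Synchronizing on key.intern() serializes lock creation per distinct key, but interned strings are JVM-wide, so any unrelated code that happens to synchronize on an equal string shares the same monitor; a ConcurrentHashMap.computeIfAbsent version, as sketched above, avoids that coupling. For completeness, a hypothetical caller of the per-key lock (getLock is private, so assume this sits in the same class; the method name is made up):

// Hypothetical caller: all writers for the same key are serialized,
// while readers of that key can proceed concurrently with each other.
public void withWriteLock(String key, Runnable action) {
  ReadWriteLock rwLock = getLock(key);
  rwLock.writeLock().lock();
  try {
    action.run();
  } finally {
    rwLock.writeLock().unlock();
  }
}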