Use of java.util.concurrent.locks.ReentrantReadWriteLock in project hbase by apache.
The class BucketCache, method forceEvict.
  // Does not check for the ref count. Just tries to evict it if found in the
  // bucket map.
  private boolean forceEvict(BlockCacheKey cacheKey) {
    if (!cacheEnabled) {
      return false;
    }
    RAMQueueEntry removedBlock = checkRamCache(cacheKey);
    BucketEntry bucketEntry = backingMap.get(cacheKey);
    if (bucketEntry == null) {
      if (removedBlock != null) {
        cacheStats.evicted(0, cacheKey.isPrimary());
        return true;
      } else {
        return false;
      }
    }
    ReentrantReadWriteLock lock = offsetLock.getLock(bucketEntry.offset());
    try {
      lock.writeLock().lock();
      if (backingMap.remove(cacheKey, bucketEntry)) {
        blockEvicted(cacheKey, bucketEntry, removedBlock == null);
      } else {
        return false;
      }
    } finally {
      lock.writeLock().unlock();
    }
    cacheStats.evicted(bucketEntry.getCachedTime(), cacheKey.isPrimary());
    return true;
  }
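The offsetLock here hands out one ReentrantReadWriteLock per bucket offset, so eviction takes the write lock only for the block it is removing while reads of other offsets proceed. A minimal sketch of that per-offset lock-pool idea, assuming a hypothetical OffsetLockPool class (illustrative only, not the HBase IdReadWriteLock implementation):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Hypothetical per-offset lock pool, shown only to illustrate the striping pattern.
class OffsetLockPool {
    private final ConcurrentHashMap<Long, ReentrantReadWriteLock> pool = new ConcurrentHashMap<>();

    ReentrantReadWriteLock getLock(long offset) {
        // One lock per offset, created lazily and shared by all callers for that offset.
        return pool.computeIfAbsent(offset, k -> new ReentrantReadWriteLock());
    }

    // Usage mirroring forceEvict: mutate the backing structure only while
    // holding the write lock for that block's offset.
    void withWriteLock(long offset, Runnable action) {
        ReentrantReadWriteLock lock = getLock(offset);
        lock.writeLock().lock();
        try {
            action.run();
        } finally {
            lock.writeLock().unlock();
        }
    }
}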
Use of java.util.concurrent.locks.ReentrantReadWriteLock in project hbase by apache.
The class IdReadWriteLock, method waitForWaiters.
  @VisibleForTesting
  public void waitForWaiters(long id, int numWaiters) throws InterruptedException {
    for (ReentrantReadWriteLock readWriteLock; ; ) {
      readWriteLock = lockPool.get(id);
      if (readWriteLock != null) {
        synchronized (readWriteLock) {
          if (readWriteLock.getQueueLength() >= numWaiters) {
            return;
          }
        }
      }
      Thread.sleep(50);
    }
  }
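getQueueLength() is documented as an estimate for monitoring rather than for synchronization control, which is why the helper re-checks it in a polling loop. A hedged, standalone sketch of exercising it the same way (class and thread names below are made up for the example):

import java.util.concurrent.locks.ReentrantReadWriteLock;

// Illustrative only: hold the write lock, let a reader queue behind it, and
// poll getQueueLength() (an estimate) until the waiter becomes visible.
public class QueueLengthDemo {
    public static void main(String[] args) throws InterruptedException {
        ReentrantReadWriteLock rwl = new ReentrantReadWriteLock();
        rwl.writeLock().lock();
        Thread waiter = new Thread(() -> {
            rwl.readLock().lock();   // blocks until the writer unlocks
            rwl.readLock().unlock();
        });
        waiter.start();
        while (rwl.getQueueLength() < 1) {  // spin until the reader is queued
            Thread.sleep(10);
        }
        rwl.writeLock().unlock();
        waiter.join();
    }
}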
Use of java.util.concurrent.locks.ReentrantReadWriteLock in project zookeeper by apache.
The class LearnerHandler, method syncFollower.
/**
 * Determine if we need to sync with the follower using DIFF/TRUNC/SNAP
 * and set up the follower to receive packets from the commit processor.
 *
 * @param peerLastZxid
 * @param db
 * @param leader
 * @return true if snapshot transfer is needed.
 */
public boolean syncFollower(long peerLastZxid, ZKDatabase db, Leader leader) {
    /*
     * When leader election is completed, the leader will set its
     * lastProcessedZxid to be (epoch << 32). There will be no txn associated
     * with this zxid.
     *
     * The learner will set its lastProcessedZxid to the same value if
     * it gets DIFF or SNAP from the leader. If the same learner comes
     * back to sync with the leader using this zxid, we will never find this
     * zxid in our history. In this case, we will ignore TRUNC logic and
     * always send DIFF if we have old enough history.
     */
    boolean isPeerNewEpochZxid = (peerLastZxid & 0xffffffffL) == 0;
    // Keep track of the latest zxid which has already been queued
    long currentZxid = peerLastZxid;
    boolean needSnap = true;
    boolean txnLogSyncEnabled = (db.getSnapshotSizeFactor() >= 0);
    ReentrantReadWriteLock lock = db.getLogLock();
    ReadLock rl = lock.readLock();
    try {
        rl.lock();
        long maxCommittedLog = db.getmaxCommittedLog();
        long minCommittedLog = db.getminCommittedLog();
        long lastProcessedZxid = db.getDataTreeLastProcessedZxid();
        LOG.info("Synchronizing with Follower sid: {} maxCommittedLog=0x{}"
            + " minCommittedLog=0x{} lastProcessedZxid=0x{}"
            + " peerLastZxid=0x{}", getSid(),
            Long.toHexString(maxCommittedLog),
            Long.toHexString(minCommittedLog),
            Long.toHexString(lastProcessedZxid),
            Long.toHexString(peerLastZxid));
        if (db.getCommittedLog().isEmpty()) {
            /*
             * It is possible that committedLog is empty. In that case
             * setting these values to the latest txn in the leader db
             * will reduce the number of cases we need to handle.
             *
             * Here is how each case is handled by the if block below:
             * 1. lastProcessedZxid == peerZxid -> Handled by (2)
             * 2. lastProcessedZxid < peerZxid  -> Handled by (3)
             * 3. lastProcessedZxid > peerZxid  -> Handled by (5)
             */
            minCommittedLog = lastProcessedZxid;
            maxCommittedLog = lastProcessedZxid;
        }
        if (forceSnapSync) {
            // Force leader to use snapshot to sync with follower
            LOG.warn("Forcing snapshot sync - should not see this in production");
        } else if (lastProcessedZxid == peerLastZxid) {
            // Follower is already in sync with us, send empty diff
            LOG.info("Sending DIFF zxid=0x" + Long.toHexString(peerLastZxid)
                + " for peer sid: " + getSid());
            queueOpPacket(Leader.DIFF, peerLastZxid);
            needOpPacket = false;
            needSnap = false;
        } else if (peerLastZxid > maxCommittedLog && !isPeerNewEpochZxid) {
            // Newer than committedLog, send trunc and done
            LOG.debug("Sending TRUNC to follower zxidToSend=0x"
                + Long.toHexString(maxCommittedLog) + " for peer sid:" + getSid());
            queueOpPacket(Leader.TRUNC, maxCommittedLog);
            currentZxid = maxCommittedLog;
            needOpPacket = false;
            needSnap = false;
        } else if ((maxCommittedLog >= peerLastZxid) && (minCommittedLog <= peerLastZxid)) {
            // Follower is within the committedLog range
            LOG.info("Using committedLog for peer sid: " + getSid());
            Iterator<Proposal> itr = db.getCommittedLog().iterator();
            currentZxid = queueCommittedProposals(itr, peerLastZxid, null, maxCommittedLog);
            needSnap = false;
        } else if (peerLastZxid < minCommittedLog && txnLogSyncEnabled) {
            // Use txnlog and committedLog to sync
            // Calculate sizeLimit that we allow to retrieve txnlog from disk
            long sizeLimit = db.calculateTxnLogSizeLimit();
            // This method can return an empty iterator if the requested zxid
            // is older than the on-disk txnlog
            Iterator<Proposal> txnLogItr = db.getProposalsFromTxnLog(peerLastZxid, sizeLimit);
            if (txnLogItr.hasNext()) {
                LOG.info("Use txnlog and committedLog for peer sid: " + getSid());
                currentZxid = queueCommittedProposals(txnLogItr, peerLastZxid,
                    minCommittedLog, maxCommittedLog);
                LOG.debug("Queueing committedLog 0x" + Long.toHexString(currentZxid));
                Iterator<Proposal> committedLogItr = db.getCommittedLog().iterator();
                currentZxid = queueCommittedProposals(committedLogItr, currentZxid,
                    null, maxCommittedLog);
                needSnap = false;
            }
            // closing the resources
            if (txnLogItr instanceof TxnLogProposalIterator) {
                TxnLogProposalIterator txnProposalItr = (TxnLogProposalIterator) txnLogItr;
                txnProposalItr.close();
            }
        } else {
            LOG.warn("Unhandled scenario for peer sid: " + getSid());
        }
        LOG.debug("Start forwarding 0x" + Long.toHexString(currentZxid)
            + " for peer sid: " + getSid());
        leaderLastZxid = leader.startForwarding(this, currentZxid);
    } finally {
        rl.unlock();
    }
    if (needOpPacket && !needSnap) {
        // This should never happen, but we should fall back to sending
        // a snapshot just in case.
        LOG.error("Unhandled scenario for peer sid: " + getSid()
            + " fall back to use snapshot");
        needSnap = true;
    }
    return needSnap;
}
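The concurrency point is that the whole DIFF/TRUNC/SNAP decision runs under the ZKDatabase log read lock, so the committed-log bounds and the proposal iterators are observed as one consistent view while writers append under the matching write lock. A minimal sketch of that guarded-read pattern, assuming an illustrative CommittedLogView class (not the ZooKeeper ZKDatabase API):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Illustrative sketch: writers append under the write lock, readers take a
// consistent snapshot of the log contents under the read lock.
class CommittedLogView {
    private final ReentrantReadWriteLock logLock = new ReentrantReadWriteLock();
    private final List<Long> committedZxids = new ArrayList<>();

    void append(long zxid) {
        logLock.writeLock().lock();
        try {
            committedZxids.add(zxid);
        } finally {
            logLock.writeLock().unlock();
        }
    }

    List<Long> snapshot() {
        ReentrantReadWriteLock.ReadLock rl = logLock.readLock();
        rl.lock();
        try {
            // Everything read here is observed under the same read lock,
            // so no concurrent append can interleave with the copy.
            return new ArrayList<>(committedZxids);
        } finally {
            rl.unlock();
        }
    }
}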
Use of java.util.concurrent.locks.ReentrantReadWriteLock in project atlas by alibaba.
The class BundleLock, method ReadUnLock.
public static void ReadUnLock(String bundle) {
    ReentrantReadWriteLock lock = null;
    synchronized (bundleIdentifierMap) {
        lock = bundleIdentifierMap.get(bundle);
        if (lock == null) {
            return;
        }
    }
    lock.readLock().unlock();
}
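Only the map lookup is synchronized; the unlock itself happens outside that block, which works because ReentrantReadWriteLock tracks read holds per thread, so the caller must be the same thread that earlier acquired the read lock for this bundle. A hedged sketch of the matching acquire side, assuming a HashMap-backed lock map like the one above (illustrative, not the atlas BundleLock source):

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Illustrative acquire side for a per-bundle lock map; mirrors the unlock path
// shown above but is a sketch, not the atlas implementation.
class BundleLocks {
    private static final Map<String, ReentrantReadWriteLock> bundleIdentifierMap = new HashMap<>();

    public static void readLock(String bundle) {
        ReentrantReadWriteLock lock;
        synchronized (bundleIdentifierMap) {
            // Create the per-bundle lock lazily while holding the map guard.
            lock = bundleIdentifierMap.computeIfAbsent(bundle, b -> new ReentrantReadWriteLock());
        }
        lock.readLock().lock();   // acquired outside the map guard, like the unlock path
    }
}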
Use of java.util.concurrent.locks.ReentrantReadWriteLock in project ignite by apache.
The class ReadWriteLockMultiThreadedTest, method testReadLockAcquire.
/**
 * @throws Exception If failed.
 */
@SuppressWarnings({"LockAcquiredButNotSafelyReleased"})
public void testReadLockAcquire() throws Exception {
    final ReadWriteLock lock = new ReentrantReadWriteLock();
    lock.writeLock().lock();
    X.println("Write lock acquired: " + lock);
    IgniteInternalFuture fut = GridTestUtils.runMultiThreadedAsync(new Callable<Object>() {
        @Nullable @Override public Object call() throws Exception {
            X.println("Attempting to acquire read lock: " + lock);
            lock.readLock().lock();
            try {
                X.println("Read lock acquired: " + lock);
                return null;
            } finally {
                lock.readLock().unlock();
            }
        }
    }, 1, "read-lock");
    Thread.sleep(2000);
    X.println(">>> Dump threads now! <<<");
    Thread.sleep(15 * 1000);
    X.println("Write lock released.");
    lock.writeLock().unlock();
    fut.get();
}
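The test prints the lock itself because ReentrantReadWriteLock's toString() reports the current write-lock owner and read-lock count, which is useful when inspecting the requested thread dump. A standalone JDK-only sketch of the same scenario, without the Ignite test utilities (class name is illustrative):

import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Standalone sketch of the test's idea: a reader blocks while the writer holds
// the lock, and proceeds once the write lock is released.
public class ReadBehindWriteDemo {
    public static void main(String[] args) throws InterruptedException {
        ReadWriteLock lock = new ReentrantReadWriteLock();
        lock.writeLock().lock();
        System.out.println("Write lock acquired: " + lock);
        Thread reader = new Thread(() -> {
            System.out.println("Attempting to acquire read lock: " + lock);
            lock.readLock().lock();      // parks until the writer unlocks
            try {
                System.out.println("Read lock acquired: " + lock);
            } finally {
                lock.readLock().unlock();
            }
        }, "read-lock");
        reader.start();
        Thread.sleep(2000);              // give the reader time to block
        System.out.println("Write lock released.");
        lock.writeLock().unlock();
        reader.join();
    }
}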