Use of java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock in project zookeeper by apache.
The class ZKDatabase, method addCommittedProposal:
/**
* maintains a list of last <i>committedLog</i>
* or so committed requests. This is used for
* fast follower synchronization.
* @param request committed request
*/
public void addCommittedProposal(Request request) {
    WriteLock wl = logLock.writeLock();
    try {
        wl.lock();
        if (committedLog.size() > commitLogCount) {
            committedLog.removeFirst();
            minCommittedLog = committedLog.getFirst().packet.getZxid();
        }
        if (committedLog.isEmpty()) {
            minCommittedLog = request.zxid;
            maxCommittedLog = request.zxid;
        }
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        BinaryOutputArchive boa = BinaryOutputArchive.getArchive(baos);
        try {
            request.getHdr().serialize(boa, "hdr");
            if (request.getTxn() != null) {
                request.getTxn().serialize(boa, "txn");
            }
            baos.close();
        } catch (IOException e) {
            LOG.error("This really should be impossible", e);
        }
        QuorumPacket pp = new QuorumPacket(Leader.PROPOSAL, request.zxid, baos.toByteArray(), null);
        Proposal p = new Proposal();
        p.packet = pp;
        p.request = request;
        committedLog.add(p);
        maxCommittedLog = p.packet.getZxid();
    } finally {
        wl.unlock();
    }
}
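For readers unfamiliar with the read/write split behind this snippet, here is a minimal self-contained sketch of the same pattern, assuming nothing beyond the JDK. The BoundedCommitLog class and its method names are invented for illustration and are not ZooKeeper code: writers trim and append under the write lock, while a read-side method copies a snapshot under the read lock, similar in spirit to the read paths in ZKDatabase that take logLock.readLock().

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Hypothetical, simplified analogue of the committedLog handling above:
// a bounded deque guarded by one ReentrantReadWriteLock. Writers trim and
// append under the write lock; readers copy a consistent snapshot under the
// read lock so they never observe a concurrent trim.
final class BoundedCommitLog<T> {

    private final ReentrantReadWriteLock logLock = new ReentrantReadWriteLock();
    private final Deque<T> committedLog = new ArrayDeque<>();
    private final int commitLogCount;

    BoundedCommitLog(int commitLogCount) {
        this.commitLogCount = commitLogCount;
    }

    void addCommitted(T entry) {
        ReentrantReadWriteLock.WriteLock wl = logLock.writeLock();
        wl.lock();
        try {
            if (committedLog.size() > commitLogCount) {
                committedLog.removeFirst(); // drop the oldest entry, as the snippet above does
            }
            committedLog.addLast(entry);
        } finally {
            wl.unlock();
        }
    }

    List<T> snapshot() {
        ReentrantReadWriteLock.ReadLock rl = logLock.readLock();
        rl.lock();
        try {
            return new ArrayList<>(committedLog); // defensive copy taken under the read lock
        } finally {
            rl.unlock();
        }
    }
}

One detail worth noting in the ZooKeeper snippet: wl.lock() is called inside the try block rather than just before it. Since lock() does not throw, this is safe, although the more common idiom is to acquire the lock immediately before entering the try/finally.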
Use of java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock in project torodb by torodb.
The class ExclusiveWriteInternalTransaction, method createExclusiveWriteTransaction:
static ExclusiveWriteInternalTransaction createExclusiveWriteTransaction(BackendConnection backendConnection, MetainfoRepository metainfoRepository) {
    WriteLock exclusiveLock = exclusiveLock();
    exclusiveLock.lock();
    try {
        return createWriteTransaction(metainfoRepository, snapshot -> new ExclusiveWriteInternalTransaction(metainfoRepository, snapshot, backendConnection.openExclusiveWriteTransaction(), exclusiveLock));
    } catch (Throwable throwable) {
        exclusiveLock.unlock();
        throw throwable;
    }
}
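This snippet acquires the lock before the try block and unlocks only on the failure path: on success the lock stays held and is passed into the new transaction, which is presumably expected to release it when it is closed. A minimal sketch of that idiom follows, with invented names (ExclusiveResource, open); it is not torodb code. Note that a ReentrantReadWriteLock write lock must be released by the thread that acquired it, so a resource holding one has to be closed on the same thread.

import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;

// Hypothetical illustration of the "hand the lock to the resource" idiom:
// lock before the try, unlock only if construction fails, and otherwise let
// the returned object release the lock in close().
final class ExclusiveResource implements AutoCloseable {

    private static final ReentrantReadWriteLock GLOBAL_LOCK = new ReentrantReadWriteLock();

    private final WriteLock heldLock;

    private ExclusiveResource(WriteLock heldLock) {
        this.heldLock = heldLock;
    }

    static ExclusiveResource open() {
        WriteLock exclusiveLock = GLOBAL_LOCK.writeLock();
        exclusiveLock.lock();
        try {
            return new ExclusiveResource(exclusiveLock); // lock ownership travels with the resource
        } catch (Throwable t) {
            exclusiveLock.unlock(); // only unlock here if construction failed
            throw t;
        }
    }

    @Override
    public void close() {
        heldLock.unlock(); // normal release path, on the acquiring thread
    }
}

A caller would then use try (ExclusiveResource r = ExclusiveResource.open()) { ... } so the lock is released even if the guarded work throws.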
Use of java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock in project zm-mailbox by Zimbra.
The class RedoLogManager, method acquireExclusiveLock:
/**
* Acquires an exclusive lock on the log manager. When the log manager
* is locked this way, it is guaranteed that no thread is in the act
* of logging or doing a log rollover. In other words, the logs are
* quiesced.
*
* The thread calling this method must later release the lock by calling
* releaseExclusiveLock() method and passing the Sync object that was
* returned by this method.
*
* @return the Sync object to be used later to release the lock
* @throws InterruptedException
*/
protected WriteLock acquireExclusiveLock() throws InterruptedException {
    WriteLock writeLock = mRWLock.writeLock();
    writeLock.lockInterruptibly();
    return writeLock;
}
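As the javadoc describes, the caller holds the returned WriteLock across the quiesced section and later hands it back to releaseExclusiveLock(). A self-contained sketch of that acquire/release pairing is shown below; the Quiescer class and runQuiesced method are invented for illustration and are not zm-mailbox code.

import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;

// Hypothetical analogue of the acquire/release pair above: the acquire method
// returns the WriteLock so the caller can hand it back later, and
// lockInterruptibly() lets a shutdown interrupt a thread waiting for the lock.
final class Quiescer {

    private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();

    WriteLock acquireExclusiveLock() throws InterruptedException {
        WriteLock writeLock = rwLock.writeLock();
        writeLock.lockInterruptibly();
        return writeLock;
    }

    void releaseExclusiveLock(WriteLock writeLock) {
        writeLock.unlock(); // must run on the thread that acquired the lock
    }

    // Typical caller: quiesce, do the work, always release.
    void runQuiesced(Runnable task) throws InterruptedException {
        WriteLock lock = acquireExclusiveLock();
        try {
            task.run();
        } finally {
            releaseExclusiveLock(lock);
        }
    }
}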
Use of java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock in project zm-mailbox by Zimbra.
The class RedoLogManager, method rollover:
/**
* Do a log rollover if necessary. If force is true, rollover occurs if
* log is non-empty. If force is false, rollover happens only when it's
* needed according to isRolloverNeeded().
* @param force
* @param skipCheckpoint if true, skips writing Checkpoint entry at end of file
* @return java.io.File object for rolled over file; null if no rollover occurred
*/
protected File rollover(boolean force, boolean skipCheckpoint) {
    if (!mEnabled)
        return null;
    File rolledOverFile = null;
    // Grab a write lock on mRWLock. No thread will be
    // able to log a new item until rollover is done.
    WriteLock writeLock = mRWLock.writeLock();
    try {
        writeLock.lockInterruptibly();
    } catch (InterruptedException e) {
        synchronized (mShuttingDownGuard) {
            if (!mShuttingDown)
                ZimbraLog.redolog.error("InterruptedException during log rollover", e);
            else
                ZimbraLog.redolog.debug("Rollover interrupted during shutdown");
        }
        return rolledOverFile;
    }
    try {
        if (isRolloverNeeded(force)) {
            ZimbraLog.redolog.debug("Redo log rollover started");
            long start = System.currentTimeMillis();
            // Force the database to persist the committed changes to disk.
            // This is very important when running mysql with innodb_flush_log_at_trx_commit=0 (or 2).
            Db.getInstance().flushToDisk();
            if (!skipCheckpoint)
                checkpoint();
            synchronized (mActiveOps) {
                rolledOverFile = mLogWriter.rollover(mActiveOps);
                mInitialLogSize = mLogWriter.getSize();
            }
            long elapsed = System.currentTimeMillis() - start;
            ZimbraLog.redolog.info("Redo log rollover took " + elapsed + "ms");
        }
    } catch (IOException e) {
        ZimbraLog.redolog.error("IOException during redo log rollover");
        signalFatalError(e);
    } finally {
        writeLock.unlock();
    }
    /* TODO: Finish implementing Rollover as a replicated op.
     * Checking in this partial code to work on something else.
    if (rolledOverFile != null) {
        ZimbraLog.redolog.info("Rollover: " + rolledOverFile.getName());
        // Log rollover marker to redolog stream.
        Rollover ro = new Rollover(rolledOverFile);
        ro.start(System.currentTimeMillis());
        logOnly(ro, false); // Don't call log() as it may call rollover() in infinite loop.
        CommitTxn commit = new CommitTxn(ro);
        logOnly(commit, true);
    }
    */
    return rolledOverFile;
}
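What makes the write lock here quiesce logging is, presumably, that each logging thread holds the read lock of the same mRWLock while appending, so rollover() waits for every in-flight append and blocks new ones until the file swap completes. A hedged, self-contained sketch of that read-lock side follows; LogAppender and its methods are invented, and in the real class the appender and rollover() would share one lock rather than each owning their own.

import java.io.IOException;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;

// Hypothetical sketch of the read-lock side that rollover() fences out.
// Many appenders may hold the read lock concurrently; a rollover takes the
// write lock and therefore runs only when no append is in progress.
final class LogAppender {

    private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();

    void append(byte[] entry) throws IOException {
        ReadLock readLock = rwLock.readLock();
        readLock.lock();
        try {
            writeToCurrentLogFile(entry); // safe: the log file cannot be swapped while we hold the read lock
        } finally {
            readLock.unlock();
        }
    }

    private void writeToCurrentLogFile(byte[] entry) throws IOException {
        // placeholder for the actual file append
    }
}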
Use of java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock in project jgnash by ccavanaugh.
The class AbstractExpandingTableModel, method removeNode:
protected void removeNode(final E object) {
    WriteLock writeLock = rwl.writeLock();
    writeLock.lock();
    try {
        ExpandingTableNode<E> node = getNode(object);
        if (node != null) {
            objects.remove(object);
            visibleObjects.remove(node);
            keys.remove(object);
            fireTableDataChanged();
        }
    } finally {
        writeLock.unlock();
    }
}
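For completeness, here is a trimmed-down sketch of both sides of this table-model pattern; the GuardedListModel class is invented and is not jgnash code. Mutations run under the write lock, while the getters used for rendering take the read lock so they never observe a half-applied removal.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import javax.swing.table.AbstractTableModel;

// Hypothetical, simplified table model guarded by a ReentrantReadWriteLock:
// removeRow mutates under the write lock, getRowCount/getValueAt read under
// the read lock. Acquiring the read lock while holding the write lock is
// permitted, so listeners fired from removeRow can still query the model.
final class GuardedListModel extends AbstractTableModel {

    private final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock();
    private final List<String> visibleObjects = new ArrayList<>();

    void removeRow(String object) {
        WriteLock writeLock = rwl.writeLock();
        writeLock.lock();
        try {
            if (visibleObjects.remove(object)) {
                fireTableDataChanged();
            }
        } finally {
            writeLock.unlock();
        }
    }

    @Override
    public int getRowCount() {
        ReadLock readLock = rwl.readLock();
        readLock.lock();
        try {
            return visibleObjects.size();
        } finally {
            readLock.unlock();
        }
    }

    @Override
    public int getColumnCount() {
        return 1;
    }

    @Override
    public Object getValueAt(int rowIndex, int columnIndex) {
        ReadLock readLock = rwl.readLock();
        readLock.lock();
        try {
            return visibleObjects.get(rowIndex);
        } finally {
            readLock.unlock();
        }
    }
}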