Use of java.util.concurrent.Semaphore in project tomcat by apache.
The class SemaphoreValve, method startInternal.
/**
 * Start this component and implement the requirements
 * of {@link org.apache.catalina.util.LifecycleBase#startInternal()}.
 *
 * @exception LifecycleException if this component detects a fatal error
 *                               that prevents this component from being used
 */
@Override
protected synchronized void startInternal() throws LifecycleException {
    semaphore = new Semaphore(concurrency, fairness);
    setState(LifecycleState.STARTING);
}
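SemaphoreValve only constructs the semaphore here, passing a permit count (concurrency) and a fairness flag; the acquire/release pair around request processing lives elsewhere in the valve. As a standalone sketch of the same throttling pattern (the class name, permit count, and workload below are illustrative assumptions, not Tomcat code):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

public class FairThrottleDemo {

    // Mirrors the valve's construction: a permit count plus a fairness flag.
    // With fairness enabled, waiting threads acquire permits in FIFO order.
    private static final Semaphore GATE = new Semaphore(10, true);

    static void handle(int requestId) throws InterruptedException {
        GATE.acquire(); // blocks while 10 requests are already in flight
        try {
            System.out.println("handling request " + requestId);
            Thread.sleep(50); // stand-in for real request processing
        } finally {
            GATE.release(); // always return the permit
        }
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(50);
        for (int i = 0; i < 100; i++) {
            final int id = i;
            pool.submit(() -> {
                try {
                    handle(id);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            });
        }
        pool.shutdown();
    }
}

Even with 50 pool threads, at most 10 ever run handle's critical section at once; the rest queue fairly on the semaphore.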
Use of java.util.concurrent.Semaphore in project zookeeper by apache.
The class DataTreeTest, method testSerializeDoesntLockDataNodeWhileWriting.
/*
 * ZOOKEEPER-2201 - OutputArchive.writeRecord can block for long periods of
 * time, so we must call it outside of the node lock.
 * We call tree.serialize, which calls our modified writeRecord method that
 * blocks until it can verify that a separate thread can lock the DataNode
 * currently being written, i.e. that DataTree.serializeNode does not hold
 * the DataNode lock while calling OutputArchive.writeRecord.
 */
@Test(timeout = 60000)
public void testSerializeDoesntLockDataNodeWhileWriting() throws Exception {
    DataTree tree = new DataTree();
    tree.createNode("/marker", new byte[] { 42 }, null, -1, 1, 1, 1);
    final DataNode markerNode = tree.getNode("/marker");
    final AtomicBoolean ranTestCase = new AtomicBoolean();
    DataOutputStream out = new DataOutputStream(new ByteArrayOutputStream());
    BinaryOutputArchive oa = new BinaryOutputArchive(out) {

        @Override
        public void writeRecord(Record r, String tag) throws IOException {
            // Only intercept the marker DataNode created above, identified by
            // its single-byte payload of 42.
            if (r instanceof DataNode) {
                DataNode node = (DataNode) r;
                if (node.data.length == 1 && node.data[0] == 42) {
                    final Semaphore semaphore = new Semaphore(0);
                    new Thread(new Runnable() {

                        @Override
                        public void run() {
                            synchronized (markerNode) {
                                // When we lock markerNode, allow writeRecord to continue
                                semaphore.release();
                            }
                        }
                    }).start();
                    try {
                        boolean acquired = semaphore.tryAcquire(30, TimeUnit.SECONDS);
                        // This is the real assertion - could another thread lock
                        // the DataNode we're currently writing?
                        Assert.assertTrue("Couldn't acquire a lock on the DataNode while we were calling tree.serialize", acquired);
                    } catch (InterruptedException e1) {
                        throw new RuntimeException(e1);
                    }
                    ranTestCase.set(true);
                }
            }
            super.writeRecord(r, tag);
        }
    };
    tree.serialize(oa, "test");
    // Let's make sure that we hit the code that ran the real assertion above
    Assert.assertTrue("Didn't find the expected node", ranTestCase.get());
}
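The core trick is a zero-permit semaphore used as a lock probe: a helper thread tries to enter the monitor and releases a permit only once it succeeds, so a timed tryAcquire on the main thread answers "could someone else take this lock right now?". A minimal sketch of that pattern distilled into a hypothetical helper (names are illustrative, not ZooKeeper code):

import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

public final class LockProbe {

    /**
     * Returns true if another thread was able to lock {@code monitor}
     * within the timeout, false if the caller (or anyone else) held it
     * for the whole window.
     */
    public static boolean lockableByOtherThread(Object monitor, long timeout, TimeUnit unit)
            throws InterruptedException {
        final Semaphore acquiredSignal = new Semaphore(0); // starts with no permits
        Thread prober = new Thread(() -> {
            synchronized (monitor) {      // blocks for as long as someone holds the lock
                acquiredSignal.release(); // signal: the lock was obtainable
            }
        });
        prober.setDaemon(true); // don't keep the JVM alive if the probe stays blocked
        prober.start();
        return acquiredSignal.tryAcquire(timeout, unit);
    }
}

In the test above the same handshake runs inline inside writeRecord while tree.serialize executes on the calling thread; a timeout there would mean DataTree.serializeNode was holding the node lock during the write.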
Use of java.util.concurrent.Semaphore in project zookeeper by apache.
The class FollowerResyncConcurrencyTest, method testResyncByDiffAfterFollowerCrashes.
/**
 * This test:
 * Starts up 3 ZKs. The non-leader ZKs are writing to the cluster.
 * Shuts down one of the non-leader ZKs.
 * Restarts it after sessions have expired but <500 txns have taken place (so it gets a diff).
 * Shuts it down immediately after restarting, then starts a separate thread running other transactions.
 * Restarts it to a diff while transactions are running in the leader.
 *
 * Before the fixes for ZOOKEEPER-962, restarting off of a diff could produce an inconsistent view of the
 * data, missing transactions that completed during diff syncing. The follower would also be considered
 * "restarted" before all forwarded transactions were completely processed, so restarting would cause a
 * snap file with a too-high zxid to be written, and transactions would be missed.
 *
 * This test should fairly reliably catch the failure of restarting the server before all diff messages
 * have been processed; however, due to the transient nature of the system it may not catch failures due
 * to concurrent processing of transactions during the leader's diff forwarding.
 *
 * @throws IOException
 * @throws InterruptedException
 * @throws KeeperException
 * @throws Throwable
 */
@Test
public void testResyncByDiffAfterFollowerCrashes() throws IOException, InterruptedException, KeeperException, Throwable {
    final Semaphore sem = new Semaphore(0);
    QuorumUtil qu = new QuorumUtil(1);
    qu.startAll();
    CountdownWatcher watcher1 = new CountdownWatcher();
    CountdownWatcher watcher2 = new CountdownWatcher();
    CountdownWatcher watcher3 = new CountdownWatcher();
    int index = 1;
    while (qu.getPeer(index).peer.leader == null) {
        index++;
    }
    Leader leader = qu.getPeer(index).peer.leader;
    assertNotNull(leader);
    /* Reusing the index variable to select a follower to connect to */
    index = (index == 1) ? 2 : 1;
    LOG.info("Connecting to follower: {}", index);
    final ZooKeeper zk1 = createClient(qu.getPeer(index).peer.getClientPort(), watcher1);
    LOG.info("zk1 has session id 0x{}", Long.toHexString(zk1.getSessionId()));
    final ZooKeeper zk2 = createClient(qu.getPeer(index).peer.getClientPort(), watcher2);
    LOG.info("zk2 has session id 0x{}", Long.toHexString(zk2.getSessionId()));
    final ZooKeeper zk3 = createClient(qu.getPeer(3).peer.getClientPort(), watcher3);
    LOG.info("zk3 has session id 0x{}", Long.toHexString(zk3.getSessionId()));
    zk1.create("/first", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    zk2.create("/mybar", null, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL);
    final AtomicBoolean runNow = new AtomicBoolean(false);
    Thread mytestfooThread = new Thread(new Runnable() {

        @Override
        public void run() {
            int inSyncCounter = 0;
            while (inSyncCounter < 400) {
                if (runNow.get()) {
                    zk3.create("/mytestfoo", null, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL, new AsyncCallback.StringCallback() {

                        @Override
                        public void processResult(int rc, String path, Object ctx, String name) {
                            pending.decrementAndGet();
                            counter.incrementAndGet();
                            if (rc != 0) {
                                errors.incrementAndGet();
                            }
                            if (counter.get() > 7300) {
                                sem.release();
                            }
                        }
                    }, null);
                    pending.incrementAndGet();
                    try {
                        Thread.sleep(10);
                    } catch (Exception e) {
                        // ignore and keep issuing creates
                    }
                    inSyncCounter++;
                } else {
                    Thread.yield();
                }
            }
        }
    });
    mytestfooThread.start();
    for (int i = 0; i < 5000; i++) {
        zk2.create("/mybar", null, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL, new AsyncCallback.StringCallback() {

            @Override
            public void processResult(int rc, String path, Object ctx, String name) {
                pending.decrementAndGet();
                counter.incrementAndGet();
                if (rc != 0) {
                    errors.incrementAndGet();
                }
                if (counter.get() > 7300) {
                    sem.release();
                }
            }
        }, null);
        pending.incrementAndGet();
        if (i == 1000) {
            qu.shutdown(index);
            Thread.sleep(1100);
            LOG.info("Shutting down s1");
        }
        if (i == 1100 || i == 1150 || i == 1200) {
            Thread.sleep(1000);
        }
        if (i == 1200) {
            qu.startThenShutdown(index);
            runNow.set(true);
            qu.restart(index);
            LOG.info("Setting up server: {}", index);
        }
        if (i >= 1000 && i % 2 == 0) {
            zk3.create("/newbaz", null, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL, new AsyncCallback.StringCallback() {

                @Override
                public void processResult(int rc, String path, Object ctx, String name) {
                    pending.decrementAndGet();
                    counter.incrementAndGet();
                    if (rc != 0) {
                        errors.incrementAndGet();
                    }
                    if (counter.get() > 7300) {
                        sem.release();
                    }
                }
            }, null);
            pending.incrementAndGet();
        }
        if (i == 1050 || i == 1100 || i == 1150) {
            Thread.sleep(1000);
        }
    }
    // Wait until all updates return
    if (!sem.tryAcquire(ClientBase.CONNECTION_TIMEOUT, TimeUnit.MILLISECONDS)) {
        LOG.warn("Did not acquire semaphore fast enough");
    }
    mytestfooThread.join(ClientBase.CONNECTION_TIMEOUT);
    if (mytestfooThread.isAlive()) {
        LOG.error("mytestfooThread is still alive");
    }
    assertTrue(waitForPendingRequests(60));
    assertTrue(waitForSync(qu, index, 10));
    // Verify that the server is following and has the same epoch as the leader
    verifyState(qu, index, leader);
    zk1.close();
    zk2.close();
    zk3.close();
    qu.shutdownAll();
}
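Here the Semaphore(0) serves as a completion latch for a storm of asynchronous creates: every callback bumps a shared counter, the callback that pushes it past a threshold releases a permit, and the main thread blocks on a timed tryAcquire. A minimal standalone sketch of the same idea (class name, thread-per-task async stand-in, and counts are illustrative assumptions, not ZooKeeper code):

import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class AsyncCompletionLatchDemo {

    private static final int TOTAL = 1000;
    private static final AtomicInteger completed = new AtomicInteger();
    private static final Semaphore done = new Semaphore(0); // no permits until work finishes

    // Stand-in for an async API: runs the callback on another thread.
    static void submitAsync(Runnable callback) {
        new Thread(callback).start();
    }

    public static void main(String[] args) throws InterruptedException {
        for (int i = 0; i < TOTAL; i++) {
            submitAsync(() -> {
                // ... real work would happen here ...
                if (completed.incrementAndGet() == TOTAL) {
                    done.release(); // the last callback unblocks the waiter
                }
            });
        }
        // Bounded wait, mirroring sem.tryAcquire(CONNECTION_TIMEOUT, MILLISECONDS) above
        if (!done.tryAcquire(10, TimeUnit.SECONDS)) {
            System.err.println("Did not acquire semaphore fast enough");
        }
        System.out.println("completed = " + completed.get());
    }
}

The timed tryAcquire matters in tests: a plain acquire() would hang the suite forever if a callback is lost, whereas the timeout converts that into a diagnosable failure.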
Use of java.util.concurrent.Semaphore in project zookeeper by apache.
The class FLENewEpochTest, method setUp.
@Before
public void setUp() throws Exception {
    count = 3;
    peers = new HashMap<Long, QuorumServer>(count);
    threads = new ArrayList<LEThread>(count);
    tmpdir = new File[count];
    port = new int[count];
    round = new int[3];
    round[0] = 0;
    round[1] = 0;
    round[2] = 0;
    start0 = new Semaphore(0);
    finish0 = new Semaphore(0);
    finish3 = new Semaphore(0);
}
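Semaphores created with zero permits, as start0, finish0, and finish3 are here, act as one-shot signals between the test and its election threads: a thread blocks in acquire() until some other thread calls release(). A minimal sketch of that handshake (hypothetical names, not the FLE test itself):

import java.util.concurrent.Semaphore;

public class StartFinishSignalDemo {

    public static void main(String[] args) throws InterruptedException {
        final Semaphore start = new Semaphore(0);  // worker blocks on this
        final Semaphore finish = new Semaphore(0); // main blocks on this

        Thread worker = new Thread(() -> {
            try {
                start.acquire();  // wait until the test says "go"
                System.out.println("worker running");
                finish.release(); // tell the test we are done
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        worker.start();

        start.release();  // release the worker
        finish.acquire(); // wait for it to complete
        worker.join();
    }
}

Unlike a CountDownLatch, a semaphore signal can be reused: releasing again re-arms the handshake for another round, which suits multi-round election tests.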
Use of java.util.concurrent.Semaphore in project zookeeper by apache.
The class FLERestartTest, method setUp.
@Before
public void setUp() throws Exception {
    count = 3;
    peers = new HashMap<Long, QuorumServer>(count);
    restartThreads = new ArrayList<FLERestartThread>(count);
    tmpdir = new File[count];
    port = new int[count];
    finish = new Semaphore(0);
}
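A single zero-permit semaphore like finish can also wait on several threads at once: each thread releases one permit as it ends, and the test acquires one permit per thread. A short sketch of that variant (illustrative names and counts only):

import java.util.concurrent.Semaphore;

public class WaitForAllDemo {

    public static void main(String[] args) throws InterruptedException {
        final int count = 3;
        final Semaphore finish = new Semaphore(0);

        for (int i = 0; i < count; i++) {
            final int id = i;
            new Thread(() -> {
                System.out.println("thread " + id + " finished");
                finish.release(); // one permit per finished thread
            }).start();
        }

        finish.acquire(count); // blocks until all three threads have released
        System.out.println("all threads finished");
    }
}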