Use of java.util.concurrent.CyclicBarrier in project lucene-solr by apache.
From the class BaseLockFactoryTestCase, method testObtainConcurrently:
public void testObtainConcurrently() throws InterruptedException, IOException {
  Path tempPath = createTempDir();
  final Directory directory = getDirectory(tempPath);
  final AtomicBoolean running = new AtomicBoolean(true);
  final AtomicInteger atomicCounter = new AtomicInteger(0);
  final ReentrantLock assertingLock = new ReentrantLock();
  int numThreads = 2 + random().nextInt(10);
  final int runs = atLeast(10000);
  CyclicBarrier barrier = new CyclicBarrier(numThreads);
  Thread[] threads = new Thread[numThreads];
  for (int i = 0; i < threads.length; i++) {
    threads[i] = new Thread() {
      @Override
      public void run() {
        try {
          // Start gate: all threads begin contending for the lock at once.
          barrier.await();
        } catch (Exception e) {
          throw new RuntimeException(e);
        }
        while (running.get()) {
          try (Lock lock = directory.obtainLock("foo.lock")) {
            // While we hold the directory lock, no other thread may hold it.
            assertFalse(assertingLock.isLocked());
            if (assertingLock.tryLock()) {
              assertingLock.unlock();
            } else {
              fail();
            }
            // stupid compiler
            assert lock != null;
          } catch (IOException ex) {
            // Lock is currently held by another thread; retry.
          }
          if (atomicCounter.incrementAndGet() > runs) {
            running.set(false);
          }
        }
      }
    };
    threads[i].start();
  }
  for (int i = 0; i < threads.length; i++) {
    threads[i].join();
  }
  directory.close();
}
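Here the barrier acts purely as a one-shot start gate: every worker blocks on barrier.await() so that all threads begin hammering obtainLock at the same instant, maximizing contention. A minimal, self-contained sketch of that start-gate pattern (class and variable names are illustrative, not taken from the Lucene source):

import java.util.concurrent.CyclicBarrier;

public class StartGateDemo {
  public static void main(String[] args) throws InterruptedException {
    final int numThreads = 4;
    // One party per worker; no barrier action is needed for a plain start gate.
    final CyclicBarrier startGate = new CyclicBarrier(numThreads);
    Thread[] workers = new Thread[numThreads];
    for (int i = 0; i < numThreads; i++) {
      final int id = i;
      workers[i] = new Thread(() -> {
        try {
          startGate.await(); // block until every worker has been started
        } catch (Exception e) {
          throw new RuntimeException(e);
        }
        System.out.println("worker " + id + " released at " + System.nanoTime());
      });
      workers[i].start();
    }
    for (Thread w : workers) {
      w.join();
    }
  }
}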
Use of java.util.concurrent.CyclicBarrier in project lucene-solr by apache.
From the class TestFieldCache, method testGetDocsWithFieldThreadSafety:
public void testGetDocsWithFieldThreadSafety() throws Exception {
  final FieldCache cache = FieldCache.DEFAULT;
  cache.purgeAllCaches();
  int NUM_THREADS = 3;
  Thread[] threads = new Thread[NUM_THREADS];
  final AtomicBoolean failed = new AtomicBoolean();
  final AtomicInteger iters = new AtomicInteger();
  final int NUM_ITER = 200 * RANDOM_MULTIPLIER;
  final CyclicBarrier restart = new CyclicBarrier(NUM_THREADS, new Runnable() {
    @Override
    public void run() {
      cache.purgeAllCaches();
      iters.incrementAndGet();
    }
  });
  for (int threadIDX = 0; threadIDX < NUM_THREADS; threadIDX++) {
    threads[threadIDX] = new Thread() {
      @Override
      public void run() {
        try {
          while (!failed.get()) {
            final int op = random().nextInt(3);
            if (op == 0) {
              // Purge all caches & resume, once all
              // threads get here:
              restart.await();
              if (iters.get() >= NUM_ITER) {
                break;
              }
            } else if (op == 1) {
              Bits docsWithField = cache.getDocsWithField(reader, "sparse", FieldCache.INT_POINT_PARSER);
              for (int i = 0; i < docsWithField.length(); i++) {
                assertEquals(i % 2 == 0, docsWithField.get(i));
              }
            } else {
              NumericDocValues ints = cache.getNumerics(reader, "sparse", FieldCache.INT_POINT_PARSER);
              for (int i = 0; i < reader.maxDoc(); i++) {
                if (i % 2 == 0) {
                  assertEquals(i, ints.nextDoc());
                  assertEquals(i, ints.longValue());
                }
              }
            }
          }
        } catch (Throwable t) {
          failed.set(true);
          restart.reset();
          throw new RuntimeException(t);
        }
      }
    };
    threads[threadIDX].start();
  }
  for (int threadIDX = 0; threadIDX < NUM_THREADS; threadIDX++) {
    threads[threadIDX].join();
  }
  assertFalse(failed.get());
}
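This test exercises two further CyclicBarrier features: the barrier-action Runnable passed to the constructor, which runs exactly once per trip in the last thread to arrive (here purging the caches before anyone resumes), and reset(), which breaks the barrier so threads still waiting fail fast with BrokenBarrierException instead of hanging after a failure. A minimal sketch of the barrier-action form, independent of the Lucene test (all names are illustrative):

import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.atomic.AtomicInteger;

public class BarrierActionDemo {
  public static void main(String[] args) throws InterruptedException {
    final int parties = 3;
    final AtomicInteger generation = new AtomicInteger();
    // The barrier action runs exactly once per trip, in the last thread
    // to arrive, before any waiter is released.
    final CyclicBarrier barrier = new CyclicBarrier(parties,
        () -> System.out.println("generation " + generation.incrementAndGet() + " complete"));
    Thread[] workers = new Thread[parties];
    for (int i = 0; i < parties; i++) {
      workers[i] = new Thread(() -> {
        try {
          for (int round = 0; round < 3; round++) {
            // ... per-round work would go here ...
            barrier.await(); // the barrier is reusable across rounds
          }
        } catch (InterruptedException | BrokenBarrierException e) {
          // A reset() elsewhere, or interruption, lands waiters here.
          Thread.currentThread().interrupt();
        }
      });
      workers[i].start();
    }
    for (Thread w : workers) {
      w.join();
    }
  }
}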
Use of java.util.concurrent.CyclicBarrier in project ignite by apache.
From the class TxOptimisticDeadlockDetectionTest, method doTestDeadlock:
/**
 * @throws Exception If failed.
 */
private void doTestDeadlock(final int txCnt, final boolean loc, boolean lockPrimaryFirst,
    final boolean clientTx, final IgniteClosure<Integer, Object> transformer) throws Exception {
    log.info(">>> Test deadlock [txCnt=" + txCnt + ", loc=" + loc + ", lockPrimaryFirst=" + lockPrimaryFirst +
        ", clientTx=" + clientTx + ", transformer=" + transformer.getClass().getName() + ']');
    TestCommunicationSpi.init(txCnt);
    final AtomicInteger threadCnt = new AtomicInteger();
    final CyclicBarrier barrier = new CyclicBarrier(txCnt);
    final AtomicReference<TransactionDeadlockException> deadlockErr = new AtomicReference<>();
    final List<List<Integer>> keySets = generateKeys(txCnt, loc, !lockPrimaryFirst);
    final Set<Integer> involvedKeys = new GridConcurrentHashSet<>();
    final Set<Integer> involvedLockedKeys = new GridConcurrentHashSet<>();
    final Set<IgniteInternalTx> involvedTxs = new GridConcurrentHashSet<>();
    IgniteInternalFuture<Long> fut = GridTestUtils.runMultiThreadedAsync(new Runnable() {
        @Override
        public void run() {
            int threadNum = threadCnt.incrementAndGet();
            Ignite ignite = loc ? ignite(0) : ignite(clientTx ? threadNum - 1 + txCnt : threadNum - 1);
            IgniteCache<Object, Integer> cache = ignite.cache(CACHE_NAME);
            List<Integer> keys = keySets.get(threadNum - 1);
            int txTimeout = 500 + txCnt * 100;
            try (Transaction tx = ignite.transactions().txStart(OPTIMISTIC, REPEATABLE_READ, txTimeout, 0)) {
                IgniteInternalTx tx0 = ((TransactionProxyImpl) tx).tx();
                involvedTxs.add(tx0);
                Integer key = keys.get(0);
                involvedKeys.add(key);
                Object k;
                log.info(">>> Performs put [node=" + ((IgniteKernal) ignite).localNode().id() +
                    ", tx=" + tx.xid() + ", key=" + transformer.apply(key) + ']');
                cache.put(transformer.apply(key), 0);
                involvedLockedKeys.add(key);
                // Wait until every transaction holds its first key before any takes
                // its second, so the lock acquisition order is guaranteed to cross.
                barrier.await();
                key = keys.get(1);
                ClusterNode primaryNode = ((IgniteCacheProxy) cache).context().affinity().primaryByKey(key, NONE);
                List<Integer> primaryKeys = primaryKeys(grid(primaryNode).cache(CACHE_NAME), 5, key + (100 * threadNum));
                Map<Object, Integer> entries = new HashMap<>();
                involvedKeys.add(key);
                entries.put(transformer.apply(key), 0);
                for (Integer i : primaryKeys) {
                    involvedKeys.add(i);
                    entries.put(transformer.apply(i), 1);
                    k = transformer.apply(i + 13);
                    involvedKeys.add(i + 13);
                    entries.put(k, 2);
                }
                log.info(">>> Performs put [node=" + ((IgniteKernal) ignite).localNode().id() +
                    ", tx=" + tx.xid() + ", entries=" + entries + ']');
                cache.putAll(entries);
                tx.commit();
            }
            catch (Throwable e) {
                log.info("Expected exception: " + e);
                e.printStackTrace(System.out);
                // At least one stack trace should contain TransactionDeadlockException.
                if (hasCause(e, TransactionTimeoutException.class) && hasCause(e, TransactionDeadlockException.class)) {
                    if (deadlockErr.compareAndSet(null, cause(e, TransactionDeadlockException.class))) {
                        log.info("At least one stack trace should contain " +
                            TransactionDeadlockException.class.getSimpleName());
                        e.printStackTrace(System.out);
                    }
                }
            }
        }
    }, loc ? 2 : txCnt, "tx-thread");
    try {
        fut.get();
    }
    catch (IgniteCheckedException e) {
        U.error(null, "Unexpected exception", e);
        fail();
    }
    U.sleep(1000);
    TransactionDeadlockException deadlockE = deadlockErr.get();
    assertNotNull("Failed to detect deadlock", deadlockE);
    boolean fail = false;
    // Check transactions, futures and entry locks state.
    for (int i = 0; i < NODES_CNT * 2; i++) {
        Ignite ignite = ignite(i);
        int cacheId = ((IgniteCacheProxy) ignite.cache(CACHE_NAME)).context().cacheId();
        GridCacheSharedContext<Object, Object> cctx = ((IgniteKernal) ignite).context().cache().context();
        IgniteTxManager txMgr = cctx.tm();
        Collection<IgniteInternalTx> activeTxs = txMgr.activeTransactions();
        for (IgniteInternalTx tx : activeTxs) {
            Collection<IgniteTxEntry> entries = tx.allEntries();
            for (IgniteTxEntry entry : entries) {
                if (entry.cacheId() == cacheId) {
                    fail = true;
                    U.error(log, "Transaction still exists: " + "\n" + tx.xidVersion() +
                        "\n" + tx.nearXidVersion() + "\n nodeId=" + cctx.localNodeId() + "\n tx=" + tx);
                }
            }
        }
        Collection<IgniteInternalFuture<?>> futs = txMgr.deadlockDetectionFutures();
        assertTrue(futs.isEmpty());
        GridCacheAdapter<Object, Integer> intCache = internalCache(i, CACHE_NAME);
        GridCacheConcurrentMap map = intCache.map();
        for (Integer key : involvedKeys) {
            Object key0 = transformer.apply(key);
            KeyCacheObject keyCacheObj = intCache.context().toCacheKeyObject(key0);
            GridCacheMapEntry entry = map.getEntry(keyCacheObj);
            if (entry != null)
                assertNull("Entry still has locks " + entry, entry.mvccAllLocal());
        }
    }
    if (fail)
        fail("Some transactions still exist");
    // Check deadlock report.
    String msg = deadlockE.getMessage();
    for (IgniteInternalTx tx : involvedTxs)
        assertTrue(msg.contains("[txId=" + tx.xidVersion() + ", nodeId=" + tx.nodeId() +
            ", threadId=" + tx.threadId() + ']'));
    for (Integer key : involvedKeys) {
        if (involvedLockedKeys.contains(key))
            assertTrue(msg.contains("[key=" + transformer.apply(key) + ", cache=" + CACHE_NAME + ']'));
        else
            assertFalse(msg.contains("[key=" + transformer.apply(key)));
    }
}
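The barrier is what makes the deadlock deterministic rather than timing-dependent: every transaction locks its first key, all of them rendezvous at barrier.await(), and only then does each try to lock a key already held by another. A stripped-down sketch of the same choreography using plain ReentrantLocks (all names are illustrative; this is not the Ignite API):

import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;

public class DeadlockChoreographyDemo {
  public static void main(String[] args) throws InterruptedException {
    final ReentrantLock lockA = new ReentrantLock();
    final ReentrantLock lockB = new ReentrantLock();
    final CyclicBarrier barrier = new CyclicBarrier(2);

    Thread t1 = new Thread(() -> cross(lockA, lockB, barrier), "worker-1");
    Thread t2 = new Thread(() -> cross(lockB, lockA, barrier), "worker-2");
    t1.start();
    t2.start();
    t1.join();
    t2.join();
  }

  static void cross(ReentrantLock first, ReentrantLock second, CyclicBarrier barrier) {
    first.lock();
    try {
      // Rendezvous: both threads hold their first lock before either
      // attempts its second, so the acquisition order is guaranteed to cross.
      barrier.await();
      // A timed tryLock stands in for Ignite's transaction timeout:
      // a blocking lock() here would deadlock forever.
      if (second.tryLock(500, TimeUnit.MILLISECONDS)) {
        second.unlock();
      } else {
        System.out.println(Thread.currentThread().getName() + ": deadlock provoked, timed out");
      }
    } catch (Exception e) {
      throw new RuntimeException(e);
    } finally {
      first.unlock();
    }
  }
}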
Use of java.util.concurrent.CyclicBarrier in project ignite by apache.
From the class TxPessimisticDeadlockDetectionTest, method doTestDeadlock:
/**
 * @throws Exception If failed.
 */
private void doTestDeadlock(final int txCnt, final boolean loc, boolean lockPrimaryFirst,
    final boolean clientTx, final IgniteClosure<Integer, Object> transformer) throws Exception {
    log.info(">>> Test deadlock [txCnt=" + txCnt + ", loc=" + loc + ", lockPrimaryFirst=" + lockPrimaryFirst +
        ", clientTx=" + clientTx + ", transformer=" + transformer.getClass().getName() + ']');
    final AtomicInteger threadCnt = new AtomicInteger();
    final CyclicBarrier barrier = new CyclicBarrier(txCnt);
    final AtomicReference<TransactionDeadlockException> deadlockErr = new AtomicReference<>();
    final List<List<Integer>> keySets = generateKeys(txCnt, loc, !lockPrimaryFirst);
    final Set<Integer> involvedKeys = new GridConcurrentHashSet<>();
    final Set<Integer> involvedLockedKeys = new GridConcurrentHashSet<>();
    final Set<IgniteInternalTx> involvedTxs = new GridConcurrentHashSet<>();
    IgniteInternalFuture<Long> fut = GridTestUtils.runMultiThreadedAsync(new Runnable() {
        @Override
        public void run() {
            int threadNum = threadCnt.incrementAndGet();
            Ignite ignite = loc ? ignite(0) : ignite(clientTx ? threadNum - 1 + txCnt : threadNum - 1);
            IgniteCache<Object, Integer> cache = ignite.cache(CACHE_NAME);
            List<Integer> keys = keySets.get(threadNum - 1);
            int txTimeout = 500 + txCnt * 100;
            try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ, txTimeout, 0)) {
                involvedTxs.add(((TransactionProxyImpl) tx).tx());
                Integer key = keys.get(0);
                involvedKeys.add(key);
                Object k;
                log.info(">>> Performs put [node=" + ((IgniteKernal) ignite).localNode() +
                    ", tx=" + tx + ", key=" + transformer.apply(key) + ']');
                cache.put(transformer.apply(key), 0);
                involvedLockedKeys.add(key);
                barrier.await();
                key = keys.get(1);
                ClusterNode primaryNode = ((IgniteCacheProxy) cache).context().affinity().primaryByKey(key, NONE);
                List<Integer> primaryKeys = primaryKeys(grid(primaryNode).cache(CACHE_NAME), 5, key + (100 * threadNum));
                Map<Object, Integer> entries = new HashMap<>();
                involvedKeys.add(key);
                entries.put(transformer.apply(key), 0);
                for (Integer i : primaryKeys) {
                    involvedKeys.add(i);
                    entries.put(transformer.apply(i), 1);
                    k = transformer.apply(i + 13);
                    involvedKeys.add(i + 13);
                    entries.put(k, 2);
                }
                log.info(">>> Performs put [node=" + ((IgniteKernal) ignite).localNode() +
                    ", tx=" + tx + ", entries=" + entries + ']');
                cache.putAll(entries);
                tx.commit();
            }
            catch (Throwable e) {
                // At least one stack trace should contain TransactionDeadlockException.
                if (hasCause(e, TransactionTimeoutException.class) && hasCause(e, TransactionDeadlockException.class)) {
                    if (deadlockErr.compareAndSet(null, cause(e, TransactionDeadlockException.class)))
                        U.error(log, "At least one stack trace should contain " +
                            TransactionDeadlockException.class.getSimpleName(), e);
                }
            }
        }
    }, loc ? 2 : txCnt, "tx-thread");
    try {
        fut.get();
    }
    catch (IgniteCheckedException e) {
        U.error(null, "Unexpected exception", e);
        fail();
    }
    U.sleep(1000);
    TransactionDeadlockException deadlockE = deadlockErr.get();
    assertNotNull(deadlockE);
    boolean fail = false;
    // Check transactions, futures and entry locks state.
    for (int i = 0; i < NODES_CNT * 2; i++) {
        Ignite ignite = ignite(i);
        int cacheId = ((IgniteCacheProxy) ignite.cache(CACHE_NAME)).context().cacheId();
        GridCacheSharedContext<Object, Object> cctx = ((IgniteKernal) ignite).context().cache().context();
        IgniteTxManager txMgr = cctx.tm();
        Collection<IgniteInternalTx> activeTxs = txMgr.activeTransactions();
        for (IgniteInternalTx tx : activeTxs) {
            Collection<IgniteTxEntry> entries = tx.allEntries();
            for (IgniteTxEntry entry : entries) {
                if (entry.cacheId() == cacheId) {
                    fail = true;
                    U.error(log, "Transaction still exists: " + "\n" + tx.xidVersion() +
                        "\n" + tx.nearXidVersion() + "\n nodeId=" + cctx.localNodeId() + "\n tx=" + tx);
                }
            }
        }
        Collection<IgniteInternalFuture<?>> futs = txMgr.deadlockDetectionFutures();
        assertTrue(futs.isEmpty());
        GridCacheAdapter<Object, Integer> intCache = internalCache(i, CACHE_NAME);
        GridCacheConcurrentMap map = intCache.map();
        for (Integer key : involvedKeys) {
            Object key0 = transformer.apply(key);
            KeyCacheObject keyCacheObj = intCache.context().toCacheKeyObject(key0);
            GridCacheMapEntry entry = map.getEntry(keyCacheObj);
            if (entry != null)
                assertNull("Entry still has locks " + entry, entry.mvccAllLocal());
        }
    }
    if (fail)
        fail("Some transactions still exist");
    // Check deadlock report.
    String msg = deadlockE.getMessage();
    for (IgniteInternalTx tx : involvedTxs)
        assertTrue(msg.contains("[txId=" + tx.xidVersion() + ", nodeId=" + tx.nodeId() +
            ", threadId=" + tx.threadId() + ']'));
    for (Integer key : involvedKeys) {
        if (involvedLockedKeys.contains(key))
            assertTrue(msg.contains("[key=" + transformer.apply(key) + ", cache=" + CACHE_NAME + ']'));
        else
            assertFalse(msg.contains("[key=" + transformer.apply(key)));
    }
}
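This pessimistic variant relies on the transaction timeout to break the lock cycle. CyclicBarrier offers an analogous guard of its own: a timed await(timeout, unit) that throws TimeoutException and breaks the barrier, so a missing party cannot hang a test suite indefinitely. A minimal sketch of that form (illustrative names, not from the Ignite tests):

import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class TimedBarrierDemo {
  public static void main(String[] args) throws InterruptedException {
    // Two parties expected, but only one thread ever arrives.
    final CyclicBarrier barrier = new CyclicBarrier(2);
    Thread t = new Thread(() -> {
      try {
        // A timed await bounds how long a test can hang if a peer dies.
        barrier.await(2, TimeUnit.SECONDS);
        System.out.println("peer arrived in time");
      } catch (TimeoutException e) {
        // The timeout breaks the barrier for every other waiter too.
        System.out.println("gave up waiting; barrier broken=" + barrier.isBroken());
      } catch (InterruptedException | BrokenBarrierException e) {
        Thread.currentThread().interrupt();
      }
    });
    t.start();
    t.join();
  }
}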
Use of java.util.concurrent.CyclicBarrier in project geode by apache.
From the class AtomicStatsJUnitTest, method testConcurrentGets:
/**
 * Test for bug 41340. Perform two concurrent gets of a dirty stat and make sure
 * we read the correct value for the stat.
 *
 * @throws Throwable
 */
@Test
public void testConcurrentGets() throws Throwable {
  Properties props = new Properties();
  props.setProperty(MCAST_PORT, "0");
  // props.setProperty("statistic-sample-rate", "60000");
  props.setProperty(STATISTIC_SAMPLING_ENABLED, "false");
  DistributedSystem ds = DistributedSystem.connect(props);
  String statName = "TestStats";
  String statDescription = "Tests stats";
  final String statDesc = "blah blah blah";
  StatisticsTypeFactory f = StatisticsTypeFactoryImpl.singleton();
  StatisticsType type = f.createType(statName, statDescription,
      new StatisticDescriptor[] { f.createIntGauge("stat", statDesc, "bottles of beer on the wall") });
  final int statId = type.nameToId("stat");
  try {
    final AtomicReference<Statistics> statsRef = new AtomicReference<Statistics>();
    // Three parties: the incrementing thread, the reading thread, and the main loop.
    final CyclicBarrier beforeIncrement = new CyclicBarrier(3);
    final CyclicBarrier afterIncrement = new CyclicBarrier(3);
    Thread thread1 = new Thread("thread1") {
      public void run() {
        try {
          while (true) {
            beforeIncrement.await();
            statsRef.get().incInt(statId, 1);
            afterIncrement.await();
          }
        } catch (InterruptedException | BrokenBarrierException e) {
          e.printStackTrace();
        }
      }
    };
    Thread thread3 = new Thread("thread3") {
      public void run() {
        try {
          while (true) {
            beforeIncrement.await();
            afterIncrement.await();
            statsRef.get().getInt(statId);
          }
        } catch (InterruptedException | BrokenBarrierException e) {
          e.printStackTrace();
        }
      }
    };
    thread1.start();
    thread3.start();
    for (int i = 0; i < 5000; i++) {
      Statistics stats = ds.createAtomicStatistics(type, "stats");
      statsRef.set(stats);
      beforeIncrement.await();
      afterIncrement.await();
      assertEquals("On loop " + i, 1, stats.getInt(statId));
      stats.close();
    }
  } finally {
    ds.disconnect();
  }
}
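The pair of barriers forms a lockstep handshake: the main loop publishes a fresh Statistics instance, beforeIncrement releases both worker threads, and afterIncrement guarantees the increment has completed before the assertion reads the gauge. A generic sketch of the same two-barrier handshake with one worker (names and the shared counter are illustrative, not the geode API):

import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.atomic.AtomicInteger;

public class LockstepHandshakeDemo {
  public static void main(String[] args) throws Exception {
    final AtomicInteger shared = new AtomicInteger();
    // Two parties: one worker plus the coordinating main thread.
    final CyclicBarrier before = new CyclicBarrier(2);
    final CyclicBarrier after = new CyclicBarrier(2);

    Thread worker = new Thread(() -> {
      try {
        for (int round = 0; round < 3; round++) {
          before.await();           // wait for the coordinator to set up the round
          shared.incrementAndGet(); // the write whose visibility is under test
          after.await();            // signal that the write is done
        }
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    });
    worker.start();

    for (int round = 0; round < 3; round++) {
      shared.set(0);  // fresh state, like a fresh Statistics instance
      before.await(); // release the worker
      after.await();  // wait until its write has completed
      // The handshake guarantees exactly one increment happened in between.
      System.out.println("round " + round + " observed " + shared.get());
    }
    worker.join();
  }
}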