Use of org.neo4j.util.concurrent.BinaryLatch in project neo4j by neo4j.
From the class CheckPointerImplTest, method tryCheckPointMustWaitForOnGoingCheckPointsToCompleteAsLongAsTimeoutPredicateIsFalse.
@Test
void tryCheckPointMustWaitForOnGoingCheckPointsToCompleteAsLongAsTimeoutPredicateIsFalse() throws Exception {
    mockTxIdStore();
    CheckPointerImpl checkPointer = checkPointer();
    BinaryLatch arriveFlushAndForce = new BinaryLatch();
    BinaryLatch finishFlushAndForce = new BinaryLatch();
    doAnswer(invocation -> {
        arriveFlushAndForce.release();
        finishFlushAndForce.await();
        return null;
    }).when(forceOperation).flushAndForce(any());
    Thread forceCheckPointThread = new Thread(() -> {
        try {
            checkPointer.forceCheckPoint(INFO);
        } catch (Throwable e) {
            e.printStackTrace();
            throw new RuntimeException(e);
        }
    });
    forceCheckPointThread.start();
    // Wait for force-thread to arrive in flushAndForce().
    arriveFlushAndForce.await();
    BooleanSupplier predicate = mock(BooleanSupplier.class);
    when(predicate.getAsBoolean()).thenReturn(false, false, true);
    // We decided to not wait for the on-going check point to finish.
    assertThat(checkPointer.tryCheckPoint(INFO, predicate)).isEqualTo(-1L);
    // Let the flushAndForce complete.
    finishFlushAndForce.release();
    forceCheckPointThread.join();
    assertThat(checkPointer.tryCheckPoint(INFO, predicate)).isEqualTo(this.transactionId);
}
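Every example on this page relies on the same pair of BinaryLatch operations seen above: release() opens the latch and await() blocks the caller until it has been released. A minimal stand-alone sketch of that handoff, assuming nothing beyond those two methods:

import org.neo4j.util.concurrent.BinaryLatch;

class BinaryLatchHandoffSketch {
    public static void main(String[] args) throws InterruptedException {
        BinaryLatch ready = new BinaryLatch();

        Thread worker = new Thread(() -> {
            // ... do some setup work ...
            ready.release(); // signal that the worker has reached the interesting point
        });
        worker.start();

        ready.await(); // blocks until the worker calls release()
        System.out.println("worker has signalled readiness");
        worker.join();
    }
}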
Use of org.neo4j.util.concurrent.BinaryLatch in project neo4j by neo4j.
From the class CentralJobSchedulerTest, method shouldListActiveGroups.
@Test
void shouldListActiveGroups() {
    life.start();
    assertEquals(List.of(), scheduler.activeGroups().map(ag -> ag.group).collect(toList()));
    BinaryLatch firstLatch = new BinaryLatch();
    scheduler.schedule(Group.CHECKPOINT, NOT_MONITORED, firstLatch::release);
    firstLatch.await();
    assertEquals(List.of(Group.CHECKPOINT), scheduler.activeGroups().map(ag -> ag.group).collect(toList()));
}
Use of org.neo4j.util.concurrent.BinaryLatch in project neo4j by neo4j.
From the class CentralJobSchedulerTest, method longRunningScheduledJobsMustNotDelayOtherLongRunningJobs.
@Test
void longRunningScheduledJobsMustNotDelayOtherLongRunningJobs() {
    life.start();
    List<JobHandle<?>> handles = new ArrayList<>(30);
    AtomicLong startedCounter = new AtomicLong();
    BinaryLatch blockLatch = new BinaryLatch();
    Runnable task = () -> {
        startedCounter.incrementAndGet();
        blockLatch.await();
    };
    for (int i = 0; i < 10; i++) {
        handles.add(scheduler.schedule(Group.INDEX_POPULATION, NOT_MONITORED, task, 0, TimeUnit.MILLISECONDS));
    }
    for (int i = 0; i < 10; i++) {
        handles.add(scheduler.scheduleRecurring(Group.INDEX_POPULATION, NOT_MONITORED, task, Integer.MAX_VALUE, TimeUnit.MILLISECONDS));
    }
    for (int i = 0; i < 10; i++) {
        handles.add(scheduler.scheduleRecurring(Group.INDEX_POPULATION, NOT_MONITORED, task, 0, Integer.MAX_VALUE, TimeUnit.MILLISECONDS));
    }
    long deadline = TimeUnit.SECONDS.toNanos(10) + System.nanoTime();
    do {
        if (startedCounter.get() == handles.size()) {
            // All jobs got started. We're good!
            blockLatch.release();
            for (JobHandle<?> handle : handles) {
                handle.cancel();
            }
            return;
        }
    } while (System.nanoTime() < deadline);
    fail("Only managed to start " + startedCounter.get() + " tasks in 10 seconds, when " + handles.size() + " was expected.");
}
Use of org.neo4j.util.concurrent.BinaryLatch in project neo4j by neo4j.
From the class FulltextProceduresTest, method concurrentPopulationAndUpdatesToAnEventuallyConsistentIndexMustEventuallyResultInCorrectIndexState.
@Test
void concurrentPopulationAndUpdatesToAnEventuallyConsistentIndexMustEventuallyResultInCorrectIndexState() throws Exception {
    String oldValue = "red";
    String newValue = "green";
    // First we create the nodes and relationships with the property value "red".
    LongHashSet nodeIds = new LongHashSet();
    LongHashSet relIds = new LongHashSet();
    generateNodesAndRelationshipsWithProperty(200, nodeIds, relIds, oldValue);
    // Then, in two concurrent transactions, we create our indexes AND change all the property values to "green".
    CountDownLatch readyLatch = new CountDownLatch(2);
    BinaryLatch startLatch = new BinaryLatch();
    Runnable createIndexes = () -> {
        readyLatch.countDown();
        startLatch.await();
        try (Transaction tx = db.beginTx()) {
            tx.execute(format(NODE_CREATE, DEFAULT_NODE_IDX_NAME, asStrList(LABEL.name()), asStrList(PROP) + EVENTUALLY_CONSISTENT));
            tx.execute(format(RELATIONSHIP_CREATE, DEFAULT_REL_IDX_NAME, asStrList(REL.name()), asStrList(PROP) + EVENTUALLY_CONSISTENT));
            tx.commit();
        }
    };
    Runnable makeAllEntitiesGreen = () -> {
        try (Transaction tx = db.beginTx()) {
            // Prepare our transaction state first.
            nodeIds.forEach(nodeId -> tx.getNodeById(nodeId).setProperty(PROP, newValue));
            relIds.forEach(relId -> tx.getRelationshipById(relId).setProperty(PROP, newValue));
            // Okay, NOW we're ready to race!
            readyLatch.countDown();
            startLatch.await();
            tx.commit();
        }
    };
    ExecutorService executor = Executors.newFixedThreadPool(2);
    Future<?> future1 = executor.submit(createIndexes);
    Future<?> future2 = executor.submit(makeAllEntitiesGreen);
    readyLatch.await();
    startLatch.release();
    // Finally, when everything has settled down, we should see that all of the nodes and relationships are indexed with the value "green".
    try {
        future1.get();
        future2.get();
        awaitIndexesOnline();
        try (Transaction tx = db.beginTx()) {
            tx.execute(AWAIT_REFRESH).close();
        }
        assertQueryFindsIds(db, true, DEFAULT_NODE_IDX_NAME, newValue, nodeIds);
        assertQueryFindsIds(db, false, DEFAULT_REL_IDX_NAME, newValue, relIds);
    } finally {
        IOUtils.closeAllSilently(executor::shutdown);
    }
}
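The test above uses two latches for a "ready, set, go" start: a CountDownLatch lets both workers report that they are prepared, and a single BinaryLatch released by the test fires the start signal so both transactions proceed as close to concurrently as possible. A stripped-down sketch of just that coordination pattern, where the racer body is a placeholder rather than the actual test work:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import org.neo4j.util.concurrent.BinaryLatch;

class RaceStartSketch {
    public static void main(String[] args) throws Exception {
        CountDownLatch readyLatch = new CountDownLatch(2); // both racers check in here
        BinaryLatch startLatch = new BinaryLatch();        // the coordinator fires this once

        Runnable racer = () -> {
            readyLatch.countDown();   // report that this thread is ready
            startLatch.await();       // wait for the common start signal
            // ... perform the racing work here ...
        };

        ExecutorService executor = Executors.newFixedThreadPool(2);
        Future<?> a = executor.submit(racer);
        Future<?> b = executor.submit(racer);

        readyLatch.await();   // wait until both racers are parked on the start latch
        startLatch.release(); // let both go at (nearly) the same instant

        a.get();
        b.get();
        executor.shutdown();
    }
}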
Use of org.neo4j.util.concurrent.BinaryLatch in project neo4j by neo4j.
From the class ForsetiFalseDeadlockTest, method runTest.
private static void runTest(Fixture fixture) throws InterruptedException, java.util.concurrent.ExecutionException {
    int iterations = fixture.iterations();
    ResourceType resourceType = fixture.createResourceType();
    Locks manager = fixture.createLockManager(resourceType);
    try (Locks.Client a = manager.newClient();
            Locks.Client b = manager.newClient()) {
        a.initialize(NoLeaseClient.INSTANCE, 1, EmptyMemoryTracker.INSTANCE, Config.defaults());
        b.initialize(NoLeaseClient.INSTANCE, 2, EmptyMemoryTracker.INSTANCE, Config.defaults());
        BinaryLatch startLatch = new BinaryLatch();
        BlockedCallable callA = new BlockedCallable(startLatch, () -> workloadA(fixture, a, resourceType, iterations));
        BlockedCallable callB = new BlockedCallable(startLatch, () -> workloadB(fixture, b, resourceType, iterations));
        Future<Void> futureA = executor.submit(callA);
        Future<Void> futureB = executor.submit(callB);
        callA.awaitBlocked();
        callB.awaitBlocked();
        startLatch.release();
        futureA.get();
        futureB.get();
    } finally {
        manager.close();
    }
}
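BlockedCallable and awaitBlocked() are test helpers that are not shown on this page; judging from their use above, the callable parks on the shared start latch and awaitBlocked() waits until its thread is actually blocked there before the test releases the latch. A hypothetical reconstruction of such a helper, not the neo4j implementation:

import java.util.concurrent.Callable;
import org.neo4j.util.concurrent.BinaryLatch;

// Hypothetical sketch: one way a "blocked callable" could be built from the same
// BinaryLatch primitives used throughout this page.
class BlockedCallableSketch implements Callable<Void> {
    private final BinaryLatch startLatch;
    private final Runnable workload;
    private volatile Thread runner;

    BlockedCallableSketch(BinaryLatch startLatch, Runnable workload) {
        this.startLatch = startLatch;
        this.workload = workload;
    }

    @Override
    public Void call() {
        runner = Thread.currentThread();
        startLatch.await(); // park here until the test releases the start latch
        workload.run();
        return null;
    }

    void awaitBlocked() throws InterruptedException {
        // Heuristic: spin until the callable's thread exists and is parked,
        // which here means it has reached startLatch.await().
        while (true) {
            Thread t = runner;
            if (t != null) {
                Thread.State state = t.getState();
                if (state == Thread.State.WAITING || state == Thread.State.TIMED_WAITING) {
                    return;
                }
            }
            Thread.sleep(1);
        }
    }
}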