Use of org.neo4j.util.concurrent.BinaryLatch in project neo4j by neo4j.
Class CentralJobSchedulerTest, method shouldProfileGroup.
@Timeout(value = 20, unit = SECONDS)
@Test
void shouldProfileGroup() throws InterruptedException {
    life.start();
    BinaryLatch checkpointLatch = new BinaryLatch();
    // Park a job in the CHECKPOINT group so the profiler has something to sample.
    scheduler.schedule(Group.CHECKPOINT, NOT_MONITORED, checkpointLatch::await);
    Profiler profiler = Profiler.profiler();
    scheduler.profileGroup(Group.CHECKPOINT, profiler);
    // Poll the printed profile until the parked job shows up in BinaryLatch.await.
    String printedProfile;
    do {
        ByteArrayOutputStream bufferOut = new ByteArrayOutputStream();
        PrintStream out = new PrintStream(bufferOut);
        profiler.printProfile(out, "Test Title");
        out.flush();
        printedProfile = bufferOut.toString();
    } while (!printedProfile.contains("BinaryLatch.await"));
    checkpointLatch.release();
    profiler.finish();
}
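For orientation: BinaryLatch is a single-use, two-state latch. await() parks the caller until some thread calls release(), and a released latch stays open, so later await() calls return immediately; await() also does not throw InterruptedException, which is why checkpointLatch::await can be scheduled as a plain Runnable above. A minimal sketch of that handoff, assuming the neo4j module that ships org.neo4j.util.concurrent is on the classpath (class and variable names are illustrative):

import org.neo4j.util.concurrent.BinaryLatch;

public class BinaryLatchSketch {
    public static void main(String[] args) throws InterruptedException {
        BinaryLatch latch = new BinaryLatch();

        Thread waiter = new Thread(() -> {
            latch.await(); // parks until release() below
            System.out.println("released");
        });
        waiter.start();

        latch.release(); // opens the latch; current and future awaiters proceed
        waiter.join();

        latch.await(); // returns immediately: a released latch never resets
    }
}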
Use of org.neo4j.util.concurrent.BinaryLatch in project neo4j by neo4j.
Class PageCacheTest, method flushAndForceMustTolerateAsynchronousFileUnmapping.
@Test
void flushAndForceMustTolerateAsynchronousFileUnmapping() throws Exception {
    configureStandardPageCache();
    Path a = existingFile("a");
    Path b = existingFile("b");
    Path c = existingFile("c");
    BinaryLatch limiterStartLatch = new BinaryLatch();
    BinaryLatch limiterBlockLatch = new BinaryLatch();
    // IO controller that signals when flushing has started, then parks the flusher.
    var ioController = new EmptyIOController() {
        @Override
        public void maybeLimitIO(int recentlyCompletedIOs, Flushable flushable, MajorFlushEvent flushEvent) {
            limiterStartLatch.release();
            limiterBlockLatch.await();
            super.maybeLimitIO(recentlyCompletedIOs, flushable, flushEvent);
        }
    };
    Future<?> flusher;
    try (PagedFile pfA = pageCache.map(a, filePageSize, DEFAULT_DATABASE_NAME, immutable.empty(), ioController);
            PagedFile pfB = pageCache.map(b, filePageSize, DEFAULT_DATABASE_NAME, immutable.empty(), ioController);
            PagedFile pfC = pageCache.map(c, filePageSize, DEFAULT_DATABASE_NAME, immutable.empty(), ioController)) {
        // Dirty a bunch of pages.
        try (PageCursor cursor = pfA.io(0, PF_SHARED_WRITE_LOCK, NULL)) {
            assertTrue(cursor.next());
        }
        try (PageCursor cursor = pfB.io(0, PF_SHARED_WRITE_LOCK, NULL)) {
            assertTrue(cursor.next());
        }
        try (PageCursor cursor = pfC.io(0, PF_SHARED_WRITE_LOCK, NULL)) {
            assertTrue(cursor.next());
        }
        flusher = executor.submit(() -> {
            pfA.flushAndForce();
            pfB.flushAndForce();
            pfC.flushAndForce();
            return null;
        });
        // Flusher is now stuck inside flushAndForce.
        limiterStartLatch.await();
    }
    // Closing the try block above unmaps all the files while the flusher is parked.
    // When the flusher resumes, it should not throw any exceptions from the
    // asynchronously closed files.
    limiterBlockLatch.release();
    // This must not throw.
    flusher.get();
}
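The limiterStartLatch/limiterBlockLatch pair above (and in the next test) is a small rendezvous idiom: the intercepted thread releases a "started" latch so the test knows it has reached the interception point, then parks on a "block" latch until the test has finished its concurrent checks. A stripped-down sketch of that handshake, with illustrative names standing in for the flusher and the IO controller hook:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import org.neo4j.util.concurrent.BinaryLatch;

public class LatchHandshakeSketch {
    public static void main(String[] args) throws Exception {
        BinaryLatch startLatch = new BinaryLatch();
        BinaryLatch blockLatch = new BinaryLatch();
        ExecutorService executor = Executors.newSingleThreadExecutor();

        Future<?> worker = executor.submit(() -> {
            startLatch.release(); // signal: the interception point was reached
            blockLatch.await();   // park until the coordinator is done
            return null;          // then resume the intercepted work
        });

        startLatch.await(); // the worker is now parked at the interception point
        // ... run concurrent operations and assertions here ...
        blockLatch.release(); // let the worker resume
        worker.get();         // propagates any failure from the worker
        executor.shutdown();
    }
}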
Use of org.neo4j.util.concurrent.BinaryLatch in project neo4j by neo4j.
Class PageCacheTest, method flushAndForceMustNotLockPageCacheForWholeDuration.
@Test
void flushAndForceMustNotLockPageCacheForWholeDuration() {
    assertTimeoutPreemptively(ofMillis(SHORT_TIMEOUT_MILLIS), () -> {
        maxPages = 5000;
        configureStandardPageCache();
        Path a = existingFile("a");
        Path b = existingFile("b");
        BinaryLatch limiterStartLatch = new BinaryLatch();
        BinaryLatch limiterBlockLatch = new BinaryLatch();
        var ioController = new EmptyIOController() {
            @Override
            public void maybeLimitIO(int recentlyCompletedIOs, Flushable flushable, MajorFlushEvent flushEvent) {
                limiterStartLatch.release();
                limiterBlockLatch.await();
                super.maybeLimitIO(recentlyCompletedIOs, flushable, flushEvent);
            }
        };
        try (PagedFile pfA = pageCache.map(a, filePageSize, DEFAULT_DATABASE_NAME, immutable.empty(), ioController)) {
            // Dirty a bunch of pages.
            try (PageCursor cursor = pfA.io(0, PF_SHARED_WRITE_LOCK, NULL)) {
                for (int i = 0; i < maxPages; i++) {
                    assertTrue(cursor.next());
                }
            }
            Future<?> flusher = executor.submit(() -> {
                pfA.flushAndForce();
                return null;
            });
            // Flusher is now stuck inside flushAndForce.
            limiterStartLatch.await();
            // We should be able to map and close paged files.
            map(pageCache, b, filePageSize).close();
            // We should be able to get and list existing mappings.
            pageCache.listExistingMappings();
            pageCache.getExistingMapping(a).ifPresent(PagedFile::close);
            limiterBlockLatch.release();
            flusher.get();
        }
    });
}
Use of org.neo4j.util.concurrent.BinaryLatch in project neo4j by neo4j.
Class PageCacheTest, method writeLockMustInvalidateExistingReadLock.
@Test
void writeLockMustInvalidateExistingReadLock() {
    assertTimeoutPreemptively(ofMillis(SHORT_TIMEOUT_MILLIS), () -> {
        configureStandardPageCache();
        BinaryLatch startLatch = new BinaryLatch();
        BinaryLatch continueLatch = new BinaryLatch();
        try (PagedFile pf = map(existingFile("a"), filePageSize);
                PageCursor cursor = pf.io(0, PF_SHARED_WRITE_LOCK, NULL)) {
            // Ensure that page 0 exists so the read cursor can get it.
            assertTrue(cursor.next());
            // Then move on to page 1, which releases the write lock on page 0.
            assertTrue(cursor.next());
            Future<Object> read = executor.submit(() -> {
                try (PageCursor innerCursor = pf.io(0, PF_SHARED_READ_LOCK, NULL)) {
                    assertTrue(innerCursor.next());
                    // No writer holds page 0 yet, so the optimistic read is valid.
                    assertFalse(innerCursor.shouldRetry());
                    startLatch.release();
                    continueLatch.await();
                    // The write lock re-taken below must invalidate the read.
                    assertTrue(innerCursor.shouldRetry());
                }
                return null;
            });
            startLatch.await();
            // Re-take the write lock on page 0.
            assertTrue(cursor.next(0));
            continueLatch.release();
            read.get();
        }
    });
}
Use of org.neo4j.util.concurrent.BinaryLatch in project neo4j by neo4j.
Class PageCacheTest, method writeUnlockMustInvalidateReadLocks.
@Test
void writeUnlockMustInvalidateReadLocks() {
    assertTimeoutPreemptively(ofMillis(SHORT_TIMEOUT_MILLIS), () -> {
        configureStandardPageCache();
        BinaryLatch startLatch = new BinaryLatch();
        BinaryLatch continueLatch = new BinaryLatch();
        try (PagedFile pf = map(existingFile("a"), filePageSize);
                PageCursor cursor = pf.io(0, PF_SHARED_WRITE_LOCK, NULL)) {
            // Take the write lock on page 0.
            assertTrue(cursor.next());
            Future<Object> read = executor.submit(() -> {
                try (PageCursor innerCursor = pf.io(0, PF_SHARED_READ_LOCK, NULL)) {
                    assertTrue(innerCursor.next());
                    // A writer currently holds page 0, so the read must retry.
                    assertTrue(innerCursor.shouldRetry());
                    startLatch.release();
                    continueLatch.await();
                    // The write unlock below must also invalidate the read.
                    assertTrue(innerCursor.shouldRetry());
                }
                return null;
            });
            startLatch.await();
            // Move on to page 1, which unlocks page 0.
            assertTrue(cursor.next());
            continueLatch.release();
            read.get();
        }
    });
}
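In both tests, shouldRetry() returning true means a writer holds, or has held, the page since the optimistic read began, so the data read may be torn and must be re-read. In ordinary PageCursor usage that check drives a retry loop rather than a one-off assertion. A sketch of the idiom, with pf, offset 0, and the statically imported NULL mirroring the tests above; the helper name and the use of getLong are illustrative:

// Canonical optimistic-read loop for a PF_SHARED_READ_LOCK cursor: the read
// takes no lock, so it must repeat until shouldRetry() reports no interference.
static long readFirstLong(PagedFile pf) throws IOException {
    long value = 0;
    try (PageCursor cursor = pf.io(0, PF_SHARED_READ_LOCK, NULL)) {
        if (cursor.next()) {
            do {
                value = cursor.getLong(0); // optimistic read; may observe a torn value
            } while (cursor.shouldRetry()); // true => a writer intervened, read again
        }
    }
    return value;
}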