Use of org.neo4j.io.pagecache.tracing.MajorFlushEvent in project neo4j by neo4j.
The class MuninnPagedFile, method flushAndForce.
@Override
public void flushAndForce(IOLimiter limiter) throws IOException {
    if (limiter == null) {
        throw new IllegalArgumentException("IOPSLimiter cannot be null");
    }
    try (MajorFlushEvent flushEvent = pageCacheTracer.beginFileFlush(swapper)) {
        flushAndForceInternal(flushEvent.flushEventOpportunity(), false, limiter);
        syncDevice();
    }
}
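A minimal caller-side sketch of how this variant might be invoked. The map and io signatures differ between Neo4j versions, so the setup below is hypothetical; the limiter constant mirrors the flushAndForceForClose snippet further down.

// Hypothetical setup: 'pageCache' is a started MuninnPageCache, 'path' an existing file.
try (PagedFile pagedFile = pageCache.map(path, pageCache.pageSize())) {
    try (PageCursor cursor = pagedFile.io(0, PagedFile.PF_SHARED_WRITE_LOCK)) {
        if (cursor.next()) {
            cursor.putInt(0xCAFE); // dirty page 0 so there is something to flush
        }
    }
    pagedFile.flushAndForce(IOLimiter.unlimited()); // flush and fsync, unthrottled
}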
Use of org.neo4j.io.pagecache.tracing.MajorFlushEvent in project neo4j by neo4j.
The class MuninnPageCache, method flushAndForce.
@Override
public void flushAndForce() throws IOException {
    List<PagedFile> files = listExistingMappings();
    try (MajorFlushEvent ignored = pageCacheTracer.beginCacheFlush()) {
        // Flushing the whole page cache only happens on shutdown, so we should be able
        // to progress as fast as possible with the IO controller disabled.
        flushAllPagesParallel(files, IOController.DISABLED);
    }
    clearEvictorException();
}
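Since this no-argument flushAndForce is the shutdown path, typical usage is simply flush-then-close. A minimal sketch, assuming an already-constructed and started page cache named pageCache:

// Typical shutdown ordering: flush everything, then release the cache itself.
pageCache.flushAndForce(); // flushes every mapped file; IO limiting disabled internally
pageCache.close();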
Use of org.neo4j.io.pagecache.tracing.MajorFlushEvent in project neo4j by neo4j.
The class MuninnPageCache, method flushAllPages.
private void flushAllPages(IOLimiter limiter) throws IOException {
    try (MajorFlushEvent cacheFlush = pageCacheTracer.beginCacheFlush()) {
        FlushEventOpportunity flushOpportunity = cacheFlush.flushEventOpportunity();
        FileMapping fileMapping = mappedFiles;
        while (fileMapping != null) {
            fileMapping.pagedFile.flushAndForceInternal(flushOpportunity, false, limiter);
            fileMapping = fileMapping.next;
        }
        syncDevice();
    }
}
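Here mappedFiles is the head of an intrusive singly linked list of FileMapping nodes, each carrying a next pointer. A self-contained analogue of the traversal pattern, with hypothetical names, purely for illustration:

// Illustrative stand-in for the FileMapping chain; all names here are hypothetical.
final class Mapping {
    final String file;
    final Mapping next; // intrusive "next" pointer, like FileMapping.next
    Mapping(String file, Mapping next) { this.file = file; this.next = next; }
}

static void flushAll(Mapping head) {
    for (Mapping m = head; m != null; m = m.next) {
        System.out.println("flushing " + m.file); // stand-in for flushAndForceInternal(...)
    }
}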
Use of org.neo4j.io.pagecache.tracing.MajorFlushEvent in project neo4j by neo4j.
The class MuninnPagedFile, method flushAndForceForClose.
void flushAndForceForClose() throws IOException {
    try (MajorFlushEvent flushEvent = pageCacheTracer.beginFileFlush(swapper)) {
        flushAndForceInternal(flushEvent.flushEventOpportunity(), true, IOLimiter.unlimited());
        syncDevice();
    }
}
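Unlike the public flushAndForce above, this variant passes true for the for-closing flag and always flushes unthrottled. It is package-private, so from a caller's perspective the close path is just try-with-resources unmapping; a sketch, under the same hypothetical setup as the earlier example:

// Hedged sketch: closing the PagedFile is what ultimately drives this flush path.
try (PagedFile pagedFile = pageCache.map(path, pageCache.pageSize())) {
    // ... write through cursors ...
} // close() unmaps; the final unmap flushes for close with an unlimited limiter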
Use of org.neo4j.io.pagecache.tracing.MajorFlushEvent in project neo4j by neo4j.
The class PageCacheTest, method flushAndForceMustTolerateAsynchronousFileUnmapping.
@Test
void flushAndForceMustTolerateAsynchronousFileUnmapping() throws Exception {
    configureStandardPageCache();
    Path a = existingFile("a");
    Path b = existingFile("b");
    Path c = existingFile("c");
    BinaryLatch limiterStartLatch = new BinaryLatch();
    BinaryLatch limiterBlockLatch = new BinaryLatch();
    var ioController = new EmptyIOController() {
        @Override
        public void maybeLimitIO(int recentlyCompletedIOs, Flushable flushable, MajorFlushEvent flushEvent) {
            limiterStartLatch.release();
            limiterBlockLatch.await();
            super.maybeLimitIO(recentlyCompletedIOs, flushable, flushEvent);
        }
    };
    Future<?> flusher;
    try (PagedFile pfA = pageCache.map(a, filePageSize, DEFAULT_DATABASE_NAME, immutable.empty(), ioController);
            PagedFile pfB = pageCache.map(b, filePageSize, DEFAULT_DATABASE_NAME, immutable.empty(), ioController);
            PagedFile pfC = pageCache.map(c, filePageSize, DEFAULT_DATABASE_NAME, immutable.empty(), ioController)) {
        // Dirty a bunch of pages.
        try (PageCursor cursor = pfA.io(0, PF_SHARED_WRITE_LOCK, NULL)) {
            assertTrue(cursor.next());
        }
        try (PageCursor cursor = pfB.io(0, PF_SHARED_WRITE_LOCK, NULL)) {
            assertTrue(cursor.next());
        }
        try (PageCursor cursor = pfC.io(0, PF_SHARED_WRITE_LOCK, NULL)) {
            assertTrue(cursor.next());
        }
        flusher = executor.submit(() -> {
            pfA.flushAndForce();
            pfB.flushAndForce();
            pfC.flushAndForce();
            return null;
        });
        // The flusher is now stuck inside flushAndForce.
        limiterStartLatch.await();
    }
    // We should be able to unmap all the files, and when the flusher resumes again
    // it should not throw any exceptions from the asynchronously closed files.
    limiterBlockLatch.release();
    // This must not throw.
    flusher.get();
}
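The test coordinates the flusher thread with two one-shot latches: the IO controller signals that the flush has started, then parks until the main thread has unmapped the files. A self-contained sketch of the same handshake using plain JDK primitives, with CountDownLatch standing in for Neo4j's BinaryLatch (the class name below is hypothetical):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

class LatchHandshakeDemo {
    public static void main(String[] args) throws Exception {
        CountDownLatch started = new CountDownLatch(1); // worker: "I am inside the blocked call"
        CountDownLatch proceed = new CountDownLatch(1); // main: "resume now"
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<?> worker = executor.submit(() -> {
            started.countDown(); // like limiterStartLatch.release()
            proceed.await();     // like limiterBlockLatch.await()
            return null;
        });
        started.await();     // wait until the worker is parked mid-"flush"
        // ... the test unmaps the files at this point, while the flusher is paused ...
        proceed.countDown(); // let the worker resume
        worker.get();        // must complete without throwing
        executor.shutdown();
    }
}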