Example usage of org.neo4j.io.pagecache.tracing.MajorFlushEvent in the neo4j project:
the PageCacheTest method flushAndForceMustNotLockPageCacheForWholeDuration.
@Test
void flushAndForceMustNotLockPageCacheForWholeDuration() {
assertTimeoutPreemptively(ofMillis(SHORT_TIMEOUT_MILLIS), () -> {
// Use a large page count so the flush below has real work to do.
maxPages = 5000;
configureStandardPageCache();
Path a = existingFile("a");
Path b = existingFile("b");
// Latch protocol: the IO controller signals when the flusher has entered
// maybeLimitIO (limiterStartLatch), then blocks the flusher until the main
// thread has finished its concurrent page-cache operations (limiterBlockLatch).
BinaryLatch limiterStartLatch = new BinaryLatch();
BinaryLatch limiterBlockLatch = new BinaryLatch();
var ioController = new EmptyIOController() {
@Override
public void maybeLimitIO(int recentlyCompletedIOs, Flushable flushable, MajorFlushEvent flushEvent) {
limiterStartLatch.release();
limiterBlockLatch.await();
super.maybeLimitIO(recentlyCompletedIOs, flushable, flushEvent);
}
};
try (PagedFile pfA = pageCache.map(a, filePageSize, DEFAULT_DATABASE_NAME, immutable.empty(), ioController)) {
// Dirty a bunch of pages.
try (PageCursor cursor = pfA.io(0, PF_SHARED_WRITE_LOCK, NULL)) {
for (int i = 0; i < maxPages; i++) {
assertTrue(cursor.next());
}
}
// Kick off flushAndForce on a background thread; it will park inside the
// IO controller above.
Future<?> flusher = executor.submit(() -> {
pfA.flushAndForce();
return null;
});
// Flusher is now stuck inside flushAndForce.
limiterStartLatch.await();
// While the flush is in progress, the page cache must remain usable:
// We should be able to map and close paged files.
map(pageCache, b, filePageSize).close();
// We should be able to get and list existing mappings.
pageCache.listExistingMappings();
pageCache.getExistingMapping(a).ifPresent(PagedFile::close);
// Unblock the flusher and make sure it completes without error.
limiterBlockLatch.release();
flusher.get();
}
});
}
Example usage of org.neo4j.io.pagecache.tracing.MajorFlushEvent in the neo4j project:
the MuninnPagedFile method flushLockedPage.
/**
 * Writes a single page, already locked by the caller, out to the swapper.
 * The write is wrapped in a {@link MajorFlushEvent} so the page cache tracer
 * observes both success and failure.
 *
 * @param pageRef reference to the in-memory page to flush
 * @param filePageId the page's position in the backing file
 * @return {@code true} if the page was written, {@code false} if the write
 *         threw an {@link IOException} (which is reported to the flush event
 *         rather than propagated)
 */
boolean flushLockedPage(long pageRef, long filePageId) {
    try (MajorFlushEvent majorFlush = pageCacheTracer.beginFileFlush(swapper)) {
        FlushEvent pageFlush = majorFlush.beginFlush(pageRef, swapper, this);
        long bufferAddress = getAddress(pageRef);
        try {
            long written = swapper.write(filePageId, bufferAddress);
            pageFlush.addBytesWritten(written);
            pageFlush.addPagesFlushed(1);
            pageFlush.done();
            return true;
        } catch (IOException ioe) {
            // Swallowed by design: the failure is recorded on the event and
            // signalled to the caller via the return value.
            pageFlush.done(ioe);
            return false;
        }
    }
}
Example usage of org.neo4j.io.pagecache.tracing.MajorFlushEvent in the neo4j project:
the MuninnPagedFile method vectoredFlush.
/**
 * Flushes a contiguous run of pages to the swapper with a single vectored write.
 *
 * <p>Regardless of the outcome, every page in the vector is unlocked in the
 * {@code finally} block: under {@code forClosing} the pages hold exclusive
 * locks (and are additionally marked unmodified on success), otherwise they
 * hold flush locks released via {@code unlockFlush}.
 *
 * @param pages page references to flush; {@code pages[0]} determines the
 *              starting file page id of the vectored write
 * @param bufferAddresses native buffer addresses to write from
 * @param flushStamps lock stamps used to release flush locks (non-closing path)
 * @param bufferLengths length of each buffer in the vector
 * @param numberOfBuffers how many buffers the vectored write spans
 * @param pagesToFlush number of pages covered by this flush
 * @param pagesMerged number of pages merged into larger buffers (tracing only)
 * @param flushEvent tracing event the per-flush event is created under
 * @param forClosing whether the pages are exclusively locked for file close
 * @throws IOException if the vectored write fails; the failure is first
 *         reported to the flush event, and pages are still unlocked
 */
private void vectoredFlush(long[] pages, long[] bufferAddresses, long[] flushStamps, int[] bufferLengths, int numberOfBuffers, int pagesToFlush, int pagesMerged, MajorFlushEvent flushEvent, boolean forClosing) throws IOException {
    FlushEvent pageFlush = null;
    boolean wroteSuccessfully = false;
    try {
        // The vectored write starts at the file page id of the first page ref.
        long headPageRef = pages[0];
        long headFilePageId = getFilePageId(headPageRef);
        pageFlush = flushEvent.beginFlush(pages, swapper, this, pagesToFlush, pagesMerged);
        long written = swapper.write(headFilePageId, bufferAddresses, bufferLengths, numberOfBuffers, pagesToFlush);
        // Record the outcome on the tracing event.
        pageFlush.addBytesWritten(written);
        pageFlush.addPagesFlushed(pagesToFlush);
        pageFlush.addPagesMerged(pagesMerged);
        pageFlush.done();
        wroteSuccessfully = true;
        // At this point there are no 'grabbed' pages left.
    } catch (IOException failure) {
        // beginFlush may not have run yet when the failure happened.
        if (pageFlush != null) {
            pageFlush.done(failure);
        }
        throw failure;
    } finally {
        // Always unlock every page in the vector, success or not.
        if (forClosing) {
            int index = 0;
            while (index < pagesToFlush) {
                long ref = pages[index];
                if (wroteSuccessfully) {
                    explicitlyMarkPageUnmodifiedUnderExclusiveLock(ref);
                }
                unlockExclusive(ref);
                index++;
            }
        } else {
            int index = 0;
            while (index < pagesToFlush) {
                unlockFlush(pages[index], flushStamps[index], wroteSuccessfully);
                index++;
            }
        }
    }
}
Aggregations