Usage example of org.neo4j.io.pagecache.PagedFile from the neo4j/neo4j project.
From class PageCacheHarnessTest, method concurrentPageFaultingMustNotPutInterleavedDataIntoPages:
@Test(timeout = LONG_TIMEOUT_MILLIS)
public void concurrentPageFaultingMustNotPutInterleavedDataIntoPages() throws Exception {
    // Number of pages in each test file; deliberately larger than the cache,
    // so pages are constantly faulted in and evicted under contention.
    final int pagesPerFile = 11;
    final RecordFormat format = new PageCountRecordFormat();
    try (RandomPageCacheTestHarness harness = new RandomPageCacheTestHarness()) {
        harness.setConcurrencyLevel(11);
        harness.setUseAdversarialIO(false);
        // Tiny cache (3 pages) forces frequent concurrent page faults.
        harness.setCachePageCount(3);
        harness.setCachePageSize(pageCachePageSize);
        harness.setFilePageCount(pagesPerFile);
        harness.setFilePageSize(pageCachePageSize);
        harness.setInitialMappedFiles(1);
        harness.setCommandCount(10000);
        harness.setRecordFormat(format);
        harness.setFileSystem(fs);
        // Read-only workload: disable every command that could flush, remap,
        // or mutate the file, so any corruption must come from faulting itself.
        harness.disableCommands(FlushCache, FlushFile, MapFile, UnmapFile, WriteRecord, WriteMulti);
        // Preparation step: fill every page of the single mapped file with
        // well-formed records before the concurrent readers start.
        harness.setPreparation((cache, fileSystem, touchedFiles) -> {
            File target = touchedFiles.iterator().next();
            try (PagedFile pagedFile = cache.map(target, pageCachePageSize);
                 PageCursor cursor = pagedFile.io(0, PF_SHARED_WRITE_LOCK)) {
                for (int page = 0; page < pagesPerFile; page++) {
                    cursor.next();
                    format.fillWithRecords(cursor);
                }
            }
        });
        harness.run(LONG_TIMEOUT_MILLIS, MILLISECONDS);
    }
}
Usage example of org.neo4j.io.pagecache.PagedFile from the neo4j/neo4j project.
From class MuninnPageCacheTest, method mustFlushDirtyPagesOnEvictingFirstPage:
@Test
public void mustFlushDirtyPagesOnEvictingFirstPage() throws Exception {
    writeInitialDataTo(file("a"));
    // Recording tracers let us assert on the exact fault/evict events emitted.
    RecordingPageCacheTracer tracer = new RecordingPageCacheTracer();
    RecordingPageCursorTracer cursorTracer = new RecordingPageCursorTracer();
    ConfigurablePageCursorTracerSupplier cursorTracerSupplier = new ConfigurablePageCursorTracerSupplier(cursorTracer);
    MuninnPageCache pageCache = createPageCache(fs, 2, 8, blockCacheFlush(tracer), cursorTracerSupplier);
    PagedFile pagedFile = pageCache.map(file("a"), 8);
    // Dirty the first page by overwriting its first long with 0L.
    try (PageCursor cursor = pagedFile.io(0, PF_SHARED_WRITE_LOCK)) {
        assertTrue(cursor.next());
        cursor.putLong(0L);
    }
    cursorTracer.reportEvents();
    assertNotNull(cursorTracer.observe(Fault.class));
    assertEquals(1, cursorTracer.faults());
    assertEquals(1, tracer.faults());
    // Evicting the dirty page must flush it to disk before reuse.
    int clockArm = pageCache.evictPages(1, 0, tracer.beginPageEvictions(1));
    assertThat(clockArm, is(1));
    assertNotNull(tracer.observe(Evict.class));
    // Read the file back: first long was overwritten, second long untouched.
    ByteBuffer buf = ByteBuffer.allocate(16);
    // Fix: close the channel (the original leaked it) via try-with-resources.
    try (StoreChannel channel = fs.open(file("a"), "r")) {
        channel.read(buf);
    }
    buf.flip();
    assertThat(buf.getLong(), is(0L));
    assertThat(buf.getLong(), is(y));
}
Usage example of org.neo4j.io.pagecache.PagedFile from the neo4j/neo4j project.
From class MuninnPageCacheTest, method mustEvictCleanPageWithoutFlushing:
@Test
public void mustEvictCleanPageWithoutFlushing() throws Exception {
    writeInitialDataTo(file("a"));
    // Recording tracers let us assert on the exact fault/evict events emitted.
    RecordingPageCacheTracer cacheTracer = new RecordingPageCacheTracer();
    RecordingPageCursorTracer pinTracer = new RecordingPageCursorTracer();
    ConfigurablePageCursorTracerSupplier tracerSupplier = new ConfigurablePageCursorTracerSupplier(pinTracer);
    MuninnPageCache cache = createPageCache(fs, 2, 8, blockCacheFlush(cacheTracer), tracerSupplier);
    PagedFile mapped = cache.map(file("a"), 8);
    // A read-locked visit faults the page in but never dirties it.
    try (PageCursor reader = mapped.io(0, PF_SHARED_READ_LOCK)) {
        assertTrue(reader.next());
    }
    pinTracer.reportEvents();
    assertNotNull(pinTracer.observe(Fault.class));
    assertEquals(1, pinTracer.faults());
    assertEquals(1, cacheTracer.faults());
    // Evicting the clean page must succeed without triggering any flush.
    int clockArm = cache.evictPages(1, 0, cacheTracer.beginPageEvictions(1));
    assertThat(clockArm, is(1));
    assertNotNull(cacheTracer.observe(Evict.class));
}
Usage example of org.neo4j.io.pagecache.PagedFile from the neo4j/neo4j project.
From class MuninnPageCacheTest, method mustThrowIfMappingFileWouldOverflowReferenceCount:
@Test(timeout = SEMI_LONG_TIMEOUT_MILLIS)
public void mustThrowIfMappingFileWouldOverflowReferenceCount() throws Exception {
    File file = file("a");
    writeInitialDataTo(file);
    MuninnPageCache pageCache = createPageCache(fs, 30, pageCachePageSize, PageCacheTracer.NULL, DefaultPageCursorTracerSupplier.NULL);
    PagedFile pf = null;
    // Counts successful map() calls; each one bumps the file's reference count.
    int mappedCount = 0;
    try {
        expectedException.expect(IllegalStateException.class);
        // Keep remapping the same file until the refcount overflows and throws.
        while (mappedCount < Integer.MAX_VALUE) {
            pf = pageCache.map(file, filePageSize);
            mappedCount++;
        }
    } finally {
        // Balance every successful map with a close so the cache shuts down cleanly.
        for (int closed = 0; closed < mappedCount; closed++) {
            try {
                pf.close();
            } catch (Exception e) {
                //noinspection ThrowFromFinallyBlock
                throw new AssertionError("Did not expect pf.close() to throw", e);
            }
        }
    }
}
Usage example of org.neo4j.io.pagecache.PagedFile from the neo4j/neo4j project.
From class RandomPageCacheTestHarness, method runIteration:
@SuppressWarnings("unchecked")
// Runs one randomized harness iteration: builds a MuninnPageCache over an
// (optionally adversarial) file system, executes a randomized command plan on
// a fixed thread pool, then runs a verification phase. Throws on timeout, on
// plan failure, or if verification fails. Always tears down the plan, cache,
// and (for ephemeral file systems) the file system in the finally block.
private void runIteration(long timeout, TimeUnit unit) throws Exception {
assert filePageSize % recordFormat.getRecordSize() == 0 : "File page size must be a multiple of the record size";
// Unless the seed was pinned by the caller, pick a fresh one so failures can
// be reproduced by re-running with the reported seed.
if (!fixedRandomSeed) {
randomSeed = ThreadLocalRandom.current().nextLong();
}
FileSystemAbstraction fs = this.fs;
File[] files = buildFileNames();
RandomAdversary adversary = new RandomAdversary(mischiefRate, failureRate, errorRate);
// Start with the adversary disarmed (probability 0) so setup IO is reliable;
// it is armed just before the plan starts, below.
adversary.setProbabilityFactor(0.0);
if (useAdversarialIO) {
adversary.setSeed(randomSeed);
fs = new AdversarialFileSystemAbstraction(adversary, fs);
}
PageSwapperFactory swapperFactory = new SingleFilePageSwapperFactory();
swapperFactory.setFileSystemAbstraction(fs);
MuninnPageCache cache = new MuninnPageCache(swapperFactory, cachePageCount, cachePageSize, tracer, cursorTracerSupplier);
// Adversarial faults are expected; don't spam the log when the cache closes.
cache.setPrintExceptionsOnClose(false);
// Pre-map the first initialMappedFiles files so the plan starts with work.
Map<File, PagedFile> fileMap = new HashMap<>(files.length);
for (int i = 0; i < Math.min(files.length, initialMappedFiles); i++) {
File file = files[i];
fileMap.put(file, cache.map(file, filePageSize));
}
plan = plan(cache, files, fileMap);
Callable<Void> planRunner = new PlanRunner(plan);
Future<Void>[] futures = new Future[concurrencyLevel];
ExecutorService executor = Executors.newFixedThreadPool(concurrencyLevel);
// Workers are submitted before plan.start(); presumably they block inside the
// plan until start() is called — TODO confirm against PlanRunner/Plan.
for (int i = 0; i < concurrencyLevel; i++) {
futures[i] = executor.submit(planRunner);
}
if (preparation != null) {
// Note: preparation runs against the non-adversarial this.fs, not the
// possibly-wrapped local fs, so setup writes cannot be sabotaged.
preparation.run(cache, this.fs, plan.getFilesTouched());
}
// Arm the adversary only for the duration of the plan execution.
adversary.setProbabilityFactor(1.0);
plan.start();
long deadlineMillis = System.currentTimeMillis() + unit.toMillis(timeout);
long now;
try {
// Wait for every worker, sharing one overall deadline across all of them.
for (Future<Void> future : futures) {
now = System.currentTimeMillis();
if (deadlineMillis < now) {
throw new TimeoutException();
}
future.get(deadlineMillis - now, TimeUnit.MILLISECONDS);
}
// Disarm the adversary so verification reads see real, reliable IO.
adversary.setProbabilityFactor(0.0);
runVerificationPhase(cache);
} finally {
// Disarm again unconditionally: this path is also reached on failure, and
// cleanup below must not be subject to injected faults.
adversary.setProbabilityFactor(0.0);
for (Future<Void> future : futures) {
future.cancel(true);
}
executor.shutdown();
now = System.currentTimeMillis();
// Remaining time until the deadline; may be <= 0, in which case
// awaitTermination returns immediately.
executor.awaitTermination(deadlineMillis - now, TimeUnit.MILLISECONDS);
plan.close();
cache.close();
// Ephemeral file systems are replaced wholesale; real ones just have the
// iteration's files deleted.
if (this.fs instanceof EphemeralFileSystemAbstraction) {
this.fs.close();
this.fs = new EphemeralFileSystemAbstraction();
} else {
for (File file : files) {
file.delete();
}
}
}
}
End of aggregated usage examples.