Example use of org.neo4j.io.pagecache.tracing.linear.LinearTracers in project neo4j by neo4j, taken from the method pageCacheMustRemainInternallyConsistentWhenGettingRandomFailures of the class PageCacheSlowTest.
@RepeatRule.Repeat(times = 1000)
@Test(timeout = LONG_TIMEOUT_MILLIS)
public void pageCacheMustRemainInternallyConsistentWhenGettingRandomFailures() throws Exception {
    // NOTE: This test is inherently non-deterministic. This means that every failure must be
    // thoroughly investigated, since they have a good chance of being a real issue.
    // This is effectively a targeted robustness test.
    RandomAdversary adversary = new RandomAdversary(0.5, 0.2, 0.2);
    adversary.setProbabilityFactor(0.0);
    FileSystemAbstraction fs = new AdversarialFileSystemAbstraction(adversary, this.fs);
    ThreadLocalRandom rng = ThreadLocalRandom.current();
    // Because our test failures are non-deterministic, we use this tracer to capture a full history of the
    // events leading up to any given failure.
    LinearTracers linearTracers = LinearHistoryTracerFactory.pageCacheTracer();
    getPageCache(fs, maxPages, pageCachePageSize,
            linearTracers.getPageCacheTracer(), linearTracers.getCursorTracerSupplier());
    PagedFile pfA = pageCache.map(existingFile("a"), filePageSize);
    PagedFile pfB = pageCache.map(existingFile("b"), filePageSize / 2 + 1);
    adversary.setProbabilityFactor(1.0);
    for (int i = 0; i < 1000; i++) {
        PagedFile pagedFile = rng.nextBoolean() ? pfA : pfB;
        long maxPageId = pagedFile.getLastPageId();
        boolean performingRead = rng.nextBoolean() && maxPageId != -1;
        long startingPage = maxPageId < 0 ? 0 : rng.nextLong(maxPageId + 1);
        int pf_flags = performingRead ? PF_SHARED_READ_LOCK : PF_SHARED_WRITE_LOCK;
        int pageSize = pagedFile.pageSize();
        try (PageCursor cursor = pagedFile.io(startingPage, pf_flags)) {
            if (performingRead) {
                performConsistentAdversarialRead(cursor, maxPageId, startingPage, pageSize);
            } else {
                performConsistentAdversarialWrite(cursor, rng, pageSize);
            }
        } catch (AssertionError error) {
            // Capture any exception that might have hit the eviction thread.
            adversary.setProbabilityFactor(0.0);
            try (PageCursor cursor = pagedFile.io(0, PF_SHARED_WRITE_LOCK)) {
                for (int j = 0; j < 100; j++) {
                    cursor.next(rng.nextLong(maxPageId + 1));
                }
            } catch (Throwable throwable) {
                error.addSuppressed(throwable);
            }
            throw error;
        } catch (Throwable throwable) {
            // Don't worry about it... it's fine!
            // throwable.printStackTrace(); // only enable this when debugging test failures.
        }
    }
    // Unmapping will cause pages to be flushed.
    // We don't want that to fail, since it will upset the test tear-down.
    adversary.setProbabilityFactor(0.0);
    try {
        // Flushing all pages, if successful, should clear any internal exception.
        pageCache.flushAndForce();
        // Do some post-chaos verification of what has been written.
        verifyAdversarialPagedContent(pfA);
        verifyAdversarialPagedContent(pfB);
        pfA.close();
        pfB.close();
    } catch (Throwable e) {
        linearTracers.printHistory(System.err);
        throw e;
    }
}
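The LinearTracers usage itself reduces to a small pattern: build the linear tracers, feed both the page cache tracer and the cursor tracer supplier into the page cache at construction time, and print the recorded history only when a failure needs a post-mortem. A minimal sketch of that pattern follows; only the LinearTracers calls are taken from the code above, while the tracer parameter types and the createPageCache/exercisePageCache helpers are assumptions standing in for the harness-specific pieces.

import org.neo4j.io.pagecache.PageCache;
import org.neo4j.io.pagecache.tracing.PageCacheTracer;
import org.neo4j.io.pagecache.tracing.cursor.PageCursorTracerSupplier;
import org.neo4j.io.pagecache.tracing.linear.LinearHistoryTracerFactory;
import org.neo4j.io.pagecache.tracing.linear.LinearTracers;

abstract class LinearTracersSketch {
    // Hypothetical helpers; real construction and workload are harness-specific.
    abstract PageCache createPageCache(PageCacheTracer tracer, PageCursorTracerSupplier cursorTracers);
    abstract void exercisePageCache(PageCache pageCache) throws Exception;

    void runTracedWorkload() throws Exception {
        // Record a complete, linearised history of page cache and cursor events.
        LinearTracers linearTracers = LinearHistoryTracerFactory.pageCacheTracer();
        PageCache pageCache = createPageCache(
                linearTracers.getPageCacheTracer(),
                linearTracers.getCursorTracerSupplier());
        try {
            exercisePageCache(pageCache);
        } catch (Throwable e) {
            // The history is cheap to record but only worth reading on failure.
            linearTracers.printHistory(System.err);
            throw e;
        }
    }
}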
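The adversary handling in the test follows a three-phase pattern that is easy to miss in the noise: the probability factor starts at 0.0 so that mapping files and building the page cache cannot fail, is raised to 1.0 for the chaos loop, and is dropped back to 0.0 before flushing and verification. A sketch of that pattern, assuming the org.neo4j.adversaries package layout of the neo4j test harness and a hypothetical exerciseFileSystem workload:

import org.neo4j.adversaries.RandomAdversary;
import org.neo4j.adversaries.fs.AdversarialFileSystemAbstraction;
import org.neo4j.io.fs.FileSystemAbstraction;

abstract class AdversaryPhasesSketch {
    // Hypothetical workload; in the real test this is the page cache chaos loop.
    abstract void exerciseFileSystem(FileSystemAbstraction fs) throws Exception;

    void run(FileSystemAbstraction realFs) throws Exception {
        RandomAdversary adversary = new RandomAdversary(0.5, 0.2, 0.2);
        adversary.setProbabilityFactor(0.0); // phase 1: quiet during setup
        FileSystemAbstraction fs = new AdversarialFileSystemAbstraction(adversary, realFs);
        adversary.setProbabilityFactor(1.0); // phase 2: inject random failures
        try {
            exerciseFileSystem(fs);
        } finally {
            adversary.setProbabilityFactor(0.0); // phase 3: quiet for verification and tear-down
        }
    }
}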