Example usage of org.neo4j.adversaries.RandomAdversary in the neo4j/neo4j project — class FileSenderTest, method sendLargeFileWithUnreliableReadBufferSize:
/**
 * FileSender must deliver a three-chunk file intact even when the underlying
 * reads are unreliable: a {@code RandomAdversary} with a 90% mischief rate
 * forces odd read behavior, yet the chunk boundaries and contents must come
 * out exactly as written.
 *
 * Fix: the file and its local variable were misleadingly named "smallFile"
 * (apparent copy-paste from a small-file test) although the file holds
 * MAX_SIZE * 3 bytes; renamed to "largeFile" to match the test's intent.
 */
@Test
public void sendLargeFileWithUnreliableReadBufferSize() throws Exception {
    // given: MAX_SIZE * 3 random bytes written to a file via the real fs
    byte[] bytes = new byte[MAX_SIZE * 3];
    random.nextBytes(bytes);
    File largeFile = testDirectory.file("largeFile");
    try (StoreChannel storeChannel = fs.create(largeFile)) {
        storeChannel.write(ByteBuffer.wrap(bytes));
    }

    // Read it back through an adversarial fs: 90% mischief rate, no
    // failures, no errors (argument order per RandomAdversary's
    // (mischiefRate, failureRate, errorRate) constructor).
    Adversary adversary = new RandomAdversary(0.9, 0.0, 0.0);
    AdversarialFileSystemAbstraction afs = new AdversarialFileSystemAbstraction(adversary, fs);
    FileSender fileSender = new FileSender(afs.open(largeFile, "r"));

    // when + then: exactly three chunks, in order, only the last flagged final
    assertFalse(fileSender.isEndOfInput());
    assertEquals(FileChunk.create(copyOfRange(bytes, 0, MAX_SIZE), false), fileSender.readChunk(allocator));
    assertEquals(FileChunk.create(copyOfRange(bytes, MAX_SIZE, MAX_SIZE * 2), false), fileSender.readChunk(allocator));
    assertEquals(FileChunk.create(copyOfRange(bytes, MAX_SIZE * 2, bytes.length), true), fileSender.readChunk(allocator));
    assertNull(fileSender.readChunk(allocator));
    assertTrue(fileSender.isEndOfInput());
}
Example usage of org.neo4j.adversaries.RandomAdversary in the neo4j/neo4j project — class PageCacheSlowTest, method pageCacheMustRemainInternallyConsistentWhenGettingRandomFailures:
/**
 * Targeted robustness test: hammers the page cache with randomly injected
 * I/O mischief, failures and errors, and asserts that the cache remains
 * internally consistent throughout. Repeated 1000 times per run because the
 * fault injection is random.
 */
@RepeatRule.Repeat(times = 1000)
@Test(timeout = LONG_TIMEOUT_MILLIS)
public void pageCacheMustRemainInternallyConsistentWhenGettingRandomFailures() throws Exception {
// NOTE: This test is inherently non-deterministic. This means that every failure must be
// thoroughly investigated, since they have a good chance of being a real issue.
// This is effectively a targeted robustness test.
// Rates are (mischief, failure, error) — cf. RandomPageCacheTestHarness.runIteration,
// which passes named rate fields in the same positions.
RandomAdversary adversary = new RandomAdversary(0.5, 0.2, 0.2);
// Keep the adversary dormant while the cache and files are set up.
adversary.setProbabilityFactor(0.0);
FileSystemAbstraction fs = new AdversarialFileSystemAbstraction(adversary, this.fs);
ThreadLocalRandom rng = ThreadLocalRandom.current();
// Because our test failures are non-deterministic, we use this tracer to capture a full history of the
// events leading up to any given failure.
LinearTracers linearTracers = LinearHistoryTracerFactory.pageCacheTracer();
getPageCache(fs, maxPages, pageCachePageSize, linearTracers.getPageCacheTracer(), linearTracers.getCursorTracerSupplier());
// Two files with deliberately different page sizes (the second is not a
// divisor of the first), so page/file boundary handling gets exercised.
PagedFile pfA = pageCache.map(existingFile("a"), filePageSize);
PagedFile pfB = pageCache.map(existingFile("b"), filePageSize / 2 + 1);
// Setup done — unleash the adversary for the chaos loop below.
adversary.setProbabilityFactor(1.0);
for (int i = 0; i < 1000; i++) {
// Randomly pick a file and an operation for this iteration.
PagedFile pagedFile = rng.nextBoolean() ? pfA : pfB;
long maxPageId = pagedFile.getLastPageId();
// Reads are only possible once the file has at least one page (-1 means empty).
boolean performingRead = rng.nextBoolean() && maxPageId != -1;
long startingPage = maxPageId < 0 ? 0 : rng.nextLong(maxPageId + 1);
int pf_flags = performingRead ? PF_SHARED_READ_LOCK : PF_SHARED_WRITE_LOCK;
int pageSize = pagedFile.pageSize();
try (PageCursor cursor = pagedFile.io(startingPage, pf_flags)) {
if (performingRead) {
performConsistentAdversarialRead(cursor, maxPageId, startingPage, pageSize);
} else {
performConsistentAdversarialWrite(cursor, rng, pageSize);
}
} catch (AssertionError error) {
// A consistency assertion failed. Quiet the adversary and sweep the
// pages so any exception pending on the eviction thread surfaces here,
// then attach it to the original error for diagnosis.
// Capture any exception that might have hit the eviction thread.
adversary.setProbabilityFactor(0.0);
try (PageCursor cursor = pagedFile.io(0, PF_SHARED_WRITE_LOCK)) {
for (int j = 0; j < 100; j++) {
// NOTE(review): if maxPageId is still -1 here, nextLong(0) throws
// IllegalArgumentException — harmlessly caught and suppressed below.
cursor.next(rng.nextLong(maxPageId + 1));
}
} catch (Throwable throwable) {
error.addSuppressed(throwable);
}
throw error;
} catch (Throwable throwable) {
// Injected failures are expected and intentionally ignored; only
// AssertionErrors (handled above) indicate a real problem.
// Don't worry about it... it's fine!
// throwable.printStackTrace(); // only enable this when debugging test failures.
}
}
// Unmapping will cause pages to be flushed.
// We don't want that to fail, since it will upset the test tear-down.
adversary.setProbabilityFactor(0.0);
try {
// Flushing all pages, if successful, should clear any internal
// exception.
pageCache.flushAndForce();
// Do some post-chaos verification of what has been written.
verifyAdversarialPagedContent(pfA);
verifyAdversarialPagedContent(pfB);
pfA.close();
pfB.close();
} catch (Throwable e) {
// Dump the full event history captured by the linear tracers before
// propagating — the history is the only way to diagnose these failures.
linearTracers.printHistory(System.err);
throw e;
}
}
Example usage of org.neo4j.adversaries.RandomAdversary in the neo4j/neo4j project — class RandomPageCacheTestHarness, method runIteration:
/**
 * Runs one iteration of the randomized page-cache torture plan: builds a
 * (possibly adversarial) file system, maps an initial set of files into a
 * fresh MuninnPageCache, executes the plan concurrently under fault
 * injection until the timeout, then verifies and tears everything down.
 *
 * @param timeout maximum duration for the concurrent phase
 * @param unit    time unit of {@code timeout}
 * @throws Exception on timeout, verification failure, or unexpected errors
 */
@SuppressWarnings("unchecked")
private void runIteration(long timeout, TimeUnit unit) throws Exception {
assert filePageSize % recordFormat.getRecordSize() == 0 : "File page size must be a multiple of the record size";
// Re-seed each iteration unless the harness was configured with a fixed seed.
if (!fixedRandomSeed) {
randomSeed = ThreadLocalRandom.current().nextLong();
}
FileSystemAbstraction fs = this.fs;
File[] files = buildFileNames();
RandomAdversary adversary = new RandomAdversary(mischiefRate, failureRate, errorRate);
// Adversary stays dormant during setup; it is enabled only for the run phase.
adversary.setProbabilityFactor(0.0);
if (useAdversarialIO) {
// Seed the adversary so a failing iteration can be reproduced from its seed.
adversary.setSeed(randomSeed);
fs = new AdversarialFileSystemAbstraction(adversary, fs);
}
PageSwapperFactory swapperFactory = new SingleFilePageSwapperFactory();
swapperFactory.setFileSystemAbstraction(fs);
MuninnPageCache cache = new MuninnPageCache(swapperFactory, cachePageCount, cachePageSize, tracer, cursorTracerSupplier);
// Injected faults make exceptions on close expected noise — don't print them.
cache.setPrintExceptionsOnClose(false);
// Pre-map an initial subset of the files; the plan may map/unmap more later.
Map<File, PagedFile> fileMap = new HashMap<>(files.length);
for (int i = 0; i < Math.min(files.length, initialMappedFiles); i++) {
File file = files[i];
fileMap.put(file, cache.map(file, filePageSize));
}
plan = plan(cache, files, fileMap);
Callable<Void> planRunner = new PlanRunner(plan);
Future<Void>[] futures = new Future[concurrencyLevel];
ExecutorService executor = Executors.newFixedThreadPool(concurrencyLevel);
// Workers are submitted before plan.start(); presumably PlanRunner blocks
// until the plan is started — TODO confirm against PlanRunner.
for (int i = 0; i < concurrencyLevel; i++) {
futures[i] = executor.submit(planRunner);
}
if (preparation != null) {
// Note: preparation runs against the raw fs (this.fs), not the adversarial wrapper.
preparation.run(cache, this.fs, plan.getFilesTouched());
}
// Setup complete — enable fault injection and start the plan.
adversary.setProbabilityFactor(1.0);
plan.start();
long deadlineMillis = System.currentTimeMillis() + unit.toMillis(timeout);
long now;
try {
// Wait for all workers, charging elapsed time against the shared deadline.
for (Future<Void> future : futures) {
now = System.currentTimeMillis();
if (deadlineMillis < now) {
throw new TimeoutException();
}
future.get(deadlineMillis - now, TimeUnit.MILLISECONDS);
}
// Verification must run without fault injection.
adversary.setProbabilityFactor(0.0);
runVerificationPhase(cache);
} finally {
// Disable injection before teardown so flushes on close cannot be failed
// by the adversary.
adversary.setProbabilityFactor(0.0);
for (Future<Void> future : futures) {
future.cancel(true);
}
executor.shutdown();
now = System.currentTimeMillis();
// If the deadline has already passed, the timeout is <= 0 and
// awaitTermination returns immediately — acceptable best-effort wait.
executor.awaitTermination(deadlineMillis - now, TimeUnit.MILLISECONDS);
plan.close();
cache.close();
// Reset state for the next iteration: ephemeral file systems are replaced
// wholesale; on a real file system the test files are deleted instead.
if (this.fs instanceof EphemeralFileSystemAbstraction) {
this.fs.close();
this.fs = new EphemeralFileSystemAbstraction();
} else {
for (File file : files) {
file.delete();
}
}
}
}
Example usage of org.neo4j.adversaries.RandomAdversary in the neo4j/neo4j project — class SingleFilePageSwapperTest, method mustHandleMischiefInPositionedVectoredWrite:
/**
 * Positioned vectored writes must tolerate I/O mischief: with a 50%
 * mischief-rate adversary active during the write, the data that lands on
 * disk must still match what was handed to the swapper, byte for byte.
 */
@Test
public void mustHandleMischiefInPositionedVectoredWrite() throws Exception {
    int bytesTotal = 512;
    int bytesPerPage = 32;
    int pageCount = bytesTotal / bytesPerPage;

    // Random payload split across pageCount pages.
    byte[] payload = new byte[bytesTotal];
    ThreadLocalRandom.current().nextBytes(payload);

    // A single zeroed page, shared by every slot of the wipe array.
    ByteBufferPage blank = createPage(bytesPerPage);
    clear(blank);

    File file = getFile();
    PageSwapperFactory factory = createSwapperFactory();
    // Mischief only — no injected failures or errors.
    RandomAdversary adversary = new RandomAdversary(0.5, 0.0, 0.0);
    factory.setFileSystemAbstraction(new AdversarialFileSystemAbstraction(adversary, getFs()));
    PageSwapper swapper = createSwapper(factory, file, bytesPerPage, NO_CALLBACK, true);

    ByteBufferPage[] sources = new ByteBufferPage[pageCount];
    ByteBufferPage[] targets = new ByteBufferPage[pageCount];
    ByteBufferPage[] blanks = new ByteBufferPage[pageCount];
    for (int page = 0; page < pageCount; page++) {
        sources[page] = createPage(bytesPerPage);
        sources[page].putBytes(payload, 0, page * bytesPerPage, bytesPerPage);
        targets[page] = createPage(bytesPerPage);
        blanks[page] = blank;
    }

    try {
        for (int round = 0; round < 10_000; round++) {
            // Wipe the file reliably (adversary off)...
            adversary.setProbabilityFactor(0);
            swapper.write(0, blanks, 0, pageCount);
            // ...then perform the vectored write with mischief enabled.
            adversary.setProbabilityFactor(1);
            swapper.write(0, sources, 0, pageCount);

            for (ByteBufferPage target : targets) {
                clear(target);
            }

            // Read back reliably and verify every page survived intact.
            adversary.setProbabilityFactor(0);
            assertThat(swapper.read(0, targets, 0, pageCount), is((long) bytesTotal));
            for (int page = 0; page < pageCount; page++) {
                assertThat(array(targets[page].buffer), is(array(sources[page].buffer)));
            }
        }
    } finally {
        swapper.close();
    }
}
Aggregations