Use of org.neo4j.io.fs.FileSystemAbstraction in project neo4j by neo4j.
From the class QuickImport, method main.
public static void main(String[] arguments) throws IOException {
    Args args = Args.parse(arguments);
    long nodeCount = parseLongWithUnit(args.get("nodes", null));
    long relationshipCount = parseLongWithUnit(args.get("relationships", null));
    int labelCount = args.getNumber("labels", 4).intValue();
    int relationshipTypeCount = args.getNumber("relationship-types", 4).intValue();
    File dir = new File(args.get(ImportTool.Options.STORE_DIR.key()));
    long randomSeed = args.getNumber("random-seed", currentTimeMillis()).longValue();
    Configuration config = COMMAS;
    Extractors extractors = new Extractors(config.arrayDelimiter());
    IdType idType = IdType.valueOf(args.get("id-type", IdType.ACTUAL.name()));
    Header nodeHeader = parseNodeHeader(args, idType, extractors);
    Header relationshipHeader = parseRelationshipHeader(args, idType, extractors);
    FormattedLogProvider sysoutLogProvider = FormattedLogProvider.toOutputStream(System.out);
    org.neo4j.unsafe.impl.batchimport.Configuration importConfig = new Default() {
        @Override
        public int maxNumberOfProcessors() {
            return args.getNumber(ImportTool.Options.PROCESSORS.key(), super.maxNumberOfProcessors()).intValue();
        }

        @Override
        public int denseNodeThreshold() {
            return args.getNumber(dense_node_threshold.name(), super.denseNodeThreshold()).intValue();
        }
    };
    SimpleDataGenerator generator = new SimpleDataGenerator(nodeHeader, relationshipHeader, randomSeed,
            nodeCount, labelCount, relationshipTypeCount, idType);
    Input input = new DataGeneratorInput(nodeCount, relationshipCount, generator.nodes(),
            generator.relationships(), idType, silentBadCollector(0));
    try (FileSystemAbstraction fileSystem = new DefaultFileSystemAbstraction()) {
        BatchImporter consumer;
        if (args.getBoolean("to-csv")) {
            consumer = new CsvOutput(dir, nodeHeader, relationshipHeader, config);
        } else {
            consumer = new ParallelBatchImporter(dir, fileSystem, importConfig,
                    new SimpleLogService(sysoutLogProvider, sysoutLogProvider), defaultVisible(), Config.defaults());
        }
        consumer.doImport(input);
    }
}
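For context, the Args helper seen above implements a simple key/value argument model. Below is a minimal sketch of the same parsing pattern, runnable on its own; the argument values are invented for illustration, and it assumes org.neo4j.helpers.Args parses "--key value" pairs the way this snippet uses them.

import org.neo4j.helpers.Args;

public class ArgsSketch {
    public static void main(String[] ignored) {
        // Hypothetical arguments mirroring the options QuickImport reads.
        Args args = Args.parse("--nodes", "1000000", "--labels", "6");
        System.out.println(args.get("nodes", null));                 // expected to print 1000000
        System.out.println(args.getNumber("labels", 4).intValue());  // expected to print 6
    }
}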
Use of org.neo4j.io.fs.FileSystemAbstraction in project neo4j by neo4j.
From the class PageCacheSlowTest, method pageCacheMustRemainInternallyConsistentWhenGettingRandomFailures.
@RepeatRule.Repeat(times = 1000)
@Test(timeout = LONG_TIMEOUT_MILLIS)
public void pageCacheMustRemainInternallyConsistentWhenGettingRandomFailures() throws Exception {
    // NOTE: This test is inherently non-deterministic. This means that every failure must be
    // thoroughly investigated, since they have a good chance of being a real issue.
    // This is effectively a targeted robustness test.
    RandomAdversary adversary = new RandomAdversary(0.5, 0.2, 0.2);
    adversary.setProbabilityFactor(0.0);
    FileSystemAbstraction fs = new AdversarialFileSystemAbstraction(adversary, this.fs);
    ThreadLocalRandom rng = ThreadLocalRandom.current();
    // Because our test failures are non-deterministic, we use this tracer to capture a full
    // history of the events leading up to any given failure.
    LinearTracers linearTracers = LinearHistoryTracerFactory.pageCacheTracer();
    getPageCache(fs, maxPages, pageCachePageSize, linearTracers.getPageCacheTracer(), linearTracers.getCursorTracerSupplier());
    PagedFile pfA = pageCache.map(existingFile("a"), filePageSize);
    PagedFile pfB = pageCache.map(existingFile("b"), filePageSize / 2 + 1);
    adversary.setProbabilityFactor(1.0);
    for (int i = 0; i < 1000; i++) {
        PagedFile pagedFile = rng.nextBoolean() ? pfA : pfB;
        long maxPageId = pagedFile.getLastPageId();
        boolean performingRead = rng.nextBoolean() && maxPageId != -1;
        long startingPage = maxPageId < 0 ? 0 : rng.nextLong(maxPageId + 1);
        int pf_flags = performingRead ? PF_SHARED_READ_LOCK : PF_SHARED_WRITE_LOCK;
        int pageSize = pagedFile.pageSize();
        try (PageCursor cursor = pagedFile.io(startingPage, pf_flags)) {
            if (performingRead) {
                performConsistentAdversarialRead(cursor, maxPageId, startingPage, pageSize);
            } else {
                performConsistentAdversarialWrite(cursor, rng, pageSize);
            }
        } catch (AssertionError error) {
            // Capture any exception that might have hit the eviction thread.
            adversary.setProbabilityFactor(0.0);
            try (PageCursor cursor = pagedFile.io(0, PF_SHARED_WRITE_LOCK)) {
                for (int j = 0; j < 100; j++) {
                    cursor.next(rng.nextLong(maxPageId + 1));
                }
            } catch (Throwable throwable) {
                error.addSuppressed(throwable);
            }
            throw error;
        } catch (Throwable throwable) {
            // Don't worry about it... it's fine!
            // throwable.printStackTrace(); // only enable this when debugging test failures.
        }
    }
    // Unmapping will cause pages to be flushed.
    // We don't want that to fail, since it will upset the test tear-down.
    adversary.setProbabilityFactor(0.0);
    try {
        // Flushing all pages, if successful, should clear any internal exception.
        pageCache.flushAndForce();
        // Do some post-chaos verification of what has been written.
        verifyAdversarialPagedContent(pfA);
        verifyAdversarialPagedContent(pfB);
        pfA.close();
        pfB.close();
    } catch (Throwable e) {
        linearTracers.printHistory(System.err);
        throw e;
    }
}
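The core trick in this test is a fault injector whose failure probability can be dialed to zero around setup, verification, and teardown, and up to full strength only inside the chaos loop. The following is a minimal, self-contained sketch of that pattern; all names here are hypothetical and this is not the real RandomAdversary API.

import java.io.IOException;
import java.util.concurrent.ThreadLocalRandom;

class FaultInjector {
    private final double baseFailureRate;
    private volatile double probabilityFactor; // 0.0 disables injection, 1.0 enables it at full rate

    FaultInjector(double baseFailureRate) {
        this.baseFailureRate = baseFailureRate;
    }

    void setProbabilityFactor(double factor) {
        this.probabilityFactor = factor;
    }

    // Call at the start of every I/O operation under test.
    void maybeInjectFault() throws IOException {
        if (ThreadLocalRandom.current().nextDouble() < baseFailureRate * probabilityFactor) {
            throw new IOException("injected fault");
        }
    }
}

Confining the non-zero factor to the loop is what lets the test map files, flush, and verify afterwards without the adversary interfering.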
Use of org.neo4j.io.fs.FileSystemAbstraction in project neo4j by neo4j.
From the class PageCacheTest, method flushingDuringPagedFileCloseMustRetryUntilItSucceeds.
@RepeatRule.Repeat(times = 100)
@Test(timeout = SHORT_TIMEOUT_MILLIS)
public void flushingDuringPagedFileCloseMustRetryUntilItSucceeds() throws IOException {
    FileSystemAbstraction fs = new DelegatingFileSystemAbstraction(this.fs) {
        @Override
        public StoreChannel open(File fileName, String mode) throws IOException {
            return new DelegatingStoreChannel(super.open(fileName, mode)) {
                private int writeCount = 0;

                @Override
                public void writeAll(ByteBuffer src, long position) throws IOException {
                    if (writeCount++ < 10) {
                        throw new IOException("This is a benign exception that we expect to be thrown during a flush of a PagedFile.");
                    }
                    super.writeAll(src, position);
                }
            };
        }
    };
    getPageCache(fs, maxPages, pageCachePageSize, PageCacheTracer.NULL, PageCursorTracerSupplier.NULL);
    PrintStream oldSystemErr = System.err;
    try (PagedFile pf = pageCache.map(file("a"), filePageSize);
         PageCursor cursor = pf.io(0, PF_SHARED_WRITE_LOCK)) {
        assertTrue(cursor.next());
        writeRecords(cursor);
        // Silence any stack traces the failed flushes might print.
        System.setErr(new PrintStream(new ByteArrayOutputStream()));
    } finally {
        System.setErr(oldSystemErr);
    }
    verifyRecordsInFile(file("a"), recordsPerFilePage);
}
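The decorated channel fails its first ten writeAll calls and succeeds from the eleventh on, so the flush that runs when the PagedFile closes can only complete if the page cache keeps retrying. A generic sketch of that retry contract follows; the names are hypothetical and this is not the page cache's real internals.

import java.io.IOException;

class RetryingFlush {
    interface FlushAction {
        void flush() throws IOException;
    }

    // Retry until the flush succeeds; against the channel above, with a single
    // dirty page per attempt, the eleventh attempt would get through.
    static void flushUntilSuccess(FlushAction action) throws InterruptedException {
        while (true) {
            try {
                action.flush();
                return; // success: the data reached the underlying channel
            } catch (IOException benign) {
                Thread.sleep(10); // brief back-off before retrying
            }
        }
    }
}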
Use of org.neo4j.io.fs.FileSystemAbstraction in project neo4j by neo4j.
From the class PageCacheTest, method channelMustBeForcedAfterPagedFileFlushAndForce.
@Test
public void channelMustBeForcedAfterPagedFileFlushAndForce() throws Exception {
    final AtomicInteger writeCounter = new AtomicInteger();
    final AtomicInteger forceCounter = new AtomicInteger();
    FileSystemAbstraction fs = writeAndForceCountingFs(writeCounter, forceCounter);
    getPageCache(fs, maxPages, pageCachePageSize, PageCacheTracer.NULL, PageCursorTracerSupplier.NULL);
    try (PagedFile pagedFile = pageCache.map(file("a"), filePageSize)) {
        try (PageCursor cursor = pagedFile.io(0, PF_SHARED_WRITE_LOCK)) {
            assertTrue(cursor.next());
            cursor.putInt(1);
            assertTrue(cursor.next());
            cursor.putInt(1);
        }
        pagedFile.flushAndForce();
        // we might race with background flushing
        assertThat(writeCounter.get(), greaterThanOrEqualTo(2));
        assertThat(forceCounter.get(), is(1));
    }
}
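writeAndForceCountingFs is a helper defined elsewhere in PageCacheTest. Judging from how it is used here and from the DelegatingStoreChannel pattern in the previous snippet, a plausible sketch of its shape is shown below; treat it as an assumption, not the verified implementation.

FileSystemAbstraction writeAndForceCountingFs(final AtomicInteger writeCounter, final AtomicInteger forceCounter) {
    return new DelegatingFileSystemAbstraction(this.fs) {
        @Override
        public StoreChannel open(File fileName, String mode) throws IOException {
            return new DelegatingStoreChannel(super.open(fileName, mode)) {
                @Override
                public void writeAll(ByteBuffer src, long position) throws IOException {
                    writeCounter.getAndIncrement(); // one increment per page written out
                    super.writeAll(src, position);
                }

                @Override
                public void force(boolean metaData) throws IOException {
                    forceCounter.getAndIncrement(); // one increment per channel force
                    super.force(metaData);
                }
            };
        }
    };
}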
Use of org.neo4j.io.fs.FileSystemAbstraction in project neo4j by neo4j.
From the class PageCacheTest, method channelsMustBeForcedAfterPageCacheFlushAndForce.
@Test
public void channelsMustBeForcedAfterPageCacheFlushAndForce() throws Exception {
    final AtomicInteger writeCounter = new AtomicInteger();
    final AtomicInteger forceCounter = new AtomicInteger();
    FileSystemAbstraction fs = writeAndForceCountingFs(writeCounter, forceCounter);
    getPageCache(fs, maxPages, pageCachePageSize, PageCacheTracer.NULL, PageCursorTracerSupplier.NULL);
    try (PagedFile pagedFileA = pageCache.map(existingFile("a"), filePageSize);
         PagedFile pagedFileB = pageCache.map(existingFile("b"), filePageSize)) {
        try (PageCursor cursor = pagedFileA.io(0, PF_SHARED_WRITE_LOCK)) {
            assertTrue(cursor.next());
            cursor.putInt(1);
            assertTrue(cursor.next());
            cursor.putInt(1);
        }
        try (PageCursor cursor = pagedFileB.io(0, PF_SHARED_WRITE_LOCK)) {
            assertTrue(cursor.next());
            cursor.putInt(1);
        }
        pageCache.flushAndForce();
        // we might race with background flushing
        assertThat(writeCounter.get(), greaterThanOrEqualTo(3));
        assertThat(forceCounter.get(), is(2));
    }
}
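The expected counts follow from the writes performed above: two pages dirtied in file "a" plus one in file "b" yield at least three writeAll calls (background flushing may add more, hence greaterThanOrEqualTo), while flushAndForce forces each of the two mapped files exactly once, giving exactly two force calls.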