Use of org.neo4j.io.pagecache.PagedFile in project neo4j by neo4j.
Class GBPTree, method openOrCreate:
private PagedFile openOrCreate(PageCache pageCache, File indexFile, int pageSizeForCreation,
        Layout<KEY, VALUE> layout) throws IOException {
    try {
        PagedFile pagedFile = pageCache.map(indexFile, pageCache.pageSize());
        try {
            readMeta(indexFile, layout, pagedFile);
            pagedFile = mapWithCorrectPageSize(pageCache, indexFile, pagedFile);
            return pagedFile;
        } catch (Throwable t) {
            try {
                pagedFile.close();
            } catch (IOException e) {
                t.addSuppressed(e);
            }
            throw t;
        }
    } catch (NoSuchFileException e) {
        // First time
        monitor.noStoreFile();
        pageSize = pageSizeForCreation == 0 ? pageCache.pageSize() : pageSizeForCreation;
        if (pageSize > pageCache.pageSize()) {
            throw new MetadataMismatchException("Tree in " + indexFile.getAbsolutePath()
                    + " was about to be created with page size:" + pageSize
                    + ", but page cache used to create it has a smaller page size:"
                    + pageCache.pageSize() + " so cannot be created");
        }
        // We need to create this index
        PagedFile pagedFile = pageCache.map(indexFile, pageSize, StandardOpenOption.CREATE);
        created = true;
        return pagedFile;
    }
}
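For orientation, the PagedFile that openOrCreate returns is used through the usual map/io/close lifecycle. A minimal sketch of that lifecycle (assuming an existing PageCache named pageCache and a hypothetical file name; this is not GBPTree's own code):

    File file = new File("example.store");  // hypothetical file name
    try (PagedFile pagedFile = pageCache.map(file, pageCache.pageSize(), StandardOpenOption.CREATE);
         PageCursor cursor = pagedFile.io(0, PagedFile.PF_SHARED_WRITE_LOCK)) {
        if (cursor.next()) {         // pin page 0 with an exclusive write lock
            cursor.putLong(42L);     // write into the pinned page
        }
    }                                // closing unpins the cursor and unmaps the file

Closing the PagedFile is what openOrCreate's catch block guards: if reading the meta page fails, the mapping is closed before the error is rethrown.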
Use of org.neo4j.io.pagecache.PagedFile in project neo4j by neo4j.
Class AccessCheckingPageCacheTest, method getPageCursor:
@Before
public void getPageCursor() throws IOException {
    PageCache mockedPageCache = mock(PageCache.class);
    PagedFile mockedPagedFile = mock(PagedFile.class);
    PageCursor mockedCursor = mock(PageCursor.class);
    when(mockedPagedFile.io(anyLong(), anyInt())).thenReturn(mockedCursor);
    when(mockedPageCache.map(any(File.class), anyInt(), anyVararg())).thenReturn(mockedPagedFile);
    pageCache = new AccessCheckingPageCache(mockedPageCache);
    PagedFile file = pageCache.map(new File("some file"), 512);
    cursor = file.io(0, PagedFile.PF_SHARED_READ_LOCK);
}
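AccessCheckingPageCache wraps another page cache so that tests can catch read cursors whose data is used without being validated. The usual read pattern with PF_SHARED_READ_LOCK is an optimistic read-and-retry loop; a minimal sketch (assuming a mapped pagedFile and a method that may throw IOException):

    try (PageCursor cursor = pagedFile.io(0, PagedFile.PF_SHARED_READ_LOCK)) {
        if (cursor.next()) {                // pin page 0 with a shared (optimistic) read lock
            long value;
            do {
                value = cursor.getLong();   // optimistic read; may observe a concurrent write
            } while (cursor.shouldRetry()); // re-read if the page changed underneath us
        }
    }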
Use of org.neo4j.io.pagecache.PagedFile in project neo4j by neo4j.
Class PageCacheHarnessTest, method concurrentPageFaultingMustNotPutInterleavedDataIntoPages:
@Test(timeout = LONG_TIMEOUT_MILLIS)
public void concurrentPageFaultingMustNotPutInterleavedDataIntoPages() throws Exception {
    final int filePageCount = 11;
    final RecordFormat recordFormat = new PageCountRecordFormat();
    try (RandomPageCacheTestHarness harness = new RandomPageCacheTestHarness()) {
        harness.setConcurrencyLevel(11);
        harness.setUseAdversarialIO(false);
        harness.setCachePageCount(3);
        harness.setCachePageSize(pageCachePageSize);
        harness.setFilePageCount(filePageCount);
        harness.setFilePageSize(pageCachePageSize);
        harness.setInitialMappedFiles(1);
        harness.setCommandCount(10000);
        harness.setRecordFormat(recordFormat);
        harness.setFileSystem(fs);
        harness.disableCommands(FlushCache, FlushFile, MapFile, UnmapFile, WriteRecord, WriteMulti);
        harness.setPreparation((pageCache1, fs1, filesTouched) -> {
            File file = filesTouched.iterator().next();
            try (PagedFile pf = pageCache1.map(file, pageCachePageSize);
                 PageCursor cursor = pf.io(0, PF_SHARED_WRITE_LOCK)) {
                for (int pageId = 0; pageId < filePageCount; pageId++) {
                    cursor.next();
                    recordFormat.fillWithRecords(cursor);
                }
            }
        });
        harness.run(LONG_TIMEOUT_MILLIS, MILLISECONDS);
    }
}
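The preparation step above pre-fills every page of the mapped file through a single write cursor before the harness starts issuing random commands. Stripped of the harness, that pattern looks roughly like the sketch below (assuming a PageCache, a file, a page size and a page count; the plain putInt stands in for recordFormat.fillWithRecords):

    try (PagedFile pf = pageCache.map(file, pageSize);
         PageCursor cursor = pf.io(0, PagedFile.PF_SHARED_WRITE_LOCK)) {
        for (int pageId = 0; pageId < filePageCount; pageId++) {
            if (cursor.next()) {        // advance to the next page, growing the file if needed
                cursor.putInt(pageId);  // stand-in for recordFormat.fillWithRecords(cursor)
            }
        }
    }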
Use of org.neo4j.io.pagecache.PagedFile in project neo4j by neo4j.
Class ToFileStoreWriter, method write:
@Override
public long write(String path, ReadableByteChannel data, ByteBuffer temporaryBuffer, boolean hasData,
        int requiredElementAlignment) throws IOException {
    try {
        temporaryBuffer.clear();
        File file = new File(basePath, path);
        file.getParentFile().mkdirs();
        String filename = file.getName();
        monitor.startReceivingStoreFile(file);
        try {
            // We are copying to a temporary store location and will move the files into place later.
            if (StoreType.shouldBeManagedByPageCache(filename)) {
                int filePageSize = filePageSize(requiredElementAlignment);
                try (PagedFile pagedFile = pageCache.map(file, filePageSize, CREATE, WRITE)) {
                    final long written = writeDataThroughPageCache(pagedFile, data, temporaryBuffer, hasData);
                    addPageCacheMoveAction(file);
                    return written;
                }
            }
            // These files are written directly through the file system; the move happens *after* we have
            // done recovery on the store, and recovery may delete some files and add other files.
            return writeDataThroughFileSystem(file, data, temporaryBuffer, hasData);
        } finally {
            monitor.finishReceivingStoreFile(file);
        }
    } catch (Throwable t) {
        throw new IOException(t);
    }
}
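writeDataThroughPageCache is not shown in this snippet. A rough sketch of how a channel could be copied into a PagedFile one page at a time follows (an assumption for illustration, not the actual Neo4j implementation; it presumes a heap-backed buffer sized to the file page size):

    private static long copyThroughPageCache(PagedFile pagedFile, ReadableByteChannel data, ByteBuffer buffer)
            throws IOException {
        long total = 0;
        try (PageCursor cursor = pagedFile.io(0, PagedFile.PF_SHARED_WRITE_LOCK)) {
            while (true) {
                buffer.clear();
                while (buffer.hasRemaining() && data.read(buffer) != -1) {
                    // keep filling until the buffer holds a full page or the channel is drained
                }
                int bytesInPage = buffer.position();
                if (bytesInPage == 0) {
                    break;                                        // nothing left to copy
                }
                if (!cursor.next()) {
                    break;                                        // could not advance to the next page
                }
                cursor.putBytes(buffer.array(), 0, bytesInPage);  // requires a heap (array-backed) buffer
                total += bytesInPage;
                if (bytesInPage < buffer.capacity()) {
                    break;                                        // short page means the channel is exhausted
                }
            }
        }
        return total;
    }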
Use of org.neo4j.io.pagecache.PagedFile in project neo4j by neo4j.
Class RelationshipRecordFormatTest, method shouldMarkBothUnitsAsUnusedWhenDeletingRecordWhichHasSecondaryUnit:
/*
 * This test acts as a test group for whoever uses the BaseHighLimitRecordFormat base class;
 * the logic for marking both units as unused when deleting exists there.
 */
@Test
public void shouldMarkBothUnitsAsUnusedWhenDeletingRecordWhichHasSecondaryUnit() throws Exception {
    // GIVEN a record which requires two units
    PagedFile storeFile = mock(PagedFile.class);
    when(storeFile.pageSize()).thenReturn(cursor.getCurrentPageSize());
    long hugeValue = 1L << 48;
    RelationshipRecord record = new RelationshipRecord(5).initialize(true,
            hugeValue + 1, hugeValue + 2, hugeValue + 3, 4,
            hugeValue + 5, hugeValue + 6, hugeValue + 7, hugeValue + 8, true, true);
    record.setSecondaryUnitId(17);
    record.setRequiresSecondaryUnit(true);
    cursor.setOffset(offsetForId(record.getId(), cursor.getCurrentPageSize(), recordSize));
    format.write(record, cursor, recordSize);
    // WHEN deleting that record
    record.setInUse(false);
    cursor.setOffset(offsetForId(record.getId(), cursor.getCurrentPageSize(), recordSize));
    format.write(record, cursor, recordSize);
    // THEN both units should have been marked as unused
    cursor.setOffset(offsetForId(record.getId(), cursor.getCurrentPageSize(), recordSize));
    assertFalse(recordInUse(cursor));
    cursor.setOffset(offsetForId(record.getSecondaryUnitId(), cursor.getCurrentPageSize(), recordSize));
    assertFalse(recordInUse(cursor));
}
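The cursor written through in this test is not a real page cache cursor; record format tests typically use an in-memory stub. A minimal sketch of constructing one (assuming org.neo4j.io.pagecache.StubPageCursor and an 8 KiB page; the real test wires up its own fixture):

    StubPageCursor cursor = new StubPageCursor(0, 8192);  // page id 0, 8 KiB in-memory page
    cursor.setOffset(0);                                   // position within the page, as offsetForId(...) does above
    cursor.putByte((byte) 0);                              // writes land directly in the stub's backing buffer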