Use of org.neo4j.io.pagecache.PageCache in project neo4j by neo4j.
In class GBPTreeRecoveryTest, method shouldRecoverFromCrashBeforeFirstCheckpoint.
@Test
public void shouldRecoverFromCrashBeforeFirstCheckpoint() throws Exception {
    // GIVEN
    // a tree with only a small amount of data that has not yet seen a checkpoint from the outside
    File file = directory.file("index");
    {
        PageCache pageCache = createPageCache();
        GBPTree<MutableLong, MutableLong> index = createIndex(pageCache, file);
        Writer<MutableLong, MutableLong> writer = index.writer();
        key.setValue(1L);
        value.setValue(10L);
        writer.put(key, value);
        pageCache.flushAndForce();
        fs.snapshot(throwing(() -> {
            writer.close();
            index.close();
            pageCache.close();
        }));
    }

    // WHEN
    try (PageCache pageCache = createPageCache();
         GBPTree<MutableLong, MutableLong> index = createIndex(pageCache, file)) {
        // this is the mimicked recovery
        index.prepareForRecovery();
        index.finishRecovery();
        try (Writer<MutableLong, MutableLong> writer = index.writer()) {
            writer.put(key, value);
        }

        // THEN
        // we should end up with a consistent index
        index.consistencyCheck();
        // ... containing the entry we inserted
        try (RawCursor<Hit<MutableLong, MutableLong>, IOException> cursor =
                index.seek(new MutableLong(Long.MIN_VALUE), new MutableLong(Long.MAX_VALUE))) {
            assertTrue(cursor.next());
            Hit<MutableLong, MutableLong> hit = cursor.get();
            assertEquals(key.getValue(), hit.key().getValue());
            assertEquals(value.getValue(), hit.value().getValue());
        }
    }
}
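The test closes its resources inside fs.snapshot(throwing(...)), where the throwing adapter turns an action that throws checked exceptions into a plain Runnable. A minimal sketch of such an adapter, with the interface name being an assumption of mine rather than the actual neo4j test utility:

interface ThrowingAction {
    void apply() throws Exception;
}

// Wraps a checked-exception action so it satisfies Runnable's signature,
// rethrowing any failure as an unchecked exception.
static Runnable throwing(ThrowingAction action) {
    return () -> {
        try {
            action.apply();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    };
}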
Use of org.neo4j.io.pagecache.PageCache in project neo4j by neo4j.
In class CrashGenerationCleanerTest, method setupPagedFile.
@Before
public void setupPagedFile() throws IOException {
    PageCache pageCache = pageCacheRule.getPageCache(fileSystemRule.get(),
            config().withPageSize(PAGE_SIZE).withAccessChecks(true));
    pagedFile = pageCache.map(testDirectory.file(FILE_NAME), PAGE_SIZE, CREATE, DELETE_ON_CLOSE);
}
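The setup relies on rule and constant fields declared on the test class. A sketch of what those declarations could look like; the rule types are real neo4j test rules, but the concrete values and wiring are assumptions, and config() is the statically imported PageCacheRule.config():

@Rule
public final DefaultFileSystemRule fileSystemRule = new DefaultFileSystemRule();

@Rule
public final PageCacheRule pageCacheRule = new PageCacheRule();

@Rule
public final TestDirectory testDirectory = TestDirectory.testDirectory();

private static final String FILE_NAME = "index"; // assumed file name
private static final int PAGE_SIZE = 256;        // assumed page size in bytes

private PagedFile pagedFile;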
Use of org.neo4j.io.pagecache.PageCache in project neo4j by neo4j.
In class AccessCheckingPageCacheTest, method getPageCursor.
@Before
public void getPageCursor() throws IOException {
    PageCache mockedPageCache = mock(PageCache.class);
    PagedFile mockedPagedFile = mock(PagedFile.class);
    PageCursor mockedCursor = mock(PageCursor.class);
    when(mockedPagedFile.io(anyLong(), anyInt())).thenReturn(mockedCursor);
    when(mockedPageCache.map(any(File.class), anyInt(), anyVararg())).thenReturn(mockedPagedFile);
    pageCache = new AccessCheckingPageCache(mockedPageCache);
    PagedFile file = pageCache.map(new File("some file"), 512);
    cursor = file.io(0, PagedFile.PF_SHARED_READ_LOCK);
}
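With that fixture in place, a test can exercise the wrapper's contract directly on the cursor. A hedged sketch: AccessCheckingPageCache exists to verify that reads are confirmed with shouldRetry(), and the test name and expectation below are illustrative rather than a copy of the real test:

@Test
public void shouldFailOnReadThatIsNotVerifiedWithShouldRetry() throws IOException {
    // read through the access-checked cursor
    cursor.getByte();
    try {
        // closing without first validating the read via shouldRetry()
        // should be flagged as an illegal access pattern
        cursor.close();
        fail("Expected the access check to fail");
    } catch (AssertionError e) {
        // expected
    }
}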
Use of org.neo4j.io.pagecache.PageCache in project neo4j by neo4j.
In class PageCacheStressTest, method run.
public void run() throws Exception {
    try (FileSystemAbstraction fs = new DefaultFileSystemAbstraction()) {
        PageSwapperFactory swapperFactory = new SingleFilePageSwapperFactory();
        swapperFactory.setFileSystemAbstraction(fs);
        try (PageCache pageCacheUnderTest = new MuninnPageCache(swapperFactory, numberOfCachePages, cachePageSize,
                tracer, pageCursorTracerSupplier)) {
            PageCacheStresser pageCacheStresser =
                    new PageCacheStresser(numberOfPages, numberOfThreads, workingDirectory);
            pageCacheStresser.stress(pageCacheUnderTest, condition);
        }
    }
}
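run() pulls its configuration from fields on the test class. A sketch of plausible declarations; every concrete value is an assumption, as is the Conditions.timePeriod helper used here to build a stop condition:

private final int numberOfCachePages = 1_000;
private final int cachePageSize = 8_192;      // bytes per cache page
private final int numberOfPages = 10_000;     // pages in the stressed file
private final int numberOfThreads = 8;
private final File workingDirectory = new File("target/stress"); // assumed location
private final PageCacheTracer tracer = PageCacheTracer.NULL;     // no-op tracing
private final PageCursorTracerSupplier pageCursorTracerSupplier = PageCursorTracerSupplier.NULL;
// the stresser runs until this condition is fulfilled
private final Condition condition = Conditions.timePeriod(1, TimeUnit.MINUTES);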
Use of org.neo4j.io.pagecache.PageCache in project neo4j by neo4j.
In class StoreMigrator, method migrateWithBatchImporter.
private void migrateWithBatchImporter(File storeDir, File migrationDir, long lastTxId, long lastTxChecksum,
        long lastTxLogVersion, long lastTxLogByteOffset, MigrationProgressMonitor.Section progressMonitor,
        RecordFormats oldFormat, RecordFormats newFormat) throws IOException {
    prepareBatchImportMigration(storeDir, migrationDir, oldFormat, newFormat);
    boolean requiresDynamicStoreMigration = !newFormat.dynamic().equals(oldFormat.dynamic());
    boolean requiresPropertyMigration =
            !newFormat.property().equals(oldFormat.property()) || requiresDynamicStoreMigration;
    File badFile = new File(storeDir, Configuration.BAD_FILE_NAME);
    try (NeoStores legacyStore = instantiateLegacyStore(oldFormat, storeDir);
         RecordCursors nodeInputCursors = new RecordCursors(legacyStore);
         RecordCursors relationshipInputCursors = new RecordCursors(legacyStore);
         OutputStream badOutput = new BufferedOutputStream(new FileOutputStream(badFile, false))) {
        Configuration importConfig = new Configuration.Overridden(config);
        AdditionalInitialIds additionalInitialIds =
                readAdditionalIds(lastTxId, lastTxChecksum, lastTxLogVersion, lastTxLogByteOffset);
        // We have to make sure to keep the token ids if we're migrating properties/labels
        BatchImporter importer = new ParallelBatchImporter(migrationDir.getAbsoluteFile(), fileSystem, pageCache,
                importConfig, logService,
                withDynamicProcessorAssignment(
                        migrationBatchImporterMonitor(legacyStore, progressMonitor, importConfig), importConfig),
                additionalInitialIds, config, newFormat);
        InputIterable<InputNode> nodes =
                legacyNodesAsInput(legacyStore, requiresPropertyMigration, nodeInputCursors);
        InputIterable<InputRelationship> relationships =
                legacyRelationshipsAsInput(legacyStore, requiresPropertyMigration, relationshipInputCursors);
        importer.doImport(Inputs.input(nodes, relationships, IdMappers.actual(), IdGenerators.fromInput(),
                Collectors.badCollector(badOutput, 0)));
        // During migration the batch importer doesn't necessarily write all entities, depending on
        // which stores need migration. The node, relationship and relationship group stores are always
        // written anyway and cannot be avoided with the importer, but delete the store files that weren't
        // written (left empty) so that we don't overwrite those in the real store directory later.
        Collection<StoreFile> storesToDeleteFromMigratedDirectory = new ArrayList<>();
        storesToDeleteFromMigratedDirectory.add(StoreFile.NEO_STORE);
        if (!requiresPropertyMigration) {
            // We didn't migrate properties, so the property stores in the migrated store are just empty/bogus
            storesToDeleteFromMigratedDirectory.addAll(asList(
                    StoreFile.PROPERTY_STORE,
                    StoreFile.PROPERTY_STRING_STORE,
                    StoreFile.PROPERTY_ARRAY_STORE));
        }
        if (!requiresDynamicStoreMigration) {
            // We didn't migrate labels (dynamic node labels) or any other dynamic store
            storesToDeleteFromMigratedDirectory.addAll(asList(
                    StoreFile.NODE_LABEL_STORE,
                    StoreFile.LABEL_TOKEN_STORE,
                    StoreFile.LABEL_TOKEN_NAMES_STORE,
                    StoreFile.RELATIONSHIP_TYPE_TOKEN_STORE,
                    StoreFile.RELATIONSHIP_TYPE_TOKEN_NAMES_STORE,
                    StoreFile.PROPERTY_KEY_TOKEN_STORE,
                    StoreFile.PROPERTY_KEY_TOKEN_NAMES_STORE,
                    StoreFile.SCHEMA_STORE));
        }
        StoreFile.fileOperation(DELETE, fileSystem, migrationDir, null, storesToDeleteFromMigratedDirectory,
                true, null, StoreFileType.values());
        // When migrating on a block device there might be some files only accessible via the page cache.
        try {
            Predicate<FileHandle> fileHandlePredicate = fileHandle -> storesToDeleteFromMigratedDirectory.stream()
                    .anyMatch(storeFile -> storeFile.fileName(StoreFileType.STORE)
                            .equals(fileHandle.getFile().getName()));
            pageCache.streamFilesRecursive(migrationDir).filter(fileHandlePredicate).forEach(FileHandle.HANDLE_DELETE);
        } catch (NoSuchFileException e) {
            // This means there were no files present only in the page cache, which is fine.
        }
    }
}
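The final try block exists because, when migrating on a block device, some store files may be reachable only through the page cache and are invisible to ordinary file-system deletes. The same pattern extracted into a small helper for clarity; a sketch that uses only the streamFilesRecursive API already shown in the snippet:

private static void deleteThroughPageCache(PageCache pageCache, File directory,
        Predicate<FileHandle> shouldDelete) throws IOException {
    try {
        // FileHandle.HANDLE_DELETE deletes each matching file via the page cache,
        // covering files that have no representation on the normal file system
        pageCache.streamFilesRecursive(directory)
                .filter(shouldDelete)
                .forEach(FileHandle.HANDLE_DELETE);
    } catch (NoSuchFileException e) {
        // nothing was mapped only through the page cache; nothing to delete
    }
}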