Use of org.neo4j.kernel.impl.storemigration.DirectRecordStoreMigrator in project neo4j by neo4j.
From the class StoreMigrator, the method prepareBatchImportMigration:
private void prepareBatchImportMigration(File storeDir, File migrationDir, RecordFormats oldFormat,
        RecordFormats newFormat) throws IOException {
    createStore(migrationDir, newFormat);

    // We use the batch importer for migrating the data, and we use it in a special way where we only
    // rewrite the stores that have actually changed format. We know those to be the node and relationship
    // stores. But since the batch importer also populates the counts store, all labels need to be read,
    // i.e. both those inlined in node records and those living in dynamic records. That's why we need to
    // copy the dynamic record store over before doing the "batch import".
    // Copying this file just as-is assumes that the format hasn't changed. If it has, we're in
    // a different situation, where we first need to migrate this file.
    // The token stores also need to be migrated because we use them as-is and ask for their high ids
    // when using the importer in the store migration scenario.
    StoreFile[] storesFilesToMigrate = {
            StoreFile.LABEL_TOKEN_STORE, StoreFile.LABEL_TOKEN_NAMES_STORE,
            StoreFile.PROPERTY_KEY_TOKEN_STORE, StoreFile.PROPERTY_KEY_TOKEN_NAMES_STORE,
            StoreFile.RELATIONSHIP_TYPE_TOKEN_STORE, StoreFile.RELATIONSHIP_TYPE_TOKEN_NAMES_STORE,
            StoreFile.NODE_LABEL_STORE};
    if (newFormat.dynamic().equals(oldFormat.dynamic())) {
        // We use the page cache for copying the STORE files since these might be on a block device.
        for (StoreFile file : storesFilesToMigrate) {
            File fromPath = new File(storeDir, file.fileName(StoreFileType.STORE));
            File toPath = new File(migrationDir, file.fileName(StoreFileType.STORE));
            int pageSize = pageCache.pageSize();
            try (PagedFile fromFile = pageCache.map(fromPath, pageSize);
                 PagedFile toFile = pageCache.map(toPath, pageSize, StandardOpenOption.CREATE);
                 PageCursor fromCursor = fromFile.io(0L, PagedFile.PF_SHARED_READ_LOCK);
                 PageCursor toCursor = toFile.io(0L, PagedFile.PF_SHARED_WRITE_LOCK)) {
                while (fromCursor.next()) {
                    toCursor.next();
                    do {
                        fromCursor.copyTo(0, toCursor, 0, pageSize);
                    } while (fromCursor.shouldRetry());
                }
            } catch (NoSuchFileException e) {
                // It is okay for the file to not be there.
            }
        }

        // The ID files are to be kept on the normal file system, hence we use fileOperation to copy them.
        StoreFile.fileOperation(COPY, fileSystem, storeDir, migrationDir, Arrays.asList(storesFilesToMigrate),
                true, // OK if it's not there (1.9)
                ExistingTargetStrategy.FAIL, StoreFileType.ID);
    } else {
        // Migrate all token stores, the schema store and the dynamic node label ids, keeping their ids intact
        DirectRecordStoreMigrator migrator = new DirectRecordStoreMigrator(pageCache, fileSystem, config);
        StoreType[] storesToMigrate = {
                StoreType.LABEL_TOKEN, StoreType.LABEL_TOKEN_NAME,
                StoreType.PROPERTY_KEY_TOKEN, StoreType.PROPERTY_KEY_TOKEN_NAME,
                StoreType.RELATIONSHIP_TYPE_TOKEN, StoreType.RELATIONSHIP_TYPE_TOKEN_NAME,
                StoreType.NODE_LABEL, StoreType.SCHEMA};

        // Migrate these stores silently because they are usually very small
        MigrationProgressMonitor.Section section = SilentMigrationProgressMonitor.NO_OP_SECTION;
        migrator.migrate(storeDir, oldFormat, migrationDir, newFormat, section, storesToMigrate, StoreType.NODE);
    }
}
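When the dynamic record format is unchanged between versions, the method above copies each store file page by page through the page cache rather than through the normal file system, since the store may live on a block device. The helper below isolates that copy loop as a standalone sketch: the class and method names are hypothetical, while PageCache, PagedFile and PageCursor are the real Neo4j page cache types used above (import paths per the Neo4j 3.x source tree; they may differ in other versions).

import java.io.File;
import java.io.IOException;
import java.nio.file.NoSuchFileException;
import java.nio.file.StandardOpenOption;
import org.neo4j.io.pagecache.PageCache;
import org.neo4j.io.pagecache.PageCursor;
import org.neo4j.io.pagecache.PagedFile;

// Hypothetical helper, not part of Neo4j: copies one store file page by page
// through the page cache, mirroring the loop in prepareBatchImportMigration.
class PageCacheFileCopier {
    static void copy(PageCache pageCache, File fromPath, File toPath) throws IOException {
        int pageSize = pageCache.pageSize();
        try (PagedFile fromFile = pageCache.map(fromPath, pageSize);
             PagedFile toFile = pageCache.map(toPath, pageSize, StandardOpenOption.CREATE);
             PageCursor fromCursor = fromFile.io(0L, PagedFile.PF_SHARED_READ_LOCK);
             PageCursor toCursor = toFile.io(0L, PagedFile.PF_SHARED_WRITE_LOCK)) {
            while (fromCursor.next()) { // advance to the next source page
                toCursor.next(); // extend the target file by one page
                do {
                    // A read under a shared lock may race with concurrent page
                    // eviction, so repeat the copy until shouldRetry() reports
                    // a consistent read of the source page.
                    fromCursor.copyTo(0, toCursor, 0, pageSize);
                } while (fromCursor.shouldRetry());
            }
        } catch (NoSuchFileException e) {
            // The source store file may legitimately be absent; skip it.
        }
    }
}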
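When the dynamic record format has changed, the files cannot be copied as-is, so DirectRecordStoreMigrator rewrites the affected stores record by record while keeping record ids intact. Below is a minimal sketch of driving the migrator on its own, assuming the same pageCache, fileSystem and config collaborators that StoreMigrator holds; the wrapper class and method are hypothetical, and the import paths again follow the Neo4j 3.x source tree.

import java.io.File;
import java.io.IOException;
import org.neo4j.io.fs.FileSystemAbstraction;
import org.neo4j.io.pagecache.PageCache;
import org.neo4j.kernel.configuration.Config;
import org.neo4j.kernel.impl.store.StoreType;
import org.neo4j.kernel.impl.store.format.RecordFormats;
import org.neo4j.kernel.impl.storemigration.DirectRecordStoreMigrator;
import org.neo4j.kernel.impl.storemigration.monitoring.MigrationProgressMonitor;
import org.neo4j.kernel.impl.storemigration.monitoring.SilentMigrationProgressMonitor;

// Hypothetical driver, not part of Neo4j: migrates the token stores from
// oldFormat to newFormat, leaving their ids untouched.
class TokenStoreMigrationSketch {
    static void migrateTokenStores(PageCache pageCache, FileSystemAbstraction fileSystem, Config config,
            File storeDir, File migrationDir, RecordFormats oldFormat, RecordFormats newFormat)
            throws IOException {
        DirectRecordStoreMigrator migrator = new DirectRecordStoreMigrator(pageCache, fileSystem, config);

        // Each token store is migrated together with its dynamic name store;
        // the method above additionally migrates NODE_LABEL and SCHEMA.
        StoreType[] storesToMigrate = {
                StoreType.LABEL_TOKEN, StoreType.LABEL_TOKEN_NAME,
                StoreType.PROPERTY_KEY_TOKEN, StoreType.PROPERTY_KEY_TOKEN_NAME,
                StoreType.RELATIONSHIP_TYPE_TOKEN, StoreType.RELATIONSHIP_TYPE_TOKEN_NAME};

        // These stores are usually tiny, so no progress reporting is needed.
        MigrationProgressMonitor.Section section = SilentMigrationProgressMonitor.NO_OP_SECTION;

        // The trailing varargs name additional store types that must be open
        // during the migration; the method above passes StoreType.NODE.
        migrator.migrate(storeDir, oldFormat, migrationDir, newFormat, section, storesToMigrate, StoreType.NODE);
    }
}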