Usage example of org.neo4j.kernel.impl.storemigration.StoreFile in the neo4j project: method moveMigratedFiles of class StoreMigrator.
// Moves the migrated store files from the migration directory into the real store
// directory. Handles both plain file-system files and files that are reachable only
// through the page cache (e.g. when the store lives on a block device), then strips
// version trailers if needed, updates neostore fields, prunes old logs and, when
// leaving a trailer-carrying format, writes a check point so recovery works.
@Override
public void moveMigratedFiles(File migrationDir, File storeDir, String versionToUpgradeFrom, String versionToUpgradeTo) throws IOException {
// Move the migrated ones into the store directory
StoreFile.fileOperation(MOVE, fileSystem, migrationDir, storeDir, StoreFile.currentStoreFiles(), // allow to skip non existent source files
true, // allow to overwrite target files
ExistingTargetStrategy.OVERWRITE, StoreFileType.values());
// move the files with the page cache: any page-cache file under migrationDir whose
// name matches a current store file is unmapped first (rename requires no open
// mapping) and then renamed into storeDir, replacing any existing target.
try {
Iterable<FileHandle> fileHandles = pageCache.streamFilesRecursive(migrationDir)::iterator;
for (FileHandle fh : fileHandles) {
// True when this page-cache file has the same name as a known store file.
Predicate<StoreFile> predicate = storeFile -> storeFile.fileName(StoreFileType.STORE).equals(fh.getFile().getName());
if (StreamSupport.stream(StoreFile.currentStoreFiles().spliterator(), false).anyMatch(predicate)) {
// Close any existing mapping before renaming the underlying file.
final Optional<PagedFile> optionalPagedFile = pageCache.getExistingMapping(fh.getFile());
if (optionalPagedFile.isPresent()) {
optionalPagedFile.get().close();
}
fh.rename(new File(storeDir, fh.getFile().getName()), StandardCopyOption.REPLACE_EXISTING);
}
}
} catch (NoSuchFileException e) {
// This means that we had no files only present in the page cache, this is fine.
}
RecordFormats oldFormat = selectForVersion(versionToUpgradeFrom);
RecordFormats newFormat = selectForVersion(versionToUpgradeTo);
// Moving from a format that embeds version trailers in store files to one that
// does not means the trailers must be removed from the moved files.
boolean movingAwayFromVersionTrailers = oldFormat.hasCapability(VERSION_TRAILERS) && !newFormat.hasCapability(VERSION_TRAILERS);
if (movingAwayFromVersionTrailers) {
StoreFile.removeTrailers(versionToUpgradeFrom, fileSystem, storeDir, pageCache.pageSize());
}
File neoStore = new File(storeDir, MetaDataStore.DEFAULT_NAME);
// Read these BEFORE updateOrAddNeoStoreFieldsAsPartOfMigration below; they feed
// the check point written at the end of this method.
long logVersion = MetaDataStore.getRecord(pageCache, neoStore, Position.LOG_VERSION);
long lastCommittedTx = MetaDataStore.getRecord(pageCache, neoStore, Position.LAST_TRANSACTION_ID);
// update or add upgrade id and time and other necessary neostore records
updateOrAddNeoStoreFieldsAsPartOfMigration(migrationDir, storeDir, versionToUpgradeTo);
// delete old logs
legacyLogs.deleteUnusedLogFiles(storeDir);
if (movingAwayFromVersionTrailers) {
// write a check point in the log in order to make recovery work in the newer version
new StoreMigratorCheckPointer(storeDir, fileSystem).checkPoint(logVersion, lastCommittedTx);
}
}
Usage example of org.neo4j.kernel.impl.storemigration.StoreFile in the neo4j project: method rebuildCounts of class StoreMigrator.
// Rebuilds the counts store from scratch, but only when migrating from the 2.1 or
// 2.2 store versions; for any other source version this method is a no-op.
@Override
public void rebuildCounts(File storeDir, String versionToMigrateFrom, String versionToMigrateTo) throws IOException {
boolean rebuildRequired = StandardV2_1.STORE_VERSION.equals(versionToMigrateFrom) || StandardV2_2.STORE_VERSION.equals(versionToMigrateFrom);
if (!rebuildRequired) {
return;
}
// Delete both existing counts store files before recreating them from scratch.
Iterable<StoreFile> countsFiles = Iterables.iterable(StoreFile.COUNTS_STORE_LEFT, StoreFile.COUNTS_STORE_RIGHT);
StoreFile.fileOperation(DELETE, fileSystem, storeDir, storeDir, countsFiles, true, null, StoreFileType.STORE);
// Rebuild up to the last committed transaction id recorded in the neostore.
File metaDataStoreFile = new File(storeDir, DEFAULT_NAME);
long lastCommittedTxId = MetaDataStore.getRecord(pageCache, metaDataStoreFile, Position.LAST_TRANSACTION_ID);
rebuildCountsFromScratch(storeDir, lastCommittedTxId, pageCache);
}
Usage example of org.neo4j.kernel.impl.storemigration.StoreFile in the neo4j project: method prepareBatchImportMigration of class StoreMigrator.
// Prepares the migration directory for the batch-importer-based migration: creates
// an empty store in the new format, then brings over the stores the importer reads
// but does not rewrite itself (token stores and dynamic node labels). If the dynamic
// record format changed, those stores are properly migrated instead of copied.
private void prepareBatchImportMigration(File storeDir, File migrationDir, RecordFormats oldFormat, RecordFormats newFormat) throws IOException {
createStore(migrationDir, newFormat);
// We use the batch importer for migrating the data, and we use it in a special way where we only
// rewrite the stores that have actually changed format. We know that to be node and relationship
// stores. Although since the batch importer also populates the counts store, all labels need to
// be read, i.e. both inlined and those existing in dynamic records. That's why we need to copy
// that dynamic record store over before doing the "batch import".
// Copying this file just as-is assumes that the format hasn't change. If that happens we're in
// a different situation, where we first need to migrate this file.
// The token stores also need to be migrated because we use those as-is and ask for their high ids
// when using the importer in the store migration scenario.
StoreFile[] storesFilesToMigrate = { StoreFile.LABEL_TOKEN_STORE, StoreFile.LABEL_TOKEN_NAMES_STORE, StoreFile.PROPERTY_KEY_TOKEN_STORE, StoreFile.PROPERTY_KEY_TOKEN_NAMES_STORE, StoreFile.RELATIONSHIP_TYPE_TOKEN_STORE, StoreFile.RELATIONSHIP_TYPE_TOKEN_NAMES_STORE, StoreFile.NODE_LABEL_STORE };
if (newFormat.dynamic().equals(oldFormat.dynamic())) {
// We use the page cache for copying the STORE files since these might be on a block device.
for (StoreFile file : storesFilesToMigrate) {
File fromPath = new File(storeDir, file.fileName(StoreFileType.STORE));
File toPath = new File(migrationDir, file.fileName(StoreFileType.STORE));
int pageSize = pageCache.pageSize();
try (PagedFile fromFile = pageCache.map(fromPath, pageSize);
PagedFile toFile = pageCache.map(toPath, pageSize, StandardOpenOption.CREATE);
PageCursor fromCursor = fromFile.io(0L, PagedFile.PF_SHARED_READ_LOCK);
PageCursor toCursor = toFile.io(0L, PagedFile.PF_SHARED_WRITE_LOCK)) {
// Copy page by page; the shouldRetry loop re-copies a page if its content
// changed while being read under the shared read lock.
// NOTE(review): the return value of toCursor.next() is ignored — presumably
// a write cursor can always advance (extending the file); confirm.
while (fromCursor.next()) {
toCursor.next();
do {
fromCursor.copyTo(0, toCursor, 0, pageSize);
} while (fromCursor.shouldRetry());
}
} catch (NoSuchFileException e) {
// It is okay for the file to not be there.
}
}
// The ID files are to be kept on the normal file system, hence we use fileOperation to copy them.
StoreFile.fileOperation(COPY, fileSystem, storeDir, migrationDir, Arrays.asList(storesFilesToMigrate), // OK if it's not there (1.9)
true, ExistingTargetStrategy.FAIL, StoreFileType.ID);
} else {
// Migrate all token stores, schema store and dynamic node label ids, keeping their ids intact
DirectRecordStoreMigrator migrator = new DirectRecordStoreMigrator(pageCache, fileSystem, config);
StoreType[] storesToMigrate = { StoreType.LABEL_TOKEN, StoreType.LABEL_TOKEN_NAME, StoreType.PROPERTY_KEY_TOKEN, StoreType.PROPERTY_KEY_TOKEN_NAME, StoreType.RELATIONSHIP_TYPE_TOKEN, StoreType.RELATIONSHIP_TYPE_TOKEN_NAME, StoreType.NODE_LABEL, StoreType.SCHEMA };
// Migrate these stores silently because they are usually very small
MigrationProgressMonitor.Section section = SilentMigrationProgressMonitor.NO_OP_SECTION;
migrator.migrate(storeDir, oldFormat, migrationDir, newFormat, section, storesToMigrate, StoreType.NODE);
}
}
Usage example of org.neo4j.kernel.impl.storemigration.StoreFile in the neo4j project: method migrateWithBatchImporter of class StoreMigrator.
// Migrates node/relationship data into migrationDir using the parallel batch
// importer, feeding it the legacy store's records as importer input. Afterwards it
// deletes from migrationDir any store files the importer produced but that were not
// actually migrated (empty/bogus), so they won't overwrite real files when the
// migration directory is later moved over the store directory.
private void migrateWithBatchImporter(File storeDir, File migrationDir, long lastTxId, long lastTxChecksum, long lastTxLogVersion, long lastTxLogByteOffset, MigrationProgressMonitor.Section progressMonitor, RecordFormats oldFormat, RecordFormats newFormat) throws IOException {
prepareBatchImportMigration(storeDir, migrationDir, oldFormat, newFormat);
// Property migration is implied by either a changed property format or a changed
// dynamic record format (dynamic records back property values).
boolean requiresDynamicStoreMigration = !newFormat.dynamic().equals(oldFormat.dynamic());
boolean requiresPropertyMigration = !newFormat.property().equals(oldFormat.property()) || requiresDynamicStoreMigration;
// Collector output for input entities the importer rejects.
File badFile = new File(storeDir, Configuration.BAD_FILE_NAME);
try (NeoStores legacyStore = instantiateLegacyStore(oldFormat, storeDir);
RecordCursors nodeInputCursors = new RecordCursors(legacyStore);
RecordCursors relationshipInputCursors = new RecordCursors(legacyStore);
OutputStream badOutput = new BufferedOutputStream(new FileOutputStream(badFile, false))) {
Configuration importConfig = new Configuration.Overridden(config);
AdditionalInitialIds additionalInitialIds = readAdditionalIds(lastTxId, lastTxChecksum, lastTxLogVersion, lastTxLogByteOffset);
// We have to make sure to keep the token ids if we're migrating properties/labels
BatchImporter importer = new ParallelBatchImporter(migrationDir.getAbsoluteFile(), fileSystem, pageCache, importConfig, logService, withDynamicProcessorAssignment(migrationBatchImporterMonitor(legacyStore, progressMonitor, importConfig), importConfig), additionalInitialIds, config, newFormat);
InputIterable<InputNode> nodes = legacyNodesAsInput(legacyStore, requiresPropertyMigration, nodeInputCursors);
InputIterable<InputRelationship> relationships = legacyRelationshipsAsInput(legacyStore, requiresPropertyMigration, relationshipInputCursors);
importer.doImport(Inputs.input(nodes, relationships, IdMappers.actual(), IdGenerators.fromInput(), Collectors.badCollector(badOutput, 0)));
// During migration the batch importer doesn't necessarily writes all entities, depending on
// which stores needs migration. Node, relationship, relationship group stores are always written
// anyways and cannot be avoided with the importer, but delete the store files that weren't written
// (left empty) so that we don't overwrite those in the real store directory later.
Collection<StoreFile> storesToDeleteFromMigratedDirectory = new ArrayList<>();
storesToDeleteFromMigratedDirectory.add(StoreFile.NEO_STORE);
if (!requiresPropertyMigration) {
// We didn't migrate properties, so the property stores in the migrated store are just empty/bogus
storesToDeleteFromMigratedDirectory.addAll(asList(StoreFile.PROPERTY_STORE, StoreFile.PROPERTY_STRING_STORE, StoreFile.PROPERTY_ARRAY_STORE));
}
if (!requiresDynamicStoreMigration) {
// We didn't migrate labels (dynamic node labels) or any other dynamic store
storesToDeleteFromMigratedDirectory.addAll(asList(StoreFile.NODE_LABEL_STORE, StoreFile.LABEL_TOKEN_STORE, StoreFile.LABEL_TOKEN_NAMES_STORE, StoreFile.RELATIONSHIP_TYPE_TOKEN_STORE, StoreFile.RELATIONSHIP_TYPE_TOKEN_NAMES_STORE, StoreFile.PROPERTY_KEY_TOKEN_STORE, StoreFile.PROPERTY_KEY_TOKEN_NAMES_STORE, StoreFile.SCHEMA_STORE));
}
// Delete from the normal file system first (skip missing sources, no target dir).
StoreFile.fileOperation(DELETE, fileSystem, migrationDir, null, storesToDeleteFromMigratedDirectory, true, null, StoreFileType.values());
// When migrating on a block device there might be some files only accessible via the page cache.
try {
// Delete page-cache-only files whose names match the stores selected above.
Predicate<FileHandle> fileHandlePredicate = fileHandle -> storesToDeleteFromMigratedDirectory.stream().anyMatch(storeFile -> storeFile.fileName(StoreFileType.STORE).equals(fileHandle.getFile().getName()));
pageCache.streamFilesRecursive(migrationDir).filter(fileHandlePredicate).forEach(FileHandle.HANDLE_DELETE);
} catch (NoSuchFileException e) {
// This means that we had no files only present in the page cache, this is fine.
}
}
}
Usage example of org.neo4j.kernel.impl.storemigration.StoreFile in the neo4j project: test method shouldCopyStoreFiles of class BackupServiceIT.
// Verifies that a full backup copies every store file into the backup directory and
// that the backed-up store is logically identical to the source database.
@Test
public void shouldCopyStoreFiles() throws Throwable {
// given
defaultBackupPortHostParams();
GraphDatabaseAPI db = dbRule.getGraphDatabaseAPI();
createAndIndexNode(db, 1);
// when
backupService().doFullBackup(BACKUP_HOST, backupPort, backupDir.getAbsoluteFile(), ConsistencyCheck.NONE, dbRule.getConfigCopy(), BackupClient.BIG_READ_TIMEOUT, false);
db.shutdown();
// then
File[] backedUpFiles = fileSystem.listFiles(backupDir);
assertTrue(backedUpFiles.length > 0);
for (final StoreFile expected : StoreFile.values()) {
boolean isCountsStoreFile = expected == COUNTS_STORE_LEFT || expected == COUNTS_STORE_RIGHT;
if (isCountsStoreFile) {
// Only one of the two counts store files may be present at a time, so either one satisfies the check.
assertThat(backedUpFiles, anyOf(hasFile(COUNTS_STORE_LEFT.storeFileName()), hasFile(COUNTS_STORE_RIGHT.storeFileName())));
} else {
assertThat(backedUpFiles, hasFile(expected.storeFileName()));
}
}
assertEquals(getDbRepresentation(), getBackupDbRepresentation());
}
Aggregations