Usage example of org.neo4j.kernel.impl.store.StoreType in the neo4j/neo4j project — from class ToFileStoreWriterTest, method shouldLetPageCacheHandleRecordStoresAndNativeLabelScanStoreFiles:
@Test
public void shouldLetPageCacheHandleRecordStoresAndNativeLabelScanStoreFiles() throws Exception {
    // GIVEN: a store writer backed by a spied page cache, so the test helper can
    // verify that writes were routed through the cache rather than the file system
    List<FileMoveAction> moveActions = new ArrayList<>();
    PageCache cache = spy(pageCacheRule.getPageCache(fs));
    ToFileStoreWriter storeWriter = new ToFileStoreWriter(directory.absolutePath(), fs, new StoreCopyClient.Monitor.Adapter(), cache, moveActions);
    ByteBuffer buffer = ByteBuffer.allocate(128);

    // WHEN/THEN: every record store file, and the native label scan store file,
    // must be written through the page cache
    for (StoreType storeType : StoreType.values()) {
        if (!storeType.isRecordStore()) {
            continue;
        }
        writeAndVerifyWrittenThroughPageCache(cache, storeWriter, buffer, storeType.getStoreFile().fileName(STORE));
    }
    writeAndVerifyWrittenThroughPageCache(cache, storeWriter, buffer, NativeLabelScanStore.FILE_NAME);
}
Usage example of org.neo4j.kernel.impl.store.StoreType in the neo4j/neo4j project — from class StreamToDiskTest, method shouldLetPageCacheHandleRecordStoresAndNativeLabelScanStoreFiles:
@Test
public void shouldLetPageCacheHandleRecordStoresAndNativeLabelScanStoreFiles() throws Exception {
    // GIVEN: spy on the page cache so the helper can verify writes went through it
    PageCache cache = spy(pageCacheRule.getPageCache(fs));
    Monitors monitors = new Monitors();
    try (StreamToDisk streamToDisk = new StreamToDisk(directory.absolutePath(), fs, cache, monitors)) {
        ByteBuffer buffer = ByteBuffer.allocate(128);

        // WHEN/THEN: each record store file, plus the native label scan store
        // file, must be written through the page cache
        for (StoreType storeType : StoreType.values()) {
            if (!storeType.isRecordStore()) {
                continue;
            }
            String storeFileName = storeType.getStoreFile().fileName(STORE);
            writeAndVerifyWrittenThroughPageCache(cache, streamToDisk, buffer, storeFileName);
        }
        writeAndVerifyWrittenThroughPageCache(cache, streamToDisk, buffer, NativeLabelScanStore.FILE_NAME);
    }
}
Usage example of org.neo4j.kernel.impl.store.StoreType in the neo4j/neo4j project — from class StoreMigrator, method prepareBatchImportMigration:
/**
 * Prepares {@code migrationDir} for the batch-import-based store migration: creates a fresh store
 * in the new format, then copies (same dynamic format) or migrates (different dynamic format) the
 * token stores and the dynamic node-label store that the importer reads but does not rewrite.
 *
 * @param storeDir     directory of the existing store being migrated from
 * @param migrationDir directory the migrated store is assembled in
 * @param oldFormat    record format of the existing store
 * @param newFormat    record format being migrated to
 * @throws IOException on failure copying or migrating store files
 */
private void prepareBatchImportMigration(File storeDir, File migrationDir, RecordFormats oldFormat, RecordFormats newFormat) throws IOException {
createStore(migrationDir, newFormat);
// We use the batch importer for migrating the data, and we use it in a special way where we only
// rewrite the stores that have actually changed format. We know that to be node and relationship
// stores. Although since the batch importer also populates the counts store, all labels need to
// be read, i.e. both inlined and those existing in dynamic records. That's why we need to copy
// that dynamic record store over before doing the "batch import".
// Copying this file just as-is assumes that the format hasn't change. If that happens we're in
// a different situation, where we first need to migrate this file.
// The token stores also need to be migrated because we use those as-is and ask for their high ids
// when using the importer in the store migration scenario.
StoreFile[] storesFilesToMigrate = { StoreFile.LABEL_TOKEN_STORE, StoreFile.LABEL_TOKEN_NAMES_STORE, StoreFile.PROPERTY_KEY_TOKEN_STORE, StoreFile.PROPERTY_KEY_TOKEN_NAMES_STORE, StoreFile.RELATIONSHIP_TYPE_TOKEN_STORE, StoreFile.RELATIONSHIP_TYPE_TOKEN_NAMES_STORE, StoreFile.NODE_LABEL_STORE };
// Dynamic record format unchanged => a raw page-by-page copy of the files is valid.
if (newFormat.dynamic().equals(oldFormat.dynamic())) {
// We use the page cache for copying the STORE files since these might be on a block device.
for (StoreFile file : storesFilesToMigrate) {
File fromPath = new File(storeDir, file.fileName(StoreFileType.STORE));
File toPath = new File(migrationDir, file.fileName(StoreFileType.STORE));
int pageSize = pageCache.pageSize();
try (PagedFile fromFile = pageCache.map(fromPath, pageSize);
PagedFile toFile = pageCache.map(toPath, pageSize, StandardOpenOption.CREATE);
PageCursor fromCursor = fromFile.io(0L, PagedFile.PF_SHARED_READ_LOCK);
PageCursor toCursor = toFile.io(0L, PagedFile.PF_SHARED_WRITE_LOCK)) {
while (fromCursor.next()) {
// NOTE(review): the return value of toCursor.next() is ignored — presumably a
// write-locked cursor always advances successfully; confirm against PageCursor docs.
toCursor.next();
do {
fromCursor.copyTo(0, toCursor, 0, pageSize);
// shouldRetry() implements the optimistic-read protocol: repeat the copy
// until the page was not concurrently modified during the read.
} while (fromCursor.shouldRetry());
}
} catch (NoSuchFileException e) {
// It is okay for the file to not be there.
}
}
// The ID files are to be kept on the normal file system, hence we use fileOperation to copy them.
StoreFile.fileOperation(COPY, fileSystem, storeDir, migrationDir, Arrays.asList(storesFilesToMigrate), // OK if it's not there (1.9)
true, ExistingTargetStrategy.FAIL, StoreFileType.ID);
} else {
// Migrate all token stores, schema store and dynamic node label ids, keeping their ids intact
DirectRecordStoreMigrator migrator = new DirectRecordStoreMigrator(pageCache, fileSystem, config);
StoreType[] storesToMigrate = { StoreType.LABEL_TOKEN, StoreType.LABEL_TOKEN_NAME, StoreType.PROPERTY_KEY_TOKEN, StoreType.PROPERTY_KEY_TOKEN_NAME, StoreType.RELATIONSHIP_TYPE_TOKEN, StoreType.RELATIONSHIP_TYPE_TOKEN_NAME, StoreType.NODE_LABEL, StoreType.SCHEMA };
// Migrate these stores silently because they are usually very small
MigrationProgressMonitor.Section section = SilentMigrationProgressMonitor.NO_OP_SECTION;
// NOTE(review): the trailing StoreType.NODE argument's meaning is not visible from this
// snippet — check DirectRecordStoreMigrator.migrate's signature for what it anchors.
migrator.migrate(storeDir, oldFormat, migrationDir, newFormat, section, storesToMigrate, StoreType.NODE);
}
}
Usage example of org.neo4j.kernel.impl.store.StoreType in the neo4j/neo4j project — from class RecordStorageEngineTest, method verifyMeta:
/**
 * Asserts that the given store file metadata is consistent: it carries a store type, its file
 * name matches that type's expected STORE file name (either rotation file for the counts store),
 * the file exists (non-counts stores), and its record size is positive or NO_RECORD_SIZE.
 */
private void verifyMeta(StoreFileMetadata meta) {
    final Optional<StoreType> storeType = meta.storeType();
    if (!storeType.isPresent()) {
        fail("Assumed all files to have a store type");
    }
    final StoreType type = storeType.get();
    final File storeFile = meta.file();
    final String name = storeFile.getName();
    if (type == StoreType.COUNTS) {
        // The counts store rotates between two files; either name is acceptable
        assertThat(name, anyOf(
                equalTo(StoreFile.COUNTS_STORE_LEFT.fileName(StoreFileType.STORE)),
                equalTo(StoreFile.COUNTS_STORE_RIGHT.fileName(StoreFileType.STORE))));
    } else {
        assertThat(name, equalTo(type.getStoreFile().fileName(StoreFileType.STORE)));
        assertTrue("File does not exist " + storeFile.getAbsolutePath(), fsRule.get().fileExists(storeFile));
    }
    final int recordSize = meta.recordSize();
    assertTrue(recordSize == RecordFormat.NO_RECORD_SIZE || recordSize > 0);
}
Aggregations