use of org.neo4j.kernel.impl.store.StoreType in project neo4j by neo4j.
In class StreamToDiskTest, the method shouldLetPageCacheHandleRecordStoresAndNativeLabelScanStoreFiles:
@Test
public void shouldLetPageCacheHandleRecordStoresAndNativeLabelScanStoreFiles() throws Exception {
    // GIVEN a spied page cache so that writes routed through it can be verified
    PageCache cache = spy(pageCacheRule.getPageCache(fs));
    Monitors monitors = new Monitors();
    try (StreamToDisk streamToDisk = new StreamToDisk(directory.absolutePath(), fs, cache, monitors)) {
        ByteBuffer buffer = ByteBuffer.allocate(128);
        // WHEN writing every record store file, THEN each must go through the page cache
        for (StoreType storeType : StoreType.values()) {
            if (!storeType.isRecordStore()) {
                continue; // non-record stores are not expected to be page-cache backed here
            }
            writeAndVerifyWrittenThroughPageCache(cache, streamToDisk, buffer, storeType.getStoreFile().fileName(STORE));
        }
        // ...and so must the native label scan store file
        writeAndVerifyWrittenThroughPageCache(cache, streamToDisk, buffer, NativeLabelScanStore.FILE_NAME);
    }
}
use of org.neo4j.kernel.impl.store.StoreType in project neo4j by neo4j.
In class StoreMigrator, the method prepareBatchImportMigration:
/**
 * Prepares the migration directory for a batch-import based store migration by creating an
 * empty store in the new format and carrying over the stores the importer needs to read as-is
 * (token stores, dynamic node-label store). Depending on whether the dynamic record format
 * changed between the two formats, these stores are either copied verbatim through the page
 * cache or migrated record-by-record.
 *
 * @param storeDir     directory of the existing store being migrated from
 * @param migrationDir directory the migrated store is being assembled in
 * @param oldFormat    record formats of the existing store
 * @param newFormat    record formats to migrate to
 * @throws IOException on failure to read/write store files
 */
private void prepareBatchImportMigration(File storeDir, File migrationDir, RecordFormats oldFormat, RecordFormats newFormat) throws IOException {
    createStore(migrationDir, newFormat);
    // We use the batch importer for migrating the data, and we use it in a special way where we only
    // rewrite the stores that have actually changed format. We know that to be node and relationship
    // stores. Although since the batch importer also populates the counts store, all labels need to
    // be read, i.e. both inlined and those existing in dynamic records. That's why we need to copy
    // that dynamic record store over before doing the "batch import".
    // Copying this file just as-is assumes that the format hasn't change. If that happens we're in
    // a different situation, where we first need to migrate this file.
    // The token stores also need to be migrated because we use those as-is and ask for their high ids
    // when using the importer in the store migration scenario.
    StoreFile[] storesFilesToMigrate = { StoreFile.LABEL_TOKEN_STORE, StoreFile.LABEL_TOKEN_NAMES_STORE, StoreFile.PROPERTY_KEY_TOKEN_STORE, StoreFile.PROPERTY_KEY_TOKEN_NAMES_STORE, StoreFile.RELATIONSHIP_TYPE_TOKEN_STORE, StoreFile.RELATIONSHIP_TYPE_TOKEN_NAMES_STORE, StoreFile.NODE_LABEL_STORE };
    if (newFormat.dynamic().equals(oldFormat.dynamic())) {
        // Dynamic record format unchanged: a byte-for-byte copy is valid.
        // We use the page cache for copying the STORE files since these might be on a block device.
        for (StoreFile file : storesFilesToMigrate) {
            File fromPath = new File(storeDir, file.fileName(StoreFileType.STORE));
            File toPath = new File(migrationDir, file.fileName(StoreFileType.STORE));
            int pageSize = pageCache.pageSize();
            try (PagedFile fromFile = pageCache.map(fromPath, pageSize);
                PagedFile toFile = pageCache.map(toPath, pageSize, StandardOpenOption.CREATE);
                PageCursor fromCursor = fromFile.io(0L, PagedFile.PF_SHARED_READ_LOCK);
                PageCursor toCursor = toFile.io(0L, PagedFile.PF_SHARED_WRITE_LOCK)) {
                // Copy page by page; the shouldRetry() loop re-reads the source page until a
                // consistent snapshot was copied (required for optimistic read locks).
                while (fromCursor.next()) {
                    toCursor.next();
                    do {
                        fromCursor.copyTo(0, toCursor, 0, pageSize);
                    } while (fromCursor.shouldRetry());
                }
            } catch (NoSuchFileException e) {
                // It is okay for the file to not be there.
            }
        }
        // The ID files are to be kept on the normal file system, hence we use fileOperation to copy them.
        StoreFile.fileOperation(COPY, fileSystem, storeDir, migrationDir, Arrays.asList(storesFilesToMigrate), // OK if it's not there (1.9)
            true, ExistingTargetStrategy.FAIL, StoreFileType.ID);
    } else {
        // Migrate all token stores, schema store and dynamic node label ids, keeping their ids intact
        DirectRecordStoreMigrator migrator = new DirectRecordStoreMigrator(pageCache, fileSystem, config);
        StoreType[] storesToMigrate = { StoreType.LABEL_TOKEN, StoreType.LABEL_TOKEN_NAME, StoreType.PROPERTY_KEY_TOKEN, StoreType.PROPERTY_KEY_TOKEN_NAME, StoreType.RELATIONSHIP_TYPE_TOKEN, StoreType.RELATIONSHIP_TYPE_TOKEN_NAME, StoreType.NODE_LABEL, StoreType.SCHEMA };
        // Migrate these stores silently because they are usually very small
        MigrationProgressMonitor.Section section = SilentMigrationProgressMonitor.NO_OP_SECTION;
        migrator.migrate(storeDir, oldFormat, migrationDir, newFormat, section, storesToMigrate, StoreType.NODE);
    }
}
use of org.neo4j.kernel.impl.store.StoreType in project neo4j by neo4j.
In class RecordStorageEngineTest, the method verifyMeta:
/**
 * Verifies that a store file's metadata matches its expected on-disk file name and that
 * its record size is sane (either the sentinel NO_RECORD_SIZE or a positive value).
 */
private void verifyMeta(StoreFileMetadata meta) {
    final Optional<StoreType> maybeType = meta.storeType();
    if (!maybeType.isPresent()) {
        fail("Assumed all files to have a store type");
        return;
    }
    final StoreType storeType = maybeType.get();
    final File storeFile = meta.file();
    final String name = storeFile.getName();
    if (storeType == StoreType.COUNTS) {
        // The counts store rotates between a left and a right file; either name is valid.
        assertThat(name, anyOf(
                equalTo(StoreFile.COUNTS_STORE_LEFT.fileName(StoreFileType.STORE)),
                equalTo(StoreFile.COUNTS_STORE_RIGHT.fileName(StoreFileType.STORE))));
    } else {
        assertThat(name, equalTo(storeType.getStoreFile().fileName(StoreFileType.STORE)));
        assertTrue("File does not exist " + storeFile.getAbsolutePath(), fsRule.get().fileExists(storeFile));
    }
    final int recordSize = meta.recordSize();
    assertTrue(recordSize == RecordFormat.NO_RECORD_SIZE || recordSize > 0);
}
use of org.neo4j.kernel.impl.store.StoreType in project neo4j by neo4j.
In class ImportCommandTest, the method shouldKeepStoreFilesAfterFailedImport:
@Test
void shouldKeepStoreFilesAfterFailedImport() throws Exception {
    // GIVEN
    List<String> nodeIds = nodeIds();
    Configuration config = Configuration.COMMAS;
    final var configFile = prepareDefaultConfigFile();
    // WHEN the data file contains more columns than the header file declares,
    // the import must fail with an InputException.
    // Fix: the thrown exception was previously captured into an unused local (`var e = ...`);
    // drop the dead binding and the redundant .toString() calls in string concatenation.
    int extraColumns = 3;
    assertThrows(InputException.class, () -> runImport(
            "--additional-config=" + configFile.toAbsolutePath(),
            "--nodes",
            nodeHeader(config).toAbsolutePath() + ","
                    + nodeData(false, config, nodeIds, TRUE, Charset.defaultCharset(), extraColumns).toAbsolutePath()));
    // THEN the (possibly inconsistent) store files should still be there...
    for (StoreType storeType : StoreType.values()) {
        assertTrue(Files.exists(databaseLayout.file(storeType.getDatabaseFile())));
    }
    // ...and the user should have been warned about them on stderr.
    List<String> errorLines = suppressOutput.getErrorVoice().lines();
    assertContains(errorLines, "Starting a database on these store files will likely fail or observe inconsistent records");
}
use of org.neo4j.kernel.impl.store.StoreType in project neo4j by neo4j.
In class IdGeneratorMigrator, the method createEmptyPlaceHolderStoreFiles:
/**
 * Creates empty placeholder files for every store type whose file is missing from the
 * given layout, so that NeoStores can be opened over a complete set of store files.
 *
 * @param layout database layout to look up store file paths in
 * @param format record formats used when creating the placeholder stores
 * @return the paths of the store files that were created (i.e. were missing beforehand)
 */
private Set<Path> createEmptyPlaceHolderStoreFiles(DatabaseLayout layout, RecordFormats format) {
    // Select the store types whose backing file does not exist yet.
    StoreType[] missingStores = Stream.of(StoreType.values())
            .filter(storeType -> !fileSystem.fileExists(layout.file(storeType.getDatabaseFile())))
            .toArray(StoreType[]::new);
    Set<Path> createdStores = new HashSet<>();
    for (StoreType storeType : missingStores) {
        createdStores.add(layout.file(storeType.getDatabaseFile()));
    }
    // Opening NeoStores with createIfNotExists=true materializes the missing files;
    // a read-only id generator factory avoids touching id files for real.
    createStoreFactory(layout, format, new ScanOnOpenReadOnlyIdGeneratorFactory()).openNeoStores(true, missingStores).close();
    return createdStores;
}
Aggregations