Use of org.neo4j.unsafe.impl.batchimport.input.InputRelationship in project neo4j by neo4j.
The class CsvInputTest, method shouldFailOnRelationshipWithMissingStartIdField.
@Test
public void shouldFailOnRelationshipWithMissingStartIdField() throws Exception {
    // GIVEN
    Iterable<DataFactory<InputRelationship>> data = relationshipData(
            CsvInputTest.<InputRelationship>data(":START_ID,:END_ID,:TYPE\n" + ",1,"));
    Input input = new CsvInput(null, null, data, defaultFormatRelationshipFileHeader(),
            IdType.INTEGER, config(COMMAS), silentBadCollector(0),
            getRuntime().availableProcessors());

    // WHEN
    try (InputIterator<InputRelationship> relationships = input.relationships().iterator()) {
        relationships.next();
        fail("Should have failed");
    } catch (InputException e) {
        // THEN good
        assertThat(e.getMessage(), containsString(Type.START_ID.name()));
    }
}
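For contrast, a minimal sketch of the corresponding happy path, reusing the same test helpers shown above (relationshipData, data, config, silentBadCollector); the startNode()/endNode() accessors on InputRelationship are assumptions here, not taken from this test:

// Sketch of the passing case, assuming the same CsvInputTest helpers as above.
// startNode()/endNode() as accessors are an assumption of this sketch.
Iterable<DataFactory<InputRelationship>> data = relationshipData(
        CsvInputTest.<InputRelationship>data(":START_ID,:END_ID,:TYPE\n" + "0,1,KNOWS"));
Input input = new CsvInput(null, null, data, defaultFormatRelationshipFileHeader(),
        IdType.INTEGER, config(COMMAS), silentBadCollector(0),
        getRuntime().availableProcessors());
try (InputIterator<InputRelationship> relationships = input.relationships().iterator()) {
    InputRelationship relationship = relationships.next();
    assertEquals(0L, relationship.startNode()); // parsed from the :START_ID column
    assertEquals(1L, relationship.endNode());   // parsed from the :END_ID column
}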
Use of org.neo4j.unsafe.impl.batchimport.input.InputRelationship in project neo4j by neo4j.
The class StoreMigrator, method migrateWithBatchImporter.
private void migrateWithBatchImporter(File storeDir, File migrationDir, long lastTxId, long lastTxChecksum,
        long lastTxLogVersion, long lastTxLogByteOffset, MigrationProgressMonitor.Section progressMonitor,
        RecordFormats oldFormat, RecordFormats newFormat) throws IOException {
    prepareBatchImportMigration(storeDir, migrationDir, oldFormat, newFormat);
    boolean requiresDynamicStoreMigration = !newFormat.dynamic().equals(oldFormat.dynamic());
    boolean requiresPropertyMigration =
            !newFormat.property().equals(oldFormat.property()) || requiresDynamicStoreMigration;
    File badFile = new File(storeDir, Configuration.BAD_FILE_NAME);
    try (NeoStores legacyStore = instantiateLegacyStore(oldFormat, storeDir);
            RecordCursors nodeInputCursors = new RecordCursors(legacyStore);
            RecordCursors relationshipInputCursors = new RecordCursors(legacyStore);
            OutputStream badOutput = new BufferedOutputStream(new FileOutputStream(badFile, false))) {
        Configuration importConfig = new Configuration.Overridden(config);
        AdditionalInitialIds additionalInitialIds =
                readAdditionalIds(lastTxId, lastTxChecksum, lastTxLogVersion, lastTxLogByteOffset);

        // We have to make sure to keep the token ids if we're migrating properties/labels
        BatchImporter importer = new ParallelBatchImporter(migrationDir.getAbsoluteFile(), fileSystem,
                pageCache, importConfig, logService,
                withDynamicProcessorAssignment(
                        migrationBatchImporterMonitor(legacyStore, progressMonitor, importConfig),
                        importConfig),
                additionalInitialIds, config, newFormat);
        InputIterable<InputNode> nodes =
                legacyNodesAsInput(legacyStore, requiresPropertyMigration, nodeInputCursors);
        InputIterable<InputRelationship> relationships =
                legacyRelationshipsAsInput(legacyStore, requiresPropertyMigration, relationshipInputCursors);
        importer.doImport(Inputs.input(nodes, relationships, IdMappers.actual(), IdGenerators.fromInput(),
                Collectors.badCollector(badOutput, 0)));
        // During migration the batch importer doesn't necessarily write all entities, depending on
        // which stores need migration. The node, relationship and relationship group stores are always
        // written anyway and cannot be skipped by the importer, but delete the store files that weren't
        // written (left empty) so that we don't overwrite those in the real store directory later.
        Collection<StoreFile> storesToDeleteFromMigratedDirectory = new ArrayList<>();
        storesToDeleteFromMigratedDirectory.add(StoreFile.NEO_STORE);
        if (!requiresPropertyMigration) {
            // We didn't migrate properties, so the property stores in the migrated store are just empty/bogus
            storesToDeleteFromMigratedDirectory.addAll(asList(
                    StoreFile.PROPERTY_STORE,
                    StoreFile.PROPERTY_STRING_STORE,
                    StoreFile.PROPERTY_ARRAY_STORE));
        }
        if (!requiresDynamicStoreMigration) {
            // We didn't migrate labels (dynamic node labels) or any other dynamic store
            storesToDeleteFromMigratedDirectory.addAll(asList(
                    StoreFile.NODE_LABEL_STORE,
                    StoreFile.LABEL_TOKEN_STORE,
                    StoreFile.LABEL_TOKEN_NAMES_STORE,
                    StoreFile.RELATIONSHIP_TYPE_TOKEN_STORE,
                    StoreFile.RELATIONSHIP_TYPE_TOKEN_NAMES_STORE,
                    StoreFile.PROPERTY_KEY_TOKEN_STORE,
                    StoreFile.PROPERTY_KEY_TOKEN_NAMES_STORE,
                    StoreFile.SCHEMA_STORE));
        }
        StoreFile.fileOperation(DELETE, fileSystem, migrationDir, null, storesToDeleteFromMigratedDirectory,
                true, null, StoreFileType.values());

        // When migrating on a block device there might be some files only accessible via the page cache.
        try {
            Predicate<FileHandle> fileHandlePredicate = fileHandle -> storesToDeleteFromMigratedDirectory.stream()
                    .anyMatch(storeFile -> storeFile.fileName(StoreFileType.STORE)
                            .equals(fileHandle.getFile().getName()));
            pageCache.streamFilesRecursive(migrationDir).filter(fileHandlePredicate)
                    .forEach(FileHandle.HANDLE_DELETE);
        } catch (NoSuchFileException e) {
            // This means there were no files present only in the page cache, which is fine.
        }
    }
}
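The Inputs.input(...) call above is the general entry point for feeding the batch importer. A standalone sketch of that wiring, where nodes, relationships, importer and badFile stand in for the values built in the surrounding method:

// Standalone sketch of the Inputs.input(...) wiring used above; nodes, relationships,
// importer and badFile are stand-ins for values from the surrounding method.
try (OutputStream badOutput = new BufferedOutputStream(new FileOutputStream(badFile, false))) {
    Input input = Inputs.input(nodes, relationships,
            IdMappers.actual(),        // input ids are used as-is as record ids
            IdGenerators.fromInput(),  // record ids are taken straight from the input
            Collectors.badCollector(badOutput, 0)); // a migration tolerates zero bad entries
    importer.doImport(input);
}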
Use of org.neo4j.unsafe.impl.batchimport.input.InputRelationship in project neo4j by neo4j.
The class StoreMigrator, method legacyRelationshipsAsInput.
private InputIterable<InputRelationship> legacyRelationshipsAsInput(NeoStores legacyStore,
        boolean requiresPropertyMigration, RecordCursors cursors) {
    RelationshipStore store = legacyStore.getRelationshipStore();
    final BiConsumer<InputRelationship,RelationshipRecord> propertyDecorator =
            propertyDecorator(requiresPropertyMigration, cursors);
    return new StoreScanAsInputIterable<InputRelationship,RelationshipRecord>(store) {
        @Override
        protected InputRelationship inputEntityOf(RelationshipRecord record) {
            InputRelationship result = new InputRelationship(
                    "legacy store", record.getId(), record.getId() * RelationshipRecordFormat.RECORD_SIZE,
                    InputEntity.NO_PROPERTIES, record.getNextProp(),
                    record.getFirstNode(), record.getSecondNode(), null, record.getType());
            propertyDecorator.accept(result, record);
            return result;
        }
    };
}
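A hedged usage sketch for the iterable returned above, for example tallying legacy relationships per type before the real import; typeId() as the accessor for the type id passed into the InputRelationship constructor is an assumption of this sketch:

// Hedged sketch: consume the InputIterable returned by legacyRelationshipsAsInput(...)
// and count relationships per type. typeId() as accessor is an assumption.
InputIterable<InputRelationship> relationships =
        legacyRelationshipsAsInput(legacyStore, requiresPropertyMigration, cursors);
Map<Integer,Long> countsPerType = new HashMap<>();
try (InputIterator<InputRelationship> iterator = relationships.iterator()) {
    while (iterator.hasNext()) {
        countsPerType.merge(iterator.next().typeId(), 1L, Long::sum);
    }
}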
Use of org.neo4j.unsafe.impl.batchimport.input.InputRelationship in project neo4j by neo4j.
The class ParallelBatchImporter, method doImport.
@Override
public void doImport(Input input) throws IOException {
    log.info("Import starting");
    // Things that we need to close later. The reason they're not in the try-with-resources statement
    // is that we need to close them, and set them to null, at specific points. So use a good ol' finally block.
    NodeRelationshipCache nodeRelationshipCache = null;
    NodeLabelsCache nodeLabelsCache = null;
    long startTime = currentTimeMillis();
    CountingStoreUpdateMonitor storeUpdateMonitor = new CountingStoreUpdateMonitor();
    try (BatchingNeoStores neoStore = getBatchingNeoStores();
            CountsAccessor.Updater countsUpdater =
                    neoStore.getCountsStore().reset(neoStore.getLastCommittedTransactionId());
            InputCache inputCache = new InputCache(fileSystem, storeDir, recordFormats, config)) {
        Collector badCollector = input.badCollector();

        // Some temporary caches and indexes in the import
        IoMonitor writeMonitor = new IoMonitor(neoStore.getIoTracer());
        IdMapper idMapper = input.idMapper();
        IdGenerator idGenerator = input.idGenerator();
        nodeRelationshipCache = new NodeRelationshipCache(AUTO, config.denseNodeThreshold());
        StatsProvider memoryUsageStats = new MemoryUsageStatsProvider(nodeRelationshipCache, idMapper);
        InputIterable<InputNode> nodes = input.nodes();
        InputIterable<InputRelationship> relationships = input.relationships();
        InputIterable<InputNode> cachedNodes = cachedForSure(nodes, inputCache.nodes(MAIN, true));
        InputIterable<InputRelationship> cachedRelationships =
                cachedForSure(relationships, inputCache.relationships(MAIN, true));
        RelationshipStore relationshipStore = neoStore.getRelationshipStore();

        // Stage 1 -- nodes, properties, labels
        NodeStage nodeStage = new NodeStage(config, writeMonitor, nodes, idMapper, idGenerator,
                neoStore, inputCache, neoStore.getLabelScanStore(), storeUpdateMonitor,
                nodeRelationshipCache, memoryUsageStats);
        executeStage(nodeStage);
        if (idMapper.needsPreparation()) {
            executeStage(new IdMapperPreparationStage(config, idMapper, cachedNodes,
                    badCollector, memoryUsageStats));
            PrimitiveLongIterator duplicateNodeIds = badCollector.leftOverDuplicateNodesIds();
            if (duplicateNodeIds.hasNext()) {
                executeStage(new DeleteDuplicateNodesStage(config, duplicateNodeIds, neoStore));
            }
        }

        // Stage 2 -- calculate dense node threshold
        CalculateDenseNodesStage calculateDenseNodesStage = new CalculateDenseNodesStage(
                withBatchSize(config, config.batchSize() * 10), relationships,
                nodeRelationshipCache, idMapper, badCollector, inputCache, neoStore);
        executeStage(calculateDenseNodesStage);

        importRelationships(nodeRelationshipCache, storeUpdateMonitor, neoStore, writeMonitor,
                idMapper, cachedRelationships, inputCache,
                calculateDenseNodesStage.getRelationshipTypes(Long.MAX_VALUE),
                calculateDenseNodesStage.getRelationshipTypes(100));

        // Release this potentially really big piece of cached data
        long peakMemoryUsage = totalMemoryUsageOf(idMapper, nodeRelationshipCache);
        long highNodeId = nodeRelationshipCache.getHighNodeId();
        idMapper.close();
        idMapper = null;
        nodeRelationshipCache.close();
        nodeRelationshipCache = null;

        new RelationshipGroupDefragmenter(config, executionMonitor)
                .run(max(max(peakMemoryUsage, highNodeId * 4), mebiBytes(1)), neoStore, highNodeId);

        // Stage 6 -- count nodes per label and labels per node
        nodeLabelsCache = new NodeLabelsCache(AUTO, neoStore.getLabelRepository().getHighId());
        memoryUsageStats = new MemoryUsageStatsProvider(nodeLabelsCache);
        executeStage(new NodeCountsStage(config, nodeLabelsCache, neoStore.getNodeStore(),
                neoStore.getLabelRepository().getHighId(), countsUpdater, memoryUsageStats));

        // Stage 7 -- count label-[type]->label
        executeStage(new RelationshipCountsStage(config, nodeLabelsCache, relationshipStore,
                neoStore.getLabelRepository().getHighId(),
                neoStore.getRelationshipTypeRepository().getHighId(), countsUpdater, AUTO));

        // We're done, do some final logging about it
        long totalTimeMillis = currentTimeMillis() - startTime;
        executionMonitor.done(totalTimeMillis,
                format("%n") + storeUpdateMonitor.toString() + format("%n")
                        + "Peak memory usage: " + bytes(peakMemoryUsage));
        log.info("Import completed, took " + Format.duration(totalTimeMillis) + ". " + storeUpdateMonitor);
    } catch (Throwable t) {
        log.error("Error during import", t);
        throw Exceptions.launderedException(IOException.class, t);
    } finally {
        if (nodeRelationshipCache != null) {
            nodeRelationshipCache.close();
        }
        if (nodeLabelsCache != null) {
            nodeLabelsCache.close();
        }
    }
}
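For context, a caller-side sketch of invoking doImport(...), mirroring the ParallelBatchImporter construction shown in migrateWithBatchImporter above; every variable here is assumed to exist in the caller's context:

// Caller-side sketch (all variables are stand-ins from the caller's context),
// mirroring the constructor call seen in StoreMigrator.migrateWithBatchImporter:
BatchImporter importer = new ParallelBatchImporter(storeDir, fileSystem, pageCache,
        importConfig, logService, executionMonitor, additionalInitialIds, dbConfig, recordFormats);
importer.doImport(Inputs.input(nodes, relationships,
        IdMappers.actual(), IdGenerators.fromInput(), Collectors.badCollector(badOutput, 0)));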
Use of org.neo4j.unsafe.impl.batchimport.input.InputRelationship in project neo4j by neo4j.
The class ParallelBatchImporter, method importRelationships.
private void importRelationships(NodeRelationshipCache nodeRelationshipCache,
        CountingStoreUpdateMonitor storeUpdateMonitor, BatchingNeoStores neoStore, IoMonitor writeMonitor,
        IdMapper idMapper, InputIterable<InputRelationship> relationships, InputCache inputCache,
        Object[] allRelationshipTypes, Object[] minorityRelationshipTypes) {
    // Imports the relationships from the Input. This isn't as straightforward as importing nodes,
    // since keeping track of and updating the heads of relationship chains, in scenarios where most nodes
    // are dense and there are many relationship types, scales poorly with regard to cache memory usage
    // and, as a side effect, the time required to update this cache.
    //
    // The approach is instead to do multiple iterations where each iteration imports relationships
    // of a single type. For each iteration the Node --> Relationship and Relationship --> Relationship
    // stages _for dense nodes only_ are run so that the cache can be reused to hold the relationship
    // chain heads of the next type in the next iteration. All relationships are imported this way, and
    // then finally one Node --> Relationship and one Relationship --> Relationship stage link
    // all sparse relationship chains together.
    Set<Object> minorityRelationshipTypeSet = asSet(minorityRelationshipTypes);
    PerTypeRelationshipSplitter perTypeIterator = new PerTypeRelationshipSplitter(
            relationships.iterator(), allRelationshipTypes, minorityRelationshipTypeSet::contains,
            neoStore.getRelationshipTypeRepository(), inputCache);
    long nextRelationshipId = 0;
    Configuration relationshipConfig =
            withBatchSize(config, neoStore.getRelationshipStore().getRecordsPerPage());
    Configuration nodeConfig = withBatchSize(config, neoStore.getNodeStore().getRecordsPerPage());
    for (int i = 0; perTypeIterator.hasNext(); i++) {
        // Stage 3a -- relationships, properties
        nodeRelationshipCache.setForwardScan(true);
        Object currentType = perTypeIterator.currentType();
        int currentTypeId = neoStore.getRelationshipTypeRepository().getOrCreateId(currentType);
        InputIterator<InputRelationship> perType = perTypeIterator.next();
        String topic = " [:" + currentType + "] (" + (i + 1) + "/" + allRelationshipTypes.length + ")";
        final RelationshipStage relationshipStage = new RelationshipStage(topic, config, writeMonitor,
                perType, idMapper, neoStore, nodeRelationshipCache, storeUpdateMonitor, nextRelationshipId);
        executeStage(relationshipStage);

        // Stage 4a -- set node nextRel fields for dense nodes
        executeStage(new NodeFirstRelationshipStage(topic, nodeConfig, neoStore.getNodeStore(),
                neoStore.getTemporaryRelationshipGroupStore(), nodeRelationshipCache,
                true /*dense*/, currentTypeId));

        // Stage 5a -- link relationship chains together for dense nodes
        nodeRelationshipCache.setForwardScan(false);
        executeStage(new RelationshipLinkbackStage(topic, relationshipConfig,
                neoStore.getRelationshipStore(), nodeRelationshipCache,
                nextRelationshipId, relationshipStage.getNextRelationshipId(), true));
        nextRelationshipId = relationshipStage.getNextRelationshipId();

        // cheap higher level clearing
        nodeRelationshipCache.clearChangedChunks(true);
    }

    String topic = " Sparse";
    nodeRelationshipCache.setForwardScan(true);
    // Stage 4b -- set node nextRel fields for sparse nodes
    executeStage(new NodeFirstRelationshipStage(topic, nodeConfig, neoStore.getNodeStore(),
            neoStore.getTemporaryRelationshipGroupStore(), nodeRelationshipCache,
            false /*sparse*/, -1));

    // Stage 5b -- link relationship chains together for sparse nodes
    nodeRelationshipCache.setForwardScan(false);
    executeStage(new RelationshipLinkbackStage(topic, relationshipConfig,
            neoStore.getRelationshipStore(), nodeRelationshipCache, 0, nextRelationshipId, false));
    if (minorityRelationshipTypes.length > 0) {
        // Do some batch insertion style random-access insertions for super small minority types
        executeStage(new BatchInsertRelationshipsStage(config, idMapper,
                perTypeIterator.getMinorityRelationships(), neoStore, nextRelationshipId));
    }
}
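The per-type loop above relies on PerTypeRelationshipSplitter handing out one relationship type at a time. A simplified sketch of that contract as used above, where minoritySet and typeRepository are stand-ins for values from the surrounding context:

// Simplified sketch of the per-type iteration contract assumed above: each next()
// yields only relationships of currentType(), so the node cache can be reused per type.
// minoritySet and typeRepository are stand-ins, not names from this class.
PerTypeRelationshipSplitter perType = new PerTypeRelationshipSplitter(
        relationships.iterator(), allRelationshipTypes, minoritySet::contains,
        typeRepository, inputCache);
while (perType.hasNext()) {
    Object currentType = perType.currentType();
    try (InputIterator<InputRelationship> ofType = perType.next()) {
        while (ofType.hasNext()) {
            InputRelationship relationship = ofType.next();
            // every relationship seen here has type currentType
        }
    }
}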