Use of org.neo4j.io.fs.FileSystemAbstraction in project neo4j by neo4j.
The class Runner, method call():
@Override
public Long call() throws Exception {
    long lastCommittedTransactionId;
    try (FileSystemAbstraction fileSystem = new DefaultFileSystemAbstraction();
         Lifespan life = new Lifespan()) {
        TransactionIdStore transactionIdStore = new DeadSimpleTransactionIdStore();
        TransactionMetadataCache transactionMetadataCache = new TransactionMetadataCache(100_000);
        LogHeaderCache logHeaderCache = new LogHeaderCache(1000);
        LogFile logFile = life.add(createPhysicalLogFile(transactionIdStore, logHeaderCache, fileSystem));
        TransactionAppender transactionAppender =
                life.add(createBatchingTransactionAppender(transactionIdStore, transactionMetadataCache, logFile));
        ExecutorService executorService = Executors.newFixedThreadPool(threads);
        try {
            // One worker per thread, all appending transactions through the same appender
            Future<?>[] handlers = new Future[threads];
            for (int i = 0; i < threads; i++) {
                TransactionRepresentationFactory factory = new TransactionRepresentationFactory();
                Worker task = new Worker(transactionAppender, factory, condition);
                handlers[i] = executorService.submit(task);
            }
            // wait for all the workers to complete
            for (Future<?> handle : handlers) {
                handle.get();
            }
        } finally {
            executorService.shutdown();
        }
        lastCommittedTransactionId = transactionIdStore.getLastCommittedTransactionId();
    }
    return lastCommittedTransactionId;
}
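The pattern worth noting here is that both the file system and the component lifecycle are scoped by try-with-resources. Below is a minimal, hedged sketch of that pattern on its own (class name and file name are illustrative; the import path for Lifespan is an assumption, and openAsOutputStream(File, boolean) is taken from the CsvImporter example further down):

import java.io.File;
import java.io.OutputStream;

import org.neo4j.io.fs.DefaultFileSystemAbstraction;
import org.neo4j.io.fs.FileSystemAbstraction;
import org.neo4j.kernel.lifecycle.Lifespan;

public class FileSystemLifecycleSketch {
    public static void main(String[] args) throws Exception {
        // Resources declared first are closed last, so the file system
        // outlives the lifecycle container that uses it.
        try (FileSystemAbstraction fs = new DefaultFileSystemAbstraction();
             Lifespan life = new Lifespan()) {
            // All file I/O goes through the abstraction rather than java.io
            // directly, so tests can swap in an in-memory file system.
            try (OutputStream out = fs.openAsOutputStream(new File("example.txt"), false)) {
                out.write("hello".getBytes());
            }
            // Lifecycle-managed components (log file, appender, ...) would be
            // registered with life.add(...) exactly as in Runner.call() above.
        }
    }
}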
Use of org.neo4j.io.fs.FileSystemAbstraction in project neo4j by neo4j.
The class LogPruneStrategyFactoryTest, method testLogPruneThresholdsByType():
@Test
public void testLogPruneThresholdsByType() throws Exception {
    FileSystemAbstraction fsa = Mockito.mock(FileSystemAbstraction.class);
    assertThat(getThresholdByType(fsa, new ThresholdConfigValue("files", 25), ""), instanceOf(FileCountThreshold.class));
    assertThat(getThresholdByType(fsa, new ThresholdConfigValue("size", 16000), ""), instanceOf(FileSizeThreshold.class));
    assertThat(getThresholdByType(fsa, new ThresholdConfigValue("txs", 4000), ""), instanceOf(EntryCountThreshold.class));
    assertThat(getThresholdByType(fsa, new ThresholdConfigValue("entries", 4000), ""), instanceOf(EntryCountThreshold.class));
    assertThat(getThresholdByType(fsa, new ThresholdConfigValue("hours", 100), ""), instanceOf(EntryTimespanThreshold.class));
    assertThat(getThresholdByType(fsa, new ThresholdConfigValue("days", 100_000), ""), instanceOf(EntryTimespanThreshold.class));
}
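These type strings mirror the units accepted by the GraphDatabaseSettings.keep_logical_logs setting (e.g. "10 files", "2 days"), which buildTransactionLogs() below feeds into fromConfigValue. The mock file system is left unstubbed, which works because nothing in this test depends on particular return values from it. If a threshold under test did probe the file system, the mock could be stubbed in the usual Mockito way; a hedged sketch (fileExists(File) is assumed to be part of the FileSystemAbstraction interface, not shown in the snippets on this page):

import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.File;

import org.neo4j.io.fs.FileSystemAbstraction;

// ...
FileSystemAbstraction fsa = mock(FileSystemAbstraction.class);
// fileExists(File) is an assumed method on the interface; stub it so any
// threshold code that checks for log files sees them as present.
when(fsa.fileExists(any(File.class))).thenReturn(true);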
Use of org.neo4j.io.fs.FileSystemAbstraction in project neo4j by neo4j.
The class CsvImporter, method doImport():
@Override
public void doImport() throws IOException {
    FileSystemAbstraction fs = outsideWorld.fileSystem();
    File storeDir = config.get(DatabaseManagementSystemSettings.database_path);
    File logsDir = config.get(GraphDatabaseSettings.logs_directory);
    File reportFile = new File(reportFileName);
    OutputStream badOutput = new BufferedOutputStream(fs.openAsOutputStream(reportFile, false));
    Collector badCollector = badCollector(badOutput,
            isIgnoringSomething() ? BadCollector.UNLIMITED_TOLERANCE : 0,
            collect(ignoreBadRelationships, ignoreDuplicateNodes, ignoreExtraColumns));
    Configuration configuration = importConfiguration(null, false, config);
    CsvInput input = new CsvInput(nodeData(inputEncoding, nodesFiles), defaultFormatNodeFileHeader(),
            relationshipData(inputEncoding, relationshipsFiles), defaultFormatRelationshipFileHeader(),
            idType, csvConfiguration(args, false), badCollector, configuration.maxNumberOfProcessors());
    ImportTool.doImport(outsideWorld.errorStream(), outsideWorld.errorStream(), storeDir, logsDir, reportFile,
            fs, nodesFiles, relationshipsFiles, false, input, config, badOutput, configuration);
}
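Only one call in this method touches the file system directly: opening the bad-entries report through the abstraction obtained from outsideWorld. In isolation, and with an illustrative file name, that pattern looks roughly like this (a sketch, not the actual importer code):

import java.io.BufferedOutputStream;
import java.io.File;
import java.io.OutputStream;

import org.neo4j.io.fs.DefaultFileSystemAbstraction;
import org.neo4j.io.fs.FileSystemAbstraction;

public class ReportStreamSketch {
    public static void main(String[] args) throws Exception {
        try (FileSystemAbstraction fs = new DefaultFileSystemAbstraction();
             // false = truncate any existing file rather than append, as in doImport()
             OutputStream badOutput = new BufferedOutputStream(
                     fs.openAsOutputStream(new File("import.report"), false))) {
            badOutput.write("bad entries would be collected here\n".getBytes());
        }
    }
}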
Use of org.neo4j.io.fs.FileSystemAbstraction in project neo4j by neo4j.
The class DumpCommandTest, method shouldReportAHelpfulErrorIfWeDontHaveWritePermissionsForLock():
@Test
public void shouldReportAHelpfulErrorIfWeDontHaveWritePermissionsForLock() throws Exception {
    assumeFalse("We haven't found a way to reliably test permissions on Windows", SystemUtils.IS_OS_WINDOWS);
    try (FileSystemAbstraction fileSystem = new DefaultFileSystemAbstraction();
         StoreLocker storeLocker = new StoreLocker(fileSystem)) {
        storeLocker.checkLock(databaseDirectory.toFile());
        try (Closeable ignored = withPermissions(databaseDirectory.resolve(StoreLocker.STORE_LOCK_FILENAME), emptySet())) {
            execute("foo.db");
            fail("expected exception");
        } catch (CommandFailed e) {
            assertThat(e.getMessage(),
                    equalTo("you do not have permission to dump the database -- is Neo4j running as a different user?"));
        }
    }
}
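The test strips all permissions from the store lock file by passing emptySet() to withPermissions, a helper of this test class that is not shown on this page. A hedged sketch of what such a helper might look like with plain java.nio, purely for illustration (the name, signature, and restore-on-close behaviour are assumptions about the real helper, not a copy of it):

import java.io.Closeable;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.PosixFilePermission;
import java.util.Set;

// Hypothetical helper, living inside the test class: swaps in the given POSIX
// permissions and restores the originals when the returned Closeable is closed.
static Closeable withPermissions(Path file, Set<PosixFilePermission> permissions) throws IOException {
    Set<PosixFilePermission> original = Files.getPosixFilePermissions(file);
    Files.setPosixFilePermissions(file, permissions);
    return () -> Files.setPosixFilePermissions(file, original);
}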
Use of org.neo4j.io.fs.FileSystemAbstraction in project neo4j by neo4j.
The class NeoStoreDataSource, method buildTransactionLogs():
private NeoStoreTransactionLogModule buildTransactionLogs(File storeDir, Config config, LogProvider logProvider,
        JobScheduler scheduler, FileSystemAbstraction fileSystemAbstraction, StorageEngine storageEngine,
        LogEntryReader<ReadableClosablePositionAwareChannel> logEntryReader,
        SynchronizedArrayIdOrderingQueue legacyIndexTransactionOrdering, TransactionIdStore transactionIdStore,
        LogVersionRepository logVersionRepository) {
    TransactionMetadataCache transactionMetadataCache = new TransactionMetadataCache(100_000);
    LogHeaderCache logHeaderCache = new LogHeaderCache(1000);
    final PhysicalLogFiles logFiles = new PhysicalLogFiles(storeDir, PhysicalLogFile.DEFAULT_NAME, fileSystemAbstraction);
    final PhysicalLogFile logFile = life.add(new PhysicalLogFile(fileSystemAbstraction, logFiles,
            config.get(GraphDatabaseSettings.logical_log_rotation_threshold),
            transactionIdStore::getLastCommittedTransactionId, logVersionRepository, physicalLogMonitor, logHeaderCache));
    // Resolves a log version to the timestamp of its first start entry, or -1 if the log has none.
    final PhysicalLogFileInformation.LogVersionToTimestamp logInformation = version -> {
        LogPosition position = LogPosition.start(version);
        try (ReadableLogChannel channel = logFile.getReader(position)) {
            LogEntry entry;
            while ((entry = logEntryReader.readLogEntry(channel)) != null) {
                if (entry instanceof LogEntryStart) {
                    return entry.<LogEntryStart>as().getTimeWritten();
                }
            }
        }
        return -1;
    };
    final LogFileInformation logFileInformation = new PhysicalLogFileInformation(logFiles, logHeaderCache,
            transactionIdStore::getLastCommittedTransactionId, logInformation);
    if (config.get(GraphDatabaseFacadeFactory.Configuration.ephemeral)) {
        config = config.withDefaults(stringMap(GraphDatabaseSettings.keep_logical_logs.name(), "1 files"));
    }
    String pruningConf = config.get(GraphDatabaseSettings.keep_logical_logs);
    LogPruneStrategy logPruneStrategy = fromConfigValue(fs, logFileInformation, logFiles, pruningConf);
    final LogPruning logPruning = new LogPruningImpl(logPruneStrategy, logProvider);
    final LogRotation logRotation = new LogRotationImpl(monitors.newMonitor(LogRotation.Monitor.class), logFile, databaseHealth);
    final TransactionAppender appender = life.add(new BatchingTransactionAppender(logFile, logRotation,
            transactionMetadataCache, transactionIdStore, legacyIndexTransactionOrdering, databaseHealth));
    final LogicalTransactionStore logicalTransactionStore =
            new PhysicalLogicalTransactionStore(logFile, transactionMetadataCache, logEntryReader);
    // Check-pointing is triggered by transaction count or elapsed time, whichever threshold is reached first.
    int txThreshold = config.get(GraphDatabaseSettings.check_point_interval_tx);
    final CountCommittedTransactionThreshold countCommittedTransactionThreshold = new CountCommittedTransactionThreshold(txThreshold);
    long timeMillisThreshold = config.get(GraphDatabaseSettings.check_point_interval_time);
    TimeCheckPointThreshold timeCheckPointThreshold = new TimeCheckPointThreshold(timeMillisThreshold, clock);
    CheckPointThreshold threshold = CheckPointThresholds.or(countCommittedTransactionThreshold, timeCheckPointThreshold);
    final CheckPointerImpl checkPointer = new CheckPointerImpl(transactionIdStore, threshold, storageEngine, logPruning,
            appender, databaseHealth, logProvider, tracers.checkPointTracer, ioLimiter, storeCopyCheckPointMutex);
    long recurringPeriod = Math.min(timeMillisThreshold, TimeUnit.SECONDS.toMillis(10));
    CheckPointScheduler checkPointScheduler = new CheckPointScheduler(checkPointer, scheduler, recurringPeriod, databaseHealth);
    life.add(checkPointer);
    life.add(checkPointScheduler);
    return new NeoStoreTransactionLogModule(logicalTransactionStore, logFileInformation, logFiles, logFile,
            logRotation, checkPointer, appender, legacyIndexTransactionOrdering);
}
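The check-pointing trigger built here is a disjunction: CheckPointThresholds.or yields a threshold that is satisfied as soon as either the transaction-count threshold or the time threshold is. Restated in isolation with illustrative values (the constructor shapes are taken from the method above; clock stands for whatever Clock the data source already holds, TimeUnit is the same java.util.concurrent.TimeUnit used above, and the numbers are placeholders for the two check_point_interval settings):

// Illustrative values only; in buildTransactionLogs() both come from config.
int txThreshold = 100_000;                                // check_point_interval_tx
long timeMillisThreshold = TimeUnit.MINUTES.toMillis(15); // check_point_interval_time

// Fires when either member threshold fires, whichever happens first.
CheckPointThreshold threshold = CheckPointThresholds.or(
        new CountCommittedTransactionThreshold(txThreshold),
        new TimeCheckPointThreshold(timeMillisThreshold, clock));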