use of org.neo4j.io.pagecache.IOLimiter in project neo4j by neo4j.
the class RecordStorageEngineTest method mustFlushStoresWithGivenIOLimiter.
@Test
public void mustFlushStoresWithGivenIOLimiter() throws Exception {
    // A no-op limiter we can later assert was passed through unchanged.
    IOLimiter limiter = (stamp, completedIOs, swapper) -> 0;
    FileSystemAbstraction fs = fsRule.get();
    AtomicReference<IOLimiter> observedLimiter = new AtomicReference<>();
    // Wrap the real page cache so we can observe which limiter reaches flushAndForce.
    PageCache pageCache = new DelegatingPageCache(pageCacheRule.getPageCache(fs)) {
        @Override
        public void flushAndForce(IOLimiter limiter) throws IOException {
            super.flushAndForce(limiter);
            observedLimiter.set(limiter);
        }
    };

    RecordStorageEngine engine = storageEngineRule.getWith(fs, pageCache).build();
    engine.flushAndForce(limiter);

    assertThat(observedLimiter.get(), sameInstance(limiter));
}
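The lambda above satisfies IOLimiter's single abstract method, so a test (or an experiment) can supply its own limiting policy the same way. Below is a minimal sketch of a throttling limiter, assuming only the three-argument shape shown in that lambda (a previous stamp, the count of recently completed IOs, and the target being flushed); it is an illustration, not code from the project.

// Hypothetical throttling limiter: back off briefly after every batch of IOs.
// Parameter meanings are assumed from the lambda in the test above.
IOLimiter throttled = (previousStamp, recentlyCompletedIOs, flushable) -> {
    try {
        Thread.sleep(1); // crude pause between flush batches
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
    return previousStamp; // hand the stamp back for the next invocation
};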
use of org.neo4j.io.pagecache.IOLimiter in project neo4j by neo4j.
the class NeoStoreDataSource method buildTransactionLogs.
private NeoStoreTransactionLogModule buildTransactionLogs(File storeDir, Config config, LogProvider logProvider,
        JobScheduler scheduler, FileSystemAbstraction fileSystemAbstraction, StorageEngine storageEngine,
        LogEntryReader<ReadableClosablePositionAwareChannel> logEntryReader,
        SynchronizedArrayIdOrderingQueue legacyIndexTransactionOrdering, TransactionIdStore transactionIdStore,
        LogVersionRepository logVersionRepository) {
    TransactionMetadataCache transactionMetadataCache = new TransactionMetadataCache(100_000);
    LogHeaderCache logHeaderCache = new LogHeaderCache(1000);

    final PhysicalLogFiles logFiles = new PhysicalLogFiles(storeDir, PhysicalLogFile.DEFAULT_NAME, fileSystemAbstraction);
    final PhysicalLogFile logFile = life.add(new PhysicalLogFile(fileSystemAbstraction, logFiles,
            config.get(GraphDatabaseSettings.logical_log_rotation_threshold),
            transactionIdStore::getLastCommittedTransactionId, logVersionRepository, physicalLogMonitor,
            logHeaderCache));

    final PhysicalLogFileInformation.LogVersionToTimestamp logInformation = version -> {
        LogPosition position = LogPosition.start(version);
        try (ReadableLogChannel channel = logFile.getReader(position)) {
            LogEntry entry;
            while ((entry = logEntryReader.readLogEntry(channel)) != null) {
                if (entry instanceof LogEntryStart) {
                    return entry.<LogEntryStart>as().getTimeWritten();
                }
            }
        }
        return -1;
    };
    final LogFileInformation logFileInformation = new PhysicalLogFileInformation(logFiles, logHeaderCache,
            transactionIdStore::getLastCommittedTransactionId, logInformation);

    if (config.get(GraphDatabaseFacadeFactory.Configuration.ephemeral)) {
        config = config.withDefaults(stringMap(GraphDatabaseSettings.keep_logical_logs.name(), "1 files"));
    }
    String pruningConf = config.get(GraphDatabaseSettings.keep_logical_logs);

    LogPruneStrategy logPruneStrategy = fromConfigValue(fs, logFileInformation, logFiles, pruningConf);

    final LogPruning logPruning = new LogPruningImpl(logPruneStrategy, logProvider);
    final LogRotation logRotation = new LogRotationImpl(monitors.newMonitor(LogRotation.Monitor.class), logFile,
            databaseHealth);
    final TransactionAppender appender = life.add(new BatchingTransactionAppender(logFile, logRotation,
            transactionMetadataCache, transactionIdStore, legacyIndexTransactionOrdering, databaseHealth));
    final LogicalTransactionStore logicalTransactionStore =
            new PhysicalLogicalTransactionStore(logFile, transactionMetadataCache, logEntryReader);

    int txThreshold = config.get(GraphDatabaseSettings.check_point_interval_tx);
    final CountCommittedTransactionThreshold countCommittedTransactionThreshold =
            new CountCommittedTransactionThreshold(txThreshold);

    long timeMillisThreshold = config.get(GraphDatabaseSettings.check_point_interval_time);
    TimeCheckPointThreshold timeCheckPointThreshold = new TimeCheckPointThreshold(timeMillisThreshold, clock);

    CheckPointThreshold threshold = CheckPointThresholds.or(countCommittedTransactionThreshold, timeCheckPointThreshold);

    final CheckPointerImpl checkPointer = new CheckPointerImpl(transactionIdStore, threshold, storageEngine,
            logPruning, appender, databaseHealth, logProvider, tracers.checkPointTracer, ioLimiter,
            storeCopyCheckPointMutex);

    long recurringPeriod = Math.min(timeMillisThreshold, TimeUnit.SECONDS.toMillis(10));
    CheckPointScheduler checkPointScheduler = new CheckPointScheduler(checkPointer, scheduler, recurringPeriod,
            databaseHealth);

    life.add(checkPointer);
    life.add(checkPointScheduler);

    return new NeoStoreTransactionLogModule(logicalTransactionStore, logFileInformation, logFiles, logFile,
            logRotation, checkPointer, appender, legacyIndexTransactionOrdering);
}
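The CheckPointerImpl built here receives the data source's ioLimiter field, so scheduled and threshold-driven checkpoints flush through that limiter. As a hedged illustration of driving the resulting check pointer directly, assuming the forceCheckPoint(TriggerInfo) entry point and the SimpleTriggerInfo helper available in this codebase:

// Sketch: trigger an immediate checkpoint, e.g. from a test or an admin hook.
// The flush it performs is still throttled by the ioLimiter handed to
// CheckPointerImpl above.
long checkPointNow(CheckPointer checkPointer) throws IOException {
    return checkPointer.forceCheckPoint(new SimpleTriggerInfo("manually triggered"));
}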
use of org.neo4j.io.pagecache.IOLimiter in project neo4j by neo4j.
the class DefaultRecoverySPI method forceEverything.
@Override
public void forceEverything() {
    // Runs during recovery; go as fast as possible.
    IOLimiter unlimited = IOLimiter.unlimited();
    storageEngine.flushAndForce(unlimited);
}
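For contrast, forcing an unthrottled flush straight through the page cache mirrors this recovery path. A one-line sketch using the flushAndForce(IOLimiter) overload that the first snippet overrides in DelegatingPageCache:

// Sketch: flush all dirty pages with no IO limiting, mirroring the recovery path above.
// pageCache is any org.neo4j.io.pagecache.PageCache instance.
static void flushEverythingUnthrottled(PageCache pageCache) throws IOException {
    pageCache.flushAndForce(IOLimiter.unlimited());
}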