use of org.neo4j.storageengine.api.StorageEngine in project neo4j by neo4j.
In the class RecoveryTest, method recover:
private boolean recover(PhysicalLogFiles logFiles) {
    LifeSupport life = new LifeSupport();
    Recovery.Monitor monitor = mock(Recovery.Monitor.class);
    final AtomicBoolean recoveryRequired = new AtomicBoolean();
    try {
        StorageEngine storageEngine = mock(StorageEngine.class);
        final LogEntryReader<ReadableClosablePositionAwareChannel> reader = new VersionAwareLogEntryReader<>();
        LatestCheckPointFinder finder = new LatestCheckPointFinder(logFiles, fileSystemRule.get(), reader);
        TransactionMetadataCache metadataCache = new TransactionMetadataCache(100);
        LogHeaderCache logHeaderCache = new LogHeaderCache(10);
        LogFile logFile = life.add(new PhysicalLogFile(fileSystemRule.get(), logFiles, 50,
                () -> transactionIdStore.getLastCommittedTransactionId(), logVersionRepository,
                mock(PhysicalLogFile.Monitor.class), logHeaderCache));
        LogicalTransactionStore txStore = new PhysicalLogicalTransactionStore(logFile, metadataCache, reader);
        // The SPI subclass flags whether recovery was actually started during life.start().
        life.add(new Recovery(new DefaultRecoverySPI(storageEngine, logFiles, fileSystemRule.get(),
                logVersionRepository, finder, transactionIdStore, txStore, NO_MONITOR) {
            @Override
            public Visitor<CommittedTransactionRepresentation, Exception> startRecovery() {
                recoveryRequired.set(true);
                return super.startRecovery();
            }
        }, monitor));
        life.start();
    } finally {
        life.shutdown();
    }
    return recoveryRequired.get();
}
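A hedged usage sketch (the test name and log contents below are illustrative, not taken from the original test class), assuming the same RecoveryTest fixtures shown above (directory, logVersion, fileSystemRule, writeSomeData) and the usual JUnit assertions: a log that ends with a committed transaction but no trailing check point should make recover(...) return true.

@Test
public void shouldRequireRecoveryWhenLogLacksTrailingCheckPoint() throws Exception { // hypothetical test
    PhysicalLogFiles logFiles = new PhysicalLogFiles(directory.directory(), "log", fileSystemRule.get());
    File file = logFiles.getLogFileForVersion(logVersion);
    writeSomeData(file, pair -> {
        LogEntryWriter writer = pair.first();
        // a committed transaction with no check point after it
        writer.writeStartEntry(0, 1, 2L, 3L, new byte[0]);
        writer.writeCommitEntry(4L, 5L);
        return true;
    });
    assertTrue(recover(logFiles));
}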
use of org.neo4j.storageengine.api.StorageEngine in project neo4j by neo4j.
In the class RecoveryTest, method shouldSeeThatACleanDatabaseShouldNotRequireRecovery:
@Test
public void shouldSeeThatACleanDatabaseShouldNotRequireRecovery() throws Exception {
    final PhysicalLogFiles logFiles = new PhysicalLogFiles(directory.directory(), "log", fileSystemRule.get());
    File file = logFiles.getLogFileForVersion(logVersion);
    writeSomeData(file, new Visitor<Pair<LogEntryWriter, Consumer<LogPositionMarker>>, IOException>() {
        @Override
        public boolean visit(Pair<LogEntryWriter, Consumer<LogPositionMarker>> pair) throws IOException {
            LogEntryWriter writer = pair.first();
            Consumer<LogPositionMarker> consumer = pair.other();
            LogPositionMarker marker = new LogPositionMarker();
            // last committed tx
            consumer.accept(marker);
            writer.writeStartEntry(0, 1, 2L, 3L, new byte[0]);
            writer.writeCommitEntry(4L, 5L);
            // check point pointing at the position captured just above
            consumer.accept(marker);
            writer.writeCheckPointEntry(marker.newPosition());
            return true;
        }
    });
    LifeSupport life = new LifeSupport();
    Recovery.Monitor monitor = mock(Recovery.Monitor.class);
    try {
        StorageEngine storageEngine = mock(StorageEngine.class);
        final LogEntryReader<ReadableClosablePositionAwareChannel> reader = new VersionAwareLogEntryReader<>();
        LatestCheckPointFinder finder = new LatestCheckPointFinder(logFiles, fileSystemRule.get(), reader);
        TransactionMetadataCache metadataCache = new TransactionMetadataCache(100);
        LogHeaderCache logHeaderCache = new LogHeaderCache(10);
        LogFile logFile = life.add(new PhysicalLogFile(fileSystemRule.get(), logFiles, 50,
                () -> transactionIdStore.getLastCommittedTransactionId(), logVersionRepository,
                mock(PhysicalLogFile.Monitor.class), logHeaderCache));
        LogicalTransactionStore txStore = new PhysicalLogicalTransactionStore(logFile, metadataCache, reader);
        life.add(new Recovery(new DefaultRecoverySPI(storageEngine, logFiles, fileSystemRule.get(),
                logVersionRepository, finder, transactionIdStore, txStore, NO_MONITOR) {
            @Override
            public Visitor<CommittedTransactionRepresentation, Exception> startRecovery() {
                fail("Recovery should not be required");
                return null; // unreachable; only here to satisfy the compiler
            }
        }, monitor));
        life.start();
        verifyZeroInteractions(monitor);
    } finally {
        life.shutdown();
    }
}
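As a hedged follow-up (not part of the original test), the same clean log could also be pushed through the recover(...) helper from the first snippet; it should report that nothing was recovered, because the file ends with a check point pointing just past the last committed transaction:

assertFalse(recover(logFiles)); // assuming the usual JUnit assertFalse is available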
use of org.neo4j.storageengine.api.StorageEngine in project neo4j by neo4j.
In the class ReplicatedTokenHolder, method createCommands:
private byte[] createCommands(String tokenName) {
    StorageEngine storageEngine = dependencies.resolveDependency(StorageEngine.class);
    Collection<StorageCommand> commands = new ArrayList<>();
    TransactionState txState = new TxState();
    int tokenId = Math.toIntExact(idGeneratorFactory.get(tokenIdType).nextId());
    // Record the new token in transaction state, then let the storage engine turn
    // that state into storage commands that can be serialized and replicated.
    createToken(txState, tokenName, tokenId);
    try (StorageStatement statement = storageEngine.storeReadLayer().newStatement()) {
        storageEngine.createCommands(commands, txState, statement, ResourceLocker.NONE, Long.MAX_VALUE);
    } catch (CreateConstraintFailureException | TransactionFailureException | ConstraintValidationException e) {
        throw new RuntimeException("Unable to create token '" + tokenName + "'", e);
    }
    return ReplicatedTokenRequestSerializer.commandBytes(commands);
}
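For illustration, the createToken(...) call above is the abstract hook where each concrete holder records the new token in transaction state. A minimal sketch of what the label-token variant could look like, assuming it delegates to TxState.labelDoCreateForName (the TxState method for registering a label token by name and id); this is a sketch, not necessarily the exact Neo4j implementation:

@Override
protected void createToken(TransactionState txState, String tokenName, int tokenId) {
    // Record the new label token in transaction state; storageEngine.createCommands(...)
    // above then turns this into the corresponding token storage command.
    txState.labelDoCreateForName(tokenName, tokenId);
}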
use of org.neo4j.storageengine.api.StorageEngine in project neo4j by neo4j.
In the class ReplicatedTokenHolderTest, method shouldReplicateTokenRequestForNewToken:
@Test
public void shouldReplicateTokenRequestForNewToken() throws Exception {
    // given
    StorageEngine storageEngine = mockedStorageEngine();
    when(dependencies.resolveDependency(StorageEngine.class)).thenReturn(storageEngine);
    IdGeneratorFactory idGeneratorFactory = mock(IdGeneratorFactory.class);
    IdGenerator idGenerator = mock(IdGenerator.class);
    when(idGenerator.nextId()).thenReturn(1L);
    when(idGeneratorFactory.get(any(IdType.class))).thenReturn(idGenerator);
    TokenRegistry<Token> registry = new TokenRegistry<>("Label");
    int generatedTokenId = 1;
    // The replicator lambda is stubbed to "commit" the request immediately and answer with the token id.
    ReplicatedTokenHolder<Token> tokenHolder = new ReplicatedLabelTokenHolder(registry, (content, trackResult) -> {
        CompletableFuture<Object> completeFuture = new CompletableFuture<>();
        completeFuture.complete(generatedTokenId);
        return completeFuture;
    }, idGeneratorFactory, dependencies);

    // when
    Integer tokenId = tokenHolder.getOrCreateId("name1");

    // then
    assertThat(tokenId, equalTo(generatedTokenId));
}
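The mockedStorageEngine() helper is not shown in this excerpt. A minimal, hypothetical sketch of the wiring it needs to provide (assuming Mockito, and that stubbing storeReadLayer().newStatement() is enough for createCommands(...) in ReplicatedTokenHolder to run; the real helper in Neo4j's test suite may stub more behaviour):

private StorageEngine mockedStorageEngine() throws Exception {
    StorageEngine storageEngine = mock(StorageEngine.class);
    // createCommands(...) on a Mockito mock is a no-op by default, so the token holder
    // ends up serializing an empty command list, which is enough for this test's purpose.
    StoreReadLayer readLayer = mock(StoreReadLayer.class);
    StorageStatement statement = mock(StorageStatement.class);
    when(readLayer.newStatement()).thenReturn(statement);
    when(storageEngine.storeReadLayer()).thenReturn(readLayer);
    return storageEngine;
}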
use of org.neo4j.storageengine.api.StorageEngine in project neo4j by neo4j.
In the class NeoStoreDataSource, method buildTransactionLogs:
private NeoStoreTransactionLogModule buildTransactionLogs(File storeDir, Config config, LogProvider logProvider,
        JobScheduler scheduler, FileSystemAbstraction fileSystemAbstraction, StorageEngine storageEngine,
        LogEntryReader<ReadableClosablePositionAwareChannel> logEntryReader,
        SynchronizedArrayIdOrderingQueue legacyIndexTransactionOrdering, TransactionIdStore transactionIdStore,
        LogVersionRepository logVersionRepository) {
    TransactionMetadataCache transactionMetadataCache = new TransactionMetadataCache(100_000);
    LogHeaderCache logHeaderCache = new LogHeaderCache(1000);
    final PhysicalLogFiles logFiles = new PhysicalLogFiles(storeDir, PhysicalLogFile.DEFAULT_NAME, fileSystemAbstraction);
    final PhysicalLogFile logFile = life.add(new PhysicalLogFile(fileSystemAbstraction, logFiles,
            config.get(GraphDatabaseSettings.logical_log_rotation_threshold),
            transactionIdStore::getLastCommittedTransactionId, logVersionRepository, physicalLogMonitor, logHeaderCache));
    // Maps a log version to the timestamp of its first start entry; used, e.g., by time-based log pruning.
    final PhysicalLogFileInformation.LogVersionToTimestamp logInformation = version -> {
        LogPosition position = LogPosition.start(version);
        try (ReadableLogChannel channel = logFile.getReader(position)) {
            LogEntry entry;
            while ((entry = logEntryReader.readLogEntry(channel)) != null) {
                if (entry instanceof LogEntryStart) {
                    return entry.<LogEntryStart>as().getTimeWritten();
                }
            }
        }
        return -1;
    };
    final LogFileInformation logFileInformation = new PhysicalLogFileInformation(logFiles, logHeaderCache,
            transactionIdStore::getLastCommittedTransactionId, logInformation);
    if (config.get(GraphDatabaseFacadeFactory.Configuration.ephemeral)) {
        config = config.withDefaults(stringMap(GraphDatabaseSettings.keep_logical_logs.name(), "1 files"));
    }
    String pruningConf = config.get(GraphDatabaseSettings.keep_logical_logs);
    LogPruneStrategy logPruneStrategy = fromConfigValue(fs, logFileInformation, logFiles, pruningConf);
    final LogPruning logPruning = new LogPruningImpl(logPruneStrategy, logProvider);
    final LogRotation logRotation = new LogRotationImpl(monitors.newMonitor(LogRotation.Monitor.class), logFile, databaseHealth);
    final TransactionAppender appender = life.add(new BatchingTransactionAppender(logFile, logRotation,
            transactionMetadataCache, transactionIdStore, legacyIndexTransactionOrdering, databaseHealth));
    final LogicalTransactionStore logicalTransactionStore =
            new PhysicalLogicalTransactionStore(logFile, transactionMetadataCache, logEntryReader);
    // Check points are triggered by whichever threshold is reached first: committed-transaction count or elapsed time.
    int txThreshold = config.get(GraphDatabaseSettings.check_point_interval_tx);
    final CountCommittedTransactionThreshold countCommittedTransactionThreshold =
            new CountCommittedTransactionThreshold(txThreshold);
    long timeMillisThreshold = config.get(GraphDatabaseSettings.check_point_interval_time);
    TimeCheckPointThreshold timeCheckPointThreshold = new TimeCheckPointThreshold(timeMillisThreshold, clock);
    CheckPointThreshold threshold = CheckPointThresholds.or(countCommittedTransactionThreshold, timeCheckPointThreshold);
    final CheckPointerImpl checkPointer = new CheckPointerImpl(transactionIdStore, threshold, storageEngine,
            logPruning, appender, databaseHealth, logProvider, tracers.checkPointTracer, ioLimiter,
            storeCopyCheckPointMutex);
    long recurringPeriod = Math.min(timeMillisThreshold, TimeUnit.SECONDS.toMillis(10));
    CheckPointScheduler checkPointScheduler = new CheckPointScheduler(checkPointer, scheduler, recurringPeriod, databaseHealth);
    life.add(checkPointer);
    life.add(checkPointScheduler);
    return new NeoStoreTransactionLogModule(logicalTransactionStore, logFileInformation, logFiles, logFile,
            logRotation, checkPointer, appender, legacyIndexTransactionOrdering);
}
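To make the check point trigger explicit, here is a hedged, stand-alone sketch of the threshold composition used above; the numbers are illustrative placeholders rather than the configured defaults, and clock is the same field the method already uses:

// Check point when either 100_000 transactions have been committed since the last
// check point or 15 minutes have passed, whichever comes first (illustrative values).
CheckPointThreshold example = CheckPointThresholds.or(
        new CountCommittedTransactionThreshold(100_000),
        new TimeCheckPointThreshold(TimeUnit.MINUTES.toMillis(15), clock));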