Example usage of org.neo4j.kernel.lifecycle.Lifespan in the neo4j project,
taken from the method shouldBeIdempotent of the class ReplicatedLockTokenStateMachineTest.
@Test
public void shouldBeIdempotent() throws Exception {
    // given: durable lock-token state storage managed by a Lifespan
    EphemeralFileSystemAbstraction fsa = fileSystemRule.get();
    fsa.mkdir(testDir.directory());
    StateMarshal<ReplicatedLockTokenState> marshal =
            new ReplicatedLockTokenState.Marshal(new MemberId.Marshal());
    DurableStateStorage<ReplicatedLockTokenState> storage = new DurableStateStorage<>(
            fsa, testDir.directory(), "state", marshal, 100, NullLogProvider.getInstance());

    try (Lifespan lifespan = new Lifespan(storage)) {
        ReplicatedLockTokenStateMachine stateMachine = new ReplicatedLockTokenStateMachine(storage);
        MemberId firstOwner = member(0);
        MemberId lateRequester = member(1);

        // the first member takes the token at command index 3
        stateMachine.applyCommand(new ReplicatedLockTokenRequest(firstOwner, 0), 3, result -> {
        });

        // when: a second request arrives with a LOWER command index (2 < 3)
        stateMachine.applyCommand(new ReplicatedLockTokenRequest(lateRequester, 1), 2, result -> {
        });

        // then: the stale command is ignored and the original owner keeps the token
        assertEquals(firstOwner, stateMachine.currentToken().owner());
    }
}
Example usage of org.neo4j.kernel.lifecycle.Lifespan in the neo4j project,
taken from the method appendNullTransactionLogEntryToSetRaftIndexToMinusOne of the class CoreBootstrapper.
/**
 * Appends an empty ("null") transaction to the transaction log whose header encodes a
 * raft log index of -1, then records that transaction's id in the metadata store.
 *
 * @throws IOException if reading store metadata or writing to the transaction log fails
 */
private void appendNullTransactionLogEntryToSetRaftIndexToMinusOne() throws IOException {
    PhysicalLogFiles logFiles = new PhysicalLogFiles(storeDir, fs);
    ReadOnlyLogVersionRepository logVersionRepository = new ReadOnlyLogVersionRepository(pageCache, storeDir);
    ReadOnlyTransactionIdStore readOnlyTransactionIdStore = new ReadOnlyTransactionIdStore(pageCache, storeDir);
    // Long.MAX_VALUE rotation threshold: the log file is opened once and must not rotate here.
    PhysicalLogFile logFile = new PhysicalLogFile(fs, logFiles, Long.MAX_VALUE, /*don't rotate*/
    () -> readOnlyTransactionIdStore.getLastClosedTransactionId() - 1, logVersionRepository, new Monitors().newMonitor(PhysicalLogFile.Monitor.class), new LogHeaderCache(10));
    long dummyTransactionId;
    // Lifespan starts the log file on entry and shuts it down on exit, even on failure.
    try (Lifespan lifespan = new Lifespan(logFile)) {
        FlushableChannel channel = logFile.getWriter();
        TransactionLogWriter writer = new TransactionLogWriter(new LogEntryWriter(channel));
        long lastCommittedTransactionId = readOnlyTransactionIdStore.getLastCommittedTransactionId();
        // An empty command list: the transaction exists only to carry the -1 raft index header.
        PhysicalTransactionRepresentation tx = new PhysicalTransactionRepresentation(Collections.emptyList());
        byte[] txHeaderBytes = LogIndexTxHeaderEncoding.encodeLogIndexAsTxHeader(-1);
        tx.setHeader(txHeaderBytes, -1, -1, -1, lastCommittedTransactionId, -1, -1);
        // The dummy transaction is appended directly after the last committed one.
        dummyTransactionId = lastCommittedTransactionId + 1;
        writer.append(tx, dummyTransactionId);
        channel.prepareForFlush().flush();
    }
    // Point the metadata store's LAST_TRANSACTION_ID at the freshly appended dummy transaction.
    File neoStoreFile = new File(storeDir, MetaDataStore.DEFAULT_NAME);
    MetaDataStore.setRecord(pageCache, neoStoreFile, LAST_TRANSACTION_ID, dummyTransactionId);
}
Example usage of org.neo4j.kernel.lifecycle.Lifespan in the neo4j project,
taken from the method dumpState of the class DumpClusterState.
/**
 * Prints the initial content of the named durable state file, if it exists.
 * NOTE(review): the rotation size is read from the replicated-lock-token setting
 * for every state type dumped here — presumably only the threshold matters; confirm.
 */
private void dumpState(String name, StateMarshal<?> marshal) {
    int rotationSize = Config.defaults().get(CausalClusteringSettings.replicated_lock_token_state_size);
    DurableStateStorage<?> storage = new DurableStateStorage<>(
            fs, clusterStateDirectory, name, marshal, rotationSize, NullLogProvider.getInstance());
    if (!storage.exists()) {
        return;
    }
    try (Lifespan ignored = new Lifespan(storage)) {
        out.println(name + ": " + storage.getInitialState());
    }
}
Example usage of org.neo4j.kernel.lifecycle.Lifespan in the neo4j project,
taken from the method incrementalWithContext of the class BackupService.
/**
* Performs an incremental backup based off the given context. This means
* receiving and applying selectively (i.e. irrespective of the actual state
* of the target db) a set of transactions starting at the desired txId and
* spanning up to the latest of the master
*
* @param targetDb The database that contains a previous full copy
* @param context The context, containing transaction id to start streaming transaction from
* @return A backup context, ready to perform
*/
private BackupOutcome incrementalWithContext(String sourceHostNameOrIp, int sourcePort, GraphDatabaseAPI targetDb, long timeout, RequestContext context) throws IncrementalBackupNotPossibleException {
    DependencyResolver resolver = targetDb.getDependencyResolver();
    ProgressTxHandler handler = new ProgressTxHandler();
    TransactionCommittingResponseUnpacker unpacker =
            new TransactionCommittingResponseUnpacker(resolver, DEFAULT_BATCH_SIZE, 0);
    Monitors monitors = resolver.resolveDependency(Monitors.class);
    LogProvider logProvider = resolver.resolveDependency(LogService.class).getInternalLogProvider();
    BackupClient client = new BackupClient(sourceHostNameOrIp, sourcePort, null, logProvider,
            targetDb.storeId(), timeout, unpacker,
            monitors.newMonitor(ByteCounterMonitor.class, BackupClient.class),
            monitors.newMonitor(RequestMonitor.class, BackupClient.class),
            new VersionAwareLogEntryReader<>());
    // Lifespan starts unpacker and client, and shuts both down when this scope exits.
    try (Lifespan lifespan = new Lifespan(unpacker, client)) {
        try (Response<Void> response = client.incrementalBackup(context)) {
            unpacker.unpackResponse(response, handler);
        }
    } catch (MismatchingStoreIdException e) {
        // Target store was seeded from a different source database.
        throw new RuntimeException(DIFFERENT_STORE, e);
    } catch (RuntimeException | IOException e) {
        // fix: `instanceof` is false for null, so the explicit null checks were redundant.
        if (e.getCause() instanceof MissingLogDataException) {
            // The master no longer has the logs needed to serve this starting txId.
            throw new IncrementalBackupNotPossibleException(TOO_OLD_BACKUP, e.getCause());
        }
        if (e.getCause() instanceof ConnectException) {
            throw new RuntimeException(e.getMessage(), e.getCause());
        }
        throw new RuntimeException("Failed to perform incremental backup.", e);
    } catch (Throwable throwable) {
        throw new RuntimeException("Unexpected error", throwable);
    }
    return new BackupOutcome(handler.getLastSeenTransactionId(), true);
}
Example usage of org.neo4j.kernel.lifecycle.Lifespan in the neo4j project,
taken from the method getPullIndex of the class RemoteStore.
/**
* Later stages of the startup process require at least one transaction to
* figure out the mapping between the transaction log and the consensus log.
*
* If there are no transaction logs then we can pull from and including
* the index which the metadata store points to. This would be the case
* for example with a backup taken during an idle period of the system.
*
* However, if there are transaction logs then we want to find out where
* they end and pull from there, excluding the last one so that we do not
* get duplicate entries.
*/
/**
 * Computes the transaction id to start pulling from: the metadata store's last committed
 * id when there are no transaction logs, otherwise one past the last id found in the logs.
 *
 * @param storeDir directory containing the store and its transaction logs
 * @return the first transaction id to request from the source
 * @throws IOException if reading the metadata store or the transaction logs fails
 */
private long getPullIndex(File storeDir) throws IOException {
    /* this is the metadata store */
    ReadOnlyTransactionIdStore txIdStore = new ReadOnlyTransactionIdStore(pageCache, storeDir);
    /* Clean as in clean shutdown. Without transaction logs this should be the truth,
     * but otherwise it can be used as a starting point for scanning the logs. */
    long lastCleanTxId = txIdStore.getLastCommittedTransactionId();
    /* these are the transaction logs */
    ReadOnlyTransactionStore txStore = new ReadOnlyTransactionStore(pageCache, fs, storeDir, new Monitors());
    long lastTxId = BASE_TX_ID;
    try (Lifespan ignored = new Lifespan(txStore)) {
        TransactionCursor cursor;
        try {
            cursor = txStore.getTransactions(lastCleanTxId);
        } catch (NoSuchTransactionException e) {
            log.info("No transaction logs found. Will use metadata store as base for pull request.");
            return lastCleanTxId;
        }
        // fix: the cursor is an IOCursor (AutoCloseable) and was previously never closed,
        // leaking the underlying log channel; close it via try-with-resources.
        try (TransactionCursor transactions = cursor) {
            while (transactions.next()) {
                CommittedTransactionRepresentation tx = transactions.get();
                lastTxId = tx.getCommitEntry().getTxId();
            }
        }
        if (lastTxId < lastCleanTxId) {
            throw new IllegalStateException("Metadata index was higher than transaction log index.");
        }
        // we don't want to pull a transaction we already have in the log, hence +1
        return lastTxId + 1;
    }
}
Aggregations