use of org.neo4j.kernel.impl.transaction.log.ReadOnlyTransactionIdStore in project neo4j by neo4j.
the class TransactionLogRecoveryIT method writePartialTx.
private void writePartialTx(File storeDir) throws IOException {
    PhysicalLogFiles logFiles = new PhysicalLogFiles(storeDir, fs);
    ReadOnlyLogVersionRepository logVersionRepository =
            new ReadOnlyLogVersionRepository(pageCache.getPageCache(fs), storeDir);
    ReadOnlyTransactionIdStore txIdStore =
            new ReadOnlyTransactionIdStore(pageCache.getPageCache(fs), storeDir);
    PhysicalLogFile logFile = new PhysicalLogFile(fs, logFiles, Long.MAX_VALUE, /*don't rotate*/
            txIdStore::getLastCommittedTransactionId, logVersionRepository,
            new Monitors().newMonitor(PhysicalLogFile.Monitor.class), new LogHeaderCache(10));
    try (Lifespan ignored = new Lifespan(logFile)) {
        LogEntryWriter writer = new LogEntryWriter(logFile.getWriter());
        // Write only a start entry, with no commands and no commit entry,
        // leaving a deliberately partial transaction at the end of the log.
        writer.writeStartEntry(0, 0, 0x123456789ABCDEFL,
                txIdStore.getLastCommittedTransactionId() + 1, new byte[]{0});
    }
}
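A minimal sketch of how such a helper might be asserted against, assuming the same fs, pageCache, and storeDir fixtures as the snippet (the test name is hypothetical): since only a start entry is written and the metadata store is never touched, the recorded last committed transaction id must not move.

@Test
public void partialTxShouldNotAdvanceLastCommittedTxId() throws IOException {
    long before = new ReadOnlyTransactionIdStore(pageCache.getPageCache(fs), storeDir)
            .getLastCommittedTransactionId();
    writePartialTx(storeDir);
    long after = new ReadOnlyTransactionIdStore(pageCache.getPageCache(fs), storeDir)
            .getLastCommittedTransactionId();
    // Only a start entry was appended, never a commit entry, and the
    // metadata store was not updated, so the id is unchanged.
    assertEquals(before, after);
}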
use of org.neo4j.kernel.impl.transaction.log.ReadOnlyTransactionIdStore in project neo4j by neo4j.
the class StoreCopyClient method writeTransactionsToActiveLogFile.
private void writeTransactionsToActiveLogFile(File tempStoreDir, Response<?> response) throws Exception {
    LifeSupport life = new LifeSupport();
    try {
        // Start the log and appender
        PhysicalLogFiles logFiles = new PhysicalLogFiles(tempStoreDir, fs);
        LogHeaderCache logHeaderCache = new LogHeaderCache(10);
        ReadOnlyLogVersionRepository logVersionRepository = new ReadOnlyLogVersionRepository(pageCache, tempStoreDir);
        ReadOnlyTransactionIdStore readOnlyTransactionIdStore = new ReadOnlyTransactionIdStore(pageCache, tempStoreDir);
        LogFile logFile = life.add(new PhysicalLogFile(fs, logFiles, Long.MAX_VALUE, /*don't rotate*/
                readOnlyTransactionIdStore::getLastCommittedTransactionId, logVersionRepository,
                new Monitors().newMonitor(PhysicalLogFile.Monitor.class), logHeaderCache));
        life.start();

        // Just write all transactions to the active log version. Remember that this is after a store copy
        // where there are no logs, and the transaction stream we're about to write will probably contain
        // transactions that go back some time, before the last committed transaction id. So we cannot
        // use a TransactionAppender, since it has checks for which transactions one can append.
        FlushableChannel channel = logFile.getWriter();
        final TransactionLogWriter writer = new TransactionLogWriter(new LogEntryWriter(channel));
        final AtomicLong firstTxId = new AtomicLong(BASE_TX_ID);

        response.accept(new Response.Handler() {
            @Override
            public void obligation(long txId) throws IOException {
                throw new UnsupportedOperationException("Shouldn't be called");
            }

            @Override
            public Visitor<CommittedTransactionRepresentation, Exception> transactions() {
                return transaction -> {
                    long txId = transaction.getCommitEntry().getTxId();
                    if (firstTxId.compareAndSet(BASE_TX_ID, txId)) {
                        monitor.startReceivingTransactions(txId);
                    }
                    writer.append(transaction.getTransactionRepresentation(), txId);
                    return false;
                };
            }
        });

        long endTxId = firstTxId.get();
        if (endTxId != BASE_TX_ID) {
            monitor.finishReceivingTransactions(endTxId);
        }

        long currentLogVersion = logVersionRepository.getCurrentLogVersion();
        writer.checkPoint(new LogPosition(currentLogVersion, LOG_HEADER_SIZE));

        // And since we write this manually we need to set the correct transaction id in the
        // header of the log that we just wrote.
        File currentLogFile = logFiles.getLogFileForVersion(currentLogVersion);
        writeLogHeader(fs, currentLogFile, currentLogVersion, max(BASE_TX_ID, endTxId - 1));

        if (!forensics) {
            // Since we just created a new log and wrote a checkpoint into it at an offset equal to
            // LOG_HEADER_SIZE, we need to update the last closed transaction offset to this newly
            // defined offset. Otherwise the next checkpoint, which uses the last transaction offset,
            // would be created for a non-existing offset that is in most cases bigger than the new
            // log size. Recovery would treat that as the last checkpoint and would not try to recover
            // the store until the new last closed transaction offset overtakes the old one; until
            // that happens it would be impossible for the recovery process to restore the store.
            File neoStore = new File(tempStoreDir, MetaDataStore.DEFAULT_NAME);
            MetaDataStore.setRecord(pageCache, neoStore,
                    MetaDataStore.Position.LAST_CLOSED_TRANSACTION_LOG_BYTE_OFFSET, LOG_HEADER_SIZE);
        }
    } finally {
        life.shutdown();
    }
}
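One detail worth calling out in the visitor above: in this Visitor contract, returning false means "keep visiting", while true would stop the stream. A small sketch under that reading, counting transactions instead of appending them:

AtomicLong count = new AtomicLong();
Visitor<CommittedTransactionRepresentation, Exception> countingVisitor = transaction -> {
    count.incrementAndGet();
    return false; // false keeps the stream flowing; true would stop it early
};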
use of org.neo4j.kernel.impl.transaction.log.ReadOnlyTransactionIdStore in project neo4j by neo4j.
the class CoreBootstrapperTest method shouldSetAllCoreState.
@Test
public void shouldSetAllCoreState() throws Exception {
    // given
    int nodeCount = 100;
    FileSystemAbstraction fileSystem = fileSystemRule.get();
    File classicNeo4jStore = RestoreClusterUtils.createClassicNeo4jStore(
            testDirectory.directory(), fileSystem, nodeCount, Standard.LATEST_NAME);
    PageCache pageCache = pageCacheRule.getPageCache(fileSystem);
    CoreBootstrapper bootstrapper = new CoreBootstrapper(
            classicNeo4jStore, pageCache, fileSystem, Config.defaults(), NullLogProvider.getInstance());

    // when
    Set<MemberId> membership = asSet(randomMember(), randomMember(), randomMember());
    CoreSnapshot snapshot = bootstrapper.bootstrap(membership);

    // then
    assertEquals(nodeCount, ((IdAllocationState) snapshot.get(CoreStateType.ID_ALLOCATION)).firstUnallocated(IdType.NODE));

    /* Bootstrapped state is created in RAFT land at index -1 and term -1. */
    assertEquals(0, snapshot.prevIndex());
    assertEquals(0, snapshot.prevTerm());

    /* Lock is initially not taken. */
    assertEquals(new ReplicatedLockTokenState(), snapshot.get(CoreStateType.LOCK_TOKEN));

    /* Raft has the bootstrapped set of members initially. */
    assertEquals(membership, ((RaftCoreState) snapshot.get(CoreStateType.RAFT_CORE_STATE)).committed().members());

    /* The session state is initially empty. */
    assertEquals(new GlobalSessionTrackerState(), snapshot.get(CoreStateType.SESSION_TRACKER));

    LastCommittedIndexFinder lastCommittedIndexFinder = new LastCommittedIndexFinder(
            new ReadOnlyTransactionIdStore(pageCache, classicNeo4jStore),
            new ReadOnlyTransactionStore(pageCache, fileSystem, classicNeo4jStore, new Monitors()),
            NullLogProvider.getInstance());
    long lastCommittedIndex = lastCommittedIndexFinder.getLastCommittedIndex();

    /* The last committed transaction is the dummy appended at bootstrap,
     * whose header encodes raft log index -1 (see
     * appendNullTransactionLogEntryToSetRaftIndexToMinusOne below). */
    assertEquals(-1, lastCommittedIndex);
}
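randomMember() is not shown on this page; presumably it is a one-line helper along these lines (an assumption based on MemberId wrapping a UUID):

// Assumed helper, not part of the snippet: a member id backed by a fresh random UUID.
private static MemberId randomMember() {
    return new MemberId(UUID.randomUUID());
}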
use of org.neo4j.kernel.impl.transaction.log.ReadOnlyTransactionIdStore in project neo4j by neo4j.
the class CoreBootstrapper method appendNullTransactionLogEntryToSetRaftIndexToMinusOne.
private void appendNullTransactionLogEntryToSetRaftIndexToMinusOne() throws IOException {
    PhysicalLogFiles logFiles = new PhysicalLogFiles(storeDir, fs);
    ReadOnlyLogVersionRepository logVersionRepository = new ReadOnlyLogVersionRepository(pageCache, storeDir);
    ReadOnlyTransactionIdStore readOnlyTransactionIdStore = new ReadOnlyTransactionIdStore(pageCache, storeDir);
    PhysicalLogFile logFile = new PhysicalLogFile(fs, logFiles, Long.MAX_VALUE, /*don't rotate*/
            () -> readOnlyTransactionIdStore.getLastClosedTransactionId() - 1,
            logVersionRepository, new Monitors().newMonitor(PhysicalLogFile.Monitor.class),
            new LogHeaderCache(10));

    long dummyTransactionId;
    try (Lifespan lifespan = new Lifespan(logFile)) {
        FlushableChannel channel = logFile.getWriter();
        TransactionLogWriter writer = new TransactionLogWriter(new LogEntryWriter(channel));

        long lastCommittedTransactionId = readOnlyTransactionIdStore.getLastCommittedTransactionId();
        // An empty transaction whose header carries an encoded raft log index of -1.
        PhysicalTransactionRepresentation tx = new PhysicalTransactionRepresentation(Collections.emptyList());
        byte[] txHeaderBytes = LogIndexTxHeaderEncoding.encodeLogIndexAsTxHeader(-1);
        tx.setHeader(txHeaderBytes, -1, -1, -1, lastCommittedTransactionId, -1, -1);

        dummyTransactionId = lastCommittedTransactionId + 1;
        writer.append(tx, dummyTransactionId);
        channel.prepareForFlush().flush();
    }

    File neoStoreFile = new File(storeDir, MetaDataStore.DEFAULT_NAME);
    MetaDataStore.setRecord(pageCache, neoStoreFile, LAST_TRANSACTION_ID, dummyTransactionId);
}
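The header round-trip can be sanity-checked with the encoder's counterpart; decodeLogIndexFromTxHeader is assumed here to be the matching decoder in LogIndexTxHeaderEncoding:

// Sketch: the -1 raft index encoded above should decode back unchanged.
byte[] header = LogIndexTxHeaderEncoding.encodeLogIndexAsTxHeader(-1);
long raftLogIndex = LogIndexTxHeaderEncoding.decodeLogIndexFromTxHeader(header); // assumed counterpart
assert raftLogIndex == -1;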
use of org.neo4j.kernel.impl.transaction.log.ReadOnlyTransactionIdStore in project neo4j by neo4j.
the class RemoteStore method getPullIndex.
/**
 * Later stages of the startup process require at least one transaction to
 * figure out the mapping between the transaction log and the consensus log.
 *
 * If there are no transaction logs then we can pull from, and including,
 * the index which the metadata store points to. This would be the case,
 * for example, with a backup taken during an idle period of the system.
 *
 * However, if there are transaction logs then we want to find out where
 * they end and pull from there, excluding the last transaction we already
 * have so that we do not get duplicate entries.
 */
private long getPullIndex(File storeDir) throws IOException {
    /* this is the metadata store */
    ReadOnlyTransactionIdStore txIdStore = new ReadOnlyTransactionIdStore(pageCache, storeDir);

    /* Clean as in clean shutdown. Without transaction logs this should be the truth,
     * but otherwise it can be used as a starting point for scanning the logs. */
    long lastCleanTxId = txIdStore.getLastCommittedTransactionId();

    /* these are the transaction logs */
    ReadOnlyTransactionStore txStore = new ReadOnlyTransactionStore(pageCache, fs, storeDir, new Monitors());

    long lastTxId = BASE_TX_ID;
    try (Lifespan ignored = new Lifespan(txStore)) {
        TransactionCursor cursor;
        try {
            cursor = txStore.getTransactions(lastCleanTxId);
        } catch (NoSuchTransactionException e) {
            log.info("No transaction logs found. Will use metadata store as base for pull request.");
            return lastCleanTxId;
        }

        while (cursor.next()) {
            CommittedTransactionRepresentation tx = cursor.get();
            lastTxId = tx.getCommitEntry().getTxId();
        }

        if (lastTxId < lastCleanTxId) {
            throw new IllegalStateException("Metadata index was higher than transaction log index.");
        }

        // we don't want to pull a transaction we already have in the log, hence +1
        return lastTxId + 1;
    }
}
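To make the two branches concrete, a worked example with hypothetical numbers:

// Hypothetical values: the metadata store says lastCleanTxId == 10.
//
// No transaction logs on disk  -> NoSuchTransactionException is thrown and we
//                                 pull from 10, re-fetching tx 10 itself (the
//                                 pull index is inclusive).
// Logs present, ending at 15   -> lastTxId == 15, so we pull from 16 and the
//                                 transactions already on disk are not re-sent.
long pullIndex = getPullIndex(storeDir); // 10 or 16 in the two cases above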