Example use of org.neo4j.kernel.impl.transaction.log.LogPosition in the neo4j/neo4j project: class TransactionLogCatchUpWriter, method close.
/**
 * Completes the catch-up session: when this writer was created as part of a store copy,
 * seeds the freshly created transaction log with a checkpoint at the start of the log,
 * then releases the underlying resources and records the id of the last transaction
 * received (if any) in the neostore metadata file.
 *
 * @throws IOException if writing the checkpoint or updating the metadata store fails
 */
@Override
public synchronized void close() throws IOException {
if (asPartOfStoreCopy) {
/* Write a checkpoint that points to the beginning of the log file, meaning that
all the streamed transactions will be applied as part of recovery. */
long logVersion = logFiles.getHighestLogVersion();
writer.checkPoint(new LogPosition(logVersion, LOG_HEADER_SIZE));
// * comment copied from old StoreCopyClient *
// Since we just created a new log and put a checkpoint into it at offset LOG_HEADER_SIZE,
// we must also update the last-closed-transaction offset to that same value. Otherwise the
// next checkpoint that uses the last transaction offset would be created for a non-existent
// offset that is, in most cases, bigger than the new log size. Recovery would treat that as
// the last checkpoint and would not try to recover the store until the new last-closed
// transaction offset surpasses the old one; until that happens, recovery could not restore
// the store.
File neoStore = new File(storeDir, MetaDataStore.DEFAULT_NAME);
MetaDataStore.setRecord(pageCache, neoStore, MetaDataStore.Position.LAST_CLOSED_TRANSACTION_LOG_BYTE_OFFSET, LOG_HEADER_SIZE);
}
lifespan.close();
// Persist the id of the last transaction we received; -1 means nothing was streamed.
if (lastTxId != -1) {
File neoStoreFile = new File(storeDir, MetaDataStore.DEFAULT_NAME);
MetaDataStore.setRecord(pageCache, neoStoreFile, LAST_TRANSACTION_ID, lastTxId);
}
}
Example use of org.neo4j.kernel.impl.transaction.log.LogPosition in the neo4j/neo4j project: class RaftLogMetadataCacheTest, method shouldRemoveUpTo.
@Test
public void shouldRemoveUpTo() throws Exception {
    // given: a fully populated cache of 100 entries, entry i cached with term i
    final int cacheSize = 100;
    RaftLogMetadataCache cache = new RaftLogMetadataCache(cacheSize);
    for (int entry = 0; entry < cacheSize; entry++) {
        cache.cacheMetadata(entry, entry, new LogPosition(entry, entry));
    }

    // when: everything up to and including index 30 is removed
    final int upTo = 30;
    cache.removeUpTo(upTo);

    // then: the removed prefix yields no metadata...
    long index = 0;
    while (index <= upTo) {
        assertNull(cache.getMetadata(index));
        index++;
    }
    // ...and every surviving entry still carries its original term
    while (index < cacheSize) {
        RaftLogMetadataCache.RaftLogEntryMetadata metadata = cache.getMetadata(index);
        assertNotNull(metadata);
        assertEquals(index, metadata.getEntryTerm());
        index++;
    }
}
Example use of org.neo4j.kernel.impl.transaction.log.LogPosition in the neo4j/neo4j project: class RaftLogMetadataCacheTest, method shouldAcceptAndReturnIndexesInRangeJustDeleted.
@Test
public void shouldAcceptAndReturnIndexesInRangeJustDeleted() throws Exception {
    // given: a fully populated cache of 100 entries, entry i cached with term i
    final int cacheSize = 100;
    RaftLogMetadataCache cache = new RaftLogMetadataCache(cacheSize);
    for (int entry = 0; entry < cacheSize; entry++) {
        cache.cacheMetadata(entry, entry, new LogPosition(entry, entry));
    }

    // when: the tail from index 60 onwards is truncated...
    final int upFrom = 60;
    cache.removeUpwardsFrom(upFrom);
    // ...and a single new entry is inserted inside the truncated range
    final int insertedIndex = 70;
    final long insertedTerm = 150;
    cache.cacheMetadata(insertedIndex, insertedTerm, new LogPosition(insertedIndex, insertedIndex));

    // then: the insertion must not resurrect other entries in the deleted range
    int index = upFrom;
    while (index < insertedIndex) {
        assertNull(cache.getMetadata(index));
        index++;
    }
    // index now equals insertedIndex: the freshly inserted entry must be visible
    assertEquals(insertedTerm, cache.getMetadata(index).getEntryTerm());
    index++;
    // the remainder of the deleted range stays empty
    while (index < cacheSize) {
        assertNull(cache.getMetadata(index));
        index++;
    }
}
Example use of org.neo4j.kernel.impl.transaction.log.LogPosition in the neo4j/neo4j project: class RaftLogMetadataCacheTest, method shouldRemoveUpwardsFrom.
@Test
public void shouldRemoveUpwardsFrom() throws Exception {
    // given: a fully populated cache of 100 entries, entry i cached with term i
    final int cacheSize = 100;
    RaftLogMetadataCache cache = new RaftLogMetadataCache(cacheSize);
    for (int entry = 0; entry < cacheSize; entry++) {
        cache.cacheMetadata(entry, entry, new LogPosition(entry, entry));
    }

    // when: the tail starting at index 60 is removed
    final int upFrom = 60;
    cache.removeUpwardsFrom(upFrom);

    // then: entries below the cut-off survive with their original terms...
    long index = 0;
    while (index < upFrom) {
        RaftLogMetadataCache.RaftLogEntryMetadata metadata = cache.getMetadata(index);
        assertNotNull(metadata);
        assertEquals(index, metadata.getEntryTerm());
        index++;
    }
    // ...while everything at or above the cut-off is gone
    while (index < cacheSize) {
        assertNull(cache.getMetadata(index));
        index++;
    }
}
Example use of org.neo4j.kernel.impl.transaction.log.LogPosition in the neo4j/neo4j project: class CheckPointerImpl, method doCheckPoint.
/**
 * Performs one check point: flushes the store to durable storage, then appends a check
 * point entry pointing at the position of the last closed transaction to the transaction
 * log, and finally prunes log files made obsolete by the new check point. Database health
 * is asserted both before the flush and before the append so that a panic aborts the
 * check point at a safe place.
 *
 * @param triggerInfo describes what triggered this check point; used to prefix log messages
 * @param logCheckPointEvent tracing event under which the check point append is recorded
 * @return the id of the last closed transaction covered by this check point
 * @throws IOException if the database is unhealthy or flushing/appending fails
 */
private long doCheckPoint(TriggerInfo triggerInfo, LogCheckPointEvent logCheckPointEvent) throws IOException {
try {
// lastClosedTransaction = { txId, logVersion, byteOffset } of the last closed transaction
long[] lastClosedTransaction = transactionIdStore.getLastClosedTransaction();
long lastClosedTransactionId = lastClosedTransaction[0];
LogPosition logPosition = new LogPosition(lastClosedTransaction[1], lastClosedTransaction[2]);
String prefix = triggerInfo.describe(lastClosedTransactionId);
msgLog.info(prefix + " Starting check pointing...");
/*
* Check kernel health before going into waiting for transactions to be closed, to avoid
* getting into a scenario where we would await a condition that would potentially never
* happen.
*/
databaseHealth.assertHealthy(IOException.class);
/*
* First we flush the store. If we fail now or during the flush, on recovery we'll find the
* earlier check point and replay from there all the log entries. Everything will be ok.
*/
msgLog.info(prefix + " Starting store flush...");
storageEngine.flushAndForce(ioLimiter);
msgLog.info(prefix + " Store flush completed");
/*
* Check kernel health before going to write the next check point. In case of a panic this check point
* will be aborted, which is the safest alternative so that the next recovery will have a chance to
* repair the damages.
*/
databaseHealth.assertHealthy(IOException.class);
msgLog.info(prefix + " Starting appending check point entry into the tx log...");
appender.checkPoint(logPosition, logCheckPointEvent);
threshold.checkPointHappened(lastClosedTransactionId);
msgLog.info(prefix + " Appending check point entry into the tx log completed");
msgLog.info(prefix + " Check pointing completed");
/*
* Prune up to the version pointed from the latest check point,
* since it might be an earlier version than the current log version.
*/
logPruning.pruneLogs(logPosition.getLogVersion());
lastCheckPointedTx = lastClosedTransactionId;
return lastClosedTransactionId;
} catch (Throwable t) {
// Why only log failure here? It's because check point can potentially be made from various
// points of execution e.g. background thread triggering check point if needed and during
// shutdown where it's better to have more control over failure handling.
msgLog.error("Error performing check point", t);
throw t;
}
}
Aggregations