Use of org.neo4j.kernel.impl.transaction.tracing.LogCheckPointEvent in project neo4j by neo4j.
From the class DefaultCheckPointerTracerTest, method triggerEvent:
private void triggerEvent(DefaultCheckPointerTracer tracer, int eventDuration) {
    // Start at a random offset so the measured duration cannot accidentally depend on time zero
    clock.forward(ThreadLocalRandom.current().nextLong(200), TimeUnit.MILLISECONDS);
    // try-with-resources guarantees the event is closed, which is what reports its duration
    try (LogCheckPointEvent event = tracer.beginCheckPoint()) {
        clock.forward(eventDuration, TimeUnit.MILLISECONDS);
    }
    // The tracer updates its counters via a scheduled job; run it so the test can observe them
    jobScheduler.runJob();
}
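For context, a helper like this is typically called repeatedly by a test that then asserts on the tracer's counters. The sketch below shows one plausible shape for such a test, paired with the triggerEvent helper above. It is a hedged sketch, not confirmed API: the FakeClock and OnDemandJobScheduler fixture, the tracer constructor, and the numberOfCheckPoints()/checkPointAccumulatedTotalTimeMillis() accessors are all assumptions about the surrounding test class.

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.Test;
import org.neo4j.time.Clocks;
import org.neo4j.time.FakeClock;

class DefaultCheckPointerTracerUsageSketch {
    // Assumed fixture: a fake clock that triggerEvent() can forward deterministically,
    // and an on-demand scheduler so the counter-update job runs only when asked.
    private final FakeClock clock = Clocks.fakeClock();
    private final OnDemandJobScheduler jobScheduler = new OnDemandJobScheduler(); // assumed test helper

    @Test
    void shouldAccumulateCheckPointCountAndDuration() {
        DefaultCheckPointerTracer tracer = new DefaultCheckPointerTracer(clock, jobScheduler); // assumed constructor

        triggerEvent(tracer, 20); // the helper shown above
        assertEquals(1, tracer.numberOfCheckPoints());                   // assumed accessor
        assertEquals(20, tracer.checkPointAccumulatedTotalTimeMillis()); // assumed accessor

        triggerEvent(tracer, 30);
        assertEquals(2, tracer.numberOfCheckPoints());
        assertEquals(50, tracer.checkPointAccumulatedTotalTimeMillis());
    }
}

The random clock offset before beginCheckPoint() does not affect these assertions, because only time forwarded while the event is open counts toward the accumulated duration.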
Use of org.neo4j.kernel.impl.transaction.tracing.LogCheckPointEvent in project neo4j by neo4j.
From the class CheckPointerImpl, method doCheckPoint:
private long doCheckPoint(TriggerInfo triggerInfo) throws IOException {
    var databaseTracer = tracers.getDatabaseTracer();
    var pageCacheTracer = tracers.getPageCacheTracer();
    var versionContext = versionContextSupplier.createVersionContext();
    try (var cursorContext = new CursorContext(pageCacheTracer.createPageCursorTracer(CHECKPOINT_TAG), versionContext);
            LogCheckPointEvent event = databaseTracer.beginCheckPoint()) {
        long[] lastClosedTransaction = metadataProvider.getLastClosedTransaction();
        long lastClosedTransactionId = lastClosedTransaction[0];
        versionContext.initWrite(lastClosedTransactionId);
        LogPosition logPosition = new LogPosition(lastClosedTransaction[1], lastClosedTransaction[2]);
        String checkpointReason = triggerInfo.describe(lastClosedTransactionId);
        /*
         * Check kernel health before waiting for transactions to be closed, to avoid
         * getting into a scenario where we would await a condition that might never happen.
         */
        databaseHealth.assertHealthy(IOException.class);
        /*
         * First we flush the store. If we fail now or during the flush, recovery will find the
         * earlier check point and replay all the log entries from there. Everything will be ok.
         */
        msgLog.info(checkpointReason + " checkpoint started...");
        Stopwatch startTime = Stopwatch.start();
        forceOperation.flushAndForce(cursorContext);
        /*
         * Check kernel health before writing the next check point. In case of a panic this
         * check point will be aborted, which is the safest alternative so that the next
         * recovery has a chance to repair the damage.
         */
        databaseHealth.assertHealthy(IOException.class);
        checkpointAppender.checkPoint(event, logPosition, clock.instant(), checkpointReason);
        threshold.checkPointHappened(lastClosedTransactionId);
        long durationMillis = startTime.elapsed(MILLISECONDS);
        msgLog.info(checkpointReason + " checkpoint completed in " + duration(durationMillis));
        event.checkpointCompleted(durationMillis);
        /*
         * Prune up to the version pointed to by the latest check point,
         * since it might be an earlier version than the current log version.
         */
        logPruning.pruneLogs(logPosition.getLogVersion());
        lastCheckPointedTx = lastClosedTransactionId;
        return lastClosedTransactionId;
    } catch (Throwable t) {
        // Only log the failure here: a check point can be triggered from various points of
        // execution, e.g. a background thread or shutdown, where the caller needs more
        // control over failure handling.
        msgLog.error("Checkpoint failed", t);
        throw t;
    }
}
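The structure above is the general tracing-event pattern that LogCheckPointEvent participates in: begin an event, do the work inside try-with-resources, report a measurement on success, and rely on close() to finish the event even when the work throws. A minimal standalone sketch of that pattern follows; every name in it is illustrative, not Neo4j API.

import java.util.concurrent.atomic.AtomicLong;

// Illustrative sketch of the AutoCloseable tracing-event pattern;
// all names here are hypothetical, not Neo4j API.
final class TracingEventSketch {
    interface TraceEvent extends AutoCloseable {
        @Override
        void close(); // no checked exception, so it composes cleanly in try-with-resources
    }

    static final AtomicLong accumulatedNanos = new AtomicLong();

    static TraceEvent beginEvent() {
        long start = System.nanoTime();
        // close() runs whether the traced work succeeds or throws,
        // so the duration is recorded in both cases
        return () -> accumulatedNanos.addAndGet(System.nanoTime() - start);
    }

    public static void main(String[] args) throws InterruptedException {
        try (TraceEvent event = beginEvent()) {
            Thread.sleep(25); // stands in for the traced work, e.g. flush plus checkpoint append
        }
        System.out.println("accumulated ns: " + accumulatedNanos.get());
    }
}

Because close() runs on the failure path too, a panic or I/O error during flushAndForce still ends the event, which is what lets a tracer keep consistent counters across failed checkpoints.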