Use of org.neo4j.kernel.impl.transaction.log.PhysicalLogFiles in project neo4j by neo4j.
From the class ReadReplicaReplicationIT, method shouldBeAbleToPullTxAfterHavingDownloadedANewStoreAfterPruning:
@Test
public void shouldBeAbleToPullTxAfterHavingDownloadedANewStoreAfterPruning() throws Exception {
    // given
    Map<String, String> params = stringMap(
            GraphDatabaseSettings.keep_logical_logs.name(), "keep_none",
            GraphDatabaseSettings.logical_log_rotation_threshold.name(), "1M",
            GraphDatabaseSettings.check_point_interval_time.name(), "100ms");
    Cluster cluster = clusterRule.withSharedCoreParams(params).startCluster();
    cluster.coreTx((db, tx) -> {
        createData(db, 10);
        tx.success();
    });
    awaitEx(() -> readReplicasUpToDateAsTheLeader(cluster.awaitLeader(), cluster.readReplicas()), 1, TimeUnit.MINUTES);
    ReadReplica readReplica = cluster.getReadReplicaById(0);
    long highestReadReplicaLogVersion = physicalLogFiles(readReplica).getHighestLogVersion();
    readReplica.shutdown();
    CoreClusterMember core;
    do {
        // keep writing on the cores until pruning has removed every log version
        // the read replica has seen, so the replica must download a new store
        core = cluster.coreTx((db, tx) -> {
            createData(db, 1_000);
            tx.success();
        });
    } while (physicalLogFiles(core).getLowestLogVersion() <= highestReadReplicaLogVersion);
    readReplica.start();
    awaitEx(() -> readReplicasUpToDateAsTheLeader(cluster.awaitLeader(), cluster.readReplicas()), 1, TimeUnit.MINUTES);
    // when
    cluster.coreTx((db, tx) -> {
        createData(db, 10);
        tx.success();
    });
    // then
    assertEventually("The read replica has the same data as the core members",
            () -> DbRepresentation.of(readReplica.database()),
            equalTo(DbRepresentation.of(cluster.awaitLeader().database())),
            10, TimeUnit.SECONDS);
}
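The test calls a physicalLogFiles(...) helper that is not shown above. A plausible sketch, not the project's actual code: ClusterMember#database() appears in the assertion above, and resolving PhysicalLogFiles through the database's dependency resolver is an assumption.

// hypothetical sketch of the helper the test relies on
private PhysicalLogFiles physicalLogFiles(ClusterMember member) {
    // assumption: the data source registers PhysicalLogFiles as a resolvable dependency
    return member.database().getDependencyResolver()
            .resolveDependency(PhysicalLogFiles.class);
}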
Use of org.neo4j.kernel.impl.transaction.log.PhysicalLogFiles in project neo4j by neo4j.
From the class TransactionLogRecoveryIT, method writePartialTx:
private void writePartialTx(File storeDir) throws IOException {
    PhysicalLogFiles logFiles = new PhysicalLogFiles(storeDir, fs);
    ReadOnlyLogVersionRepository logVersionRepository = new ReadOnlyLogVersionRepository(pageCache.getPageCache(fs), storeDir);
    ReadOnlyTransactionIdStore txIdStore = new ReadOnlyTransactionIdStore(pageCache.getPageCache(fs), storeDir);
    PhysicalLogFile logFile = new PhysicalLogFile(fs, logFiles, Long.MAX_VALUE /* don't rotate */,
            txIdStore::getLastCommittedTransactionId, logVersionRepository,
            new Monitors().newMonitor(PhysicalLogFile.Monitor.class), new LogHeaderCache(10));
    try (Lifespan ignored = new Lifespan(logFile)) {
        LogEntryWriter writer = new LogEntryWriter(logFile.getWriter());
        // write a start entry with no matching commit entry, leaving a
        // deliberately incomplete transaction at the end of the log
        writer.writeStartEntry(0, 0, 0x123456789ABCDEFL, txIdStore.getLastCommittedTransactionId() + 1, new byte[] { 0 });
    }
}
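Once the dangling start entry is on disk, PhysicalLogFiles can locate the affected file. A minimal sketch, using only calls that appear elsewhere in these snippets, of how a test might find the log that now ends mid-transaction:

// minimal sketch: locate the newest log file, i.e. the one writePartialTx
// just appended the unterminated start entry to
PhysicalLogFiles logFiles = new PhysicalLogFiles(storeDir, fs);
long highestVersion = logFiles.getHighestLogVersion();
File activeLog = logFiles.getLogFileForVersion(highestVersion);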
Use of org.neo4j.kernel.impl.transaction.log.PhysicalLogFiles in project neo4j by neo4j.
From the class TransactionLogCatchUpWriterTest, method verifyTransactionsInLog:
private void verifyTransactionsInLog(long fromTxId, long endTxId) throws IOException {
    long expectedTxId = fromTxId;
    PhysicalLogFiles logFiles = new PhysicalLogFiles(storeDir, fs);
    LogVersionedStoreChannel versionedStoreChannel = PhysicalLogFile.openForVersion(logFiles, fs, 0, false);
    try (ReadableLogChannel channel = new ReadAheadLogChannel(versionedStoreChannel, LogVersionBridge.NO_MORE_CHANNELS, 1024)) {
        try (PhysicalTransactionCursor<ReadableLogChannel> txCursor =
                new PhysicalTransactionCursor<>(channel, new VersionAwareLogEntryReader<>())) {
            while (txCursor.next()) {
                CommittedTransactionRepresentation tx = txCursor.get();
                long txId = tx.getCommitEntry().getTxId();
                // transaction ids must form the exact contiguous range [fromTxId, endTxId]
                assertThat(expectedTxId, lessThanOrEqualTo(endTxId));
                assertEquals(expectedTxId, txId);
                expectedTxId++;
            }
        }
    }
}
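The same channel-and-cursor pattern works for other log inspections. A minimal sketch that counts committed transactions in log version 0 instead of asserting their ids, assuming the same fixture fields storeDir and fs:

// minimal sketch: count committed transactions in log version 0
PhysicalLogFiles logFiles = new PhysicalLogFiles(storeDir, fs);
LogVersionedStoreChannel storeChannel = PhysicalLogFile.openForVersion(logFiles, fs, 0, false);
long count = 0;
try (ReadableLogChannel channel = new ReadAheadLogChannel(storeChannel, LogVersionBridge.NO_MORE_CHANNELS, 1024);
        PhysicalTransactionCursor<ReadableLogChannel> cursor =
                new PhysicalTransactionCursor<>(channel, new VersionAwareLogEntryReader<>())) {
    while (cursor.next()) {
        count++;
    }
}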
Use of org.neo4j.kernel.impl.transaction.log.PhysicalLogFiles in project neo4j by neo4j.
From the class StoreCopyClient, method writeTransactionsToActiveLogFile:
private void writeTransactionsToActiveLogFile(File tempStoreDir, Response<?> response) throws Exception {
    LifeSupport life = new LifeSupport();
    try {
        // Start the log and appender
        PhysicalLogFiles logFiles = new PhysicalLogFiles(tempStoreDir, fs);
        LogHeaderCache logHeaderCache = new LogHeaderCache(10);
        ReadOnlyLogVersionRepository logVersionRepository = new ReadOnlyLogVersionRepository(pageCache, tempStoreDir);
        ReadOnlyTransactionIdStore readOnlyTransactionIdStore = new ReadOnlyTransactionIdStore(pageCache, tempStoreDir);
        LogFile logFile = life.add(new PhysicalLogFile(fs, logFiles, Long.MAX_VALUE /* don't rotate */,
                readOnlyTransactionIdStore::getLastCommittedTransactionId, logVersionRepository,
                new Monitors().newMonitor(PhysicalLogFile.Monitor.class), logHeaderCache));
        life.start();
        // Just write all transactions to the active log version. Remember that this runs right
        // after a store copy, where there are no logs, and the transaction stream we're about to
        // write will probably contain transactions that go back some time before the last committed
        // transaction id. So we cannot use a TransactionAppender, since it checks which
        // transactions may be appended.
        FlushableChannel channel = logFile.getWriter();
        final TransactionLogWriter writer = new TransactionLogWriter(new LogEntryWriter(channel));
        final AtomicLong firstTxId = new AtomicLong(BASE_TX_ID);
        response.accept(new Response.Handler() {
            @Override
            public void obligation(long txId) throws IOException {
                throw new UnsupportedOperationException("Shouldn't be called");
            }

            @Override
            public Visitor<CommittedTransactionRepresentation, Exception> transactions() {
                return transaction -> {
                    long txId = transaction.getCommitEntry().getTxId();
                    if (firstTxId.compareAndSet(BASE_TX_ID, txId)) {
                        monitor.startReceivingTransactions(txId);
                    }
                    writer.append(transaction.getTransactionRepresentation(), txId);
                    return false;
                };
            }
        });
        long endTxId = firstTxId.get();
        if (endTxId != BASE_TX_ID) {
            monitor.finishReceivingTransactions(endTxId);
        }
        long currentLogVersion = logVersionRepository.getCurrentLogVersion();
        writer.checkPoint(new LogPosition(currentLogVersion, LOG_HEADER_SIZE));
        // And since we write this manually we need to set the correct transaction id in the
        // header of the log that we just wrote.
        File currentLogFile = logFiles.getLogFileForVersion(currentLogVersion);
        writeLogHeader(fs, currentLogFile, currentLogVersion, max(BASE_TX_ID, endTxId - 1));
        if (!forensics) {
            // Since we just created a new log and put a checkpoint into it at offset
            // LOG_HEADER_SIZE, we need to update the last transaction offset to match this newly
            // defined offset. Otherwise the next checkpoint, which uses the last transaction
            // offset, would be created for a non-existing offset that in most cases is bigger than
            // the new log size. Recovery would treat that as the last checkpoint and would not try
            // to recover the store until the new last-closed-transaction offset overtakes the old
            // one; until that happens, the recovery process cannot restore the store.
            File neoStore = new File(tempStoreDir, MetaDataStore.DEFAULT_NAME);
            MetaDataStore.setRecord(pageCache, neoStore, MetaDataStore.Position.LAST_CLOSED_TRANSACTION_LOG_BYTE_OFFSET, LOG_HEADER_SIZE);
        }
    } finally {
        life.shutdown();
    }
}
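writeLogHeader(...) above rewrites the header's last-committed-transaction field by hand. A minimal sketch of reading it back for verification; LogHeaderReader.readLogHeader(FileSystemAbstraction, File) and the LogHeader field names are assumptions, and the variable names refer to the locals in the method above:

// sketch: read back the rewritten header (readLogHeader signature assumed)
LogHeader header = LogHeaderReader.readLogHeader(fs, currentLogFile);
assert header.logVersion == currentLogVersion;
assert header.lastCommittedTxId == max(BASE_TX_ID, endTxId - 1);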
Use of org.neo4j.kernel.impl.transaction.log.PhysicalLogFiles in project neo4j by neo4j.
From the class TransactionLogCatchUpWriterTest, method simulateStoreCopy:
private org.neo4j.kernel.impl.store.StoreId simulateStoreCopy() throws IOException {
    // create an empty store
    org.neo4j.kernel.impl.store.StoreId storeId;
    NeoStoreDataSource ds = dsRule.getDataSource(storeDir, fs, pageCache, emptyMap());
    try (Lifespan ignored = new Lifespan(ds)) {
        storeId = ds.getStoreId();
    }
    // we don't have log files after a store copy
    PhysicalLogFiles logFiles = new PhysicalLogFiles(storeDir, fsRule.get());
    logFiles.accept((file, version) -> file.delete());
    return storeId;
}
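After the visitor deletes every file, the directory should look like a fresh store-copy target. A minimal sketch of asserting that, under the assumption that getHighestLogVersion() returns -1 when no log files exist:

// sketch: no log files should remain after simulateStoreCopy
PhysicalLogFiles logFiles = new PhysicalLogFiles(storeDir, fsRule.get());
assert logFiles.getHighestLogVersion() == -1; // assumption: -1 means "no logs found"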