Use of org.neo4j.kernel.impl.transaction.log.FlushableChannel in project neo4j by neo4j.
The class SimpleFileStorage, method writeState.
@Override
public void writeState(T state) throws IOException {
    // Ensure the parent directory exists and remove any previous state file
    fileSystem.mkdirs(file.getParentFile());
    fileSystem.deleteFile(file);
    // Marshal the state through a buffered, flushable channel; closing the
    // channel flushes the buffer to disk
    try (FlushableChannel channel = new PhysicalFlushableChannel(fileSystem.create(file))) {
        marshal.marshal(state, channel);
    }
}
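For context, the marshal only needs to know how to write the state through the channel. Below is a minimal sketch of such a marshal for a Long state, assuming a ChannelMarshal-style contract (marshal/unmarshal over WritableChannel/ReadableChannel) like the one SimpleFileStorage delegates to; the class name is hypothetical.

// Hypothetical marshal sketch; FlushableChannel extends WritableChannel,
// so writeState above can hand its channel straight to marshal().
class LongStateMarshal {
    void marshal(Long state, WritableChannel channel) throws IOException {
        channel.putLong(state);
    }

    Long unmarshal(ReadableChannel channel) throws IOException {
        return channel.getLong();
    }
}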
Use of org.neo4j.kernel.impl.transaction.log.FlushableChannel in project neo4j by neo4j.
The class StoreCopyClient, method writeTransactionsToActiveLogFile.
private void writeTransactionsToActiveLogFile(File tempStoreDir, Response<?> response) throws Exception {
    LifeSupport life = new LifeSupport();
    try {
        // Start the log file and its dependencies
        PhysicalLogFiles logFiles = new PhysicalLogFiles(tempStoreDir, fs);
        LogHeaderCache logHeaderCache = new LogHeaderCache(10);
        ReadOnlyLogVersionRepository logVersionRepository = new ReadOnlyLogVersionRepository(pageCache, tempStoreDir);
        ReadOnlyTransactionIdStore readOnlyTransactionIdStore = new ReadOnlyTransactionIdStore(pageCache, tempStoreDir);
        LogFile logFile = life.add(new PhysicalLogFile(fs, logFiles, Long.MAX_VALUE /* don't rotate */,
                readOnlyTransactionIdStore::getLastCommittedTransactionId, logVersionRepository,
                new Monitors().newMonitor(PhysicalLogFile.Monitor.class), logHeaderCache));
        life.start();

        // Just write all transactions to the active log version. Remember that this is after a store copy,
        // where there are no logs, and the transaction stream we're about to write will probably contain
        // transactions that go some time back, before the last committed transaction id. So we cannot
        // use a TransactionAppender, since it has checks for which transactions one can append.
        FlushableChannel channel = logFile.getWriter();
        final TransactionLogWriter writer = new TransactionLogWriter(new LogEntryWriter(channel));
        final AtomicLong firstTxId = new AtomicLong(BASE_TX_ID);

        response.accept(new Response.Handler() {
            @Override
            public void obligation(long txId) throws IOException {
                throw new UnsupportedOperationException("Shouldn't be called");
            }

            @Override
            public Visitor<CommittedTransactionRepresentation, Exception> transactions() {
                return transaction -> {
                    long txId = transaction.getCommitEntry().getTxId();
                    if (firstTxId.compareAndSet(BASE_TX_ID, txId)) {
                        monitor.startReceivingTransactions(txId);
                    }
                    writer.append(transaction.getTransactionRepresentation(), txId);
                    return false; // false means "keep visiting"
                };
            }
        });

        long endTxId = firstTxId.get();
        if (endTxId != BASE_TX_ID) {
            monitor.finishReceivingTransactions(endTxId);
        }

        long currentLogVersion = logVersionRepository.getCurrentLogVersion();
        writer.checkPoint(new LogPosition(currentLogVersion, LOG_HEADER_SIZE));

        // And since we write this manually we need to set the correct transaction id in the
        // header of the log that we just wrote.
        File currentLogFile = logFiles.getLogFileForVersion(currentLogVersion);
        writeLogHeader(fs, currentLogFile, currentLogVersion, max(BASE_TX_ID, endTxId - 1));

        if (!forensics) {
            // Since we just created a new log and wrote a checkpoint into it at offset LOG_HEADER_SIZE,
            // we need to update the last closed transaction offset to match. Otherwise the next
            // checkpoint, which uses the last transaction offset, would be created for a non-existing
            // offset that is in most cases bigger than the new log size. Recovery would treat that as
            // the latest checkpoint and would not try to recover the store until the new last closed
            // transaction offset overcame the old one; until that happens it would be impossible for
            // the recovery process to restore the store.
            File neoStore = new File(tempStoreDir, MetaDataStore.DEFAULT_NAME);
            MetaDataStore.setRecord(pageCache, neoStore,
                    MetaDataStore.Position.LAST_CLOSED_TRANSACTION_LOG_BYTE_OFFSET, LOG_HEADER_SIZE);
        }
    } finally {
        life.shutdown();
    }
}
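The Response.Handler above is the only moving part a caller has to supply. A stripped-down handler that merely tracks the last transaction id it saw could look like the sketch below; the lastSeenTxId field is hypothetical, and the visitor returns false to keep consuming the stream, just as in the method above.

// Sketch of a minimal Response.Handler; only the Neo4j types are real.
Response.Handler trackingHandler = new Response.Handler() {
    private final AtomicLong lastSeenTxId = new AtomicLong(BASE_TX_ID); // hypothetical

    @Override
    public void obligation(long txId) throws IOException {
        throw new UnsupportedOperationException("Not expected during store copy");
    }

    @Override
    public Visitor<CommittedTransactionRepresentation, Exception> transactions() {
        return transaction -> {
            lastSeenTxId.set(transaction.getCommitEntry().getTxId());
            return false; // keep visiting the rest of the stream
        };
    }
};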
Use of org.neo4j.kernel.impl.transaction.log.FlushableChannel in project neo4j by neo4j.
The class CoreBootstrapper, method appendNullTransactionLogEntryToSetRaftIndexToMinusOne.
private void appendNullTransactionLogEntryToSetRaftIndexToMinusOne() throws IOException {
    PhysicalLogFiles logFiles = new PhysicalLogFiles(storeDir, fs);
    ReadOnlyLogVersionRepository logVersionRepository = new ReadOnlyLogVersionRepository(pageCache, storeDir);
    ReadOnlyTransactionIdStore readOnlyTransactionIdStore = new ReadOnlyTransactionIdStore(pageCache, storeDir);
    PhysicalLogFile logFile = new PhysicalLogFile(fs, logFiles, Long.MAX_VALUE /* don't rotate */,
            () -> readOnlyTransactionIdStore.getLastClosedTransactionId() - 1, logVersionRepository,
            new Monitors().newMonitor(PhysicalLogFile.Monitor.class), new LogHeaderCache(10));

    long dummyTransactionId;
    try (Lifespan lifespan = new Lifespan(logFile)) {
        FlushableChannel channel = logFile.getWriter();
        TransactionLogWriter writer = new TransactionLogWriter(new LogEntryWriter(channel));

        long lastCommittedTransactionId = readOnlyTransactionIdStore.getLastCommittedTransactionId();
        // An empty transaction whose header stamps the raft log index as -1
        PhysicalTransactionRepresentation tx = new PhysicalTransactionRepresentation(Collections.emptyList());
        byte[] txHeaderBytes = LogIndexTxHeaderEncoding.encodeLogIndexAsTxHeader(-1);
        tx.setHeader(txHeaderBytes, -1, -1, -1, lastCommittedTransactionId, -1, -1);

        dummyTransactionId = lastCommittedTransactionId + 1;
        writer.append(tx, dummyTransactionId);
        channel.prepareForFlush().flush();
    }

    File neoStoreFile = new File(storeDir, MetaDataStore.DEFAULT_NAME);
    MetaDataStore.setRecord(pageCache, neoStoreFile, LAST_TRANSACTION_ID, dummyTransactionId);
}
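The interesting detail is the header: encodeLogIndexAsTxHeader(-1) is what actually resets the raft index. A round-trip sketch of that encoding, assuming the matching decode helper from the same LogIndexTxHeaderEncoding class:

// Sketch: encode raft index -1 into a tx header and read it back.
byte[] header = LogIndexTxHeaderEncoding.encodeLogIndexAsTxHeader(-1);
long raftIndex = LogIndexTxHeaderEncoding.decodeLogIndexFromTxHeader(header);
assert raftIndex == -1; // the bootstrapper deliberately stamps -1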
Use of org.neo4j.kernel.impl.transaction.log.FlushableChannel in project neo4j by neo4j.
The class InMemoryCountsStoreCountsSnapshotSerializerIntegrationTest, method largeWorkloadOnPhysicalLogTest.
@Test
public void largeWorkloadOnPhysicalLogTest() throws IOException {
    // GIVEN
    try (FileSystemAbstraction fs = new DefaultFileSystemAbstraction()) {
        File tempFile = new File(testDir.directory(), "temp");
        StoreChannel rawChannel = fs.create(tempFile);
        Map<CountsKey, long[]> map = CountsStoreMapGenerator.simpleCountStoreMap(100000);
        CountsSnapshot countsSnapshot = new CountsSnapshot(1, map);
        CountsSnapshot recovered;

        // WHEN
        try (FlushableChannel tempChannel = new PhysicalFlushableChannel(rawChannel)) {
            serialize(tempChannel, countsSnapshot);
        }
        // Closing the channel is necessary to flush its buffer into the file; since the
        // try-with-resources also closed the underlying channel, we need to reopen it.
        rawChannel = fs.open(tempFile, "r");
        try (ReadAheadChannel<StoreChannel> readAheadChannel = new ReadAheadChannel<>(rawChannel)) {
            recovered = deserialize(readAheadChannel);

            // THEN
            Assert.assertEquals(countsSnapshot.getTxId(), recovered.getTxId());
            // Every entry written must be recovered...
            for (Map.Entry<CountsKey, long[]> pair : countsSnapshot.getMap().entrySet()) {
                long[] value = recovered.getMap().get(pair.getKey());
                Assert.assertNotNull(value);
                Assert.assertArrayEquals(value, pair.getValue());
            }
            // ...and nothing extra may appear in the recovered snapshot.
            for (Map.Entry<CountsKey, long[]> pair : recovered.getMap().entrySet()) {
                long[] value = countsSnapshot.getMap().get(pair.getKey());
                Assert.assertNotNull(value);
                Assert.assertArrayEquals(value, pair.getValue());
            }
        }
    }
}
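The same write-then-reopen pattern works for plain primitives, which makes the buffering behavior easy to see in isolation. A minimal round-trip sketch, assuming only the WritableChannel/ReadableChannel primitives and the fs/testDir fixtures from the test above; the file name is hypothetical.

// Sketch: write through a PhysicalFlushableChannel, then read back
// through a ReadAheadChannel. Closing the writer flushes its buffer.
File file = new File(testDir.directory(), "roundtrip"); // hypothetical
try (FlushableChannel out = new PhysicalFlushableChannel(fs.create(file))) {
    out.putLong(42L);
    out.putInt(7);
}
try (ReadAheadChannel<StoreChannel> in = new ReadAheadChannel<>(fs.open(file, "r"))) {
    assert in.getLong() == 42L;
    assert in.getInt() == 7;
}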