Use of org.neo4j.io.fs.StoreChannel in project neo4j by neo4j.
The class StoreMigratorCheckPointer, method checkPoint.
/**
* Write a check point in the log file with the given version
* <p>
 * If the file does not yet exist, it is created with a header containing the given log version and lastCommittedTx.
*
* @param logVersion the log version to open
* @param lastCommittedTx the last committed tx id
*/
public void checkPoint(long logVersion, long lastCommittedTx) throws IOException {
    PhysicalLogFiles logFiles = new PhysicalLogFiles(storeDir, fileSystem);
    File logFileForVersion = logFiles.getLogFileForVersion(logVersion);
    // create the log file with a header if it does not exist yet
    if (!fileSystem.fileExists(logFileForVersion)) {
        try (StoreChannel channel = fileSystem.create(logFileForVersion)) {
            writeLogHeader(channel, logVersion, lastCommittedTx);
        }
    }
    // append a check point entry at the current end of the log
    try (LogVersionedStoreChannel storeChannel = PhysicalLogFile.openForVersion(logFiles, fileSystem, logVersion, true)) {
        long offset = storeChannel.size();
        storeChannel.position(offset);
        try (PositionAwarePhysicalFlushableChannel channel = new PositionAwarePhysicalFlushableChannel(storeChannel)) {
            TransactionLogWriter writer = new TransactionLogWriter(new LogEntryWriter(channel));
            writer.checkPoint(new LogPosition(logVersion, offset));
        }
    }
}
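The create-then-write pattern above can be shown without the log-specific classes. The following is a minimal sketch, not taken from the neo4j sources: the helper name writeHeaderIfAbsent and the two-long header layout are illustrative assumptions, while fileExists, create, writeAll and force are the FileSystemAbstraction/StoreChannel calls already used in the snippets on this page.

import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.neo4j.io.fs.FileSystemAbstraction;
import org.neo4j.io.fs.StoreChannel;

class HeaderWriter {
    // Illustrative helper (not neo4j source): create the file only if it is missing
    // and write a fixed-size header of (logVersion, lastCommittedTx), mirroring the
    // guard used in StoreMigratorCheckPointer.checkPoint above.
    static void writeHeaderIfAbsent(FileSystemAbstraction fs, File file,
                                    long logVersion, long lastCommittedTx) throws IOException {
        if (fs.fileExists(file)) {
            return; // an existing file already carries its header
        }
        try (StoreChannel channel = fs.create(file)) {
            ByteBuffer header = ByteBuffer.allocate(2 * Long.BYTES);
            header.putLong(logVersion).putLong(lastCommittedTx);
            header.flip();
            channel.writeAll(header);   // write the whole buffer to the channel
            channel.force(false);       // flush the bytes to durable storage
        }
    }
}

Guarding with fileExists keeps the header write idempotent: re-running the step does not overwrite a header that is already on disk.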
Use of org.neo4j.io.fs.StoreChannel in project neo4j by neo4j.
The class LegacyLogEntryReader, method openReadableChannel.
public Pair<LogHeader, IOCursor<LogEntry>> openReadableChannel(File logFile) throws IOException {
    final StoreChannel rawChannel = fs.open(logFile, "r");
    final LogHeader header = readLogHeader(ByteBuffer.allocate(LOG_HEADER_SIZE), rawChannel, false, logFile);
    LogEntryReader<ReadableLogChannel> reader = readerFactory.apply(header);
    // this ensures that the last committed txId field in the header is initialized properly
    long lastCommittedTxId = Math.max(BASE_TX_ID, header.lastCommittedTxId);
    final PhysicalLogVersionedStoreChannel channel = new PhysicalLogVersionedStoreChannel(rawChannel, header.logVersion, header.logFormatVersion);
    final ReadableLogChannel readableChannel = new ReadAheadLogChannel(channel, NO_MORE_CHANNELS);
    final IOCursor<LogEntry> cursor = new LogEntrySortingCursor(reader, readableChannel);
    return Pair.of(new LogHeader(CURRENT_LOG_VERSION, header.logVersion, lastCommittedTxId), cursor);
}
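The read side mirrors the writing pattern: open the channel with mode "r" and fill a fixed-size buffer before handing it to the header parser. Below is a sketch of just that read loop; the helper name readFully and the caller-supplied size parameter are assumptions for illustration, while open and read are the channel calls shown elsewhere on this page.

import java.io.IOException;
import java.nio.ByteBuffer;
import org.neo4j.io.fs.StoreChannel;

class ChannelReads {
    // Illustrative helper: keep reading until the buffer is full or the channel is
    // exhausted, the same loop shape KeyValueStoreFileFormat.open uses further down.
    static ByteBuffer readFully(StoreChannel channel, int size) throws IOException {
        ByteBuffer buffer = ByteBuffer.allocate(size);
        while (buffer.hasRemaining()) {
            if (channel.read(buffer) == -1) {
                break; // end of file reached before the buffer was filled
            }
        }
        buffer.flip(); // prepare the buffer for the parser to consume
        return buffer;
    }
}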
Use of org.neo4j.io.fs.StoreChannel in project neo4j by neo4j.
The class IdGeneratorImpl, method createGenerator.
/**
 * Creates a new id generator.
 *
 * @param fs the file system abstraction used to create the file
 * @param fileName the name of the id generator file
 * @param highId the initial high id written into the header
 * @param throwIfFileExists if {@code true}, an {@link IllegalStateException} is thrown when the file
 * already exists; if {@code false}, the existing file is truncated and the header is rewritten.
 */
public static void createGenerator(FileSystemAbstraction fs, File fileName, long highId, boolean throwIfFileExists) {
    // sanity checks
    if (fs == null) {
        throw new IllegalArgumentException("Null filesystem");
    }
    if (fileName == null) {
        throw new IllegalArgumentException("Null filename");
    }
    if (throwIfFileExists && fs.fileExists(fileName)) {
        throw new IllegalStateException("Can't create IdGeneratorFile[" + fileName + "], file already exists");
    }
    try (StoreChannel channel = fs.create(fileName)) {
        // write the header
        channel.truncate(0);
        ByteBuffer buffer = ByteBuffer.allocate(HEADER_SIZE);
        buffer.put(CLEAN_GENERATOR).putLong(highId).flip();
        channel.write(buffer);
        channel.force(false);
    } catch (IOException e) {
        throw new UnderlyingStorageException("Unable to create id generator " + fileName, e);
    }
}
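A call site might look like the sketch below. It is not taken from the neo4j tests; DefaultFileSystemAbstraction, the import path of IdGeneratorImpl and the file name example.id are assumptions that may vary between neo4j versions.

import java.io.File;
import org.neo4j.io.fs.DefaultFileSystemAbstraction;
import org.neo4j.io.fs.FileSystemAbstraction;
import org.neo4j.kernel.impl.store.id.IdGeneratorImpl; // assumed package; may differ by version

class CreateGeneratorExample {
    public static void main(String[] args) {
        FileSystemAbstraction fs = new DefaultFileSystemAbstraction();
        File idFile = new File("example.id"); // example file name
        // first creation: the file does not exist yet, so failing on an existing file is safe
        IdGeneratorImpl.createGenerator(fs, idFile, 0, true);
        // re-creating with throwIfFileExists = true would now throw IllegalStateException;
        // passing false truncates the file and rewrites the header instead
        IdGeneratorImpl.createGenerator(fs, idFile, 0, false);
    }
}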
Use of org.neo4j.io.fs.StoreChannel in project neo4j by neo4j.
The class KeyValueStoreFileFormat, method open.
/**
* Opens an existing store file.
*
* @param fs the file system which holds the store file.
* @param path the location in the file system where the store file resides.
* @param pages the page cache to use for opening the store file.
* @return the opened store file.
*/
private KeyValueStoreFile open(FileSystemAbstraction fs, File path, PageCache pages) throws IOException {
    ByteBuffer buffer = ByteBuffer.wrap(new byte[maxSize * 4]);
    try (StoreChannel file = fs.open(path, "r")) {
        while (buffer.hasRemaining()) {
            int bytes = file.read(buffer);
            if (bytes == -1) {
                break;
            }
        }
    }
    buffer.flip();
    // compute the key size
    int keySize = 0;
    while (buffer.hasRemaining() && buffer.get() == 0) {
        if (++keySize > maxSize) {
            throw new IOException("Invalid header, key size too large.");
        }
    }
    // compute the value size
    // start at 1, since we've seen the first non-zero byte
    int valueSize = 1;
    for (int zeros = 0; zeros <= keySize; zeros++) {
        if (!buffer.hasRemaining()) {
            throw new IOException("Invalid value size: " + valueSize);
        }
        if (buffer.get() != 0) {
            zeros = 0;
        }
        if (++valueSize - keySize > maxSize) {
            throw new IOException("Invalid header, value size too large.");
        }
    }
    // we read in the next zero-key
    valueSize -= keySize;
    // compute a page size that aligns with the <key,value>-tuple size
    int pageSize = pageSize(pages, keySize, valueSize);
    // read the store metadata
    {
        BigEndianByteArrayBuffer formatSpecifier = new BigEndianByteArrayBuffer(new byte[valueSize]);
        writeFormatSpecifier(formatSpecifier);
        PagedFile file = pages.map(path, pageSize);
        try {
            BigEndianByteArrayBuffer key = new BigEndianByteArrayBuffer(new byte[keySize]);
            BigEndianByteArrayBuffer value = new BigEndianByteArrayBuffer(new byte[valueSize]);
            // the first value is the format identifier, pass it along
            buffer.position(keySize);
            buffer.limit(keySize + valueSize);
            value.dataFrom(buffer);
            MetadataCollector metadata = metadata(formatSpecifier, pageSize, keySize, valueSize);
            // scan and catalogue all entries in the file
            KeyValueStoreFile.scanAll(file, 0, metadata, key, value);
            KeyValueStoreFile storeFile = new KeyValueStoreFile(file, keySize, valueSize, metadata);
            file = null;
            return storeFile;
        } finally {
            if (file != null) {
                file.close();
            }
        }
    }
}
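The key size above is recovered purely from the header's leading zero bytes. The following standalone sketch repeats that counting step outside the store format, with a hypothetical class name and no page cache involved:

import java.io.IOException;
import java.nio.ByteBuffer;

class HeaderSizes {
    // Simplified illustration of the key-size scan in KeyValueStoreFileFormat.open:
    // the header starts with keySize zero bytes, so counting the leading zeros
    // (bounded by maxSize) recovers the key size.
    static int keySize(ByteBuffer header, int maxSize) throws IOException {
        int keySize = 0;
        while (header.hasRemaining() && header.get() == 0) {
            if (++keySize > maxSize) {
                throw new IOException("Invalid header, key size too large.");
            }
        }
        return keySize;
    }
}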
Use of org.neo4j.io.fs.StoreChannel in project neo4j by neo4j.
The class StateRecoveryManagerTest, method writeSomeGarbage.
private void writeSomeGarbage(EphemeralFileSystemAbstraction fsa, File file) throws IOException {
    final StoreChannel channel = fsa.open(file, "rw");
    // write a single arbitrary int so the file contains state the recovery code cannot parse
    ByteBuffer buffer = ByteBuffer.allocate(4);
    buffer.putInt(9876);
    buffer.flip();
    channel.writeAll(buffer);
    channel.force(false);
    channel.close();
}
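A test that wants to confirm what the helper left on disk could read the bytes back through the same file system abstraction. The sketch below is not from StateRecoveryManagerTest, and the import path of EphemeralFileSystemAbstraction is an assumption; open, read, and the buffer handling follow the calls used elsewhere on this page.

import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.neo4j.graphdb.mockfs.EphemeralFileSystemAbstraction; // assumed package for the in-memory test file system
import org.neo4j.io.fs.StoreChannel;

class ReadBackGarbage {
    // Sketch: read the four garbage bytes back and return them as an int.
    static int readGarbage(EphemeralFileSystemAbstraction fsa, File file) throws IOException {
        try (StoreChannel channel = fsa.open(file, "r")) {
            ByteBuffer buffer = ByteBuffer.allocate(4);
            while (buffer.hasRemaining() && channel.read(buffer) != -1) {
                // keep reading until all four bytes are in the buffer
            }
            buffer.flip();
            return buffer.getInt(); // 9876 for a file written by writeSomeGarbage
        }
    }
}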