Example usage of org.neo4j.io.pagecache.PagedFile in the neo4j project:
class MetaDataStore, method setRecord.
/**
 * Overwrites a single record in a neostore file and returns the value it held before.
 * This method only works for neostore files of the current version.
 *
 * @param pageCache {@link PageCache} the {@code neostore} file lives in.
 * @param neoStore {@link File} pointing to the neostore.
 * @param position record {@link Position} identifying which field to write.
 * @param value value to write in that record.
 * @return the previous value before writing, or {@code FIELD_NOT_INITIALIZED} if the
 *         record was not in use (or the page could not be pinned).
 * @throws IOException if any I/O related error occurs.
 */
public static long setRecord(PageCache pageCache, File neoStore, Position position, long value) throws IOException {
    long oldValue = FIELD_NOT_INITIALIZED;
    int pageSize = getPageSize(pageCache);
    int recordOffset = offset(position);
    try (PagedFile pagedFile = pageCache.map(neoStore, pageSize);
            PageCursor cursor = pagedFile.io(0, PagedFile.PF_SHARED_WRITE_LOCK)) {
        if (cursor.next()) {
            // We're overwriting a record; capture what is stored there right now.
            cursor.setOffset(recordOffset);
            byte inUseByte = cursor.getByte();
            long storedValue = cursor.getLong();
            if (inUseByte == Record.IN_USE.byteValue()) {
                oldValue = storedValue;
            }
            // Rewind and write the new value, marking the record as in use.
            cursor.setOffset(recordOffset);
            cursor.putByte(Record.IN_USE.byteValue());
            cursor.putLong(value);
            if (cursor.checkAndClearBoundsFlag()) {
                MetaDataRecord record = new MetaDataRecord();
                record.setId(position.id);
                throw new UnderlyingStorageException(buildOutOfBoundsExceptionMessage(
                        record, 0, recordOffset, RECORD_SIZE, pageSize, neoStore.getAbsolutePath()));
            }
        }
    }
    return oldValue;
}
Example usage of org.neo4j.io.pagecache.PagedFile in the neo4j project:
class ConfigurableStandalonePageCacheFactoryTest, method mustAutomaticallyStartEvictionThread.
@Test(timeout = 10000)
public void mustAutomaticallyStartEvictionThread() throws IOException {
    try (FileSystemAbstraction fs = new DelegateFileSystemAbstraction(Jimfs.newFileSystem(jimConfig()))) {
        File file = new File("/a").getCanonicalFile();
        fs.create(file).close();
        try (PageCache pageCache = ConfigurableStandalonePageCacheFactory.createPageCache(fs);
                PagedFile pagedFile = pageCache.map(file, 4096);
                PageCursor cursor = pagedFile.io(0, PagedFile.PF_SHARED_WRITE_LOCK)) {
            // If the eviction thread has not been started, then this test will block
            // forever; the timeout above will then fail it.
            int iterations = 10_000;
            for (int iteration = 0; iteration < iterations; iteration++) {
                assertTrue(cursor.next());
                cursor.putInt(42);
            }
        }
    }
}
Example usage of org.neo4j.io.pagecache.PagedFile in the neo4j project:
class StoreMigrator, method moveMigratedFiles.
/**
 * Moves the migrated store files from {@code migrationDir} into {@code storeDir},
 * overwriting existing targets, then finishes the upgrade: strips legacy version
 * trailers when leaving a trailer-based format, updates neostore fields, deletes
 * unused legacy log files and, if trailers were removed, writes a check point so
 * recovery works in the newer version.
 *
 * @param migrationDir directory the migrated files were written to.
 * @param storeDir live store directory the files are moved into.
 * @param versionToUpgradeFrom store version string being upgraded from.
 * @param versionToUpgradeTo store version string being upgraded to.
 * @throws IOException if moving or rewriting any file fails.
 */
@Override
public void moveMigratedFiles(File migrationDir, File storeDir, String versionToUpgradeFrom, String versionToUpgradeTo) throws IOException {
    // Move the migrated ones into the store directory
    StoreFile.fileOperation(MOVE, fileSystem, migrationDir, storeDir, StoreFile.currentStoreFiles(), // allow to skip non existent source files
    true, // allow to overwrite target files
    ExistingTargetStrategy.OVERWRITE, StoreFileType.values());
    // move the files with the page cache.
    try {
        Iterable<FileHandle> fileHandles = pageCache.streamFilesRecursive(migrationDir)::iterator;
        for (FileHandle fh : fileHandles) {
            // Only handle files that correspond to one of the current store files.
            Predicate<StoreFile> predicate = storeFile -> storeFile.fileName(StoreFileType.STORE).equals(fh.getFile().getName());
            if (StreamSupport.stream(StoreFile.currentStoreFiles().spliterator(), false).anyMatch(predicate)) {
                // A mapped file must be unmapped before it can be renamed.
                final Optional<PagedFile> optionalPagedFile = pageCache.getExistingMapping(fh.getFile());
                if (optionalPagedFile.isPresent()) {
                    optionalPagedFile.get().close();
                }
                fh.rename(new File(storeDir, fh.getFile().getName()), StandardCopyOption.REPLACE_EXISTING);
            }
        }
    } catch (NoSuchFileException e) {
        //This means that we had no files only present in the page cache, this is fine.
    }
    RecordFormats oldFormat = selectForVersion(versionToUpgradeFrom);
    RecordFormats newFormat = selectForVersion(versionToUpgradeTo);
    // Trailers only need removing when going from a format that has them to one that does not.
    boolean movingAwayFromVersionTrailers = oldFormat.hasCapability(VERSION_TRAILERS) && !newFormat.hasCapability(VERSION_TRAILERS);
    if (movingAwayFromVersionTrailers) {
        StoreFile.removeTrailers(versionToUpgradeFrom, fileSystem, storeDir, pageCache.pageSize());
    }
    File neoStore = new File(storeDir, MetaDataStore.DEFAULT_NAME);
    // Read these before updating neostore fields below, for use in the check point.
    long logVersion = MetaDataStore.getRecord(pageCache, neoStore, Position.LOG_VERSION);
    long lastCommittedTx = MetaDataStore.getRecord(pageCache, neoStore, Position.LAST_TRANSACTION_ID);
    // update or add upgrade id and time and other necessary neostore records
    updateOrAddNeoStoreFieldsAsPartOfMigration(migrationDir, storeDir, versionToUpgradeTo);
    // delete old logs
    legacyLogs.deleteUnusedLogFiles(storeDir);
    if (movingAwayFromVersionTrailers) {
        // write a check point in the log in order to make recovery work in the newer version
        new StoreMigratorCheckPointer(storeDir, fileSystem).checkPoint(logVersion, lastCommittedTx);
    }
}
Example usage of org.neo4j.io.pagecache.PagedFile in the neo4j project:
class KeyValueStoreFileFormat, method open.
/**
 * Opens an existing store file.
 *
 * @param fs the file system which holds the store file.
 * @param path the location in the file system where the store file resides.
 * @param pages the page cache to use for opening the store file.
 * @return the opened store file.
 * @throws IOException if reading fails, or if the header's key/value sizes are invalid.
 */
private KeyValueStoreFile open(FileSystemAbstraction fs, File path, PageCache pages) throws IOException {
    // Read the start of the file into a buffer so the header can be parsed.
    ByteBuffer buffer = ByteBuffer.wrap(new byte[maxSize * 4]);
    try (StoreChannel file = fs.open(path, "r")) {
        while (buffer.hasRemaining()) {
            int bytes = file.read(buffer);
            if (bytes == -1) {
                // End of file reached before the buffer filled; parse what we have.
                break;
            }
        }
    }
    buffer.flip();
    // compute the key sizes
    // The key size is the length of the leading run of zero bytes.
    int keySize = 0;
    while (buffer.hasRemaining() && buffer.get() == 0) {
        if (++keySize > maxSize) {
            throw new IOException("Invalid header, key size too large.");
        }
    }
    // compute the value size
    // start at 1, since we've seen the first non-zero byte
    int valueSize = 1;
    // Scan until a full zero-key (keySize + 1 zeros) is seen; `zeros` resets on any
    // non-zero byte, so the loop ends at the start of the next all-zero key.
    for (int zeros = 0; zeros <= keySize; zeros++) {
        if (!buffer.hasRemaining()) {
            throw new IOException("Invalid value size: " + valueSize);
        }
        if (buffer.get() != 0) {
            zeros = 0;
        }
        if (++valueSize - keySize > maxSize) {
            throw new IOException("Invalid header, value size too large.");
        }
    }
    // we read in the next zero-key
    valueSize -= keySize;
    // compute a page size that aligns with the <key,value>-tuple size
    int pageSize = pageSize(pages, keySize, valueSize);
    // read the store metadata
    {
        BigEndianByteArrayBuffer formatSpecifier = new BigEndianByteArrayBuffer(new byte[valueSize]);
        writeFormatSpecifier(formatSpecifier);
        PagedFile file = pages.map(path, pageSize);
        try {
            BigEndianByteArrayBuffer key = new BigEndianByteArrayBuffer(new byte[keySize]);
            BigEndianByteArrayBuffer value = new BigEndianByteArrayBuffer(new byte[valueSize]);
            // the first value is the format identifier, pass it along
            buffer.position(keySize);
            buffer.limit(keySize + valueSize);
            value.dataFrom(buffer);
            MetadataCollector metadata = metadata(formatSpecifier, pageSize, keySize, valueSize);
            // scan and catalogue all entries in the file
            KeyValueStoreFile.scanAll(file, 0, metadata, key, value);
            KeyValueStoreFile storeFile = new KeyValueStoreFile(file, keySize, valueSize, metadata);
            // Ownership of the mapped file has transferred to storeFile; clearing the
            // local reference keeps the finally block from closing it on success.
            file = null;
            return storeFile;
        } finally {
            if (file != null) {
                file.close();
            }
        }
    }
}
Example usage of org.neo4j.io.pagecache.PagedFile in the neo4j project:
class AbstractRecordFormatTest, method verifyWriteAndRead.
/**
 * Round-trips generated records through the given format and verifies that what was
 * written is what is read back, for up to {@code TEST_ITERATIONS} iterations or until
 * {@code TEST_TIME} has elapsed, whichever comes first.
 */
private <R extends AbstractBaseRecord> void verifyWriteAndRead(Supplier<RecordFormat<R>> formatSupplier, Supplier<Generator<R>> generatorSupplier, Supplier<RecordKey<R>> keySupplier) throws IOException {
    // GIVEN
    try (PagedFile storeFile = pageCache.map(new File("store-" + name.getMethodName()), PAGE_SIZE, CREATE)) {
        RecordFormat<R> format = formatSupplier.get();
        RecordKey<R> recordKey = keySupplier.get();
        Generator<R> recordGenerator = generatorSupplier.get();
        int recordSize = format.getRecordSize(new IntStoreHeader(DATA_SIZE));
        // Sometimes start ids on the next page to exercise page-boundary behaviour.
        long firstId = random.nextBoolean() ? idSureToBeOnTheNextPage(PAGE_SIZE, recordSize) : 10;
        BatchingIdSequence idSequence = new BatchingIdSequence(firstId);
        // WHEN
        long deadline = currentTimeMillis() + TEST_TIME;
        for (long iteration = 0; iteration < TEST_ITERATIONS && currentTimeMillis() < deadline; iteration++) {
            R written = recordGenerator.get(recordSize, format, iteration % 5);
            R read = format.newRecord();
            try {
                writeRecord(written, format, storeFile, recordSize, idSequence);
                readAndVerifyRecord(written, read, format, recordKey, storeFile, recordSize);
                idSequence.reset();
            } catch (Throwable t) {
                // Decorate the failure with everything needed to reproduce it.
                Exceptions.setMessage(t, t.getMessage() + " : written:" + written + ", read:" + read + ", seed:" + random.seed() + ", iteration:" + iteration);
                throw t;
            }
        }
    }
}
Aggregations