Use of org.neo4j.io.pagecache.PageCursor in project neo4j by neo4j.
The class BaseHighLimitRecordFormatV3_0_0, method read:
public void read(RECORD record, PageCursor primaryCursor, RecordLoad mode, int recordSize) throws IOException {
    int primaryStartOffset = primaryCursor.getOffset();
    byte headerByte = primaryCursor.getByte();
    boolean inUse = isInUse(headerByte);
    boolean doubleRecordUnit = has(headerByte, HEADER_BIT_RECORD_UNIT);
    if (doubleRecordUnit) {
        boolean firstRecordUnit = has(headerByte, HEADER_BIT_FIRST_RECORD_UNIT);
        if (!firstRecordUnit) {
            // This is a record unit, and not even the first one, so it cannot be read directly;
            // it may only be read as part of reading the primary unit.
            record.clear();
            // Return and let the caller retry
            primaryCursor.setCursorException("Expected record to be the first unit in the chain, but record header says it's not");
            return;
        }
        // This is a record that is split into multiple record units. We need slightly cleverer
        // data structures here. For the time being this means instantiating one extra object,
        // but the trade-off is a great reduction in complexity.
        long secondaryId = Reference.decode(primaryCursor);
        long pageId = pageIdForRecord(secondaryId, primaryCursor.getCurrentPageSize(), recordSize);
        int offset = offsetForId(secondaryId, primaryCursor.getCurrentPageSize(), recordSize);
        PageCursor secondaryCursor = primaryCursor.openLinkedCursor(pageId);
        if ((!secondaryCursor.next()) | offset < 0) {
            // We must have made an inconsistent read of the secondary record unit reference.
            // No point in trying to read this.
            record.clear();
            primaryCursor.setCursorException(illegalSecondaryReferenceMessage(pageId));
            return;
        }
        secondaryCursor.setOffset(offset + HEADER_BYTE);
        int primarySize = recordSize - (primaryCursor.getOffset() - primaryStartOffset);
        // We *could* sanity-check the secondary record header byte here, but we won't. If it is wrong, then we most
        // likely did an inconsistent read, in which case we'll just retry. Otherwise, if the header byte is wrong,
        // there is little we can do about it here, since we are not allowed to throw exceptions.
        int secondarySize = recordSize - HEADER_BYTE;
        PageCursor composite = CompositePageCursor.compose(primaryCursor, primarySize, secondaryCursor, secondarySize);
        doReadInternal(record, composite, recordSize, headerByte, inUse);
        record.setSecondaryUnitId(secondaryId);
    } else {
        doReadInternal(record, primaryCursor, recordSize, headerByte, inUse);
    }
}
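The pageIdForRecord and offsetForId helpers are not part of the snippet. Given that records are fixed-size and packed back to back, with no record straddling a page boundary, the arithmetic they perform can be sketched as below; this is inferred from the call sites, not necessarily the project's own implementation.

// A minimal sketch of the id-to-location arithmetic implied by the calls above.
// Assumes fixed-size records and pages holding a whole number of records.
final class RecordLocation {
    // Page that holds the record with the given id.
    static long pageIdForRecord(long id, int pageSize, int recordSize) {
        int recordsPerPage = pageSize / recordSize;
        return id / recordsPerPage;
    }

    // Byte offset of the record within its page.
    static int offsetForId(long id, int pageSize, int recordSize) {
        int recordsPerPage = pageSize / recordSize;
        return (int) (id % recordsPerPage) * recordSize;
    }
}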
Use of org.neo4j.io.pagecache.PageCursor in project neo4j by neo4j.
The class BaseHighLimitRecordFormatV3_0_0, method write:
@Override
public void write(RECORD record, PageCursor primaryCursor, int recordSize) throws IOException {
    if (record.inUse()) {
        // Let the specific implementation provide the additional header bits; we'll provide the core format bits.
        byte headerByte = headerBits(record);
        assert (headerByte & 0x7) == 0 : "Format-specific header bits (" + headerByte + ") collide with format-generic header bits";
        headerByte = set(headerByte, IN_USE_BIT, record.inUse());
        headerByte = set(headerByte, HEADER_BIT_RECORD_UNIT, record.requiresSecondaryUnit());
        headerByte = set(headerByte, HEADER_BIT_FIRST_RECORD_UNIT, true);
        primaryCursor.putByte(headerByte);
        if (record.requiresSecondaryUnit()) {
            // Write using the normal adapter, since the first reference we write cannot really overflow
            // into the secondary record.
            long secondaryUnitId = record.getSecondaryUnitId();
            long pageId = pageIdForRecord(secondaryUnitId, primaryCursor.getCurrentPageSize(), recordSize);
            int offset = offsetForId(secondaryUnitId, primaryCursor.getCurrentPageSize(), recordSize);
            PageCursor secondaryCursor = primaryCursor.openLinkedCursor(pageId);
            if (!secondaryCursor.next()) {
                // We are not allowed to write this much data to the file, apparently.
                record.clear();
                return;
            }
            secondaryCursor.setOffset(offset);
            secondaryCursor.putByte((byte) (IN_USE_BIT | HEADER_BIT_RECORD_UNIT));
            int recordSizeWithoutHeader = recordSize - HEADER_BYTE;
            PageCursor composite = CompositePageCursor.compose(primaryCursor, recordSizeWithoutHeader, secondaryCursor, recordSizeWithoutHeader);
            Reference.encode(secondaryUnitId, composite);
            doWriteInternal(record, composite);
        } else {
            doWriteInternal(record, primaryCursor);
        }
    } else {
        markAsUnused(primaryCursor, record, recordSize);
    }
}
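Both methods lean on small has/set bit helpers and a handful of header-bit constants that the snippets don't show. The sketch below uses illustrative constant values; the only thing the assert in write actually guarantees is that the three format-generic flags occupy the low three bits of the header byte (headerByte & 0x7).

// Sketch of the header-bit helpers used above. The constant values are
// assumptions for illustration; only their placement in the low three bits
// is implied by the assert in write().
final class HeaderBits {
    static final int IN_USE_BIT = 0b0000_0001;
    static final int HEADER_BIT_RECORD_UNIT = 0b0000_0010;
    static final int HEADER_BIT_FIRST_RECORD_UNIT = 0b0000_0100;

    // True if the given flag is set in the header byte.
    static boolean has(byte headerByte, int bitMask) {
        return (headerByte & bitMask) != 0;
    }

    // Returns the header byte with the given flag set; leaves it untouched otherwise.
    static byte set(byte headerByte, int bitMask, boolean value) {
        return value ? (byte) (headerByte | bitMask) : headerByte;
    }

    static boolean isInUse(byte headerByte) {
        return has(headerByte, IN_USE_BIT);
    }
}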
Use of org.neo4j.io.pagecache.PageCursor in project neo4j by neo4j.
The class MetaDataStoreTest, method transactionCommittedMustBeAtomic:
@Test
public void transactionCommittedMustBeAtomic() throws Throwable {
    try (MetaDataStore store = newMetaDataStore()) {
        PagedFile pf = store.storeFile;
        store.transactionCommitted(2, 2, 2);
        AtomicLong writeCount = new AtomicLong();
        AtomicLong fileReadCount = new AtomicLong();
        AtomicLong apiReadCount = new AtomicLong();
        int upperLimit = 10_000;
        int lowerLimit = 100;
        long endTime = currentTimeMillis() + SECONDS.toMillis(10);
        Race race = new Race();
        race.withEndCondition(() -> writeCount.get() >= upperLimit && fileReadCount.get() >= upperLimit && apiReadCount.get() >= upperLimit);
        race.withEndCondition(() -> writeCount.get() >= lowerLimit && fileReadCount.get() >= lowerLimit && apiReadCount.get() >= lowerLimit && currentTimeMillis() >= endTime);
        race.addContestants(3, () -> {
            long count = writeCount.incrementAndGet();
            store.transactionCommitted(count, count, count);
        });
        race.addContestants(3, throwing(() -> {
            try (PageCursor cursor = pf.io(0, PagedFile.PF_SHARED_READ_LOCK)) {
                assertTrue(cursor.next());
                long id, checksum;
                do {
                    id = store.getRecordValue(cursor, MetaDataStore.Position.LAST_TRANSACTION_ID);
                    checksum = store.getRecordValue(cursor, MetaDataStore.Position.LAST_TRANSACTION_CHECKSUM);
                } while (cursor.shouldRetry());
                assertIdEqualsChecksum(id, checksum, "file");
                fileReadCount.incrementAndGet();
            }
        }));
        race.addContestants(3, () -> {
            TransactionId transaction = store.getLastCommittedTransaction();
            assertIdEqualsChecksum(transaction.transactionId(), transaction.checksum(), "API");
            apiReadCount.incrementAndGet();
        });
        race.go();
    }
}
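assertIdEqualsChecksum is not shown in the snippet, but its contract follows from the writers: every contestant stores the same counter value as both transaction id and checksum, so any torn (non-atomic) read surfaces as a mismatch between the two fields. A plausible shape for the helper, as a hypothetical sketch:

// Hypothetical helper matching the test's usage: since writers always store
// identical id and checksum values, inequality proves a non-atomic read.
private static void assertIdEqualsChecksum(long id, long checksum, String source) {
    if (id != checksum) {
        throw new AssertionError("id " + id + " and checksum " + checksum +
                " from " + source + " differ; the read was not atomic");
    }
}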
Use of org.neo4j.io.pagecache.PageCursor in project neo4j by neo4j.
The class CommonAbstractStore, method extractHeaderRecord:
private void extractHeaderRecord(PagedFile pagedFile) throws IOException {
    if (getNumberOfReservedLowIds() > 0) {
        try (PageCursor pageCursor = pagedFile.io(0, PF_SHARED_READ_LOCK)) {
            if (pageCursor.next()) {
                do {
                    pageCursor.setOffset(0);
                    readHeaderAndInitializeRecordFormat(pageCursor);
                } while (pageCursor.shouldRetry());
                if (pageCursor.checkAndClearBoundsFlag()) {
                    throw new UnderlyingStorageException("Out of page bounds when reading header; page size too small: " + pageCache.pageSize() + " bytes.");
                }
            } else {
                throw new StoreNotFoundException("Failed to read header record of store file: " + storageFileName);
            }
        }
    } else {
        readHeaderAndInitializeRecordFormat(null);
    }
    recordSize = determineRecordSize();
}
Use of org.neo4j.io.pagecache.PageCursor in project neo4j by neo4j.
The class CommonAbstractStore, method getRawRecordData:
public byte[] getRawRecordData(long id) throws IOException {
    byte[] data = new byte[recordSize];
    long pageId = pageIdForRecord(id);
    int offset = offsetForId(id);
    try (PageCursor cursor = storeFile.io(pageId, PagedFile.PF_SHARED_READ_LOCK)) {
        if (cursor.next()) {
            do {
                cursor.setOffset(offset);
                cursor.getBytes(data);
            } while (cursor.shouldRetry());
            checkForDecodingErrors(cursor, id, CHECK);
        }
    }
    return data;
}
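extractHeaderRecord and getRawRecordData both follow the canonical PageCursor read idiom: position with io(...), enter the page with next(), re-read the body in a shouldRetry() loop (the page cache may report a torn read and ask for another pass), and only then inspect error flags. Distilled into a standalone sketch; the class and method names here are illustrative, not part of the Neo4j API.

import java.io.IOException;

import org.neo4j.io.pagecache.PageCursor;
import org.neo4j.io.pagecache.PagedFile;

// Illustrative distillation of the read idiom shared by the snippets above.
final class ReadIdiom {
    // Reads one long at the given location, retrying until the page cache
    // confirms the read was consistent, then checks for bounds violations.
    static long readLongAt(PagedFile file, long pageId, int offset) throws IOException {
        try (PageCursor cursor = file.io(pageId, PagedFile.PF_SHARED_READ_LOCK)) {
            if (!cursor.next()) {
                throw new IOException("Page " + pageId + " is beyond the end of the file");
            }
            long value;
            do {
                // Reset the position on every pass; a retry must re-read from scratch.
                cursor.setOffset(offset);
                value = cursor.getLong();
            } while (cursor.shouldRetry());
            if (cursor.checkAndClearBoundsFlag()) {
                throw new IOException("Out of page bounds reading page " + pageId);
            }
            return value;
        }
    }
}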