Use of org.neo4j.io.pagecache.PagedFile in the neo4j project — example from the class CommonAbstractStoreTest, method failStoreInitializationWhenHeaderRecordCantBeRead.
@Test
public void failStoreInitializationWhenHeaderRecordCantBeRead() throws IOException {
    // Store file whose header record will turn out to be unreadable.
    File file = dir.file("a");

    // Mock the whole page-cache stack so that the very first cursor.next()
    // reports failure, simulating a store file whose header cannot be read.
    PageCache cache = mock(PageCache.class);
    PagedFile mappedFile = mock(PagedFile.class);
    PageCursor cursor = mock(PageCursor.class);
    when(cache.map(eq(file), anyInt(), any(OpenOption.class))).thenReturn(mappedFile);
    when(mappedFile.io(0L, PagedFile.PF_SHARED_READ_LOCK)).thenReturn(cursor);
    when(cursor.next()).thenReturn(false);

    RecordFormats formats = Standard.LATEST_RECORD_FORMATS;

    // Initialising the store must surface the read failure as a
    // StoreNotFoundException naming the offending file.
    expectedException.expect(StoreNotFoundException.class);
    expectedException.expectMessage("Fail to read header record of store file: " + file.getAbsolutePath());

    try (DynamicArrayStore store = new DynamicArrayStore(file, config, IdType.NODE_LABELS,
            idGeneratorFactory, cache, NullLogProvider.getInstance(),
            Settings.INTEGER.apply(GraphDatabaseSettings.label_block_size.getDefaultValue()),
            formats.dynamic(), formats.storeVersion())) {
        store.initialise(false);
    }
}
Use of org.neo4j.io.pagecache.PagedFile in the neo4j project — example from the class MetaDataStoreTest, method setUp.
/**
 * Creates the real page cache plus {@code pageCacheWithFakeOverflow}: a
 * delegating wrapper whose cursors can be made to report an out-of-bounds
 * access on demand by setting {@code fakePageCursorOverflow} to {@code true}.
 * The flag is reset to {@code false} before every test.
 */
@Before
public void setUp() {
fs = fsRule.get();
pageCache = pageCacheRule.getPageCache(fs);
fakePageCursorOverflow = false;
// Wrap the real page cache so every mapped file hands out cursors whose
// bounds flag can be faked, without otherwise changing behavior.
pageCacheWithFakeOverflow = new DelegatingPageCache(pageCache) {
@Override
public PagedFile map(File file, int pageSize, OpenOption... openOptions) throws IOException {
return new DelegatingPagedFile(super.map(file, pageSize, openOptions)) {
@Override
public PageCursor io(long pageId, int pf_flags) throws IOException {
return new DelegatingPageCursor(super.io(pageId, pf_flags)) {
@Override
public boolean checkAndClearBoundsFlag() {
// Non-short-circuit '|' is deliberate: super.checkAndClearBoundsFlag()
// must always run so the delegate's real bounds flag is cleared even
// while a fake overflow is being injected.
return fakePageCursorOverflow | super.checkAndClearBoundsFlag();
}
};
}
};
}
};
}
Use of org.neo4j.io.pagecache.PagedFile in the neo4j project — example from the class MetaDataStoreTest, method transactionClosedMustBeAtomic.
@Test
public void transactionClosedMustBeAtomic() throws Throwable {
    try (MetaDataStore store = newMetaDataStore()) {
        PagedFile storePagedFile = store.storeFile;

        // Seed the last-closed-transaction fields with one matching value,
        // so readers start from a consistent pair.
        int seed = 2;
        store.transactionClosed(seed, seed, seed);

        AtomicLong writes = new AtomicLong();
        AtomicLong fileReads = new AtomicLong();
        AtomicLong apiReads = new AtomicLong();
        int upperLimit = 10_000;
        int lowerLimit = 100;
        long deadline = currentTimeMillis() + SECONDS.toMillis(10);

        Race race = new Race();
        // Stop once every contestant kind has done plenty of work...
        race.withEndCondition(() -> writes.get() >= upperLimit
                && fileReads.get() >= upperLimit
                && apiReads.get() >= upperLimit);
        // ...or when the time budget is spent and a minimum was still reached.
        race.withEndCondition(() -> writes.get() >= lowerLimit
                && fileReads.get() >= lowerLimit
                && apiReads.get() >= lowerLimit
                && currentTimeMillis() >= deadline);

        // Writers: each update uses a single value for all three fields.
        // NOTE(review): writers call transactionCommitted although this test
        // targets the last-CLOSED-transaction fields — verify against the
        // upstream MetaDataStoreTest whether transactionClosed was intended.
        race.addContestants(3, () -> {
            long value = writes.incrementAndGet();
            store.transactionCommitted(value, value, value);
        });

        // File readers: read both fields from the store page inside a
        // shouldRetry loop, so a torn (non-atomic) write shows up as a mismatch.
        race.addContestants(3, throwing(() -> {
            try (PageCursor cursor = storePagedFile.io(0, PagedFile.PF_SHARED_READ_LOCK)) {
                assertTrue(cursor.next());
                long logVersion, byteOffset;
                do {
                    logVersion = store.getRecordValue(cursor, MetaDataStore.Position.LAST_CLOSED_TRANSACTION_LOG_VERSION);
                    byteOffset = store.getRecordValue(cursor, MetaDataStore.Position.LAST_CLOSED_TRANSACTION_LOG_BYTE_OFFSET);
                } while (cursor.shouldRetry());
                assertLogVersionEqualsByteOffset(logVersion, byteOffset, "file");
                fileReads.incrementAndGet();
            }
        }));

        // API readers: the same consistency check through the public accessor.
        race.addContestants(3, () -> {
            long[] transaction = store.getLastClosedTransaction();
            assertLogVersionEqualsByteOffset(transaction[0], transaction[1], "API");
            apiReads.incrementAndGet();
        });

        race.go();
    }
}
Use of org.neo4j.io.pagecache.PagedFile in the neo4j project — example from the class MetaDataStoreTest, method setUpgradeTransactionMustBeAtomic.
@Test
public void setUpgradeTransactionMustBeAtomic() throws Throwable {
    try (MetaDataStore store = newMetaDataStore()) {
        PagedFile storePagedFile = store.storeFile;

        // Seed the upgrade-transaction fields with a consistent (0, 0, 0) triple.
        store.setUpgradeTransaction(0, 0, 0);

        AtomicLong writes = new AtomicLong();
        AtomicLong fileReads = new AtomicLong();
        AtomicLong apiReads = new AtomicLong();
        int upperLimit = 10_000;
        int lowerLimit = 100;
        long deadline = currentTimeMillis() + SECONDS.toMillis(10);

        Race race = new Race();
        // Stop once every contestant kind has done plenty of work...
        race.withEndCondition(() -> writes.get() >= upperLimit
                && fileReads.get() >= upperLimit
                && apiReads.get() >= upperLimit);
        // ...or when the time budget is spent and a minimum was still reached.
        race.withEndCondition(() -> writes.get() >= lowerLimit
                && fileReads.get() >= lowerLimit
                && apiReads.get() >= lowerLimit
                && currentTimeMillis() >= deadline);

        // Writers: each upgrade-transaction update uses one value for all fields.
        race.addContestants(3, () -> {
            long value = writes.incrementAndGet();
            store.setUpgradeTransaction(value, value, value);
        });

        // File readers: read id and checksum from the store page inside a
        // shouldRetry loop, so a torn (non-atomic) write shows up as a mismatch.
        race.addContestants(3, throwing(() -> {
            try (PageCursor cursor = storePagedFile.io(0, PagedFile.PF_SHARED_READ_LOCK)) {
                assertTrue(cursor.next());
                long id, checksum;
                do {
                    id = store.getRecordValue(cursor, MetaDataStore.Position.UPGRADE_TRANSACTION_ID);
                    checksum = store.getRecordValue(cursor, MetaDataStore.Position.UPGRADE_TRANSACTION_CHECKSUM);
                } while (cursor.shouldRetry());
                assertIdEqualsChecksum(id, checksum, "file");
                fileReads.incrementAndGet();
            }
        }));

        // API readers: the same consistency check through the public accessor.
        race.addContestants(3, () -> {
            TransactionId transaction = store.getUpgradeTransaction();
            assertIdEqualsChecksum(transaction.transactionId(), transaction.checksum(), "API");
            apiReads.incrementAndGet();
        });

        race.go();
    }
}
Aggregations