Use of org.neo4j.io.pagecache.context.CursorContext in the neo4j project.
Example: the PreFetcher class, run method.
// Background pre-fetch loop: observes the page id another (read) cursor is positioned on and
// speculatively loads pages ahead of it (or behind it, when it scans backwards) into the page cache.
@Override
public void run() {
    // Phase 1: Wait for observed cursor to start moving.
    // Give up if nothing happens for 150 milliseconds.
    setDeadline(150, TimeUnit.MILLISECONDS);
    long initialPageId;
    while ((initialPageId = getCurrentObservedPageId()) == UNBOUND_PAGE_ID) {
        pause();
        if (pastDeadline()) {
            // Give up. Looks like this cursor is either already finished, or never started.
            return;
        }
    }
    // Phase 2: Wait for the cursor to move either forwards or backwards, to determine the prefetching direction.
    // We will wait up to 200 milliseconds for this phase to complete.
    setDeadline(200, TimeUnit.MILLISECONDS);
    long secondPageId;
    while ((secondPageId = getCurrentObservedPageId()) == initialPageId) {
        pause();
        if (pastDeadline()) {
            // Okay, this is going too slow. Give up.
            return;
        }
    }
    if (secondPageId == UNBOUND_PAGE_ID) {
        // We're done. The observed cursor was closed.
        return;
    }
    // Phase 3: We now know what direction to prefetch in.
    // Just keep loading pages on the right side of the cursor until its closed.
    boolean forward = initialPageId < secondPageId;
    long currentPageId;
    long cp;
    long nextPageId;
    long fromPage;
    long toPage;
    // Offset is a fixed adjustment of the observed cursor position.
    // This moves the start of the pre-fetch range forward for forward pre-fetching,
    // or the end position backward for backward pre-fetching.
    long offset = forward ? 1 : -1;
    // Jump is the dynamically adjusted size of the prefetch range,
    // with a sign component to indicate forwards or backwards pre-fetching.
    // That is, jump is negative if we are pre-fetching backwards.
    // This way, observed position + jump is the end or start of the pre-fetch range,
    // for forwards or backwards pre-fetch respectively.
    // The initial value doesn't matter much. Just use the same as offset, so we initially fetch one page.
    long jump = offset;
    try (var tracer = this.tracer.createPageCursorTracer(TRACER_PRE_FETCHER_TAG);
            PageCursor prefetchCursor = cursorFactory.takeReadCursor(0, PF_SHARED_READ_LOCK, new CursorContext(tracer))) {
        currentPageId = getCurrentObservedPageId();
        while (currentPageId != UNBOUND_PAGE_ID) {
            // Compute the [fromPage, toPage) range (toPage is exclusive) to pre-fetch this round.
            cp = currentPageId + offset;
            if (forward) {
                fromPage = cp;
                toPage = cp + jump;
            } else {
                // Clamp at 0 so a backward jump never produces a negative page id.
                fromPage = Math.max(0, cp + jump);
                toPage = cp;
            }
            while (fromPage < toPage) {
                if (!prefetchCursor.next(fromPage) || cancelled) {
                    // Reached the end of the file. Or got cancelled.
                    return;
                }
                fromPage++;
            }
            // Phase 3.5: After each prefetch round, we wait for the cursor to move again.
            // If it just stops somewhere for longer than the deadline (10 seconds), then we quit.
            nextPageId = getCurrentObservedPageId();
            if (nextPageId == currentPageId) {
                setDeadline(10, TimeUnit.SECONDS);
                while (nextPageId == currentPageId) {
                    pause();
                    if (pastDeadline()) {
                        // The cursor hasn't made any progress within the deadline. Leave it alone.
                        return;
                    }
                    nextPageId = getCurrentObservedPageId();
                }
                // The cursor stalled but then moved again; reset the pause back-off.
                madeProgress();
            }
            if (nextPageId != UNBOUND_PAGE_ID) {
                // Adapt the range size to the observed cursor speed: fetch twice the distance
                // the cursor covered since the last round, keeping the sign (direction).
                jump = (nextPageId - currentPageId) * 2;
            }
            currentPageId = nextPageId;
        }
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
Use of org.neo4j.io.pagecache.context.CursorContext in the neo4j project.
Example: the Read class, nodeLabelScan method.
/**
 * Creates a scan over all nodes that carry the given label, backed by the node label index.
 *
 * @param label the label token id to scan for
 * @return a {@link Scan} producing {@link NodeLabelIndexCursor} positions for the labelled nodes
 */
@Override
public final Scan<NodeLabelIndexCursor> nodeLabelScan(int label) {
    ktx.assertOpen();
    CursorContext context = ktx.cursorContext();
    try {
        // A node label scan requires a token index covering all nodes; fail if none exists.
        Iterator<IndexDescriptor> candidates = index(SchemaDescriptor.forAnyEntityTokens(EntityType.NODE));
        if (!candidates.hasNext()) {
            throw new IndexNotFoundKernelException("There is no index that can back a node label scan.");
        }
        DefaultTokenReadSession session = (DefaultTokenReadSession) tokenReadSession(candidates.next());
        TokenScan scan = session.reader.entityTokenScan(label, context);
        return new NodeLabelIndexCursorScan(this, label, scan, context);
    } catch (IndexNotFoundKernelException e) {
        // Callers of this method do not expect a checked exception here.
        throw new RuntimeException(e);
    }
}
Use of org.neo4j.io.pagecache.context.CursorContext in the neo4j project.
Example: the Loaders class, propertyLoader method.
/**
 * Builds a {@link RecordLoader} for property records that, on both creation and load,
 * links the record to its owning primitive record (node or relationship) when one is given.
 *
 * @param store the property store the loader reads from
 * @param cursorContext cursor context used for page accesses
 * @return a property-record loader that wires up ownership on load/create
 */
public static RecordLoader<PropertyRecord, PrimitiveRecord> propertyLoader(final PropertyStore store, CursorContext cursorContext) {
    return new RecordLoader<>(store, cursorContext) {
        @Override
        public PropertyRecord newUnused(long key, PrimitiveRecord additionalData) {
            PropertyRecord created = new PropertyRecord(key);
            attachOwner(created, additionalData);
            return andMarkAsCreated(created);
        }

        @Override
        public PropertyRecord load(long key, PrimitiveRecord additionalData, RecordLoad load, CursorContext cursorContext) {
            PropertyRecord loaded = super.load(key, additionalData, load, cursorContext);
            attachOwner(loaded, additionalData);
            return loaded;
        }

        @Override
        public PropertyRecord copy(PropertyRecord propertyRecord) {
            return new PropertyRecord(propertyRecord);
        }

        // Points the owner's id at this property record; a null owner is simply skipped.
        private void attachOwner(PropertyRecord record, PrimitiveRecord owner) {
            if (owner != null) {
                owner.setIdTo(record);
            }
        }
    };
}
Use of org.neo4j.io.pagecache.context.CursorContext in the neo4j project.
Example: the RecordLoading class, safeLoadTokens method.
/**
 * Reads every in-use token from the given token store, resolving each token's name from the
 * dynamic name store in a way that tolerates broken name-record chains: if the chain is missing
 * or references an unused record, a placeholder name is substituted instead of failing.
 *
 * @param tokenStore the token store to read from
 * @param cursorContext cursor context used for page accesses
 * @return all in-use tokens, with real or placeholder names
 */
static <RECORD extends TokenRecord> List<NamedToken> safeLoadTokens(TokenStore<RECORD> tokenStore, CursorContext cursorContext) {
    List<NamedToken> result = new ArrayList<>();
    DynamicStringStore nameStore = tokenStore.getNameStore();
    long highId = tokenStore.getHighId();
    int blockSize = nameStore.getRecordDataSize();
    // Guards against cycles / duplicates while walking dynamic name-record chains.
    MutableLongSet visitedNameRecordIds = new LongHashSet();
    // Reused buffer for the current token's name-record chain.
    List<DynamicRecord> chain = new ArrayList<>();
    try (RecordReader<RECORD> tokenReader = new RecordReader<>(tokenStore, true, cursorContext);
            RecordReader<DynamicRecord> nameReader = new RecordReader<>(nameStore, false, cursorContext)) {
        for (long id = 0; id < highId; id++) {
            RECORD token = tokenReader.read(id);
            chain.clear();
            if (!token.inUse()) {
                continue;
            }
            boolean chainLoaded = !NULL_REFERENCE.is(token.getNameId())
                    && safeLoadDynamicRecordChain(r -> chain.add(r.copy()), nameReader, visitedNameRecordIds, token.getNameId(), blockSize);
            String name;
            if (chainLoaded) {
                token.addNameRecords(chain);
                name = tokenStore.getStringFor(token, cursorContext);
            } else {
                name = format("<name not loaded due to token(%d) referencing unused name record>", id);
            }
            result.add(new NamedToken(name, toIntExact(id), token.isInternal()));
        }
    }
    return result;
}
Use of org.neo4j.io.pagecache.context.CursorContext in the neo4j project.
Example: the RecordStorageConsistencyChecker class, check method.
/**
 * Runs the full consistency check: loads tokens, checks the schema, then checks nodes,
 * indexes, relationships, relationship groups and chains over one or more node-id ranges,
 * and finally verifies counts. Any failure is wrapped in a
 * {@link ConsistencyCheckIncompleteException} after cancelling the run.
 *
 * @throws ConsistencyCheckIncompleteException if any part of the check fails or is interrupted
 */
public void check() throws ConsistencyCheckIncompleteException {
    assert !context.isCancelled();
    try {
        context.initialize();
        // Starting by loading all tokens from store into the TokenHolders, loaded in a safe way of course
        safeLoadTokens(neoStores);
        // Check schema - constraints and indexes, that sort of thing
        // This is done before instantiating the other checker instances because the schema checker will also
        // populate maps regarding mandatory properties which the node/relationship checkers uses
        SchemaChecker schemaChecker = new SchemaChecker(context);
        MutableIntObjectMap<MutableIntSet> mandatoryNodeProperties = new IntObjectHashMap<>();
        MutableIntObjectMap<MutableIntSet> mandatoryRelationshipProperties = new IntObjectHashMap<>();
        // Dedicated cursor context for the schema phase; closed as soon as the phase is done.
        try (var cursorContext = new CursorContext(cacheTracer.createPageCursorTracer(SCHEMA_CONSISTENCY_CHECKER_TAG))) {
            schemaChecker.check(mandatoryNodeProperties, mandatoryRelationshipProperties, cursorContext);
        }
        // Some pieces of check logic are extracted from this main class to reduce the size of this class. Instantiate those here first
        NodeChecker nodeChecker = new NodeChecker(context, mandatoryNodeProperties);
        IndexChecker indexChecker = new IndexChecker(context, EntityType.NODE);
        RelationshipChecker relationshipChecker = new RelationshipChecker(context, mandatoryRelationshipProperties);
        RelationshipGroupChecker relationshipGroupChecker = new RelationshipGroupChecker(context);
        RelationshipChainChecker relationshipChainChecker = new RelationshipChainChecker(context);
        ProgressMonitorFactory.Completer progressCompleter = progress.build();
        int numberOfRanges = limiter.numberOfRanges();
        // Check one node-id range at a time; i is 1-based purely for the debug message below.
        for (int i = 1; limiter.hasNext(); i++) {
            if (isCancelled()) {
                break;
            }
            LongRange range = limiter.next();
            if (numberOfRanges > 1) {
                context.debug("=== Checking range %d/%d (%s) ===", i, numberOfRanges, range);
            }
            context.initializeRange();
            // Tell the cache that the pivot node id is the low end of this range. This will make all interactions with the cache
            // take that into consideration when working with offset arrays where the index is based on node ids.
            cacheAccess.setPivotId(range.from());
            // Go into a node-centric mode where the nodes themselves are checked and somewhat cached off-heap.
            // Then while we have the nodes loaded in cache do all other checking that has anything to do with nodes
            // so that the "other" store can be checked sequentially and the random node lookups will be cheap
            context.runIfAllowed(indexChecker, range);
            cacheAccess.setCacheSlotSizesAndClear(DEFAULT_SLOT_SIZES);
            context.runIfAllowed(nodeChecker, range);
            context.runIfAllowed(relationshipGroupChecker, range);
            context.runIfAllowed(relationshipChecker, range);
            context.runIfAllowed(relationshipChainChecker, range);
        }
        if (!isCancelled() && context.consistencyFlags.isCheckGraph()) {
            // All counts we've observed while doing other checking along the way we compare against the counts store here
            checkCounts();
        }
        // NOTE(review): progressCompleter is only closed on the success path; if an exception is
        // thrown above, close() is skipped — confirm whether Completer needs closing on failure.
        progressCompleter.close();
    } catch (Exception e) {
        cancel("ConsistencyChecker failed unexpectedly");
        throw new ConsistencyCheckIncompleteException(e);
    }
}
Aggregations