Use of org.neo4j.consistency.checker.ParallelExecution.ThrowingRunnable in project neo4j by neo4j.
In the class IndexChecker, method cacheIndex:
private void cacheIndex(IndexContext index, LongRange nodeIdRange, boolean firstRange, CursorContext cursorContext) throws Exception {
    IndexAccessor accessor = indexAccessors.accessorFor(index.descriptor);
    IndexEntriesReader[] partitions = accessor.newAllEntriesValueReader(context.execution.getNumberOfThreads(), cursorContext);
    try {
        Value[][] firstValues = new Value[partitions.length][];
        Value[][] lastValues = new Value[partitions.length][];
        long[] firstEntityIds = new long[partitions.length];
        long[] lastEntityIds = new long[partitions.length];
        ThrowingRunnable[] workers = new ThrowingRunnable[partitions.length];
        for (int i = 0; i < partitions.length; i++) {
            IndexEntriesReader partition = partitions[i];
            int slot = i;
            workers[i] = () -> {
                int lastChecksum = 0;
                int progressPart = 0;
                ProgressListener localCacheProgress = cacheProgress.threadLocalReporter();
                var client = cacheAccess.client();
                try (var context = new CursorContext(this.context.pageCacheTracer.createPageCursorTracer(CONSISTENCY_INDEX_CACHER_TAG))) {
                    while (partition.hasNext() && !this.context.isCancelled()) {
                        long entityId = partition.next();
                        if (!nodeIdRange.isWithinRangeExclusiveTo(entityId)) {
                            if (firstRange && entityId >= this.context.highNodeId) {
                                reporter.forIndexEntry(new IndexEntry(index.descriptor, this.context.tokenNameLookup, entityId))
                                        .nodeNotInUse(this.context.recordLoader.node(entityId, context));
                            } else if (firstRange && index.descriptor.isUnique() && index.hasValues) {
                                // We check all values belonging to unique indexes while we are checking the first range, to not
                                // miss duplicated values belonging to different ranges.
                                Value[] indexedValues = partition.values();
                                int checksum = checksum(indexedValues);
                                assert checksum <= CHECKSUM_MASK;
                                lastChecksum = verifyUniquenessInPartition(index, firstValues, lastValues, firstEntityIds, lastEntityIds,
                                        slot, lastChecksum, context, entityId, indexedValues, checksum);
                            }
                            continue;
                        }
                        int data = IN_USE_MASK;
                        if (index.hasValues) {
                            Value[] indexedValues = partition.values();
                            int checksum = checksum(indexedValues);
                            assert checksum <= CHECKSUM_MASK;
                            data |= checksum;
                            // Also take the opportunity to verify uniqueness, if the index is a uniqueness index
                            if (firstRange && index.descriptor.isUnique()) {
                                lastChecksum = verifyUniquenessInPartition(index, firstValues, lastValues, firstEntityIds, lastEntityIds,
                                        slot, lastChecksum, context, entityId, indexedValues, checksum);
                            }
                        }
                        client.putToCacheSingle(entityId, index.cacheSlotOffset, data);
                        if (++progressPart == INDEX_CACHING_PROGRESS_FACTOR) {
                            localCacheProgress.add(1);
                            progressPart = 0;
                        }
                    }
                }
                localCacheProgress.done();
            };
        }
        // Run the workers that cache the index contents and that do partition-local uniqueness checking, if index is unique
        context.execution.run("Cache index", workers);
        // Then, also if the index is unique then do uniqueness checking of the seams between the partitions
        if (firstRange && index.descriptor.isUnique() && !context.isCancelled()) {
            for (int i = 0; i < partitions.length - 1; i++) {
                Value[] left = lastValues[i];
                Value[] right = firstValues[i + 1];
                // Skip any empty partition - can be empty if all entries in a partition of the index were for nodes outside of the current range.
                if (left != null && right != null && Arrays.equals(left, right)) {
                    long leftEntityId = lastEntityIds[i];
                    long rightEntityId = firstEntityIds[i + 1];
                    reporter.forNode(context.recordLoader.node(leftEntityId, cursorContext))
                            .uniqueIndexNotUnique(index.descriptor, left, rightEntityId);
                }
            }
        }
    } finally {
        IOUtils.closeAll(partitions);
    }
}
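For orientation, the pattern above is: one ThrowingRunnable per index partition caches entries and does partition-local uniqueness checking, and only after all workers have completed does the coordinating thread compare the seams between adjacent partitions. The following is a minimal, self-contained sketch of that shape, assuming a simplified ThrowingTask interface and a hypothetical runAll helper in place of Neo4j's ThrowingRunnable and ParallelExecution.run; the long[] partitions and the seam comparison are illustrative stand-ins, not the real index reader or Value arrays.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

// Simplified stand-in for ParallelExecution.ThrowingRunnable: a task that may throw.
@FunctionalInterface
interface ThrowingTask {
    void run() throws Exception;
}

public class PartitionSeamCheckSketch {
    public static void main(String[] args) throws Exception {
        // Sorted index entries split into partitions; each worker owns one slot.
        long[][] partitions = { { 1, 2, 3 }, { 3, 4, 5 }, { 6, 7 } };
        long[] firstValues = new long[partitions.length];
        long[] lastValues = new long[partitions.length];

        ThrowingTask[] workers = new ThrowingTask[partitions.length];
        for (int i = 0; i < partitions.length; i++) {
            int slot = i;
            workers[i] = () -> {
                long[] partition = partitions[slot];
                // Remember the seam values so the coordinator can compare adjacent partitions later.
                firstValues[slot] = partition[0];
                lastValues[slot] = partition[partition.length - 1];
                // Partition-local work (caching, duplicate detection within the slot) would go here.
            };
        }

        runAll(workers); // fan out, wait for all workers, rethrow the first failure

        // Seam check: a duplicate that straddles two partitions is only visible here.
        for (int i = 0; i < partitions.length - 1; i++) {
            if (lastValues[i] == firstValues[i + 1]) {
                System.out.println("duplicate across seam " + i + "/" + (i + 1) + ": " + lastValues[i]);
            }
        }
    }

    // Hypothetical equivalent of ParallelExecution.run(name, workers), built on a plain thread pool.
    static void runAll(ThrowingTask[] workers) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(workers.length);
        try {
            List<Future<?>> futures = new ArrayList<>();
            for (ThrowingTask worker : workers) {
                futures.add(pool.submit(() -> { worker.run(); return null; }));
            }
            for (Future<?> future : futures) {
                future.get(); // propagates worker exceptions as ExecutionException
            }
        } finally {
            pool.shutdown();
        }
    }
}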
Use of org.neo4j.consistency.checker.ParallelExecution.ThrowingRunnable in project neo4j by neo4j.
In the class RelationshipChainChecker, method checkDirection:
private void checkDirection(LongRange nodeIdRange, ScanDirection direction) throws Exception {
    RelationshipStore relationshipStore = context.neoStores.getRelationshipStore();
    long highId = relationshipStore.getHighId();
    AtomicBoolean end = new AtomicBoolean();
    int numberOfThreads = numberOfChainCheckers + 1;
    ThrowingRunnable[] workers = new ThrowingRunnable[numberOfThreads];
    ProgressListener localProgress = progress.threadLocalReporter();
    ArrayBlockingQueue<BatchedRelationshipRecords>[] threadQueues = new ArrayBlockingQueue[numberOfChainCheckers];
    BatchedRelationshipRecords[] threadBatches = new BatchedRelationshipRecords[numberOfChainCheckers];
    for (int i = 0; i < numberOfChainCheckers; i++) {
        threadQueues[i] = new ArrayBlockingQueue<>(20);
        threadBatches[i] = new BatchedRelationshipRecords();
        workers[i] = relationshipVsRelationshipChecker(nodeIdRange, direction, relationshipStore, threadQueues[i], end, i);
    }
    // Record reader
    workers[workers.length - 1] = () -> {
        RelationshipRecord relationship = relationshipStore.newRecord();
        try (var cursorContext = new CursorContext(context.pageCacheTracer.createPageCursorTracer(RELATIONSHIP_CONSISTENCY_CHECKER_TAG));
             var cursor = relationshipStore.openPageCursorForReadingWithPrefetching(0, cursorContext)) {
            int recordsPerPage = relationshipStore.getRecordsPerPage();
            long id = direction.startingId(highId);
            while (id >= 0 && id < highId && !context.isCancelled()) {
                for (int i = 0; i < recordsPerPage && id >= 0 && id < highId; i++, id = direction.nextId(id)) {
                    relationshipStore.getRecordByCursor(id, relationship, FORCE, cursor);
                    localProgress.add(1);
                    if (relationship.inUse()) {
                        queueRelationshipCheck(threadQueues, threadBatches, relationship);
                    }
                }
            }
            processLastRelationshipChecks(threadQueues, threadBatches, end);
            localProgress.done();
        }
    };
    Stopwatch stopwatch = Stopwatch.start();
    cacheAccess.clearCache();
    context.execution.runAll(getClass().getSimpleName() + "-" + direction.name(), workers);
    detectSingleRelationshipChainInconsistencies(nodeIdRange);
    context.paddedDebug("%s %s took %s", this, direction, duration(stopwatch.elapsed(TimeUnit.MILLISECONDS)));
}
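The last worker in the array above is a single record reader; the other workers are chain checkers that each consume their own bounded queue, and the AtomicBoolean signals the end of the scan. The sketch below illustrates only that producer/consumer shape under hypothetical names (ReaderCheckerSketch, routing by id modulo the checker count); it does not reproduce Neo4j's BatchedRelationshipRecords batching, the FORCE record loading, or the actual chain verification.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;

// One reader thread scans ids in order and routes them to per-checker bounded queues;
// each checker drains its own queue until the reader signals the end of the scan.
public class ReaderCheckerSketch {
    public static void main(String[] args) throws Exception {
        int checkers = 3;
        long highId = 1_000;
        AtomicBoolean end = new AtomicBoolean();
        List<ArrayBlockingQueue<Long>> queues = new ArrayList<>();
        List<Thread> threads = new ArrayList<>();

        for (int i = 0; i < checkers; i++) {
            ArrayBlockingQueue<Long> queue = new ArrayBlockingQueue<>(20); // small and bounded: back-pressure on the reader
            queues.add(queue);
            Thread checker = new Thread(() -> {
                long checked = 0;
                while (true) {
                    Long id = queue.poll();
                    if (id == null) {
                        if (end.get() && queue.isEmpty()) {
                            break; // reader is done and nothing is left to drain
                        }
                        Thread.onSpinWait();
                        continue;
                    }
                    checked++; // real code would verify the relationship chain links here
                }
                System.out.println(Thread.currentThread().getName() + " checked " + checked);
            }, "checker-" + i);
            threads.add(checker);
            checker.start();
        }

        // Reader: scans ids sequentially and routes each to a checker by a stable function of the id,
        // so related records consistently land on the same checker thread.
        for (long id = 0; id < highId; id++) {
            queues.get((int) (id % checkers)).put(id); // blocks when the checker falls behind
        }
        end.set(true);
        for (Thread thread : threads) {
            thread.join();
        }
    }
}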
Use of org.neo4j.consistency.checker.ParallelExecution.ThrowingRunnable in project neo4j by neo4j.
In the class IndexSizes, method calculateSizes:
private void calculateSizes(EntityType entityType, ConcurrentMap<IndexDescriptor, Long> indexSizes) throws Exception {
    List<IndexDescriptor> indexes = indexAccessors.onlineRules(entityType);
    execution.run("Estimate index sizes", indexes.stream().map(index -> (ThrowingRunnable) () -> {
        try (var cursorContext = new CursorContext(pageCacheTracer.createPageCursorTracer(SIZE_CALCULATOR_TAG))) {
            IndexAccessor accessor = indexAccessors.accessorFor(index);
            indexSizes.put(index, accessor.estimateNumberOfEntries(cursorContext));
        }
    }).toArray(ThrowingRunnable[]::new));
}
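Here each online index is mapped to its own ThrowingRunnable via a stream, the resulting array is handed to ParallelExecution, and results are gathered in a ConcurrentMap. A minimal sketch of the same map-to-task-array shape, assuming a simplified ThrowingTask interface and hypothetical estimateEntries and run helpers in place of IndexAccessor.estimateNumberOfEntries and ParallelExecution.run:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

// Map each item to a throwing task with a stream, collect into an array,
// run the tasks in parallel, and gather results in a concurrent map.
public class TaskPerItemSketch {
    @FunctionalInterface
    interface ThrowingTask {
        void run() throws Exception;
    }

    public static void main(String[] args) throws Exception {
        List<String> indexNames = List.of("index-a", "index-b", "index-c"); // stand-ins for IndexDescriptor
        Map<String, Long> sizes = new ConcurrentHashMap<>();                // stand-in for the ConcurrentMap of sizes

        ThrowingTask[] tasks = indexNames.stream()
                .map(name -> (ThrowingTask) () -> sizes.put(name, estimateEntries(name)))
                .toArray(ThrowingTask[]::new);

        run(tasks);
        System.out.println(sizes);
    }

    // Hypothetical estimator standing in for IndexAccessor.estimateNumberOfEntries(cursorContext).
    static long estimateEntries(String indexName) {
        return indexName.length() * 100L;
    }

    // Hypothetical equivalent of ParallelExecution.run(name, tasks).
    static void run(ThrowingTask[] tasks) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(Math.max(1, tasks.length));
        try {
            List<Future<?>> futures = new ArrayList<>();
            for (ThrowingTask task : tasks) {
                futures.add(pool.submit(() -> { task.run(); return null; }));
            }
            for (Future<?> future : futures) {
                future.get(); // surfaces any task failure
            }
        } finally {
            pool.shutdown();
        }
    }
}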