use of org.eclipse.collections.api.map.primitive.MutableLongObjectMap in project neo4j by neo4j.
the class IndexingService method populateIndexesOfAllTypes.
private void populateIndexesOfAllTypes(MutableLongObjectMap<IndexDescriptor> rebuildingDescriptors, IndexMap indexMap) {
    Map<EntityType, MutableLongObjectMap<IndexDescriptor>> rebuildingDescriptorsByType = new EnumMap<>(EntityType.class);
    for (IndexDescriptor descriptor : rebuildingDescriptors) {
        rebuildingDescriptorsByType
                .computeIfAbsent(descriptor.schema().entityType(), type -> new LongObjectHashMap<>())
                .put(descriptor.getId(), descriptor);
    }
    for (Map.Entry<EntityType, MutableLongObjectMap<IndexDescriptor>> descriptorToPopulate : rebuildingDescriptorsByType.entrySet()) {
        IndexPopulationJob populationJob = newIndexPopulationJob(descriptorToPopulate.getKey(), false, SYSTEM);
        populate(descriptorToPopulate.getValue(), indexMap, populationJob);
    }
}
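The bucketing pattern above, splitting one MutableLongObjectMap into per-category maps via computeIfAbsent on an EnumMap, works with any enum key. A minimal, self-contained sketch of the same idiom; the Kind enum and Item record here are hypothetical stand-ins for EntityType and IndexDescriptor, not Neo4j types:

import java.util.EnumMap;
import java.util.Map;
import org.eclipse.collections.api.map.primitive.MutableLongObjectMap;
import org.eclipse.collections.impl.map.mutable.primitive.LongObjectHashMap;

public class GroupByTypeExample {
    // Hypothetical stand-ins for EntityType and IndexDescriptor
    enum Kind { NODE, RELATIONSHIP }
    record Item(long id, Kind kind) {}

    public static void main(String[] args) {
        MutableLongObjectMap<Item> all = new LongObjectHashMap<>();
        all.put(1, new Item(1, Kind.NODE));
        all.put(2, new Item(2, Kind.RELATIONSHIP));
        all.put(3, new Item(3, Kind.NODE));

        // Bucket by enum key; each bucket is its own primitive-keyed map
        Map<Kind, MutableLongObjectMap<Item>> byKind = new EnumMap<>(Kind.class);
        for (Item item : all) { // a MutableLongObjectMap iterates over its values
            byKind.computeIfAbsent(item.kind(), k -> new LongObjectHashMap<>()).put(item.id(), item);
        }
        byKind.forEach((kind, bucket) -> System.out.println(kind + " -> " + bucket.keySet()));
    }
}

Keying the buckets by primitive long avoids boxing every id, which is the point of reaching for LongObjectHashMap over HashMap<Long, V> in hot paths like index rebuilding.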
use of org.eclipse.collections.api.map.primitive.MutableLongObjectMap in project neo4j by neo4j.
the class RelationshipModifier method acquireMostOfTheNodeAndGroupsLocks.
private void acquireMostOfTheNodeAndGroupsLocks(RelationshipModifications modifications, RecordAccessSet recordChanges,
        ResourceLocker locks, LockTracer lockTracer, MutableLongObjectMap<NodeContext> contexts, MappedNodeDataLookup nodeDataLookup) {
    /* Here we're going to figure out if we need to make changes to any node and/or relationship group records and lock them if we do. */
    // We check the modifications for each node, since it might need locking; the iteration here is always sorted by node id.
    modifications.forEachSplit(byNode -> {
        long nodeId = byNode.nodeId();
        RecordProxy<NodeRecord, Void> nodeProxy = recordChanges.getNodeRecords().getOrLoad(nodeId, null, cursorContext);
        // Optimistic (unlocked) read
        NodeRecord node = nodeProxy.forReadingLinkage();
        boolean nodeIsAddedInTx = node.isCreated();
        if (!node.isDense()) { // we cannot trust this, as the node is not locked
            if (!nodeIsAddedInTx) { // to avoid locking unnecessarily
                // Lock and re-read; now we can trust it
                locks.acquireExclusive(lockTracer, NODE, nodeId);
                nodeProxy = recordChanges.getNodeRecords().getOrLoad(nodeId, null, cursorContext);
                node = nodeProxy.forReadingLinkage();
                if (node.isDense()) {
                    // Another transaction just turned this node dense; unlock and let it be handled below
                    locks.releaseExclusive(NODE, nodeId);
                } else if (byNode.hasCreations()) {
                    // Sparse node with added relationships. We might turn this node dense, at which point
                    // the group lock will be needed, so lock it.
                    locks.acquireExclusive(lockTracer, RELATIONSHIP_GROUP, nodeId);
                }
            }
        }
        if (node.isDense()) { // the node is not locked, but becoming dense is a one-way transform, so we can trust it
            // Stabilize the first-in-chain relationships, in case they are deleted or needed for chain degrees.
            // We are preventing any changes to the group, which in turn blocks any other relationship becoming the first in chain.
            locks.acquireShared(lockTracer, RELATIONSHIP_GROUP, nodeId);
            // Creations
            NodeContext nodeContext = NodeContext.createNodeContext(nodeProxy, memoryTracker);
            contexts.put(nodeId, nodeContext);
            if (byNode.hasCreations()) {
                // We have some creations on a dense node. If the group exists we can use it; otherwise we create it.
                byNode.forEachCreationSplit(byType -> {
                    RelationshipGroupGetter.RelationshipGroupPosition groupPosition = findRelationshipGroup(recordChanges, nodeContext, byType);
                    nodeContext.setCurrentGroup(groupPosition.group() != null ? groupPosition.group() : groupPosition.closestPrevious());
                    RecordProxy<RelationshipGroupRecord, Integer> groupProxy = groupPosition.group();
                    if (groupProxy == null) {
                        // The group did not exist
                        if (!nodeContext.hasExclusiveGroupLock()) {
                            // And we did not already have the lock, so we need to upgrade to exclusive to create it
                            locks.releaseShared(RELATIONSHIP_GROUP, nodeId);
                            // Note the small window here where we don't hold any group lock; things might change,
                            // so we cannot trust previous group reads.
                            locks.acquireExclusive(lockTracer, NODE, nodeId);
                            locks.acquireExclusive(lockTracer, RELATIONSHIP_GROUP, nodeId);
                        }
                        nodeContext.setNode(recordChanges.getNodeRecords().getOrLoad(nodeId, null, cursorContext));
                        long groupStartingId = nodeContext.node().forReadingLinkage().getNextRel();
                        long groupStartingPrevId = NULL_REFERENCE.longValue();
                        if (groupPosition.closestPrevious() != null) {
                            groupStartingId = groupPosition.closestPrevious().getKey();
                            groupStartingPrevId = groupPosition.closestPrevious().forReadingLinkage().getPrev();
                        }
                        // At this point the group is locked, so we can create it
                        groupProxy = relGroupGetter.getOrCreateRelationshipGroup(nodeContext.node(), byType.type(),
                                recordChanges.getRelGroupRecords(), groupStartingPrevId, groupStartingId);
                        // Another transaction might beat us at this point, so we are not guaranteed to be the creator,
                        // but we can trust the group to exist.
                        if (!nodeContext.hasExclusiveGroupLock()) {
                            nodeContext.markExclusiveGroupLock();
                        } else if (groupProxy.isCreated()) {
                            // When a new group is created we can no longer trust the pointers of the cache
                            nodeContext.clearDenseContext();
                        }
                    }
                    nodeContext.denseContext(byType.type()).setGroup(groupProxy);
                });
                if (!nodeContext.hasExclusiveGroupLock()) {
                    // No other path has given us the exclusive lock yet
                    byNode.forEachCreationSplitInterruptible(byType -> {
                        // But if we are creating relationships in a chain that does not exist on the group,
                        // or we might need to flip the external degrees flag...
                        RelationshipGroupRecord group = nodeContext.denseContext(byType.type()).group().forReadingLinkage();
                        if (byType.hasOut() && (!group.hasExternalDegreesOut() || isNull(group.getFirstOut()))
                                || byType.hasIn() && (!group.hasExternalDegreesIn() || isNull(group.getFirstIn()))
                                || byType.hasLoop() && (!group.hasExternalDegreesLoop() || isNull(group.getFirstLoop()))) {
                            // ...then we need the exclusive lock to change it
                            locks.releaseShared(RELATIONSHIP_GROUP, nodeId);
                            // Note the small window here where we don't hold any group lock; things might change,
                            // so we cannot trust previous group reads.
                            locks.acquireExclusive(lockTracer, RELATIONSHIP_GROUP, nodeId);
                            nodeContext.markExclusiveGroupLock();
                            // And we can abort the iteration, as the group lock protects all relationship group records of the node
                            return true;
                        }
                        return false;
                    });
                }
            }
            // Deletions
            if (byNode.hasDeletions()) {
                if (!nodeContext.hasExclusiveGroupLock()) { // no need to do anything if it is already locked by additions
                    byNode.forEachDeletionSplitInterruptible(byType -> {
                        NodeContext.DenseContext denseContext = nodeContext.denseContext(byType.type());
                        RelationshipGroupRecord group = denseContext.getOrLoadGroup(relGroupGetter,
                                nodeContext.node().forReadingLinkage(), byType.type(), recordChanges.getRelGroupRecords(), cursorContext);
                        // Here we have the shared lock, so we can trust the read
                        if (byType.hasOut() && !group.hasExternalDegreesOut()
                                || byType.hasIn() && !group.hasExternalDegreesIn()
                                || byType.hasLoop() && !group.hasExternalDegreesLoop()) {
                            // We have deletions but no external degrees; we might need to flip that, so we lock it
                            locks.releaseShared(RELATIONSHIP_GROUP, nodeId);
                            // Note the small window here where we don't hold any group lock; things might change,
                            // so we cannot trust previous group reads.
                            locks.acquireExclusive(lockTracer, RELATIONSHIP_GROUP, nodeId);
                            nodeContext.markExclusiveGroupLock();
                            return true;
                        } else {
                            // We have deletions and only external degrees
                            boolean hasAnyFirst = batchContains(byType.out(), group.getFirstOut())
                                    || batchContains(byType.in(), group.getFirstIn())
                                    || batchContains(byType.loop(), group.getFirstLoop());
                            if (hasAnyFirst) {
                                // But we're deleting the first in a chain, so the group needs to be updated
                                locks.releaseShared(RELATIONSHIP_GROUP, nodeId);
                                // Note the small window here where we don't hold any group lock; things might change,
                                // so we cannot trust previous group reads.
                                locks.acquireExclusive(lockTracer, RELATIONSHIP_GROUP, nodeId);
                                nodeContext.markExclusiveGroupLock();
                                return true;
                            }
                        }
                        return false;
                    });
                }
            }
            // Look for an opportunity to delete empty groups that we noticed while looking for groups above
            if (nodeContext.hasExclusiveGroupLock() && nodeContext.hasAnyEmptyGroup()) {
                // There may be one or more empty groups that we can delete
                if (locks.tryExclusiveLock(NODE_RELATIONSHIP_GROUP_DELETE, nodeId)) {
                    // We got the exclusive group-delete lock, so we can go ahead and try to remove any potentially empty groups
                    if (!nodeContext.hasEmptyFirstGroup() || locks.tryExclusiveLock(NODE, nodeId)) {
                        if (nodeContext.hasEmptyFirstGroup()) {
                            // It's possible that we need to delete the first group, i.e. we just now locked the node
                            // and therefore need to re-read it
                            nodeContext.setNode(recordChanges.getNodeRecords().getOrLoad(nodeId, null, cursorContext));
                        }
                        Predicate<RelationshipGroupRecord> canDeleteGroup = group -> !byNode.hasCreations(group.getType());
                        if (RelationshipGroupGetter.deleteEmptyGroups(nodeContext.node(), canDeleteGroup, nodeDataLookup)) {
                            nodeContext.clearDenseContext();
                        }
                    }
                }
            }
        }
    });
}
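Aside from the locking details, the contexts parameter here acts as a per-node cache: each dense node visited gets a NodeContext stored under its primitive id, so later phases of the modification can look it up without re-reading the store. A minimal sketch of that caching idiom using MutableLongObjectMap.getIfAbsentPut; the NodeState record and loadNodeState method are hypothetical stand-ins, not Neo4j code:

import org.eclipse.collections.api.map.primitive.MutableLongObjectMap;
import org.eclipse.collections.impl.map.mutable.primitive.LongObjectHashMap;

public class NodeContextCacheExample {
    // Hypothetical per-node state, standing in for Neo4j's NodeContext
    record NodeState(long nodeId, boolean dense) {}

    private final MutableLongObjectMap<NodeState> contexts = new LongObjectHashMap<>();

    // getIfAbsentPut loads the state at most once per node id and caches it, with no boxing of the key
    NodeState contextFor(long nodeId) {
        return contexts.getIfAbsentPut(nodeId, () -> loadNodeState(nodeId));
    }

    private NodeState loadNodeState(long nodeId) {
        // Stand-in for an actual store read
        return new NodeState(nodeId, nodeId % 2 == 0);
    }

    public static void main(String[] args) {
        NodeContextCacheExample cache = new NodeContextCacheExample();
        System.out.println(cache.contextFor(42)); // loads and caches
        System.out.println(cache.contextFor(42)); // served from the map
    }
}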
use of org.eclipse.collections.api.map.primitive.MutableLongObjectMap in project neo4j by neo4j.
the class IndexingService method start.
// Recovery semantics: This is to be called after init, and after the database has run recovery.
@Override
public void start() throws Exception {
    state = State.STARTING;
    // Recovery will not do refresh (update read views) while applying recovered transactions, and instead
    // does it at one point after recovery... i.e. here.
    indexMapRef.indexMapSnapshot().forEachIndexProxy(indexProxyOperation("refresh", IndexProxy::refresh));
    final MutableLongObjectMap<IndexDescriptor> rebuildingDescriptors = new LongObjectHashMap<>();
    indexMapRef.modify(indexMap -> {
        Map<InternalIndexState, List<IndexLogRecord>> indexStates = new EnumMap<>(InternalIndexState.class);
        Map<IndexProviderDescriptor, List<IndexLogRecord>> indexProviders = new HashMap<>();
        // Find all indexes that are not already online, do not require rebuilding, and create them
        indexMap.forEachIndexProxy((indexId, proxy) -> {
            InternalIndexState state = proxy.getState();
            IndexDescriptor descriptor = proxy.getDescriptor();
            IndexProviderDescriptor providerDescriptor = descriptor.getIndexProvider();
            IndexLogRecord indexLogRecord = new IndexLogRecord(descriptor);
            indexStates.computeIfAbsent(state, internalIndexState -> new ArrayList<>()).add(indexLogRecord);
            indexProviders.computeIfAbsent(providerDescriptor, indexProviderDescriptor -> new ArrayList<>()).add(indexLogRecord);
            internalLog.debug(indexStateInfo("start", state, descriptor));
            switch (state) {
                case ONLINE:
                case FAILED:
                    proxy.start();
                    break;
                case POPULATING:
                    // Remember for rebuilding right below in this method
                    rebuildingDescriptors.put(indexId, descriptor);
                    break;
                default:
                    throw new IllegalStateException("Unknown state: " + state);
            }
        });
        logIndexStateSummary("start", indexStates);
        logIndexProviderSummary(indexProviders);
        dontRebuildIndexesInReadOnlyMode(rebuildingDescriptors);
        // Drop placeholder proxies for indexes that need to be rebuilt
        dropRecoveringIndexes(indexMap, rebuildingDescriptors.keySet());
        // Rebuild indexes by recreating and repopulating them
        populateIndexesOfAllTypes(rebuildingDescriptors, indexMap);
        return indexMap;
    });
    indexStatisticsStore.start();
    samplingController.recoverIndexSamples();
    samplingController.start();
    // At this point we've started background population of the indexes that need to be rebuilt.
    // Indexes backing uniqueness constraints are normally built within the transaction creating the constraint,
    // so we shouldn't leave such indexes in a populating state after recovery.
    // This is why we now go and wait for those indexes to be fully populated.
    rebuildingDescriptors.forEachKeyValue((indexId, index) -> {
        if (!index.isUnique()) {
            // It's not a uniqueness constraint, so don't wait for it to be rebuilt
            return;
        }
        IndexProxy proxy;
        try {
            proxy = getIndexProxy(index);
        } catch (IndexNotFoundKernelException e) {
            throw new IllegalStateException("What? This index was seen during recovery just now, why isn't it available now?", e);
        }
        if (proxy.getDescriptor().getOwningConstraintId().isEmpty()) {
            // The index has no owning constraint, so there's no gain in waiting for it
            return;
        }
        monitor.awaitingPopulationOfRecoveredIndex(index);
        awaitOnlineAfterRecovery(proxy);
    });
    state = State.RUNNING;
}
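start() leans on three MutableLongObjectMap operations: put while scanning the proxies, keySet() to drop the recovering placeholders, and forEachKeyValue to wait on constraint-backing indexes. A standalone sketch of just those operations; the Descriptor record is a hypothetical stand-in for IndexDescriptor:

import org.eclipse.collections.api.map.primitive.MutableLongObjectMap;
import org.eclipse.collections.impl.map.mutable.primitive.LongObjectHashMap;

public class RebuildTrackingExample {
    // Hypothetical stand-in for IndexDescriptor
    record Descriptor(String name, boolean unique) {}

    public static void main(String[] args) {
        MutableLongObjectMap<Descriptor> rebuilding = new LongObjectHashMap<>();
        // Collected while scanning index proxies, keyed by index id
        rebuilding.put(7, new Descriptor("personName", false));
        rebuilding.put(9, new Descriptor("uniqueEmail", true));

        // keySet() exposes the primitive ids without boxing, e.g. for dropping placeholder proxies
        System.out.println("rebuilding ids: " + rebuilding.keySet());

        // forEachKeyValue visits (long, V) pairs; here we only act on uniqueness indexes
        rebuilding.forEachKeyValue((indexId, descriptor) -> {
            if (descriptor.unique()) {
                System.out.println("would await population of index " + indexId);
            }
        });
    }
}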