use of org.neo4j.kernel.impl.store.record.PropertyKeyTokenRecord in project neo4j by neo4j.
the class LogCommandSerializationV3_0_10 method readPropertyKeyTokenCommand.
@Override
protected Command readPropertyKeyTokenCommand(ReadableChannel channel) throws IOException {
    int id = channel.getInt();
    // Read the before/after images of the token record; a null return means
    // the record could not be deserialized, so the whole command is abandoned.
    PropertyKeyTokenRecord before = readPropertyKeyTokenRecord(id, channel);
    if (before == null) {
        return null;
    }
    PropertyKeyTokenRecord after = readPropertyKeyTokenRecord(id, channel);
    if (after == null) {
        return null;
    }
    return new Command.PropertyKeyTokenCommand(this, before, after);
}
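The record reader itself is not part of this listing. As a rough, hypothetical sketch of what readPropertyKeyTokenRecord might look like for the 3.0 log format (the field layout below is an assumption for illustration, not the authoritative wire format):

// Hypothetical sketch only; the real reader lives in the serializer and its
// exact field layout may differ. Assumed layout: in-use byte, property count,
// then the name id.
private PropertyKeyTokenRecord readPropertyKeyTokenRecord(int id, ReadableChannel channel) throws IOException {
    byte inUseFlag = channel.get();
    if (inUseFlag != Record.IN_USE.byteValue() && inUseFlag != Record.NOT_IN_USE.byteValue()) {
        return null; // unrecognized header byte: signal failure to the caller
    }
    PropertyKeyTokenRecord record = new PropertyKeyTokenRecord(id);
    record.setInUse(inUseFlag == Record.IN_USE.byteValue());
    record.setPropertyCount(channel.getInt());
    record.setNameId(channel.getInt());
    return record;
}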
use of org.neo4j.kernel.impl.store.record.PropertyKeyTokenRecord in project neo4j by neo4j.
the class LogCommandSerializationV4_0 method readPropertyKeyTokenCommand.
@Override
protected Command readPropertyKeyTokenCommand(ReadableChannel channel) throws IOException {
    int id = channel.getInt();
    PropertyKeyTokenRecord before = readPropertyKeyTokenRecord(id, channel);
    if (before == null) {
        return null;
    }
    PropertyKeyTokenRecord after = readPropertyKeyTokenRecord(id, channel);
    if (after == null) {
        return null;
    }
    // Infer the created flag from the before/after pair. DynamicRecords, by
    // contrast, store the created flag inside the record itself, because it's
    // much harder to tell from the command alone whether they were created.
    markAfterRecordAsCreatedIfCommandLooksCreated(before, after);
    return new Command.PropertyKeyTokenCommand(this, before, after);
}
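markAfterRecordAsCreatedIfCommandLooksCreated is defined elsewhere in the serialization hierarchy. A minimal sketch of the inference it performs, assuming the usual before/after in-use convention (hypothetical, not the verbatim implementation):

// Hypothetical sketch: if the record was not in use before the command but is
// in use after it, the command created it, so flag the after-image as created.
protected void markAfterRecordAsCreatedIfCommandLooksCreated(AbstractBaseRecord before, AbstractBaseRecord after) {
    if (!before.inUse() && after.inUse()) {
        after.setCreated();
    }
}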
use of org.neo4j.kernel.impl.store.record.PropertyKeyTokenRecord in project neo4j by neo4j.
the class BatchingTokenRepositoryTest method shouldFlushNewTokens.
@Test
void shouldFlushNewTokens() {
    // given
    try (NeoStores stores = newNeoStores(StoreType.PROPERTY_KEY_TOKEN, StoreType.PROPERTY_KEY_TOKEN_NAME)) {
        TokenStore<PropertyKeyTokenRecord> tokenStore = stores.getPropertyKeyTokenStore();
        int rounds = 3;
        int tokensPerRound = 4;
        BatchingPropertyKeyTokenRepository repo = new BatchingPropertyKeyTokenRepository(tokenStore);

        // when first creating some tokens
        int expectedId = 0;
        int tokenNameAsInt = 0;
        for (int round = 0; round < rounds; round++) {
            for (int i = 0; i < tokensPerRound; i++) {
                int tokenId = repo.getOrCreateId(String.valueOf(tokenNameAsInt++));
                assertEquals(expectedId + i, tokenId);
            }
            // ids are handed out eagerly, but the store's high id only advances on flush
            assertEquals(expectedId, tokenStore.getHighId());
            repo.flush(NULL);
            assertEquals(expectedId + tokensPerRound, tokenStore.getHighId());
            expectedId += tokensPerRound;
        }

        // then
        repo.flush(NULL);
        List<NamedToken> tokens = tokenStore.getTokens(NULL);
        assertEquals(tokensPerRound * rounds, tokens.size());
        for (NamedToken token : tokens) {
            assertEquals(token.id(), parseInt(token.name()));
        }
    }
}
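In short, the repository's contract as exercised by this test is: ids are assigned eagerly in memory, and records reach the token store only on flush. A condensed usage sketch (the token names here are illustrative, not from the test):

// Usage sketch based on the test above.
BatchingPropertyKeyTokenRepository repo = new BatchingPropertyKeyTokenRepository(tokenStore);
int colorId = repo.getOrCreateId("color"); // id reserved immediately, in memory
int sizeId = repo.getOrCreateId("size");   // a repeated name would return the same id
repo.flush(NULL);                          // records are written to the store here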
use of org.neo4j.kernel.impl.store.record.PropertyKeyTokenRecord in project neo4j by neo4j.
the class Commands method createPropertyKeyToken.
public static PropertyKeyTokenCommand createPropertyKeyToken(int id, int nameId) {
    // The before image is left "not in use", so the command reads as a creation.
    PropertyKeyTokenRecord before = new PropertyKeyTokenRecord(id);
    PropertyKeyTokenRecord after = new PropertyKeyTokenRecord(id);
    populateTokenRecord(after, nameId);
    return new PropertyKeyTokenCommand(before, after);
}
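populateTokenRecord belongs to the same Commands fixture and is not shown in this listing. A hypothetical sketch of the essentials it needs to set on the after-image (the real helper may do more):

// Hypothetical sketch: mark the record in use and created, and point it at
// the dynamic record chain holding its name.
private static void populateTokenRecord(TokenRecord record, int nameId) {
    record.setInUse(true);
    record.setCreated();
    record.setNameId(nameId);
}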
use of org.neo4j.kernel.impl.store.record.PropertyKeyTokenRecord in project neo4j by neo4j.
the class TransactionRecordState method extractCommands.
@Override
public void extractCommands(Collection<StorageCommand> commands, MemoryTracker memoryTracker) throws TransactionFailureException {
    assert !prepared : "Transaction has already been prepared";
    integrityValidator.validateTransactionStartKnowledge(lastCommittedTxWhenTransactionStarted);

    int noOfCommands = recordChangeSet.changeSize();

    var labelTokenChanges = recordChangeSet.getLabelTokenChanges().changes();
    memoryTracker.allocateHeap(labelTokenChanges.size() * Command.LabelTokenCommand.HEAP_SIZE);
    for (RecordProxy<LabelTokenRecord, Void> record : labelTokenChanges) {
        commands.add(new Command.LabelTokenCommand(commandSerialization, record.getBefore(), record.forReadingLinkage()));
    }
    var relationshipTypeTokenChanges = recordChangeSet.getRelationshipTypeTokenChanges().changes();
    memoryTracker.allocateHeap(relationshipTypeTokenChanges.size() * Command.RelationshipTypeTokenCommand.HEAP_SIZE);
    for (RecordProxy<RelationshipTypeTokenRecord, Void> record : relationshipTypeTokenChanges) {
        commands.add(new Command.RelationshipTypeTokenCommand(commandSerialization, record.getBefore(), record.forReadingLinkage()));
    }
    var propertyKeyTokenChanges = recordChangeSet.getPropertyKeyTokenChanges().changes();
    memoryTracker.allocateHeap(propertyKeyTokenChanges.size() * Command.PropertyKeyTokenCommand.HEAP_SIZE);
    for (RecordProxy<PropertyKeyTokenRecord, Void> record : propertyKeyTokenChanges) {
        commands.add(new Command.PropertyKeyTokenCommand(commandSerialization, record.getBefore(), record.forReadingLinkage()));
    }
    // Collect nodes, relationships, properties
    Command[] nodeCommands = EMPTY_COMMANDS;
    int skippedCommands = 0;
    var nodeChanges = recordChangeSet.getNodeRecords().changes();
    if (!nodeChanges.isEmpty()) {
        memoryTracker.allocateHeap(nodeChanges.size() * Command.NodeCommand.HEAP_SIZE);
        nodeCommands = new Command[nodeChanges.size()];
        int i = 0;
        for (RecordProxy<NodeRecord, Void> change : nodeChanges) {
            NodeRecord record = prepared(change, nodeStore);
            IntegrityValidator.validateNodeRecord(record);
            nodeCommands[i++] = new Command.NodeCommand(commandSerialization, change.getBefore(), record);
        }
        Arrays.sort(nodeCommands, COMMAND_COMPARATOR);
    }

    Command[] relCommands = EMPTY_COMMANDS;
    var relationshipChanges = recordChangeSet.getRelRecords().changes();
    if (!relationshipChanges.isEmpty()) {
        memoryTracker.allocateHeap(relationshipChanges.size() * Command.RelationshipCommand.HEAP_SIZE);
        relCommands = new Command[relationshipChanges.size()];
        int i = 0;
        for (RecordProxy<RelationshipRecord, Void> change : relationshipChanges) {
            relCommands[i++] = new Command.RelationshipCommand(commandSerialization, change.getBefore(), prepared(change, relationshipStore));
        }
        Arrays.sort(relCommands, COMMAND_COMPARATOR);
    }

    Command[] propCommands = EMPTY_COMMANDS;
    var propertyChanges = recordChangeSet.getPropertyRecords().changes();
    if (!propertyChanges.isEmpty()) {
        memoryTracker.allocateHeap(propertyChanges.size() * Command.PropertyCommand.HEAP_SIZE);
        propCommands = new Command[propertyChanges.size()];
        int i = 0;
        for (RecordProxy<PropertyRecord, PrimitiveRecord> change : propertyChanges) {
            propCommands[i++] = new Command.PropertyCommand(commandSerialization, change.getBefore(), prepared(change, propertyStore));
        }
        Arrays.sort(propCommands, COMMAND_COMPARATOR);
    }

    Command[] relGroupCommands = EMPTY_COMMANDS;
    var relationshipGroupChanges = recordChangeSet.getRelGroupRecords().changes();
    if (!relationshipGroupChanges.isEmpty()) {
        memoryTracker.allocateHeap(relationshipGroupChanges.size() * Command.RelationshipGroupCommand.HEAP_SIZE);
        relGroupCommands = new Command[relationshipGroupChanges.size()];
        int i = 0;
        for (RecordProxy<RelationshipGroupRecord, Integer> change : relationshipGroupChanges) {
            if (change.isCreated() && !change.forReadingLinkage().inUse()) {
                /*
                 * This is an edge case that may come up and which we must handle properly. Relationship groups are
                 * not managed by the tx state, since they are created as side effects rather than through
                 * direct calls. However, they differ from, say, dynamic records in that their management can happen
                 * through separate code paths. What we are interested in here is the following scenario:
                 * 0. A node has one relationship fewer than is required to transition to a dense node. The
                 *    relationships it has belong to at least two different types.
                 * 1. In the same tx, a relationship is added, making the node dense, and all the relationships of one
                 *    type are removed from that node. Regardless of the order these operations happen in, the creation
                 *    of the relationship (and the transition of the node to dense) will happen first.
                 * 2. A relationship group will be created because of the transition to dense and then deleted because
                 *    all the relationships it would hold are no longer there. This results in a relationship group
                 *    command that appears in the tx as not in use. Depending on the final order of operations, this
                 *    can end up using an id that is higher than the highest id seen so far. This may not be a problem
                 *    for a single instance, but it can result in errors in cases where transactions are applied
                 *    externally, such as backup.
                 *
                 * The way we deal with this issue here is by not issuing a command for the offending record. This is
                 * safe, since the record is not in use and never was, so the high id does not need to change and
                 * the store remains consistent.
                 */
                skippedCommands++;
                continue;
            }
            relGroupCommands[i++] = new Command.RelationshipGroupCommand(commandSerialization, change.getBefore(), prepared(change, relationshipGroupStore));
        }
        relGroupCommands = i < relGroupCommands.length ? Arrays.copyOf(relGroupCommands, i) : relGroupCommands;
        Arrays.sort(relGroupCommands, COMMAND_COMPARATOR);
    }

    addFiltered(commands, Mode.CREATE, propCommands, relCommands, relGroupCommands, nodeCommands);
    addFiltered(commands, Mode.UPDATE, propCommands, relCommands, relGroupCommands, nodeCommands);
    addFiltered(commands, Mode.DELETE, relCommands, relGroupCommands, nodeCommands);

    EnumMap<Mode, List<Command>> schemaChangeByMode = new EnumMap<>(Mode.class);
    var schemaRuleChange = recordChangeSet.getSchemaRuleChanges().changes();
    memoryTracker.allocateHeap(schemaRuleChange.size() * Command.SchemaRuleCommand.HEAP_SIZE);
    for (RecordProxy<SchemaRecord, SchemaRule> change : schemaRuleChange) {
        SchemaRecord schemaRecord = change.forReadingLinkage();
        SchemaRule rule = change.getAdditionalData();
        if (schemaRecord.inUse()) {
            integrityValidator.validateSchemaRule(rule);
        }
        Command.SchemaRuleCommand cmd = new Command.SchemaRuleCommand(commandSerialization, change.getBefore(), change.forChangingData(), rule);
        schemaChangeByMode.computeIfAbsent(cmd.getMode(), MODE_TO_ARRAY_LIST).add(cmd);
    }
    commands.addAll(schemaChangeByMode.getOrDefault(Mode.DELETE, Collections.emptyList()));
    commands.addAll(schemaChangeByMode.getOrDefault(Mode.CREATE, Collections.emptyList()));
    commands.addAll(schemaChangeByMode.getOrDefault(Mode.UPDATE, Collections.emptyList()));

    // Add deleted property commands last, so they happen after the schema record changes.
    // This extends the lifetime of property records just past the last moment of use,
    // and prevents reading and deleting of schema records from racing, and making the
    // schema records look malformed.
    addFiltered(commands, Mode.DELETE, propCommands);

    assert commands.size() == noOfCommands - skippedCommands
            : format("Expected %d final commands, got %d instead, with %d skipped", noOfCommands, commands.size(), skippedCommands);

    if (groupDegreesUpdater.degrees != null) {
        memoryTracker.allocateHeap(groupDegreesUpdater.degrees.size() * Command.GroupDegreeCommand.SHALLOW_SIZE);
        groupDegreesUpdater.degrees.forEachKeyValue((key, delta) -> {
            if (delta.longValue() != 0) {
                long groupId = Command.GroupDegreeCommand.groupIdFromCombinedKey(key);
                RelationshipDirection direction = Command.GroupDegreeCommand.directionFromCombinedKey(key);
                commands.add(new Command.GroupDegreeCommand(groupId, direction, delta.longValue()));
            }
        });
    }
    prepared = true;
}
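addFiltered is a private helper of TransactionRecordState and is not shown here. A hypothetical sketch of its shape, inferred from the call sites above (mode bucketing over already-sorted command arrays):

// Hypothetical sketch, not the verbatim helper: append every command whose
// mode matches, preserving the sorted order within each array.
private static void addFiltered(Collection<StorageCommand> target, Mode mode, Command[]... commandArrays) {
    for (Command[] array : commandArrays) {
        for (Command command : array) {
            if (command.getMode() == mode) {
                target.add(command);
            }
        }
    }
}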