Example usage of org.janusgraph.diskstorage.indexing.IndexEntry in the JanusGraph project:
class StandardJanusGraph, method prepareCommit.
/**
 * Stages all storage and index mutations implied by the added/deleted relations that pass
 * {@code filter} onto the given {@link BackendTransaction}, acquiring the edge and index
 * locks needed for consistency where {@code acquireLocks} requests them.
 *
 * Processing order matters: deletions are collected before additions, and index locks for
 * deletions are acquired before those for additions (steps 1-4 below).
 *
 * @param addedRelations   relations newly created in this transaction
 * @param deletedRelations relations removed in this transaction
 * @param filter           selects which relations this invocation is responsible for
 * @param mutator          backend transaction that accumulates the staged mutations
 * @param tx               the originating JanusGraph transaction (used for serialization)
 * @param acquireLocks     whether consistency locks should be acquired at all
 * @return summary flagging whether any edge-store and/or mixed-index mutations were staged
 * @throws BackendException if lock acquisition or mutation staging fails in the backend
 */
public ModificationSummary prepareCommit(final Collection<InternalRelation> addedRelations, final Collection<InternalRelation> deletedRelations, final Predicate<InternalRelation> filter, final BackendTransaction mutator, final StandardJanusGraphTx tx, final boolean acquireLocks) throws BackendException {
// Edge mutations grouped by incident vertex id.
ListMultimap<Long, InternalRelation> mutations = ArrayListMultimap.create();
// Property mutations grouped by vertex, used in step 3 to compute vertex index updates.
ListMultimap<InternalVertex, InternalRelation> mutatedProperties = ArrayListMultimap.create();
List<IndexSerializer.IndexUpdate> indexUpdates = Lists.newArrayList();
// 1) Collect deleted edges and their index updates and acquire edge locks
for (InternalRelation del : Iterables.filter(deletedRelations, filter)) {
Preconditions.checkArgument(del.isRemoved());
for (int pos = 0; pos < del.getLen(); pos++) {
InternalVertex vertex = del.getVertex(pos);
// A loop edge touches the same vertex at both positions; record it only once (pos 0).
if (pos == 0 || !del.isLoop()) {
if (del.isProperty())
mutatedProperties.put(vertex, del);
mutations.put(vertex.longId(), del);
}
if (acquireLock(del, pos, acquireLocks)) {
// Deletion locks pass the full entry (column AND value) — contrast with the
// column-only lock taken for additions in step 2.
Entry entry = edgeSerializer.writeRelation(del, pos, tx);
mutator.acquireEdgeLock(idManager.getKey(vertex.longId()), entry);
}
}
indexUpdates.addAll(indexSerializer.getIndexUpdates(del));
}
// 2) Collect added edges and their index updates and acquire edge locks
for (InternalRelation add : Iterables.filter(addedRelations, filter)) {
Preconditions.checkArgument(add.isNew());
for (int pos = 0; pos < add.getLen(); pos++) {
InternalVertex vertex = add.getVertex(pos);
if (pos == 0 || !add.isLoop()) {
if (add.isProperty())
mutatedProperties.put(vertex, add);
mutations.put(vertex.longId(), add);
}
// No lock is needed for a brand-new vertex: nothing else can contend on it yet.
if (!vertex.isNew() && acquireLock(add, pos, acquireLocks)) {
// Addition locks pass only the column (no expected value).
Entry entry = edgeSerializer.writeRelation(add, pos, tx);
mutator.acquireEdgeLock(idManager.getKey(vertex.longId()), entry.getColumn());
}
}
indexUpdates.addAll(indexSerializer.getIndexUpdates(add));
}
// 3) Collect all index update for vertices
for (InternalVertex v : mutatedProperties.keySet()) {
indexUpdates.addAll(indexSerializer.getIndexUpdates(v, mutatedProperties.get(v)));
}
// 4) Acquire index locks (deletions first)
for (IndexSerializer.IndexUpdate update : indexUpdates) {
// Only composite-index deletions here; additions are handled in the next pass.
if (!update.isCompositeIndex() || !update.isDeletion())
continue;
CompositeIndexType iIndex = (CompositeIndexType) update.getIndex();
if (acquireLock(iIndex, acquireLocks)) {
// As with edges: deletions lock on the full entry …
mutator.acquireIndexLock((StaticBuffer) update.getKey(), (Entry) update.getEntry());
}
}
for (IndexSerializer.IndexUpdate update : indexUpdates) {
if (!update.isCompositeIndex() || !update.isAddition())
continue;
CompositeIndexType iIndex = (CompositeIndexType) update.getIndex();
if (acquireLock(iIndex, acquireLocks)) {
// … additions lock on the column only.
mutator.acquireIndexLock((StaticBuffer) update.getKey(), ((Entry) update.getEntry()).getColumn());
}
}
// 5) Add relation mutations
for (Long vertexId : mutations.keySet()) {
Preconditions.checkArgument(vertexId > 0, "Vertex has no id: %s", vertexId);
final List<InternalRelation> edges = mutations.get(vertexId);
final List<Entry> additions = new ArrayList<>(edges.size());
// Deletions are usually far fewer than additions; presized heuristically.
final List<Entry> deletions = new ArrayList<>(Math.max(10, edges.size() / 10));
for (final InternalRelation edge : edges) {
final InternalRelationType baseType = (InternalRelationType) edge.getType();
assert baseType.getBaseType() == null;
// Write one entry per relation index defined on this type (includes the base type).
for (InternalRelationType type : baseType.getRelationIndexes()) {
// Skip indexes that are disabled in the schema.
if (type.getStatus() == SchemaStatus.DISABLED)
continue;
for (int pos = 0; pos < edge.getArity(); pos++) {
if (!type.isUnidirected(Direction.BOTH) && !type.isUnidirected(EdgeDirection.fromPosition(pos)))
// Directionality is not covered
continue;
// Only serialize the entry from the perspective of the current vertex.
if (edge.getVertex(pos).longId() == vertexId) {
StaticArrayEntry entry = edgeSerializer.writeRelation(edge, type, pos, tx);
if (edge.isRemoved()) {
deletions.add(entry);
} else {
Preconditions.checkArgument(edge.isNew());
int ttl = getTTL(edge);
if (ttl > 0) {
entry.setMetaData(EntryMetaData.TTL, ttl);
}
additions.add(entry);
}
}
}
}
}
StaticBuffer vertexKey = idManager.getKey(vertexId);
mutator.mutateEdges(vertexKey, additions, deletions);
}
// 6) Add index updates
boolean has2iMods = false;
for (IndexSerializer.IndexUpdate indexUpdate : indexUpdates) {
assert indexUpdate.isAddition() || indexUpdate.isDeletion();
if (indexUpdate.isCompositeIndex()) {
// Composite indexes are stored in the primary key-column-value backend.
final IndexSerializer.IndexUpdate<StaticBuffer, Entry> update = indexUpdate;
if (update.isAddition())
mutator.mutateIndex(update.getKey(), Lists.newArrayList(update.getEntry()), KCVSCache.NO_DELETIONS);
else
mutator.mutateIndex(update.getKey(), KeyColumnValueStore.NO_ADDITIONS, Lists.newArrayList(update.getEntry()));
} else {
// Mixed indexes go to the external index backend (ES/Solr/…) via its own transaction.
final IndexSerializer.IndexUpdate<String, IndexEntry> update = indexUpdate;
has2iMods = true;
IndexTransaction itx = mutator.getIndexTransaction(update.getIndex().getBackingIndexName());
String indexStore = ((MixedIndexType) update.getIndex()).getStoreName();
if (update.isAddition())
itx.add(indexStore, update.getKey(), update.getEntry(), update.getElement().isNew());
else
itx.delete(indexStore, update.getKey(), update.getEntry().field, update.getEntry().value, update.getElement().isRemoved());
}
}
return new ModificationSummary(!mutations.isEmpty(), has2iMods);
}
Example usage of org.janusgraph.diskstorage.indexing.IndexEntry in the JanusGraph project:
class ElasticSearchIndex, method getNewDocument.
/**
 * Builds the Elasticsearch document body for the given index additions, de-duplicating
 * entries per field and converting each value to its ES representation.
 *
 * @param additions   the index entries to write; later entries for the same field win
 *                    for SINGLE-cardinality keys
 * @param information resolves per-field {@link KeyInformation} (cardinality, mapping, type)
 * @return field-name → converted-value map ready for JSON serialization
 * @throws BackendException if value conversion fails
 * @throws IllegalArgumentException if a collection-cardinality field contains a binary value
 */
public Map<String, Object> getNewDocument(final List<IndexEntry> additions, KeyInformation.StoreRetriever information) throws BackendException {
    // JSON writes duplicate fields one after another, which forces us at this stage to
    // de-duplicate the IndexEntry list. We don't want to pay the price of map storage on
    // the Mutation level because none of the other backends need that.
    final Multimap<String, IndexEntry> unique = LinkedListMultimap.create();
    for (final IndexEntry e : additions) {
        unique.put(e.field, e);
    }
    final Map<String, Object> doc = new HashMap<>();
    for (final Map.Entry<String, Collection<IndexEntry>> add : unique.asMap().entrySet()) {
        final String field = add.getKey();
        final KeyInformation keyInformation = information.get(field);
        final Object value;
        switch (keyInformation.getCardinality()) {
            case SINGLE:
                // Only the most recent value for the field survives.
                value = convertToEsType(Iterators.getLast(add.getValue().iterator()).value, Mapping.getMapping(keyInformation));
                break;
            case SET:
            case LIST:
                // Validate inside map() — the original abused filter() (always returning
                // true) purely to run this precondition as a side effect. The %s message
                // is also formatted lazily, only on failure.
                value = add.getValue().stream().map(v -> {
                    final Object converted = convertToEsType(v.value, Mapping.getMapping(keyInformation));
                    Preconditions.checkArgument(!(converted instanceof byte[]), "Collections not supported for %s", field);
                    return converted;
                }).toArray();
                break;
            default:
                value = null;
                break;
        }
        doc.put(field, value);
        // String fields with a dual (TEXT + STRING) mapping are indexed under both names.
        if (hasDualStringMapping(information.get(field)) && keyInformation.getDataType() == String.class) {
            doc.put(getDualMappingName(field), value);
        }
    }
    return doc;
}
Example usage of org.janusgraph.diskstorage.indexing.IndexEntry in the JanusGraph project:
class SolrIndex, method handleRemovalsFromIndex.
/**
 * Builds and submits a single Solr atomic-update request that removes the given field
 * values from the document identified by {@code docId}.
 *
 * SINGLE-cardinality fields are cleared entirely ({@code "set": null}); SET/LIST fields
 * have only the specific values removed ({@code "remove": values}).
 *
 * @param collectionName Solr collection to update
 * @param keyIdField     name of the unique-key field in the collection
 * @param docId          id of the document to update
 * @param fieldDeletions index entries whose values should be removed
 * @param information    resolves per-field cardinality
 * @throws SolrServerException, IOException, BackendException on backend failure
 */
private void handleRemovalsFromIndex(String collectionName, String keyIdField, String docId, List<IndexEntry> fieldDeletions, KeyInformation.IndexRetriever information) throws SolrServerException, IOException, BackendException {
    // Atomic-update payload that nulls out a SINGLE-cardinality field entirely.
    final Map<String, Object> fieldDeletes = new HashMap<>(1);
    fieldDeletes.put("set", null);

    final SolrInputDocument doc = new SolrInputDocument();
    doc.addField(keyIdField, docId);

    if (!fieldDeletions.isEmpty()) {
        // Consolidate all values to delete, keyed by field name. The original invoked
        // this inside the per-entry loop with loop-invariant arguments, recomputing the
        // identical map for every IndexEntry; hoisted here to a single call.
        final Map<String, Object> deletes = collectFieldValues(fieldDeletions, collectionName, information);
        deletes.forEach((field, values) -> {
            // Look up cardinality per field. The original applied the cardinality of
            // whichever IndexEntry was processed last to *all* fields, which produced
            // the wrong operation when a mutation mixed SINGLE and SET/LIST fields.
            final KeyInformation keyInformation = information.get(collectionName, field);
            final Map<String, Object> remove;
            if (keyInformation.getCardinality() == Cardinality.SINGLE) {
                // SINGLE: clear the whole field.
                remove = fieldDeletes;
            } else {
                // SET/LIST: remove only the consolidated values, preserving the rest.
                remove = new HashMap<>(1);
                remove.put("remove", values);
            }
            doc.setField(field, remove);
        });
    }

    final UpdateRequest singleDocument = newUpdateRequest();
    singleDocument.add(doc);
    solrClient.request(singleDocument, collectionName);
}
Example usage of org.janusgraph.diskstorage.indexing.IndexEntry in the JanusGraph project:
class SolrIndex, method mutate.
/**
 * Applies the given index mutations to Solr, collection by collection: whole-document
 * deletions are batched into one delete request, field-level removals go through
 * {@link #handleRemovalsFromIndex}, and additions are written as new documents or
 * atomic updates ({@code set}/{@code add}) depending on document newness and cardinality.
 *
 * @param mutations   collection-name → (doc-id → mutation)
 * @param information resolves per-field cardinality for each collection
 * @param tx          the enclosing transaction (unused here; Solr commits per request)
 * @throws BackendException wrapping any Solr or argument failure
 */
@Override
public void mutate(Map<String, Map<String, IndexMutation>> mutations, KeyInformation.IndexRetriever information, BaseTransaction tx) throws BackendException {
    logger.debug("Mutating SOLR");
    try {
        for (final Map.Entry<String, Map<String, IndexMutation>> stores : mutations.entrySet()) {
            final String collectionName = stores.getKey();
            final String keyIdField = getKeyFieldId(collectionName);
            final List<String> deleteIds = new ArrayList<>();
            final Collection<SolrInputDocument> changes = new ArrayList<>();
            for (final Map.Entry<String, IndexMutation> entry : stores.getValue().entrySet()) {
                final String docId = entry.getKey();
                final IndexMutation mutation = entry.getValue();
                // A mutation cannot be simultaneously new and deleted, nor combine
                // document-level deletion with additions (or newness with deletions).
                Preconditions.checkArgument(!(mutation.isNew() && mutation.isDeleted()));
                Preconditions.checkArgument(!mutation.isNew() || !mutation.hasDeletions());
                Preconditions.checkArgument(!mutation.isDeleted() || !mutation.hasAdditions());
                // Handle any deletions
                if (mutation.hasDeletions()) {
                    if (mutation.isDeleted()) {
                        logger.trace("Deleting entire document {}", docId);
                        deleteIds.add(docId);
                    } else {
                        // A value that is deleted and re-added in the same mutation is a
                        // no-op for the deletion pass; drop it so only real removals remain.
                        final List<IndexEntry> fieldDeletions = new ArrayList<>(mutation.getDeletions());
                        if (mutation.hasAdditions()) {
                            for (final IndexEntry indexEntry : mutation.getAdditions()) {
                                fieldDeletions.remove(indexEntry);
                            }
                        }
                        handleRemovalsFromIndex(collectionName, keyIdField, docId, fieldDeletions, information);
                    }
                }
                if (mutation.hasAdditions()) {
                    final int ttl = mutation.determineTTL();
                    final SolrInputDocument doc = new SolrInputDocument();
                    doc.setField(keyIdField, docId);
                    final boolean isNewDoc = mutation.isNew();
                    if (isNewDoc)
                        logger.trace("Adding new document {}", docId);
                    final Map<String, Object> adds = collectFieldValues(mutation.getAdditions(), collectionName, information);
                    // If cardinality is not single then we should use the "add" operation to
                    // update the index so we don't overwrite existing values.
                    adds.forEach((field, value) -> {
                        final KeyInformation keyInformation = information.get(collectionName, field);
                        final String solrOp = keyInformation.getCardinality() == Cardinality.SINGLE ? "set" : "add";
                        if (isNewDoc) {
                            doc.setField(field, value);
                        } else {
                            // Plain map instead of the original double-brace initialization,
                            // which allocated an anonymous HashMap subclass holding a hidden
                            // reference to the enclosing SolrIndex instance.
                            final Map<String, Object> atomicUpdate = new HashMap<>(1);
                            atomicUpdate.put(solrOp, value);
                            doc.setField(field, atomicUpdate);
                        }
                    });
                    if (ttl > 0) {
                        // Solr TTL (expiration) can only be set when the document is created.
                        Preconditions.checkArgument(isNewDoc, "Solr only supports TTL on new documents [%s]", docId);
                        doc.setField(ttlField, String.format("+%dSECONDS", ttl));
                    }
                    changes.add(doc);
                }
            }
            commitDeletes(collectionName, deleteIds);
            commitChanges(collectionName, changes);
        }
    } catch (final IllegalArgumentException e) {
        throw new PermanentBackendException("Unable to complete query on Solr.", e);
    } catch (final Exception e) {
        throw storageException(e);
    }
}
Aggregations