Use of org.janusgraph.diskstorage.indexing.IndexMutation in project janusgraph by JanusGraph.
From the class LuceneIndex, method mutateStores:
private void mutateStores(Map.Entry<String, Map<String, IndexMutation>> stores,
                          KeyInformation.IndexRetriever information) throws IOException, BackendException {
    IndexReader reader = null;
    try {
        final String storeName = stores.getKey();
        final IndexWriter writer = getWriter(storeName, information);
        reader = DirectoryReader.open(writer, true, true);
        final IndexSearcher searcher = new IndexSearcher(reader);
        final KeyInformation.StoreRetriever storeRetriever = information.get(storeName);
        for (final Map.Entry<String, IndexMutation> entry : stores.getValue().entrySet()) {
            final String documentId = entry.getKey();
            final IndexMutation mutation = entry.getValue();
            if (mutation.isDeleted()) {
                if (log.isTraceEnabled())
                    log.trace("Deleted entire document [{}]", documentId);
                writer.deleteDocuments(new Term(DOCID, documentId));
                continue;
            }
            final Document doc = retrieveOrCreate(documentId, searcher);
            Preconditions.checkNotNull(doc);
            for (final IndexEntry del : mutation.getDeletions()) {
                Preconditions.checkArgument(!del.hasMetaData(),
                    "Lucene index does not support indexing meta data: %s", del);
                String fieldName = del.field;
                if (log.isTraceEnabled()) {
                    log.trace("Removing field [{}] on document [{}]", fieldName, documentId);
                }
                KeyInformation ki = storeRetriever.get(fieldName);
                removeField(doc, del, ki);
            }
            addToDocument(doc, mutation.getAdditions(), storeRetriever, false);
            // write the old document to the index with the modifications
            writer.updateDocument(new Term(DOCID, documentId), doc);
        }
        writer.commit();
    } finally {
        IOUtils.closeQuietly(reader);
    }
}
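This delete-and-rewrite loop exists because Lucene cannot modify individual fields of an indexed document: the only update primitive is IndexWriter.updateDocument, which atomically deletes every document matching the given term and indexes the replacement. Below is a minimal self-contained sketch of that upsert pattern, assuming Lucene 8.x; the directory, field names, and values are illustrative and not taken from JanusGraph.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class LuceneUpsertSketch {

    public static void main(String[] args) throws Exception {
        try (Directory dir = new ByteBuffersDirectory();
             IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            final Document doc = new Document();
            doc.add(new StringField("docid", "v42", Field.Store.YES));
            doc.add(new StringField("name", "alice", Field.Store.YES));
            // updateDocument atomically deletes any document matching the term
            // and indexes the replacement; there is no per-field update.
            writer.updateDocument(new Term("docid", "v42"), doc);
            writer.commit();
        }
    }
}

Because updateDocument replaces the whole document, mutateStores must first load the current version via retrieveOrCreate, apply the field removals and additions to it in memory, and then write the complete document back.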
Use of org.janusgraph.diskstorage.indexing.IndexMutation in project janusgraph by JanusGraph.
From the class ElasticSearchIndex, method mutate:
@Override
public void mutate(Map<String, Map<String, IndexMutation>> mutations,
                   KeyInformation.IndexRetriever information, BaseTransaction tx) throws BackendException {
    final List<ElasticSearchMutation> requests = new ArrayList<>();
    try {
        for (final Map.Entry<String, Map<String, IndexMutation>> stores : mutations.entrySet()) {
            final List<ElasticSearchMutation> requestByStore = new ArrayList<>();
            final String storeName = stores.getKey();
            final String indexStoreName = getIndexStoreName(storeName);
            for (final Map.Entry<String, IndexMutation> entry : stores.getValue().entrySet()) {
                final String documentId = entry.getKey();
                final IndexMutation mutation = entry.getValue();
                assert mutation.isConsolidated();
                Preconditions.checkArgument(!(mutation.isNew() && mutation.isDeleted()));
                Preconditions.checkArgument(!mutation.isNew() || !mutation.hasDeletions());
                Preconditions.checkArgument(!mutation.isDeleted() || !mutation.hasAdditions());
                // Deletions first
                if (mutation.hasDeletions()) {
                    if (mutation.isDeleted()) {
                        log.trace("Deleting entire document {}", documentId);
                        requestByStore.add(ElasticSearchMutation.createDeleteRequest(indexStoreName, storeName, documentId));
                    } else {
                        List<Map<String, Object>> params =
                            getParameters(information.get(storeName), mutation.getDeletions(), true);
                        Map doc = compat.prepareStoredScript(parameterizedDeletionScriptId, params).build();
                        log.trace("Deletion script {} with params {}", PARAMETERIZED_DELETION_SCRIPT, params);
                        requestByStore.add(ElasticSearchMutation.createUpdateRequest(indexStoreName, storeName, documentId, doc));
                    }
                }
                if (mutation.hasAdditions()) {
                    if (mutation.isNew()) {
                        // Index
                        log.trace("Adding entire document {}", documentId);
                        final Map<String, Object> source = getNewDocument(mutation.getAdditions(), information.get(storeName));
                        requestByStore.add(ElasticSearchMutation.createIndexRequest(indexStoreName, storeName, documentId, source));
                    } else {
                        final Map upsert;
                        if (!mutation.hasDeletions()) {
                            upsert = getNewDocument(mutation.getAdditions(), information.get(storeName));
                        } else {
                            upsert = null;
                        }
                        List<Map<String, Object>> params =
                            getParameters(information.get(storeName), mutation.getAdditions(), false, Cardinality.SINGLE);
                        if (!params.isEmpty()) {
                            ImmutableMap.Builder builder = compat.prepareStoredScript(parameterizedAdditionScriptId, params);
                            requestByStore.add(ElasticSearchMutation.createUpdateRequest(indexStoreName, storeName, documentId, builder, upsert));
                            log.trace("Adding script {} with params {}", PARAMETERIZED_ADDITION_SCRIPT, params);
                        }
                        final Map<String, Object> doc = getAdditionDoc(information, storeName, mutation);
                        if (!doc.isEmpty()) {
                            final ImmutableMap.Builder builder = ImmutableMap.builder().put(ES_DOC_KEY, doc);
                            requestByStore.add(ElasticSearchMutation.createUpdateRequest(indexStoreName, storeName, documentId, builder, upsert));
                            log.trace("Adding update {}", doc);
                        }
                    }
                }
            }
            if (!requestByStore.isEmpty() && ingestPipelines.containsKey(storeName)) {
                client.bulkRequest(requestByStore, String.valueOf(ingestPipelines.get(storeName)));
            } else if (!requestByStore.isEmpty()) {
                requests.addAll(requestByStore);
            }
        }
        if (!requests.isEmpty()) {
            client.bulkRequest(requests, null);
        }
    } catch (final Exception e) {
        log.error("Failed to execute bulk Elasticsearch mutation", e);
        throw convert(e);
    }
}
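Note that the three Preconditions.checkArgument calls above mirror the checks in SolrIndex.mutate below: a consolidated IndexMutation may not both create and delete a document, a brand-new document has no previously indexed fields to delete, and a fully deleted document cannot also receive additions. As a standalone sketch, the helper below restates those invariants (MutationInvariants and validate are illustrative names, not JanusGraph API):

import com.google.common.base.Preconditions;

import org.janusgraph.diskstorage.indexing.IndexMutation;

// Hypothetical helper mirroring the precondition checks that both the
// Elasticsearch and Solr backends perform on every consolidated mutation.
final class MutationInvariants {

    static void validate(IndexMutation m) {
        // A mutation cannot simultaneously create and delete a document.
        Preconditions.checkArgument(!(m.isNew() && m.isDeleted()));
        // A brand-new document has no previously indexed fields to delete.
        Preconditions.checkArgument(!m.isNew() || !m.hasDeletions());
        // A fully deleted document cannot also receive field additions.
        Preconditions.checkArgument(!m.isDeleted() || !m.hasAdditions());
    }

    private MutationInvariants() {
    }
}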
Use of org.janusgraph.diskstorage.indexing.IndexMutation in project janusgraph by JanusGraph.
From the class SolrIndex, method mutate:
@Override
public void mutate(Map<String, Map<String, IndexMutation>> mutations,
                   KeyInformation.IndexRetriever information, BaseTransaction tx) throws BackendException {
    logger.debug("Mutating SOLR");
    try {
        for (final Map.Entry<String, Map<String, IndexMutation>> stores : mutations.entrySet()) {
            final String collectionName = stores.getKey();
            final String keyIdField = getKeyFieldId(collectionName);
            final List<String> deleteIds = new ArrayList<>();
            final Collection<SolrInputDocument> changes = new ArrayList<>();
            for (final Map.Entry<String, IndexMutation> entry : stores.getValue().entrySet()) {
                final String docId = entry.getKey();
                final IndexMutation mutation = entry.getValue();
                Preconditions.checkArgument(!(mutation.isNew() && mutation.isDeleted()));
                Preconditions.checkArgument(!mutation.isNew() || !mutation.hasDeletions());
                Preconditions.checkArgument(!mutation.isDeleted() || !mutation.hasAdditions());
                // Handle any deletions
                if (mutation.hasDeletions()) {
                    if (mutation.isDeleted()) {
                        logger.trace("Deleting entire document {}", docId);
                        deleteIds.add(docId);
                    } else {
                        final List<IndexEntry> fieldDeletions = new ArrayList<>(mutation.getDeletions());
                        if (mutation.hasAdditions()) {
                            for (final IndexEntry indexEntry : mutation.getAdditions()) {
                                fieldDeletions.remove(indexEntry);
                            }
                        }
                        handleRemovalsFromIndex(collectionName, keyIdField, docId, fieldDeletions, information);
                    }
                }
                if (mutation.hasAdditions()) {
                    final int ttl = mutation.determineTTL();
                    final SolrInputDocument doc = new SolrInputDocument();
                    doc.setField(keyIdField, docId);
                    final boolean isNewDoc = mutation.isNew();
                    if (isNewDoc)
                        logger.trace("Adding new document {}", docId);
                    final Map<String, Object> adds = collectFieldValues(mutation.getAdditions(), collectionName, information);
                    // If cardinality is not single then we should use the "add" operation to update
                    // the index so we don't overwrite existing values.
                    adds.keySet().forEach(v -> {
                        final KeyInformation keyInformation = information.get(collectionName, v);
                        final String solrOp = keyInformation.getCardinality() == Cardinality.SINGLE ? "set" : "add";
                        doc.setField(v, isNewDoc ? adds.get(v) : new HashMap<String, Object>(1) {{
                            put(solrOp, adds.get(v));
                        }});
                    });
                    if (ttl > 0) {
                        Preconditions.checkArgument(isNewDoc, "Solr only supports TTL on new documents [%s]", docId);
                        doc.setField(ttlField, String.format("+%dSECONDS", ttl));
                    }
                    changes.add(doc);
                }
            }
            commitDeletes(collectionName, deleteIds);
            commitChanges(collectionName, changes);
        }
    } catch (final IllegalArgumentException e) {
        throw new PermanentBackendException("Unable to complete query on Solr.", e);
    } catch (final Exception e) {
        throw storageException(e);
    }
}
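The single-entry map built in the forEach lambda above is Solr's atomic-update syntax: wrapping a field value in {"set": value} or {"add": value} tells Solr to modify just that field instead of replacing the whole document, with "set" overwriting a single-valued field and "add" appending to a multi-valued one. A minimal SolrJ sketch of the same idea follows; the field names and values are illustrative, and the commented-out calls assume a configured SolrClient named client.

import java.util.Collections;

import org.apache.solr.common.SolrInputDocument;

public class SolrAtomicUpdateSketch {

    public static void main(String[] args) {
        final SolrInputDocument doc = new SolrInputDocument();
        doc.setField("id", "v42");
        // "set" replaces the current value of a single-valued field.
        doc.setField("name", Collections.singletonMap("set", "alice"));
        // "add" appends to a multi-valued field without overwriting it.
        doc.setField("tags", Collections.singletonMap("add", "graph"));
        System.out.println(doc);
        // client.add("myCollection", doc);
        // client.commit("myCollection");
    }
}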