Use of com.thinkaurelius.titan.diskstorage.indexing.IndexEntry in project titan by thinkaurelius.
Class StandardTitanGraph, method prepareCommit.
public ModificationSummary prepareCommit(final Collection<InternalRelation> addedRelations, final Collection<InternalRelation> deletedRelations, final Predicate<InternalRelation> filter, final BackendTransaction mutator, final StandardTitanTx tx, final boolean acquireLocks) throws BackendException {
    ListMultimap<Long, InternalRelation> mutations = ArrayListMultimap.create();
    ListMultimap<InternalVertex, InternalRelation> mutatedProperties = ArrayListMultimap.create();
    List<IndexSerializer.IndexUpdate> indexUpdates = Lists.newArrayList();
    //1) Collect deleted edges and their index updates and acquire edge locks
    for (InternalRelation del : Iterables.filter(deletedRelations, filter)) {
        Preconditions.checkArgument(del.isRemoved());
        for (int pos = 0; pos < del.getLen(); pos++) {
            InternalVertex vertex = del.getVertex(pos);
            if (pos == 0 || !del.isLoop()) {
                if (del.isProperty())
                    mutatedProperties.put(vertex, del);
                mutations.put(vertex.longId(), del);
            }
            if (acquireLock(del, pos, acquireLocks)) {
                Entry entry = edgeSerializer.writeRelation(del, pos, tx);
                mutator.acquireEdgeLock(idManager.getKey(vertex.longId()), entry);
            }
        }
        indexUpdates.addAll(indexSerializer.getIndexUpdates(del));
    }
    //2) Collect added edges and their index updates and acquire edge locks
    for (InternalRelation add : Iterables.filter(addedRelations, filter)) {
        Preconditions.checkArgument(add.isNew());
        for (int pos = 0; pos < add.getLen(); pos++) {
            InternalVertex vertex = add.getVertex(pos);
            if (pos == 0 || !add.isLoop()) {
                if (add.isProperty())
                    mutatedProperties.put(vertex, add);
                mutations.put(vertex.longId(), add);
            }
            if (!vertex.isNew() && acquireLock(add, pos, acquireLocks)) {
                Entry entry = edgeSerializer.writeRelation(add, pos, tx);
                mutator.acquireEdgeLock(idManager.getKey(vertex.longId()), entry.getColumn());
            }
        }
        indexUpdates.addAll(indexSerializer.getIndexUpdates(add));
    }
    //3) Collect all index updates for vertices
    for (InternalVertex v : mutatedProperties.keySet()) {
        indexUpdates.addAll(indexSerializer.getIndexUpdates(v, mutatedProperties.get(v)));
    }
    //4) Acquire index locks (deletions first)
    for (IndexSerializer.IndexUpdate update : indexUpdates) {
        if (!update.isCompositeIndex() || !update.isDeletion())
            continue;
        CompositeIndexType iIndex = (CompositeIndexType) update.getIndex();
        if (acquireLock(iIndex, acquireLocks)) {
            mutator.acquireIndexLock((StaticBuffer) update.getKey(), (Entry) update.getEntry());
        }
    }
    for (IndexSerializer.IndexUpdate update : indexUpdates) {
        if (!update.isCompositeIndex() || !update.isAddition())
            continue;
        CompositeIndexType iIndex = (CompositeIndexType) update.getIndex();
        if (acquireLock(iIndex, acquireLocks)) {
            mutator.acquireIndexLock((StaticBuffer) update.getKey(), ((Entry) update.getEntry()).getColumn());
        }
    }
    //5) Add relation mutations
    for (Long vertexid : mutations.keySet()) {
        Preconditions.checkArgument(vertexid > 0, "Vertex has no id: %s", vertexid);
        List<InternalRelation> edges = mutations.get(vertexid);
        List<Entry> additions = new ArrayList<Entry>(edges.size());
        List<Entry> deletions = new ArrayList<Entry>(Math.max(10, edges.size() / 10));
        for (InternalRelation edge : edges) {
            InternalRelationType baseType = (InternalRelationType) edge.getType();
            assert baseType.getBaseType() == null;
            for (InternalRelationType type : baseType.getRelationIndexes()) {
                if (type.getStatus() == SchemaStatus.DISABLED)
                    continue;
                for (int pos = 0; pos < edge.getArity(); pos++) {
                    if (!type.isUnidirected(Direction.BOTH) && !type.isUnidirected(EdgeDirection.fromPosition(pos)))
                        //Directionality is not covered
                        continue;
                    if (edge.getVertex(pos).longId() == vertexid) {
                        StaticArrayEntry entry = edgeSerializer.writeRelation(edge, type, pos, tx);
                        if (edge.isRemoved()) {
                            deletions.add(entry);
                        } else {
                            Preconditions.checkArgument(edge.isNew());
                            int ttl = getTTL(edge);
                            if (ttl > 0) {
                                entry.setMetaData(EntryMetaData.TTL, ttl);
                            }
                            additions.add(entry);
                        }
                    }
                }
            }
        }
        StaticBuffer vertexKey = idManager.getKey(vertexid);
        mutator.mutateEdges(vertexKey, additions, deletions);
    }
    //6) Add index updates
    boolean has2iMods = false;
    for (IndexSerializer.IndexUpdate indexUpdate : indexUpdates) {
        assert indexUpdate.isAddition() || indexUpdate.isDeletion();
        if (indexUpdate.isCompositeIndex()) {
            IndexSerializer.IndexUpdate<StaticBuffer, Entry> update = indexUpdate;
            if (update.isAddition())
                mutator.mutateIndex(update.getKey(), Lists.newArrayList(update.getEntry()), KCVSCache.NO_DELETIONS);
            else
                mutator.mutateIndex(update.getKey(), KeyColumnValueStore.NO_ADDITIONS, Lists.newArrayList(update.getEntry()));
        } else {
            IndexSerializer.IndexUpdate<String, IndexEntry> update = indexUpdate;
            has2iMods = true;
            IndexTransaction itx = mutator.getIndexTransaction(update.getIndex().getBackingIndexName());
            String indexStore = ((MixedIndexType) update.getIndex()).getStoreName();
            if (update.isAddition())
                itx.add(indexStore, update.getKey(), update.getEntry(), update.getElement().isNew());
            else
                itx.delete(indexStore, update.getKey(), update.getEntry().field, update.getEntry().value, update.getElement().isRemoved());
        }
    }
    return new ModificationSummary(!mutations.isEmpty(), has2iMods);
}
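For context, the mixed-index branch of step 6 hands each update to the index backend as an IndexEntry, which is simply a field name paired with the value to index for one document. Below is a minimal sketch of that hand-off; the method name applyMixedIndexAddition and the parameters itx, storeName and docId are hypothetical placeholders for values that prepareCommit actually derives from the IndexUpdate via getIndex(), getKey() and getElement().

import com.thinkaurelius.titan.diskstorage.BackendException;
import com.thinkaurelius.titan.diskstorage.indexing.IndexEntry;
import com.thinkaurelius.titan.diskstorage.indexing.IndexTransaction;

// Sketch only: the method name and all parameters are placeholders for what
// prepareCommit reads off the IndexSerializer.IndexUpdate in step 6.
void applyMixedIndexAddition(IndexTransaction itx, String storeName, String docId) throws BackendException {
    // An IndexEntry is a simple field/value pair for one document field.
    IndexEntry entry = new IndexEntry("name", "hercules");
    // isNew = true tells the backing index that the document does not exist yet.
    itx.add(storeName, docId, entry, true);
}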
Use of com.thinkaurelius.titan.diskstorage.indexing.IndexEntry in project atlas by apache.
Class Solr5Index, method restore.
@Override
public void restore(Map<String, Map<String, List<IndexEntry>>> documents, KeyInformation.IndexRetriever informations, BaseTransaction tx) throws BackendException {
    try {
        for (Map.Entry<String, Map<String, List<IndexEntry>>> stores : documents.entrySet()) {
            final String collectionName = stores.getKey();
            List<String> deleteIds = new ArrayList<>();
            List<SolrInputDocument> newDocuments = new ArrayList<>();
            for (Map.Entry<String, List<IndexEntry>> entry : stores.getValue().entrySet()) {
                final String docID = entry.getKey();
                final List<IndexEntry> content = entry.getValue();
                // An empty field list means the document no longer exists and must be deleted.
                if (content == null || content.isEmpty()) {
                    if (logger.isTraceEnabled())
                        logger.trace("Deleting document [{}]", docID);
                    deleteIds.add(docID);
                    continue;
                }
                // Otherwise rebuild the Solr document from its IndexEntry fields.
                newDocuments.add(new SolrInputDocument() {
                    {
                        setField(getKeyFieldId(collectionName), docID);
                        for (IndexEntry addition : content) {
                            Object fieldValue = addition.value;
                            setField(addition.field, convertValue(fieldValue));
                        }
                    }
                });
            }
            commitDeletes(collectionName, deleteIds);
            commitDocumentChanges(collectionName, newDocuments);
        }
    } catch (Exception e) {
        throw new TemporaryBackendException("Could not restore Solr index", e);
    }
}
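The documents argument is keyed first by store (Solr collection) and then by document id, and each document is described by its list of IndexEntry fields; an empty list marks the document for deletion, as the branch above shows. The following is a minimal sketch of building such a map and calling restore through the IndexProvider interface; the store name "vertex", the document ids and the field values are chosen purely for illustration, and "index", "informations" and "tx" are assumed to already exist.

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.thinkaurelius.titan.diskstorage.BackendException;
import com.thinkaurelius.titan.diskstorage.BaseTransaction;
import com.thinkaurelius.titan.diskstorage.indexing.IndexEntry;
import com.thinkaurelius.titan.diskstorage.indexing.IndexProvider;
import com.thinkaurelius.titan.diskstorage.indexing.KeyInformation;

// Sketch only: "index", "informations" and "tx" stand in for an initialized
// index provider, its key retriever and an open backend transaction.
void restoreExample(IndexProvider index, KeyInformation.IndexRetriever informations, BaseTransaction tx) throws BackendException {
    Map<String, Map<String, List<IndexEntry>>> documents = new HashMap<>();
    Map<String, List<IndexEntry>> vertexDocs = new HashMap<>();
    // doc-1 is rewritten with these two indexed fields.
    vertexDocs.put("doc-1", Arrays.asList(new IndexEntry("name", "hercules"), new IndexEntry("age", 30)));
    // An empty entry list tells restore() to delete doc-2 from the collection.
    vertexDocs.put("doc-2", Collections.<IndexEntry>emptyList());
    documents.put("vertex", vertexDocs);
    index.restore(documents, informations, tx);
}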
Use of com.thinkaurelius.titan.diskstorage.indexing.IndexEntry in project atlas by apache.
Class Solr5Index, method deleteIndividualFieldsFromIndex.
private void deleteIndividualFieldsFromIndex(String collectionName, String keyIdField, String docId, HashSet<IndexEntry> fieldDeletions) throws SolrServerException, IOException {
    if (fieldDeletions.isEmpty())
        return;
    // Solr atomic update: mapping a field to {"set": null} removes it from the stored document.
    Map<String, String> fieldDeletes = new HashMap<String, String>(1) {
        {
            put("set", null);
        }
    };
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField(keyIdField, docId);
    StringBuilder sb = new StringBuilder();
    for (IndexEntry fieldToDelete : fieldDeletions) {
        doc.addField(fieldToDelete.field, fieldDeletes);
        sb.append(fieldToDelete).append(",");
    }
    if (logger.isTraceEnabled())
        logger.trace("Deleting individual fields [{}] for document {}", sb.toString(), docId);
    UpdateRequest singleDocument = newUpdateRequest();
    singleDocument.add(doc);
    solrClient.request(singleDocument, collectionName);
}
Use of com.thinkaurelius.titan.diskstorage.indexing.IndexEntry in project incubator-atlas by apache.
Class Solr5Index, method restore.
@Override
public void restore(Map<String, Map<String, List<IndexEntry>>> documents, KeyInformation.IndexRetriever informations, BaseTransaction tx) throws BackendException {
    try {
        for (Map.Entry<String, Map<String, List<IndexEntry>>> stores : documents.entrySet()) {
            final String collectionName = stores.getKey();
            List<String> deleteIds = new ArrayList<>();
            List<SolrInputDocument> newDocuments = new ArrayList<>();
            for (Map.Entry<String, List<IndexEntry>> entry : stores.getValue().entrySet()) {
                final String docID = entry.getKey();
                final List<IndexEntry> content = entry.getValue();
                if (content == null || content.isEmpty()) {
                    if (logger.isTraceEnabled())
                        logger.trace("Deleting document [{}]", docID);
                    deleteIds.add(docID);
                    continue;
                }
                newDocuments.add(new SolrInputDocument() {
                    {
                        setField(getKeyFieldId(collectionName), docID);
                        for (IndexEntry addition : content) {
                            Object fieldValue = addition.value;
                            setField(addition.field, convertValue(fieldValue));
                        }
                    }
                });
            }
            commitDeletes(collectionName, deleteIds);
            commitDocumentChanges(collectionName, newDocuments);
        }
    } catch (Exception e) {
        throw new TemporaryBackendException("Could not restore Solr index", e);
    }
}
Use of com.thinkaurelius.titan.diskstorage.indexing.IndexEntry in project incubator-atlas by apache.
Class Solr5Index, method deleteIndividualFieldsFromIndex.
private void deleteIndividualFieldsFromIndex(String collectionName, String keyIdField, String docId, HashSet<IndexEntry> fieldDeletions) throws SolrServerException, IOException {
    if (fieldDeletions.isEmpty())
        return;
    Map<String, String> fieldDeletes = new HashMap<String, String>(1) {
        {
            put("set", null);
        }
    };
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField(keyIdField, docId);
    StringBuilder sb = new StringBuilder();
    for (IndexEntry fieldToDelete : fieldDeletions) {
        doc.addField(fieldToDelete.field, fieldDeletes);
        sb.append(fieldToDelete).append(",");
    }
    if (logger.isTraceEnabled())
        logger.trace("Deleting individual fields [{}] for document {}", sb.toString(), docId);
    UpdateRequest singleDocument = newUpdateRequest();
    singleDocument.add(doc);
    solrClient.request(singleDocument, collectionName);
}