Use of com.thinkaurelius.titan.diskstorage.keycolumnvalue.SliceQuery in project titan by thinkaurelius.
The class MultiVertexCentricQueryBuilder, method execute.
/* ---------------------------------------------------------------
 * Query Execution
 * ---------------------------------------------------------------
 */
/**
 * Constructs the BaseVertexCentricQuery through {@link BasicVertexCentricQueryBuilder#constructQuery(com.thinkaurelius.titan.graphdb.internal.RelationCategory)}.
 * If the query asks for an implicit key, the resulting map is computed and returned directly.
 * If the query is empty, a map that maps each vertex to an empty list is returned.
 * Otherwise, the query is executed for all vertices through the transaction, which effectively
 * pre-loads the result sets into the associated {@link com.thinkaurelius.titan.graphdb.vertices.CacheVertex},
 * or does nothing at all if the vertex is new (and hence has no edges in the storage backend).
 * After that, a map is constructed that maps each vertex to the corresponding VertexCentricQuery,
 * wrapped in a QueryProcessor. Hence, upon iteration the query is executed like any other
 * VertexCentricQuery, with the performance difference that the SliceQueries have already been
 * preloaded and no further calls to the storage backend are needed.
 *
 * @param returnType        the category of relations (edges, properties, or both) to query for
 * @param resultConstructor constructs the per-vertex result from the executed base query
 * @return a map from each queried vertex to its query result
 */
protected <Q> Map<TitanVertex, Q> execute(RelationCategory returnType, ResultConstructor<Q> resultConstructor) {
    Preconditions.checkArgument(!vertices.isEmpty(), "Need to add at least one vertex to query");
    Map<TitanVertex, Q> result = new HashMap<TitanVertex, Q>(vertices.size());
    BaseVertexCentricQuery bq = super.constructQuery(returnType);
    profiler.setAnnotation(QueryProfiler.MULTIQUERY_ANNOTATION, true);
    profiler.setAnnotation(QueryProfiler.NUMVERTICES_ANNOTATION, vertices.size());
    if (!bq.isEmpty()) {
        for (BackendQueryHolder<SliceQuery> sq : bq.getQueries()) {
            Set<InternalVertex> adjVertices = Sets.newHashSet(vertices);
            for (InternalVertex v : vertices) {
                if (isPartitionedVertex(v)) {
                    profiler.setAnnotation(QueryProfiler.PARTITIONED_VERTEX_ANNOTATION, true);
                    adjVertices.remove(v);
                    adjVertices.addAll(allRequiredRepresentatives(v));
                }
            }
            //Overwrite with more accurate size accounting for partitioned vertices
            profiler.setAnnotation(QueryProfiler.NUMVERTICES_ANNOTATION, adjVertices.size());
            tx.executeMultiQuery(adjVertices, sq.getBackendQuery(), sq.getProfiler());
        }
        for (InternalVertex v : vertices) {
            result.put(v, resultConstructor.getResult(v, bq));
        }
    } else {
        for (TitanVertex v : vertices) result.put(v, resultConstructor.emptyResult());
    }
    return result;
}
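For context, callers do not reach this protected execute method directly; they go through the public TitanMultiVertexQuery interface that MultiVertexCentricQueryBuilder implements. A minimal usage sketch, assuming tx is an open TitanTransaction and vertices is a collection of vertices loaded in it (the variable names and the "knows" label are illustrative):

// One multi-query pre-loads the caches of all added vertices in a single backend round trip.
TitanMultiVertexQuery mq = tx.multiQuery(vertices);
Map<TitanVertex, Iterable<TitanEdge>> knows = mq.labels("knows").direction(Direction.OUT).edges();
// Iterating the returned map is now served from the pre-loaded CacheVertex data.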
Use of com.thinkaurelius.titan.diskstorage.keycolumnvalue.SliceQuery in project titan by thinkaurelius.
The class IndexRemoveJob, method process.
@Override
public void process(StaticBuffer key, Map<SliceQuery, EntryList> entries, ScanMetrics metrics) {
    //The queries are already tailored enough => everything should be removed
    try {
        BackendTransaction mutator = writeTx.getTxHandle();
        final List<Entry> deletions;
        if (entries.size() == 1)
            deletions = Iterables.getOnlyElement(entries.values());
        else {
            int size = IteratorUtils.stream(entries.values().iterator()).map(e -> e.size()).reduce(0, (x, y) -> x + y);
            deletions = new ArrayList<>(size);
            entries.values().forEach(e -> deletions.addAll(e));
        }
        metrics.incrementCustom(DELETED_RECORDS_COUNT, deletions.size());
        if (isRelationTypeIndex()) {
            mutator.mutateEdges(key, KCVSCache.NO_ADDITIONS, deletions);
        } else {
            mutator.mutateIndex(key, KCVSCache.NO_ADDITIONS, deletions);
        }
    } catch (final Exception e) {
        mgmt.rollback();
        writeTx.rollback();
        metrics.incrementCustom(FAILED_TX);
        throw new TitanException(e.getMessage(), e);
    }
}
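As a usage note, IndexRemoveJob is not called directly: it runs as a scan job behind the schema API. A hedged sketch of the typical trigger, assuming graph is an open TitanGraph with a graph index named "byName" that has already been disabled (the index name is illustrative):

TitanManagement mgmt = graph.openManagement();
TitanGraphIndex index = mgmt.getGraphIndex("byName");
// REMOVE_INDEX schedules the scan job whose process() callback is shown above.
mgmt.updateIndex(index, SchemaAction.REMOVE_INDEX);
mgmt.commit();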
Use of com.thinkaurelius.titan.diskstorage.keycolumnvalue.SliceQuery in project titan by thinkaurelius.
The class HadoopScanMapper, method finishSetup.
protected void finishSetup(ModifiableHadoopConfiguration scanConf, Configuration graphConf) {
    jobConf = getJobConfiguration(scanConf);
    Preconditions.checkNotNull(metrics);
    // Allowed to be null for jobs that specify no configuration and no configuration root
    //Preconditions.checkNotNull(jobConf);
    Preconditions.checkNotNull(job);
    job.workerIterationStart(jobConf, graphConf, metrics);
    keyFilter = job.getKeyFilter();
    List<SliceQuery> sliceQueries = job.getQueries();
    Preconditions.checkArgument(null != sliceQueries, "Job cannot specify null query list");
    Preconditions.checkArgument(0 < sliceQueries.size(), "Job must specify at least one query");
    // Assign head of getQueries() to "initialQuery"
    initialQuery = sliceQueries.get(0);
    // Assign tail of getQueries() to "subsequentQueries"
    subsequentQueries = new ArrayList<>(sliceQueries.subList(1, sliceQueries.size()));
    Preconditions.checkState(sliceQueries.size() == subsequentQueries.size() + 1);
    Preconditions.checkNotNull(initialQuery);
    if (0 < subsequentQueries.size()) {
        //It is assumed that the first query is the grounding query if multiple queries exist
        StaticBuffer start = initialQuery.getSliceStart();
        Preconditions.checkArgument(start.equals(BufferUtil.zeroBuffer(1)), "Expected start of first query to be all 0s: %s", start);
        StaticBuffer end = initialQuery.getSliceEnd();
        Preconditions.checkArgument(end.equals(BufferUtil.oneBuffer(end.length())), "Expected end of first query to be all 1s: %s", end);
    }
}
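The grounding invariant checked above is easy to illustrate. A minimal sketch of a full-range SliceQuery that satisfies both preconditions (the one-byte start and four-byte end lengths are arbitrary choices here; the end check only requires all 1s of whatever length is used):

// start = all 0s and end = all 1s, so the slice covers every column of a row.
SliceQuery grounding = new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(4));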
Use of com.thinkaurelius.titan.diskstorage.keycolumnvalue.SliceQuery in project titan by thinkaurelius.
The class HadoopScanMapper, method map.
@Override
protected void map(StaticBuffer key, Iterable<Entry> values, Context context) throws IOException, InterruptedException {
    EntryArrayList al = EntryArrayList.of(values);
    // KeyFilter check
    if (!keyFilter.test(key)) {
        log.debug("Skipping key {} based on KeyFilter", key);
        return;
    }
    // InitialQuery check (at least one match is required or else the key is ignored)
    EntryList initialQueryMatches = findEntriesMatchingQuery(initialQuery, al);
    if (0 == initialQueryMatches.size()) {
        log.debug("Skipping key {} based on InitialQuery ({}) match failure", key, initialQuery);
        return;
    }
    // Both conditions (KeyFilter && InitialQuery) for invoking process are satisfied
    // Create an entries parameter to be passed into the process method
    Map<SliceQuery, EntryList> matches = new HashMap<>();
    matches.put(initialQuery, initialQueryMatches);
    // Find matches (if any are present) for noninitial queries
    for (SliceQuery sq : subsequentQueries) {
        matches.put(sq, findEntriesMatchingQuery(sq, al));
    }
    // Process
    job.process(key, matches, metrics);
}
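The keyFilter consulted at the top of map comes from the job's getKeyFilter(), used above as a java.util.function.Predicate over the row key (note the test call). A hedged sketch of a custom filter a job might supply (the condition itself is purely illustrative):

// Skip empty keys and any key whose first byte has the high bit set.
Predicate<StaticBuffer> keyFilter = key -> key.length() > 0 && key.getByte(0) >= 0;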
Use of com.thinkaurelius.titan.diskstorage.keycolumnvalue.SliceQuery in project titan by thinkaurelius.
The class StandardTitanTx, method executeMultiQuery.
public void executeMultiQuery(final Collection<InternalVertex> vertices, final SliceQuery sq, final QueryProfiler profiler) {
    // Collect the ids of vertices that exist in storage, are cacheable, and have not yet loaded this slice
    LongArrayList vids = new LongArrayList(vertices.size());
    for (InternalVertex v : vertices) {
        if (!v.isNew() && v.hasId() && (v instanceof CacheVertex) && !v.hasLoadedRelations(sq))
            vids.add(v.longId());
    }
    if (!vids.isEmpty()) {
        List<EntryList> results = QueryProfiler.profile(profiler, sq, true, q -> graph.edgeMultiQuery(vids, q, txHandle));
        // results is aligned with vids, so walk vertices and results in lockstep,
        // advancing pos only for vertices that were actually queried
        int pos = 0;
        for (TitanVertex v : vertices) {
            if (pos < vids.size() && vids.get(pos) == v.longId()) {
                final EntryList vresults = results.get(pos);
                ((CacheVertex) v).loadRelations(sq, new Retriever<SliceQuery, EntryList>() {

                    @Override
                    public EntryList get(SliceQuery query) {
                        return vresults;
                    }
                });
                pos++;
            }
        }
    }
}
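Since Retriever declares a single method, the anonymous class above can equivalently be written as a lambda. A sketch of the same cache-priming call, with v, sq, and vresults as in the loop above:

// The retriever ignores its argument: the pre-fetched list answers exactly this SliceQuery.
((CacheVertex) v).loadRelations(sq, query -> vresults);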