Use of com.thinkaurelius.titan.diskstorage.keycolumnvalue.SliceQuery in project titan by thinkaurelius.
The class EdgeSerializer, method getQuery:
public SliceQuery getQuery(InternalRelationType type, Direction dir, TypedInterval[] sortKey) {
    Preconditions.checkNotNull(type);
    Preconditions.checkNotNull(dir);
    Preconditions.checkArgument(type.isUnidirected(Direction.BOTH) || type.isUnidirected(dir));
    StaticBuffer sliceStart = null, sliceEnd = null;
    RelationCategory rt = type.isPropertyKey() ? RelationCategory.PROPERTY : RelationCategory.EDGE;
    if (dir == Direction.BOTH) {
        assert type.isEdgeLabel();
        sliceStart = IDHandler.getRelationType(type.longId(), getDirID(Direction.OUT, rt), type.isInvisibleType());
        sliceEnd = IDHandler.getRelationType(type.longId(), getDirID(Direction.IN, rt), type.isInvisibleType());
        assert sliceStart.compareTo(sliceEnd) < 0;
        sliceEnd = BufferUtil.nextBiggerBuffer(sliceEnd);
    } else {
        DirectionID dirID = getDirID(dir, rt);
        DataOutput colStart = serializer.getDataOutput(DEFAULT_COLUMN_CAPACITY);
        DataOutput colEnd = serializer.getDataOutput(DEFAULT_COLUMN_CAPACITY);
        IDHandler.writeRelationType(colStart, type.longId(), dirID, type.isInvisibleType());
        IDHandler.writeRelationType(colEnd, type.longId(), dirID, type.isInvisibleType());
        long[] sortKeyIDs = type.getSortKey();
        Preconditions.checkArgument(sortKey.length >= sortKeyIDs.length);
        assert colStart.getPosition() == colEnd.getPosition();
        int keyStartPos = colStart.getPosition();
        int keyEndPos = -1;
        for (int i = 0; i < sortKey.length && sortKey[i] != null; i++) {
            PropertyKey skey = sortKey[i].key;
            Interval interval = sortKey[i].interval;
            if (i >= sortKeyIDs.length) {
                assert !type.multiplicity().isUnique(dir);
                assert (skey instanceof ImplicitKey) && (skey == ImplicitKey.TITANID || skey == ImplicitKey.ADJACENT_ID);
                assert skey != ImplicitKey.ADJACENT_ID || (i == sortKeyIDs.length);
                assert skey != ImplicitKey.TITANID || (!type.multiplicity().isConstrained() && (i == sortKeyIDs.length && skey.isPropertyKey() || i == sortKeyIDs.length + 1 && skey.isEdgeLabel()));
                assert colStart.getPosition() == colEnd.getPosition();
                assert interval == null || interval.isPoints();
                keyEndPos = colStart.getPosition();
            } else {
                assert !type.multiplicity().isConstrained();
                assert skey.longId() == sortKeyIDs[i];
            }
            if (interval == null || interval.isEmpty()) {
                break;
            }
            if (interval.isPoints()) {
                if (skey == ImplicitKey.TITANID || skey == ImplicitKey.ADJACENT_ID) {
                    assert !type.multiplicity().isUnique(dir);
                    VariableLong.writePositiveBackward(colStart, (Long) interval.getStart());
                    VariableLong.writePositiveBackward(colEnd, (Long) interval.getEnd());
                } else {
                    writeInline(colStart, skey, interval.getStart(), InlineType.KEY);
                    writeInline(colEnd, skey, interval.getEnd(), InlineType.KEY);
                }
            } else {
                if (interval.getStart() != null)
                    writeInline(colStart, skey, interval.getStart(), InlineType.KEY);
                if (interval.getEnd() != null)
                    writeInline(colEnd, skey, interval.getEnd(), InlineType.KEY);
                switch (type.getSortOrder()) {
                    case ASC:
                        sliceStart = colStart.getStaticBuffer();
                        sliceEnd = colEnd.getStaticBuffer();
                        if (!interval.startInclusive())
                            sliceStart = BufferUtil.nextBiggerBuffer(sliceStart);
                        if (interval.endInclusive())
                            sliceEnd = BufferUtil.nextBiggerBuffer(sliceEnd);
                        break;
                    case DESC:
                        sliceEnd = colStart.getStaticBufferFlipBytes(keyStartPos, colStart.getPosition());
                        sliceStart = colEnd.getStaticBufferFlipBytes(keyStartPos, colEnd.getPosition());
                        if (interval.startInclusive())
                            sliceEnd = BufferUtil.nextBiggerBuffer(sliceEnd);
                        if (!interval.endInclusive())
                            sliceStart = BufferUtil.nextBiggerBuffer(sliceStart);
                        break;
                    default:
                        throw new AssertionError(type.getSortOrder().toString());
                }
                assert sliceStart.compareTo(sliceEnd) <= 0;
                break;
            }
        }
        if (sliceStart == null) {
            assert sliceEnd == null && colStart.getPosition() == colEnd.getPosition();
            if (keyEndPos < 0)
                keyEndPos = colStart.getPosition();
            switch (type.getSortOrder()) {
                case ASC:
                    sliceStart = colStart.getStaticBuffer();
                    break;
                case DESC:
                    sliceStart = colStart.getStaticBufferFlipBytes(keyStartPos, keyEndPos);
                    break;
                default:
                    throw new AssertionError(type.getSortOrder().toString());
            }
            sliceEnd = BufferUtil.nextBiggerBuffer(sliceStart);
        }
    }
    return new SliceQuery(sliceStart, sliceEnd);
}
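As a hedged illustration of how a SliceQuery produced by getQuery is typically consumed, the following sketch wraps it in a KeySliceQuery and executes it against a KeyColumnValueStore. The helper class, method name, and parameters are assumptions made for this example and are not part of EdgeSerializer; the serializer, store, transaction, and row key are assumed to come from an already-configured Titan backend.

import com.thinkaurelius.titan.diskstorage.BackendException;
import com.thinkaurelius.titan.diskstorage.EntryList;
import com.thinkaurelius.titan.diskstorage.StaticBuffer;
import com.thinkaurelius.titan.diskstorage.keycolumnvalue.KeyColumnValueStore;
import com.thinkaurelius.titan.diskstorage.keycolumnvalue.KeySliceQuery;
import com.thinkaurelius.titan.diskstorage.keycolumnvalue.SliceQuery;
import com.thinkaurelius.titan.diskstorage.keycolumnvalue.StoreTransaction;
import com.thinkaurelius.titan.graphdb.database.EdgeSerializer;
import com.thinkaurelius.titan.graphdb.internal.InternalRelationType;
import org.apache.tinkerpop.gremlin.structure.Direction;

public class SliceQueryUsageSketch {

    /**
     * Hypothetical helper: builds the column range covering all edges of the given
     * (bidirectional) edge label on the vertex identified by vertexKey and reads it
     * from the backing key-column-value store.
     */
    public static EntryList readEdgesOfLabel(EdgeSerializer serializer, KeyColumnValueStore edgeStore,
                                             StoreTransaction txh, StaticBuffer vertexKey,
                                             InternalRelationType label, int limit) throws BackendException {
        // No sort-key constraints: for Direction.BOTH the sort-key array is not consulted,
        // so an empty array (or null, as in constructQueryWithoutProfile below) is sufficient.
        SliceQuery slice = serializer.getQuery(label, Direction.BOTH, new EdgeSerializer.TypedInterval[0]);
        slice.setLimit(limit);
        // Bind the column range to a concrete row key and execute it against the edge store.
        return edgeStore.getSlice(new KeySliceQuery(vertexKey, slice), txh);
    }
}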
Use of com.thinkaurelius.titan.diskstorage.keycolumnvalue.SliceQuery in project titan by thinkaurelius.
The class VertexJobConverter, method process:
@Override
public void process(StaticBuffer key, Map<SliceQuery, EntryList> entries, ScanMetrics metrics) {
    long vertexId = getVertexId(key);
    assert entries.get(VERTEX_EXISTS_QUERY) != null;
    if (isGhostVertex(vertexId, entries.get(VERTEX_EXISTS_QUERY))) {
        metrics.incrementCustom(GHOST_VERTEX_COUNT);
        return;
    }
    TitanVertex vertex = tx.getInternalVertex(vertexId);
    Preconditions.checkArgument(vertex instanceof PreloadedVertex, "The bounding transaction is not configured correctly");
    PreloadedVertex v = (PreloadedVertex) vertex;
    v.setAccessCheck(PreloadedVertex.OPENSTAR_CHECK);
    for (Map.Entry<SliceQuery, EntryList> entry : entries.entrySet()) {
        SliceQuery sq = entry.getKey();
        if (sq.equals(VERTEX_EXISTS_QUERY))
            continue;
        EntryList entryList = entry.getValue();
        if (entryList.size() >= sq.getLimit())
            metrics.incrementCustom(TRUNCATED_ENTRY_LISTS);
        v.addToQueryCache(sq.updateLimit(Query.NO_LIMIT), entryList);
    }
    job.process(v, metrics);
}
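The size-versus-limit comparison above is the converter's heuristic for detecting truncated reads. A minimal sketch of that check in isolation, with a hypothetical metric name chosen only for illustration:

import com.thinkaurelius.titan.diskstorage.EntryList;
import com.thinkaurelius.titan.diskstorage.keycolumnvalue.SliceQuery;
import com.thinkaurelius.titan.diskstorage.keycolumnvalue.scan.ScanMetrics;

public class TruncationCheckSketch {

    // Hypothetical metric name, used only for this example.
    private static final String TRUNCATED = "illustrative-truncated-entry-lists";

    /**
     * Mirrors the heuristic in VertexJobConverter.process: a result that is at least
     * as long as the query's limit may have been cut off by the backend, so it is
     * counted instead of being silently trusted as complete.
     */
    static boolean recordIfTruncated(SliceQuery query, EntryList result, ScanMetrics metrics) {
        boolean possiblyTruncated = result.size() >= query.getLimit();
        if (possiblyTruncated)
            metrics.incrementCustom(TRUNCATED);
        return possiblyTruncated;
    }
}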
Use of com.thinkaurelius.titan.diskstorage.keycolumnvalue.SliceQuery in project titan by thinkaurelius.
The class GhostVertexRemover, method process:
@Override
public void process(StaticBuffer key, Map<SliceQuery, EntryList> entries, ScanMetrics metrics) {
    long vertexId = getVertexId(key);
    assert entries.size() == 1;
    assert entries.get(everythingQueryLimit) != null;
    final EntryList everything = entries.get(everythingQueryLimit);
    if (!isGhostVertex(vertexId, everything)) {
        return;
    }
    if (everything.size() >= RELATION_COUNT_LIMIT) {
        metrics.incrementCustom(SKIPPED_GHOST_LIMIT_COUNT);
        return;
    }
    TitanVertex vertex = tx.getInternalVertex(vertexId);
    Preconditions.checkArgument(vertex instanceof CacheVertex, "The bounding transaction is not configured correctly");
    CacheVertex v = (CacheVertex) vertex;
    v.loadRelations(EVERYTHING_QUERY, new Retriever<SliceQuery, EntryList>() {
        @Override
        public EntryList get(SliceQuery input) {
            return everything;
        }
    });
    int removedRelations = 0;
    Iterator<TitanRelation> iter = v.query().noPartitionRestriction().relations().iterator();
    while (iter.hasNext()) {
        iter.next();
        iter.remove();
        removedRelations++;
    }
    //There should be no more system relations to remove
    metrics.incrementCustom(REMOVED_VERTEX_COUNT);
    metrics.incrementCustom(REMOVED_RELATION_COUNT, removedRelations);
}
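Because Retriever declares a single abstract method, the anonymous class above can be written as a lambda on Java 8. The following hedged sketch factors the preload-and-remove tail of the method into a standalone helper; the class and method names are illustrative and not part of the Titan codebase:

import java.util.Iterator;

import com.thinkaurelius.titan.core.TitanRelation;
import com.thinkaurelius.titan.diskstorage.EntryList;
import com.thinkaurelius.titan.diskstorage.keycolumnvalue.SliceQuery;
import com.thinkaurelius.titan.graphdb.vertices.CacheVertex;

public class GhostRemovalSketch {

    /**
     * Preloads the vertex with an already-fetched slice result and removes every
     * relation it reports, returning the number of removed relations.
     */
    static int removeAllRelations(CacheVertex v, SliceQuery everythingQuery, EntryList everything) {
        // Lambda equivalent of the anonymous Retriever above: every requested slice
        // resolves to the same preloaded entry list.
        v.loadRelations(everythingQuery, input -> everything);
        int removed = 0;
        Iterator<TitanRelation> iter = v.query().noPartitionRestriction().relations().iterator();
        while (iter.hasNext()) {
            iter.next();
            iter.remove();
            removed++;
        }
        return removed;
    }
}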
Use of com.thinkaurelius.titan.diskstorage.keycolumnvalue.SliceQuery in project titan by thinkaurelius.
The class BasicVertexCentricQueryBuilder, method constructQueryWithoutProfile:
protected BaseVertexCentricQuery constructQueryWithoutProfile(RelationCategory returnType) {
    assert returnType != null;
    Preconditions.checkArgument(adjacentVertex == null || returnType == RelationCategory.EDGE, "Vertex constraints only apply to edges");
    if (limit <= 0)
        return BaseVertexCentricQuery.emptyQuery();
    //Prepare direction
    if (returnType == RelationCategory.PROPERTY) {
        if (dir == Direction.IN)
            return BaseVertexCentricQuery.emptyQuery();
        dir = Direction.OUT;
    }
    //Prepare order
    orders.makeImmutable();
    assert orders.hasCommonOrder();
    //Prepare constraints
    And<TitanRelation> conditions = QueryUtil.constraints2QNF(tx, constraints);
    if (conditions == null)
        return BaseVertexCentricQuery.emptyQuery();
    //Don't be smart with query limit adjustments - it just messes up the caching layer and penalizes when appropriate limits are set by the user!
    int sliceLimit = limit;
    //Construct (optimal) SliceQueries
    EdgeSerializer serializer = tx.getEdgeSerializer();
    List<BackendQueryHolder<SliceQuery>> queries;
    if (!hasTypes()) {
        BackendQueryHolder<SliceQuery> query = new BackendQueryHolder<SliceQuery>(serializer.getQuery(returnType, querySystem),
                ((dir == Direction.BOTH || (returnType == RelationCategory.PROPERTY && dir == Direction.OUT)) && !conditions.hasChildren()), orders.isEmpty());
        if (sliceLimit != Query.NO_LIMIT && sliceLimit < Integer.MAX_VALUE / 3) {
            //If only one direction is queried, ask for twice the limit from the backend since approximately half will be filtered
            if (dir != Direction.BOTH && (returnType == RelationCategory.EDGE || returnType == RelationCategory.RELATION))
                sliceLimit *= 2;
        }
        query.getBackendQuery().setLimit(computeLimit(conditions.size(), sliceLimit));
        queries = ImmutableList.of(query);
        conditions.add(returnType);
        //Need this to filter out newly created invisible relations in the transaction
        conditions.add(new VisibilityFilterCondition<TitanRelation>(
                querySystem ? VisibilityFilterCondition.Visibility.SYSTEM : VisibilityFilterCondition.Visibility.NORMAL));
    } else {
        Set<RelationType> ts = new HashSet<RelationType>(types.length);
        queries = new ArrayList<BackendQueryHolder<SliceQuery>>(types.length + 2);
        Map<RelationType, Interval> intervalConstraints = new HashMap<RelationType, Interval>(conditions.size());
        final boolean isIntervalFittedConditions = compileConstraints(conditions, intervalConstraints);
        for (Interval pint : intervalConstraints.values()) {
            //Check if one of the constraints leads to an empty result set
            if (pint.isEmpty())
                return BaseVertexCentricQuery.emptyQuery();
        }
        for (String typeName : types) {
            InternalRelationType type = QueryUtil.getType(tx, typeName);
            if (type == null)
                continue;
            Preconditions.checkArgument(!querySystem || (type instanceof SystemRelationType), "Can only query for system types: %s", type);
            if (type instanceof ImplicitKey)
                throw new UnsupportedOperationException("Implicit types are not supported in complex queries: " + type);
            ts.add(type);
            Direction typeDir = dir;
            if (type.isPropertyKey()) {
                if (returnType == RelationCategory.EDGE)
                    throw new IllegalArgumentException("Querying for edges but including a property key: " + type.name());
                returnType = RelationCategory.PROPERTY;
                typeDir = Direction.OUT;
            }
            if (type.isEdgeLabel()) {
                if (returnType == RelationCategory.PROPERTY)
                    throw new IllegalArgumentException("Querying for properties but including an edge label: " + type.name());
                returnType = RelationCategory.EDGE;
                if (!type.isUnidirected(Direction.BOTH)) {
                    //Make sure unidirectionality lines up
                    if (typeDir == Direction.BOTH) {
                        if (type.isUnidirected(Direction.OUT))
                            typeDir = Direction.OUT;
                        else
                            typeDir = Direction.IN;
                    } else if (!type.isUnidirected(typeDir)) {
                        //Directions are incompatible
                        continue;
                    }
                }
            }
            if (type.isEdgeLabel() && typeDir == Direction.BOTH && intervalConstraints.isEmpty() && orders.isEmpty()) {
                //TODO: This if-condition is a little too restrictive - we also want to include those cases where there
                // ARE intervalConstraints or orders but those cannot be covered by any sort-keys
                SliceQuery q = serializer.getQuery(type, typeDir, null);
                q.setLimit(sliceLimit);
                queries.add(new BackendQueryHolder<SliceQuery>(q, isIntervalFittedConditions, true));
            } else {
                //Optimize for each direction independently
                Direction[] dirs = { typeDir };
                if (typeDir == Direction.BOTH) {
                    if (type.isEdgeLabel())
                        dirs = new Direction[] { Direction.OUT, Direction.IN };
                    else
                        //property key
                        dirs = new Direction[] { Direction.OUT };
                }
                for (Direction direction : dirs) {
                    /*
                      Find the best-scoring relation index to answer this query with. We score each candidate by the number
                      of conditions that its sort key satisfies. Equality conditions score higher than interval conditions
                      since they are more restrictive. We assign additional points if the sort key satisfies the order
                      of this query.
                     */
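                    /*
                      Illustrative scoring example (not from the original source): for a relation index
                      with sort key (time, weight) and a query without an order() clause that constrains
                      time to the single point 5 and weight to the range [0, 10), the candidate scores
                      5.0/1 for the time equality, then 1 for the non-point weight interval (after which
                      the sort-key scan stops), plus 3 because the empty order is trivially satisfied,
                      for a total score of 9.0.
                     */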
                    InternalRelationType bestCandidate = null;
                    double bestScore = Double.NEGATIVE_INFINITY;
                    boolean bestCandidateSupportsOrder = false;
                    for (InternalRelationType candidate : type.getRelationIndexes()) {
                        //Filter out those that don't apply
                        if (!candidate.isUnidirected(Direction.BOTH) && !candidate.isUnidirected(direction))
                            continue;
                        if (!candidate.equals(type) && candidate.getStatus() != SchemaStatus.ENABLED)
                            continue;
                        boolean supportsOrder = orders.isEmpty() || orders.getCommonOrder() == candidate.getSortOrder();
                        int currentOrder = 0;
                        double score = 0.0;
                        PropertyKey[] extendedSortKey = getExtendedSortKey(candidate, direction, tx);
                        for (int i = 0; i < extendedSortKey.length; i++) {
                            PropertyKey keyType = extendedSortKey[i];
                            if (currentOrder < orders.size() && orders.getKey(currentOrder).equals(keyType))
                                currentOrder++;
                            Interval interval = intervalConstraints.get(keyType);
                            if (interval == null || !interval.isPoints()) {
                                if (interval != null)
                                    score += 1;
                                break;
                            } else {
                                assert interval.isPoints();
                                score += 5.0 / interval.getPoints().size();
                            }
                        }
                        if (supportsOrder && currentOrder == orders.size())
                            score += 3;
                        if (score > bestScore) {
                            bestScore = score;
                            bestCandidate = candidate;
                            bestCandidateSupportsOrder = supportsOrder && currentOrder == orders.size();
                        }
                    }
                    Preconditions.checkArgument(bestCandidate != null, "Current graph schema does not support the specified query constraints for type: %s", type.name());
                    //Construct sort key constraints for the best candidate and then serialize into a SliceQuery
                    //that is wrapped into a BackendQueryHolder
                    PropertyKey[] extendedSortKey = getExtendedSortKey(bestCandidate, direction, tx);
                    EdgeSerializer.TypedInterval[] sortKeyConstraints = new EdgeSerializer.TypedInterval[extendedSortKey.length];
                    constructSliceQueries(extendedSortKey, sortKeyConstraints, 0, bestCandidate, direction, intervalConstraints,
                            sliceLimit, isIntervalFittedConditions, bestCandidateSupportsOrder, queries);
                }
            }
        }
        if (queries.isEmpty())
            return BaseVertexCentricQuery.emptyQuery();
        conditions.add(getTypeCondition(ts));
    }
    return new BaseVertexCentricQuery(QueryUtil.simplifyQNF(conditions), dir, queries, orders, limit);
}
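For context, here is a hedged sketch of the public vertex-centric query API that ultimately funnels into constructQueryWithoutProfile. If "time" were configured as a sort key of "knows", the has() constraint would be compiled into a point interval on the serialized sort key; otherwise it is applied as an in-memory filter. The in-memory backend and the property and label names are chosen only for this example.

import com.thinkaurelius.titan.core.TitanFactory;
import com.thinkaurelius.titan.core.TitanGraph;
import com.thinkaurelius.titan.core.TitanVertex;
import org.apache.tinkerpop.gremlin.structure.Direction;

public class VertexCentricQuerySketch {

    public static void main(String[] args) throws Exception {
        // In-memory backend for illustration; any storage backend behaves the same way here.
        TitanGraph graph = TitanFactory.build().set("storage.backend", "inmemory").open();
        TitanVertex v = (TitanVertex) graph.addVertex();
        TitanVertex u = (TitanVertex) graph.addVertex();
        v.addEdge("knows", u, "time", 5);

        // The label narrows the relation types, has() becomes an interval constraint,
        // and limit() feeds the slice limit computed in constructQueryWithoutProfile.
        v.query()
                .labels("knows")
                .direction(Direction.OUT)
                .has("time", 5)
                .limit(10)
                .edges()
                .forEach(e -> System.out.println(e));

        graph.close();
    }
}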
Use of com.thinkaurelius.titan.diskstorage.keycolumnvalue.SliceQuery in project titan by thinkaurelius.
The class BasicVertexCentricQueryBuilder, method constructSliceQueries:
private void constructSliceQueries(PropertyKey[] extendedSortKey, EdgeSerializer.TypedInterval[] sortKeyConstraints, int position,
                                   InternalRelationType bestCandidate, Direction direction, Map<RelationType, Interval> intervalConstraints,
                                   int sliceLimit, boolean isIntervalFittedConditions, boolean bestCandidateSupportsOrder,
                                   List<BackendQueryHolder<SliceQuery>> queries) {
    if (position < extendedSortKey.length) {
        PropertyKey keyType = extendedSortKey[position];
        Interval interval = intervalConstraints.get(keyType);
        if (interval != null) {
            sortKeyConstraints[position] = new EdgeSerializer.TypedInterval(keyType, interval);
            position++;
        }
        if (interval != null && interval.isPoints()) {
            //Keep invoking recursively to see if we can satisfy more constraints...
            for (Object point : interval.getPoints()) {
                EdgeSerializer.TypedInterval[] clonedSKC = Arrays.copyOf(sortKeyConstraints, sortKeyConstraints.length);
                clonedSKC[position - 1] = new EdgeSerializer.TypedInterval(keyType, new PointInterval(point));
                constructSliceQueries(extendedSortKey, clonedSKC, position, bestCandidate, direction, intervalConstraints,
                        sliceLimit, isIntervalFittedConditions, bestCandidateSupportsOrder, queries);
            }
            return;
        }
    }
    //...otherwise this is it and we can construct the slice query
    boolean isFitted = isIntervalFittedConditions && position == intervalConstraints.size();
    if (isFitted && position > 0) {
        //If the last interval is open ended toward the larger values, then it's not fitted because we need to
        //filter out NULL values which are serialized with a -1 (largest value) byte up front.
        EdgeSerializer.TypedInterval lastInterval = sortKeyConstraints[position - 1];
        if (!lastInterval.interval.isPoints() && lastInterval.interval.getEnd() == null)
            isFitted = false;
    }
    EdgeSerializer serializer = tx.getEdgeSerializer();
    SliceQuery q = serializer.getQuery(bestCandidate, direction, sortKeyConstraints);
    q.setLimit(computeLimit(intervalConstraints.size() - position, sliceLimit));
    queries.add(new BackendQueryHolder<SliceQuery>(q, isFitted, bestCandidateSupportsOrder));
}
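The recursion above fans out into one slice query per combination of point values across the sort key. Below is a simplified, self-contained model of that fan-out; it deliberately does not use the Titan API, and the real method additionally serializes each combination into column bounds and computes a per-query limit.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class PointFanOutSketch {

    /** Returns one value combination per slice query that the fan-out would produce. */
    static List<List<Object>> crossProduct(List<List<Object>> pointConstraints) {
        List<List<Object>> result = new ArrayList<>();
        expand(pointConstraints, 0, new ArrayList<>(), result);
        return result;
    }

    private static void expand(List<List<Object>> constraints, int position,
                               List<Object> prefix, List<List<Object>> result) {
        if (position == constraints.size()) {
            // One combination corresponds to one slice query with an exact column prefix.
            result.add(new ArrayList<>(prefix));
            return;
        }
        for (Object point : constraints.get(position)) {
            prefix.add(point);
            expand(constraints, position + 1, prefix, result);
            prefix.remove(prefix.size() - 1);
        }
    }

    public static void main(String[] args) {
        // time in {1, 3} and weight in {0.5} fan out into two combinations: [1, 0.5] and [3, 0.5]
        System.out.println(crossProduct(Arrays.asList(
                Arrays.asList((Object) 1, 3),
                Arrays.asList((Object) 0.5))));
    }
}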