use of org.janusgraph.diskstorage.EntryList in project janusgraph by JanusGraph.
the class MultiKeySliceQuery method execute.
public List<EntryList> execute(final BackendTransaction tx) {
    int total = 0;
    final List<EntryList> result = new ArrayList<>(Math.min(getLimit(), queries.size()));
    for (KeySliceQuery ksq : queries) {
        EntryList next = tx.indexQuery(ksq.updateLimit(getLimit() - total));
        result.add(next);
        total += next.size();
        if (total >= getLimit())
            break;
    }
    return result;
}
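execute spreads one global limit across the sub-queries: each KeySliceQuery is re-issued with the remaining budget (getLimit() - total), and the loop stops as soon as that budget is spent. A minimal sketch of the same budgeting pattern, with a hypothetical IntFunction standing in for tx.indexQuery:

import java.util.ArrayList;
import java.util.List;
import java.util.function.IntFunction;

public class LimitBudgetDemo {
    // Runs each sub-query with the budget that remains, stopping once 'limit' rows are collected.
    static <T> List<List<T>> executeWithBudget(List<IntFunction<List<T>>> subQueries, int limit) {
        final List<List<T>> result = new ArrayList<>();
        int total = 0;
        for (IntFunction<List<T>> q : subQueries) {
            List<T> next = q.apply(limit - total); // remaining budget becomes the sub-query limit
            result.add(next);
            total += next.size();
            if (total >= limit)
                break; // budget exhausted: skip the remaining sub-queries
        }
        return result;
    }

    public static void main(String[] args) {
        List<IntFunction<List<Integer>>> subQueries = List.of(
            limit -> List.of(1, 2).subList(0, Math.min(2, limit)),
            limit -> List.of(3, 4).subList(0, Math.min(2, limit)));
        System.out.println(executeWithBudget(subQueries, 3)); // [[1, 2], [3]]
    }
}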
use of org.janusgraph.diskstorage.EntryList in project janusgraph by JanusGraph.
the class GhostVertexRemover method process.
@Override
public void process(StaticBuffer key, Map<SliceQuery, EntryList> entries, ScanMetrics metrics) {
    long vertexId = getVertexId(key);
    assert entries.size() == 1;
    assert entries.get(everythingQueryLimit) != null;
    final EntryList everything = entries.get(everythingQueryLimit);
    if (!isGhostVertex(vertexId, everything)) {
        return;
    }
    if (everything.size() >= RELATION_COUNT_LIMIT) {
        metrics.incrementCustom(SKIPPED_GHOST_LIMIT_COUNT);
        return;
    }
    JanusGraphVertex vertex = tx.getInternalVertex(vertexId);
    Preconditions.checkArgument(vertex instanceof CacheVertex, "The bounding transaction is not configured correctly");
    CacheVertex v = (CacheVertex) vertex;
    v.loadRelations(EVERYTHING_QUERY, input -> everything);
    int removedRelations = 0;
    Iterator<JanusGraphRelation> iterator = v.query().noPartitionRestriction().relations().iterator();
    while (iterator.hasNext()) {
        iterator.next();
        iterator.remove();
        removedRelations++;
    }
    // There should be no more system relations to remove
    metrics.incrementCustom(REMOVED_VERTEX_COUNT);
    metrics.incrementCustom(REMOVED_RELATION_COUNT, removedRelations);
}
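The removal loop deletes each relation through Iterator.remove() as it is visited, rather than materialising the relations first. A minimal sketch of the same pattern for an arbitrary vertex, assuming an open transaction and that the query iterator supports removal, as it does in the scan job above; removeAllRelations is an illustrative helper, not JanusGraph API:

import java.util.Iterator;
import org.janusgraph.core.JanusGraphRelation;
import org.janusgraph.core.JanusGraphVertex;

public class RelationRemovalDemo {
    // Deletes every edge and property of the vertex via Iterator.remove(); returns the count.
    static int removeAllRelations(JanusGraphVertex v) {
        int removed = 0;
        Iterator<JanusGraphRelation> it = v.query().relations().iterator();
        while (it.hasNext()) {
            it.next();   // advance to the next relation
            it.remove(); // deletes the relation in the enclosing transaction
            removed++;
        }
        return removed;
    }
}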
use of org.janusgraph.diskstorage.EntryList in project janusgraph by JanusGraph.
the class StandardSchemaCache method getSchemaRelations.
@Override
public EntryList getSchemaRelations(final long schemaId, final BaseRelationType type, final Direction dir) {
    assert IDManager.isSystemRelationTypeId(type.longId()) && type.longId() > 0;
    Preconditions.checkArgument(IDManager.VertexIDType.Schema.is(schemaId));
    Preconditions.checkArgument((Long.MAX_VALUE >>> (SCHEMAID_TOTALFORW_SHIFT - SCHEMAID_BACK_SHIFT)) >= schemaId);
    int edgeDir = EdgeDirection.position(dir);
    assert edgeDir == 0 || edgeDir == 1;
    final long typePlusRelation = getIdentifier(schemaId, type, dir);
    ConcurrentMap<Long, EntryList> types = schemaRelations;
    EntryList entries;
    if (types == null) {
        entries = schemaRelationsBackup.getIfPresent(typePlusRelation);
        if (entries == null) {
            entries = retriever.retrieveSchemaRelations(schemaId, type, dir);
            if (!entries.isEmpty()) {
                // only cache if the type exists
                schemaRelationsBackup.put(typePlusRelation, entries);
            }
        }
    } else {
        entries = types.get(typePlusRelation);
        if (entries == null) {
            // Retrieve it
            if (types.size() > maxCachedRelations) {
                /* Safeguard against the concurrent hash map growing too large - this would be a VERY rare
                 * event, as it only happens for graph databases with thousands of types. */
                schemaRelations = null;
                return getSchemaRelations(schemaId, type, dir);
            } else {
                // Expand map
                entries = retriever.retrieveSchemaRelations(schemaId, type, dir);
                types.put(typePlusRelation, entries);
            }
        }
    }
    assert entries != null;
    return entries;
}
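getSchemaRelations is a two-tier cache: an unbounded ConcurrentHashMap serves as the fast path, and once it outgrows maxCachedRelations the map is discarded (schemaRelations = null) so that all further lookups fall through to a size-bounded backup cache. A minimal sketch of that overflow strategy using Guava's Cache; the class and field names here are illustrative, not the real ones:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public class TwoTierCacheDemo {
    private static final int MAX_FAST_ENTRIES = 1000;
    // Fast path: plain map with no eviction; set to null once it grows too large.
    private volatile ConcurrentMap<Long, String> fastPath = new ConcurrentHashMap<>();
    // Fallback: size-bounded cache that evicts old entries.
    private final Cache<Long, String> backup =
        CacheBuilder.newBuilder().maximumSize(MAX_FAST_ENTRIES).build();

    public String get(long key, Function<Long, String> retriever) {
        final ConcurrentMap<Long, String> map = fastPath;
        if (map == null) { // fast path abandoned: serve from the evicting backup cache
            String value = backup.getIfPresent(key);
            if (value == null) {
                value = retriever.apply(key);
                backup.put(key, value);
            }
            return value;
        }
        String value = map.get(key);
        if (value == null) {
            if (map.size() > MAX_FAST_ENTRIES) { // rare: too many entries, switch tiers for good
                fastPath = null;
                return get(key, retriever);
            }
            value = retriever.apply(key);
            map.put(key, value);
        }
        return value;
    }
}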
use of org.janusgraph.diskstorage.EntryList in project janusgraph by JanusGraph.
the class AstyanaxKeyColumnValueStore method getNamesSlice.
public Map<StaticBuffer, EntryList> getNamesSlice(List<StaticBuffer> keys, SliceQuery query, StoreTransaction txh) throws BackendException {
    /*
     * RowQuery<K,C> should be parameterized as
     * RowQuery<ByteBuffer,ByteBuffer>. However, this causes the following
     * compilation error when attempting to call withColumnRange on a
     * RowQuery<ByteBuffer,ByteBuffer> instance:
     *
     * java.lang.Error: Unresolved compilation problem: The method
     * withColumnRange(ByteBuffer, ByteBuffer, boolean, int) is ambiguous
     * for the type RowQuery<ByteBuffer,ByteBuffer>
     *
     * The compiler substitutes ByteBuffer=C for both startColumn and
     * endColumn, compares it to its identical twin with that type
     * hard-coded, and dies.
     */
    // Add one for the last column potentially removed in CassandraHelper.makeEntryList
    final int queryLimit = query.getLimit() + (query.hasLimit() ? 1 : 0);
    final int pageLimit = Math.min(this.readPageSize, queryLimit);
    ByteBuffer sliceStart = query.getSliceStart().asByteBuffer();
    final ByteBuffer sliceEnd = query.getSliceEnd().asByteBuffer();
    final RowSliceQuery rq = keyspace.prepareQuery(columnFamily)
        .setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax())
        .withRetryPolicy(retryPolicy.duplicate())
        .getKeySlice(CassandraHelper.convert(keys));
    // Don't directly chain due to ambiguity resolution; see top comment
    rq.withColumnRange(sliceStart, sliceEnd, false, pageLimit);
    final OperationResult<Rows<ByteBuffer, ByteBuffer>> r;
    try {
        r = (OperationResult<Rows<ByteBuffer, ByteBuffer>>) rq.execute();
    } catch (ConnectionException e) {
        throw new TemporaryBackendException(e);
    }
    final Rows<ByteBuffer, ByteBuffer> rows = r.getResult();
    final Map<StaticBuffer, EntryList> result = new HashMap<>(rows.size());
    for (Row<ByteBuffer, ByteBuffer> row : rows) {
        assert !result.containsKey(row.getKey());
        final ByteBuffer key = row.getKey();
        ColumnList<ByteBuffer> pageColumns = row.getColumns();
        final List<Column<ByteBuffer>> queryColumns = new ArrayList<>();
        Iterables.addAll(queryColumns, pageColumns);
        while (pageColumns.size() == pageLimit && queryColumns.size() < queryLimit) {
            final Column<ByteBuffer> lastColumn = queryColumns.get(queryColumns.size() - 1);
            sliceStart = lastColumn.getName();
            // No possibility of two values at the same column name, so start the
            // next slice one bit after the last column found by the previous query.
            // byte[] is little-endian
            Integer position = null;
            for (int i = sliceStart.array().length - 1; i >= 0; i--) {
                if (sliceStart.array()[i] < Byte.MAX_VALUE) {
                    position = i;
                    sliceStart.array()[i]++;
                    break;
                }
            }
            if (null == position) {
                throw new PermanentBackendException("Column was not incrementable");
            }
            final RowQuery pageQuery = keyspace.prepareQuery(columnFamily)
                .setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax())
                .withRetryPolicy(retryPolicy.duplicate())
                .getKey(row.getKey());
            // Don't directly chain due to ambiguity resolution; see top comment
            pageQuery.withColumnRange(sliceStart, sliceEnd, false, pageLimit);
            final OperationResult<ColumnList<ByteBuffer>> pageResult;
            try {
                pageResult = (OperationResult<ColumnList<ByteBuffer>>) pageQuery.execute();
            } catch (ConnectionException e) {
                throw new TemporaryBackendException(e);
            }
            if (Thread.interrupted()) {
                throw new TraversalInterruptedException();
            }
            // Reset the incremented position to avoid leaking mutations up the
            // stack to callers - sliceStart.array() in fact refers to a column name
            // that will later be read to deserialize an edge (since we assigned it
            // by dereferencing a column from the previous query).
            sliceStart.array()[position]--;
            pageColumns = pageResult.getResult();
            Iterables.addAll(queryColumns, pageColumns);
        }
        result.put(StaticArrayBuffer.of(key), CassandraHelper.makeEntryList(queryColumns, entryGetter, query.getSliceEnd(), query.getLimit()));
    }
    return result;
}
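Paging here hinges on nudging the last seen column name one step forward so the next slice starts strictly after it, then reverting the in-place mutation because the backing array is shared with an entry that is deserialized later. A standalone sketch of that increment-then-restore trick on a plain byte[]; incrementLastByte is a hypothetical helper, not JanusGraph API:

public class ByteIncrementDemo {
    // Increments the last byte below Byte.MAX_VALUE and returns its index,
    // or -1 if no byte can be incremented (mirrors the "not incrementable" failure above).
    static int incrementLastByte(byte[] name) {
        for (int i = name.length - 1; i >= 0; i--) {
            if (name[i] < Byte.MAX_VALUE) {
                name[i]++;
                return i;
            }
        }
        return -1;
    }

    public static void main(String[] args) {
        byte[] columnName = {0x01, 0x7F, 0x02};
        int position = incrementLastByte(columnName); // now {0x01, 0x7F, 0x03}
        // ... issue the next page query with columnName as the new exclusive slice start ...
        if (position >= 0) {
            columnName[position]--; // restore the shared array so later readers see the original name
        }
    }
}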
use of org.janusgraph.diskstorage.EntryList in project janusgraph by JanusGraph.
the class StandardJanusGraphTx method getVertices.
@Override
public Iterable<JanusGraphVertex> getVertices(long... ids) {
    verifyOpen();
    if (ids == null || ids.length == 0)
        return (Iterable) getInternalVertices();
    if (null != config.getGroupName()) {
        MetricManager.INSTANCE.getCounter(config.getGroupName(), "db", "getVerticesByID").inc();
    }
    final List<JanusGraphVertex> result = new ArrayList<>(ids.length);
    final LongArrayList vertexIds = new LongArrayList(ids.length);
    for (long id : ids) {
        if (isValidVertexId(id)) {
            if (idInspector.isPartitionedVertex(id))
                id = idManager.getCanonicalVertexId(id);
            if (vertexCache.contains(id))
                result.add(vertexCache.get(id, existingVertexRetriever));
            else
                vertexIds.add(id);
        }
    }
    if (!vertexIds.isEmpty()) {
        if (externalVertexRetriever.hasVerifyExistence()) {
            List<EntryList> existence = graph.edgeMultiQuery(vertexIds, graph.vertexExistenceQuery, txHandle);
            for (int i = 0; i < vertexIds.size(); i++) {
                if (!existence.get(i).isEmpty()) {
                    long id = vertexIds.get(i);
                    result.add(vertexCache.get(id, existingVertexRetriever));
                }
            }
        } else {
            for (int i = 0; i < vertexIds.size(); i++) {
                result.add(vertexCache.get(vertexIds.get(i), externalVertexRetriever));
            }
        }
    }
    // Filter out potentially removed vertices
    result.removeIf(JanusGraphElement::isRemoved);
    return result;
}
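For callers this means vertex lookups are batched: cached vertices are answered locally, and the remaining ids are verified against the backend in a single edgeMultiQuery rather than one round trip per id. A minimal usage sketch, assuming an open JanusGraph instance and vertices that carry a name property (both assumptions, not part of the code above):

import org.janusgraph.core.JanusGraph;
import org.janusgraph.core.JanusGraphTransaction;
import org.janusgraph.core.JanusGraphVertex;

public class MultiVertexLookupDemo {
    static void printNames(JanusGraph graph, long... ids) {
        JanusGraphTransaction tx = graph.newTransaction();
        try {
            // One call resolves all ids; missing or removed vertices are filtered out of the result.
            for (JanusGraphVertex v : tx.getVertices(ids)) {
                System.out.println(v.id() + " -> " + v.value("name")); // assumes a 'name' property key
            }
        } finally {
            tx.rollback(); // read-only: discard the transaction
        }
    }
}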