Use of org.janusgraph.diskstorage.EntryList in the janusgraph project by JanusGraph.
The class StandardJanusGraphTx, method executeMultiQuery.
public void executeMultiQuery(final Collection<InternalVertex> vertices, final SliceQuery sq, final QueryProfiler profiler) {
    LongArrayList vertexIds = new LongArrayList(vertices.size());
    // Collect only cached, persisted vertices that have not loaded this slice yet.
    for (InternalVertex v : vertices) {
        if (!v.isNew() && v.hasId() && (v instanceof CacheVertex) && !v.hasLoadedRelations(sq))
            vertexIds.add(v.longId());
    }
    if (!vertexIds.isEmpty()) {
        // A single backend call fetches the slice for all collected vertex ids.
        List<EntryList> results = QueryProfiler.profile(profiler, sq, true, q -> graph.edgeMultiQuery(vertexIds, q, txHandle));
        int pos = 0;
        // vertexIds was filled in the same iteration order as vertices, so results can be
        // matched back positionally, skipping vertices that were filtered out above.
        for (JanusGraphVertex v : vertices) {
            if (pos < vertexIds.size() && vertexIds.get(pos) == v.longId()) {
                final EntryList vresults = results.get(pos);
                ((CacheVertex) v).loadRelations(sq, query -> vresults);
                pos++;
            }
        }
    }
}
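For callers outside the transaction internals, the same batching is exposed through the multi-vertex query API on the transaction. Below is a minimal sketch assuming the in-memory backend and automatic schema creation; the labels and configuration are illustrative, and the query internally batches backend reads much like executeMultiQuery above.

import java.util.Map;
import org.apache.tinkerpop.gremlin.structure.Direction;
import org.janusgraph.core.JanusGraph;
import org.janusgraph.core.JanusGraphEdge;
import org.janusgraph.core.JanusGraphFactory;
import org.janusgraph.core.JanusGraphTransaction;
import org.janusgraph.core.JanusGraphVertex;

public class MultiQueryExample {
    public static void main(String[] args) {
        // Illustrative configuration: an in-memory backend for a self-contained run.
        JanusGraph graph = JanusGraphFactory.build()
                .set("storage.backend", "inmemory")
                .open();
        JanusGraphTransaction tx = graph.newTransaction();
        JanusGraphVertex a = tx.addVertex("person");
        JanusGraphVertex b = tx.addVertex("person");
        a.addEdge("knows", b);
        // One query object covers both vertices instead of issuing one query per vertex.
        Map<JanusGraphVertex, Iterable<JanusGraphEdge>> outEdges =
                tx.multiQuery(a, b).direction(Direction.OUT).edges();
        outEdges.forEach((v, es) -> es.forEach(e -> System.out.println(v.id() + " -[" + e.label() + "]->")));
        tx.commit();
        graph.close();
    }
}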
Use of org.janusgraph.diskstorage.EntryList in the janusgraph project by JanusGraph.
The class PartitionedVertexProgramExecutor, method run.
public void run(int numThreads, ScanMetrics metrics) {
    StandardJanusGraphTx tx = null;
    Map<Long, EntryList> pVertexAggregates = vertexMemory.retrievePartitionAggregates();
    // Nothing to do here
    if (pVertexAggregates.isEmpty())
        return;
    try (WorkerPool workers = new WorkerPool(numThreads)) {
        tx = VertexJobConverter.startTransaction(graph);
        for (Map.Entry<Long, EntryList> partitionedVertices : pVertexAggregates.entrySet()) {
            if (partitionedVertices.getValue() == null) {
                metrics.incrementCustom(GHOST_PARTITION_VERTEX);
                continue;
            }
            workers.submit(new PartitionedVertexProcessor(partitionedVertices.getKey(), partitionedVertices.getValue(), tx, metrics));
        }
    } catch (Throwable ex) {
        log.error("Could not post-process partitioned vertices", ex);
        metrics.incrementCustom(PARTITION_VERTEX_POSTFAIL);
    } finally {
        if (tx != null && tx.isOpen())
            tx.rollback();
    }
}
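The control flow here (skip null aggregates, fan the rest out to a worker pool, always roll back the helper transaction) can be reproduced with a plain ExecutorService in place of JanusGraph's WorkerPool. This is a simplified, library-free sketch with illustrative names, not the actual executor implementation.

import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class PartitionAggregateRunner {
    public static void process(Map<Long, byte[]> aggregates, int numThreads) {
        ExecutorService pool = Executors.newFixedThreadPool(numThreads);
        try {
            for (Map.Entry<Long, byte[]> e : aggregates.entrySet()) {
                if (e.getValue() == null) {
                    // Ghost entry: nothing was loaded for this key, so skip it.
                    continue;
                }
                pool.submit(() -> System.out.println("processing vertex " + e.getKey()));
            }
        } finally {
            // Mirror the finally-block cleanup: shut the pool down no matter what happened.
            pool.shutdown();
            try {
                pool.awaitTermination(1, TimeUnit.MINUTES);
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
            }
        }
    }
}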
Use of org.janusgraph.diskstorage.EntryList in the janusgraph project by JanusGraph.
The class VertexProgramScanJob, method process.
@Override
public void process(JanusGraphVertex vertex, ScanMetrics metrics) {
    PreloadedVertex v = (PreloadedVertex) vertex;
    long vertexId = v.longId();
    VertexMemoryHandler<M> vh = new VertexMemoryHandler(vertexMemory, v);
    vh.setInExecute(true);
    v.setAccessCheck(PreloadedVertex.OPENSTAR_CHECK);
    if (idManager.isPartitionedVertex(vertexId)) {
        if (idManager.isCanonicalVertexId(vertexId)) {
            EntryList results = v.getFromCache(SYSTEM_PROPS_QUERY);
            if (results == null)
                results = EntryList.EMPTY_LIST;
            vertexMemory.setLoadedProperties(vertexId, results);
        }
        for (MessageScope scope : vertexMemory.getPreviousScopes()) {
            if (scope instanceof MessageScope.Local) {
                M combinedMsg = null;
                for (Iterator<M> messageIterator = vh.receiveMessages(scope).iterator(); messageIterator.hasNext(); ) {
                    M msg = messageIterator.next();
                    if (combinedMsg == null)
                        combinedMsg = msg;
                    else
                        combinedMsg = combiner.combine(combinedMsg, msg);
                }
                if (combinedMsg != null)
                    vertexMemory.aggregateMessage(vertexId, combinedMsg, scope);
            }
        }
    } else {
        v.setPropertyMixing(vh);
        try {
            vertexProgram.execute(v, vh, memory);
        } catch (ReadOnlyTransactionException e) {
            // Ignore read-only transaction errors in FulgoraGraphComputer. In testing these errors are associated
            // with cleanup of TraversalVertexProgram.HALTED_TRAVERSALS properties which can safely remain in graph.
        }
    }
    vh.setInExecute(false);
}
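The inner loop over receiveMessages is a left fold with the configured combiner. A stand-alone version of that fold, using a generic BinaryOperator as a hypothetical combiner, looks like this.

import java.util.Iterator;
import java.util.List;
import java.util.function.BinaryOperator;

public class MessageFold {
    // Fold all messages into one, exactly as the loop above does with combiner.combine.
    static <M> M combineAll(Iterable<M> messages, BinaryOperator<M> combiner) {
        M combined = null;
        for (Iterator<M> it = messages.iterator(); it.hasNext(); ) {
            M msg = it.next();
            combined = (combined == null) ? msg : combiner.apply(combined, msg);
        }
        return combined; // null when there were no messages, so the caller can skip aggregation
    }

    public static void main(String[] args) {
        Integer sum = combineAll(List.of(1, 2, 3), Integer::sum);
        System.out.println(sum); // prints 6
    }
}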
Use of org.janusgraph.diskstorage.EntryList in the janusgraph project by JanusGraph.
The class MetricInstrumentedStore, method getSlice.
@Override
public EntryList getSlice(final KeySliceQuery query, final StoreTransaction txh) throws BackendException {
    return runWithMetrics(txh, metricsStoreName, M_GET_SLICE, () -> {
        final EntryList result = backend.getSlice(query, txh);
        recordSliceMetrics(txh, result);
        return result;
    });
}
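getSlice only decorates the backing store: it delegates the call and records metrics around it. The same decorator idea in a minimal, library-free form; the timing output below is illustrative and not JanusGraph's metrics API.

import java.util.concurrent.Callable;

public class TimedCall {
    // Run an action and report how long it took, regardless of success or failure.
    static <T> T runWithTiming(String name, Callable<T> action) throws Exception {
        long start = System.nanoTime();
        try {
            return action.call();
        } finally {
            long elapsedMicros = (System.nanoTime() - start) / 1_000;
            System.out.println(name + " took " + elapsedMicros + " microseconds");
        }
    }

    public static void main(String[] args) throws Exception {
        int size = runWithTiming("getSlice", () -> java.util.List.of(1, 2, 3).size());
        System.out.println("result size = " + size);
    }
}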
Use of org.janusgraph.diskstorage.EntryList in the janusgraph project by JanusGraph.
The class StandardSchemaCache, method expireSchemaElement.
// @Override
// public void expireSchemaName(final String name) {
//     ConcurrentMap<String,Long> types = typeNames;
//     if (types!=null) types.remove(name);
//     typeNamesBackup.invalidate(name);
// }
@Override
public void expireSchemaElement(final long schemaId) {
    // 1) expire relations
    final long cutTypeId = (schemaId >>> SCHEMAID_BACK_SHIFT);
    ConcurrentMap<Long, EntryList> types = schemaRelations;
    if (types != null) {
        types.keySet().removeIf(key -> (key >>> SCHEMAID_TOTALFORW_SHIFT) == cutTypeId);
    }
    for (Long key : schemaRelationsBackup.asMap().keySet()) {
        if ((key >>> SCHEMAID_TOTALFORW_SHIFT) == cutTypeId)
            schemaRelationsBackup.invalidate(key);
    }
    // 2) expire names
    ConcurrentMap<String, Long> names = typeNames;
    if (names != null) {
        names.entrySet().removeIf(next -> next.getValue().equals(schemaId));
    }
    for (Map.Entry<String, Long> entry : typeNamesBackup.asMap().entrySet()) {
        if (entry.getValue().equals(schemaId))
            typeNamesBackup.invalidate(entry.getKey());
    }
}
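The expiry works because cached relation keys are composite longs whose high bits encode the schema/type id, so shifting a key recovers that id. A minimal sketch of the same keying scheme, with illustrative shift constants standing in for the real SCHEMAID_* values:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class CompositeKeyExpiry {
    // Illustrative values only; the real constants live in StandardSchemaCache.
    static final int BACK_SHIFT = 3;
    static final int TOTAL_FORW_SHIFT = 3;

    public static void main(String[] args) {
        ConcurrentMap<Long, String> cache = new ConcurrentHashMap<>();
        long schemaId = 42;
        long typeId = schemaId >>> BACK_SHIFT;
        // Keys combine the type id (high bits) with a per-entry discriminator (low bits).
        cache.put((typeId << TOTAL_FORW_SHIFT) | 1, "relations-dir-1");
        cache.put((typeId << TOTAL_FORW_SHIFT) | 2, "relations-dir-2");
        cache.put(999L << TOTAL_FORW_SHIFT, "other-type");

        // Expire every cached entry belonging to the given type id, as removeIf does above.
        cache.keySet().removeIf(key -> (key >>> TOTAL_FORW_SHIFT) == typeId);
        System.out.println(cache); // only the "other-type" entry remains
    }
}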