Use of com.baidu.hugegraph.iterator.ExtendableIterator in project incubator-hugegraph by apache: class CachedGraphTransaction, method queryEdgesFromBackend.
@Override
@Watched(prefix = "graphcache")
protected final Iterator<HugeEdge> queryEdgesFromBackend(Query query) {
    RamTable ramtable = this.params().ramtable();
    if (ramtable != null && ramtable.matched(query)) {
        return ramtable.query(query);
    }

    if (!this.enableCacheEdge() || query.empty() ||
        query.paging() || query.bigCapacity()) {
        // Skip the cache when edge caching is disabled, or the query is
        // empty, paged, or may return a huge result set
        return super.queryEdgesFromBackend(query);
    }

    Id cacheKey = new QueryId(query);
    Object value = this.edgesCache.get(cacheKey);
    @SuppressWarnings("unchecked")
    Collection<HugeEdge> edges = (Collection<HugeEdge>) value;
    if (value != null) {
        // Invalidate the cache entry if any cached edge has expired
        for (HugeEdge edge : edges) {
            if (edge.expired()) {
                this.edgesCache.invalidate(cacheKey);
                value = null;
            }
        }
    }

    if (value != null) {
        // Cache hit with no expired edge, return the cached edges
        return edges.iterator();
    }

    // Cache miss or expired entry, query the backend
    Iterator<HugeEdge> rs = super.queryEdgesFromBackend(query);

    /*
     * An iterator can't be cached, so cache a list instead.
     * A query may hit a super node with too many edges, so fetch a few
     * head results first and decide whether to cache them.
     */
    final int tryMax = 1 + MAX_CACHE_EDGES_PER_QUERY;
    assert tryMax > MAX_CACHE_EDGES_PER_QUERY;
    edges = new ArrayList<>(tryMax);
    for (int i = 0; rs.hasNext() && i < tryMax; i++) {
        edges.add(rs.next());
    }

    if (edges.isEmpty()) {
        this.edgesCache.update(cacheKey, Collections.emptyList());
    } else if (edges.size() <= MAX_CACHE_EDGES_PER_QUERY) {
        this.edgesCache.update(cacheKey, edges);
    }

    // Chain the prefetched head with the rest of the backend iterator
    return new ExtendableIterator<>(edges.iterator(), rs);
}
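For context, here is a minimal standalone sketch of the prefetch-and-chain pattern used above, assuming the usual java.util imports and the project's ExtendableIterator; the data and the threshold are placeholders, not project constants.

// Sketch only: prefetch a bounded head of an iterator, then chain it with the
// untouched remainder via ExtendableIterator, so the caller still sees every element once.
final int maxCached = 3;  // placeholder threshold, standing in for MAX_CACHE_EDGES_PER_QUERY
Iterator<String> backend = Arrays.asList("e1", "e2", "e3", "e4", "e5").iterator();

List<String> head = new ArrayList<>(maxCached + 1);
for (int i = 0; backend.hasNext() && i <= maxCached; i++) {
    head.add(backend.next());
}
boolean cacheable = head.size() <= maxCached;  // 4 > 3 here, so this result would not be cached

Iterator<String> all = new ExtendableIterator<>(head.iterator(), backend);
all.forEachRemaining(System.out::println);     // prints e1 .. e5 in order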
Use of com.baidu.hugegraph.iterator.ExtendableIterator in project incubator-hugegraph by apache: class CachedGraphTransaction, method queryVerticesByIds.
@Watched(prefix = "graphcache")
private Iterator<HugeVertex> queryVerticesByIds(IdQuery query) {
    if (query.idsSize() == 1) {
        // Fast path for a single-id query
        Id vertexId = query.ids().iterator().next();
        HugeVertex vertex = (HugeVertex) this.verticesCache.get(vertexId);
        if (vertex != null) {
            if (!vertex.expired()) {
                return QueryResults.iterator(vertex);
            }
            this.verticesCache.invalidate(vertexId);
        }
        Iterator<HugeVertex> rs = super.queryVerticesFromBackend(query);
        vertex = QueryResults.one(rs);
        if (vertex == null) {
            return QueryResults.emptyIterator();
        }
        if (needCacheVertex(vertex)) {
            this.verticesCache.update(vertex.id(), vertex);
        }
        return QueryResults.iterator(vertex);
    }

    IdQuery newQuery = new IdQuery(HugeType.VERTEX, query);
    List<HugeVertex> vertices = new ArrayList<>();
    for (Id vertexId : query.ids()) {
        HugeVertex vertex = (HugeVertex) this.verticesCache.get(vertexId);
        if (vertex == null) {
            newQuery.query(vertexId);
        } else if (vertex.expired()) {
            newQuery.query(vertexId);
            this.verticesCache.invalidate(vertexId);
        } else {
            vertices.add(vertex);
        }
    }

    // Join the results from the cache and the backend
    ExtendableIterator<HugeVertex> results = new ExtendableIterator<>();
    if (!vertices.isEmpty()) {
        results.extend(vertices.iterator());
    } else {
        // Just use the original query if nothing was found in the cache
        newQuery = query;
    }

    if (!newQuery.empty()) {
        Iterator<HugeVertex> rs = super.queryVerticesFromBackend(newQuery);
        // Generally an id query doesn't return too much data
        ListIterator<HugeVertex> listIterator = QueryResults.toList(rs);
        for (HugeVertex vertex : listIterator.list()) {
            // Skip caching large vertices
            if (needCacheVertex(vertex)) {
                this.verticesCache.update(vertex.id(), vertex);
            }
        }
        results.extend(listIterator);
    }

    return results;
}
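A side note on the multi-id branch above (my reading of the code, not project documentation): the backend results are materialized with QueryResults.toList so that each vertex can be written to the cache and the same results can still be extended into the joined iterator; a plain Iterator would already be exhausted by the caching loop. A tiny illustration of that pitfall with standard java.util types and made-up data:

// Sketch only: a plain Iterator can be consumed exactly once.
List<String> fetched = Arrays.asList("v1", "v2");

Iterator<String> once = fetched.iterator();
once.forEachRemaining(v -> { /* e.g. update a cache entry */ });
assert !once.hasNext();                       // already exhausted, nothing left to join

Iterator<String> again = fetched.iterator();  // a materialized list can be re-iterated
assert again.hasNext();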
Use of com.baidu.hugegraph.iterator.ExtendableIterator in project incubator-hugegraph by apache: class CassandraTable, method query.
protected <R> Iterator<R> query(Query query,
                                Function<Statement, ResultSet> fetcher,
                                BiFunction<Query, ResultSet, Iterator<R>> parser) {
    ExtendableIterator<R> rs = new ExtendableIterator<>();

    if (query.limit() == 0L && !query.noLimit()) {
        LOG.debug("Return empty result(limit=0) for query {}", query);
        return rs;
    }

    List<Select> selects = this.query2Select(this.table(), query);
    try {
        for (Select select : selects) {
            ResultSet results = fetcher.apply(select);
            rs.extend(parser.apply(query, results));
        }
    } catch (DriverException e) {
        LOG.debug("Failed to query [{}], detail statement: {}",
                  query, selects, e);
        throw new BackendException("Failed to query [%s]", e, query);
    }

    LOG.debug("Return {} for query {}", rs, query);
    return rs;
}
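The same start-empty-then-extend pattern in isolation, as a hedged sketch assuming java.util imports and the project's ExtendableIterator (the data is made up): each partial result set is appended before iteration begins, and iteration then walks them in extension order.

// Sketch only: concatenate several partial result sets into one iterator.
ExtendableIterator<String> all = new ExtendableIterator<>();
for (List<String> partial : Arrays.asList(Arrays.asList("r1", "r2"),
                                          Arrays.asList("r3"))) {
    all.extend(partial.iterator());
}
// If nothing was extended (e.g. the limit == 0 early return above), the iterator is simply empty
while (all.hasNext()) {
    System.out.println(all.next());  // prints r1, r2, r3
}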
Use of com.baidu.hugegraph.iterator.ExtendableIterator in project incubator-hugegraph by apache: class HugeTraverser, method edgesOfVertex.
@Watched
protected Iterator<Edge> edgesOfVertex(Id source, Directions dir,
                                       Map<Id, String> labels, long limit) {
    if (labels == null || labels.isEmpty()) {
        return this.edgesOfVertex(source, dir, (Id) null, limit);
    }

    // Concatenate the per-label edge iterators
    ExtendableIterator<Edge> results = new ExtendableIterator<>();
    for (Id label : labels.keySet()) {
        E.checkNotNull(label, "edge label");
        results.extend(this.edgesOfVertex(source, dir, label, limit));
    }

    if (limit == NO_LIMIT) {
        return results;
    }

    // Each sub-query is limited per label, so re-apply the limit globally
    long[] count = new long[1];
    return new LimitIterator<>(results, e -> {
        return count[0]++ >= limit;
    });
}
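Because each per-label sub-query applies the limit independently, the concatenated iterator can exceed it, so a LimitIterator with a counting predicate re-applies the limit globally. A hedged sketch of that pattern with plain strings, assuming java.util imports and the project's ExtendableIterator and LimitIterator (whose stop-predicate semantics are inferred from the usage above):

// Sketch only: the stop-predicate returns true once `limit` elements have passed,
// which makes LimitIterator cut off the concatenated results.
long limit = 2;
ExtendableIterator<String> edges = new ExtendableIterator<>();
edges.extend(Arrays.asList("knows-1", "knows-2").iterator());  // edges of label "knows"
edges.extend(Arrays.asList("likes-1").iterator());             // edges of label "likes"

long[] count = new long[1];
Iterator<String> limited = new LimitIterator<>(edges, e -> count[0]++ >= limit);
limited.forEachRemaining(System.out::println);  // prints only knows-1 and knows-2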
Use of com.baidu.hugegraph.iterator.ExtendableIterator in project hugegraph-common by hugegraph: class ReflectionUtil, method classes.
public static Iterator<ClassInfo> classes(String... packages) throws IOException {
    ClassPath path = ClassPath.from(ReflectionUtil.class.getClassLoader());
    ExtendableIterator<ClassInfo> results = new ExtendableIterator<>();
    for (String p : packages) {
        results.extend(path.getTopLevelClassesRecursive(p).iterator());
    }
    return results;
}
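A hedged usage example for the helper above; the package names are placeholders, ClassInfo is Guava's ClassPath.ClassInfo, and the call site is assumed to handle the declared IOException.

// Sketch only: list the top-level classes found under two packages.
Iterator<ClassInfo> classes = ReflectionUtil.classes("com.baidu.hugegraph.iterator",
                                                     "com.baidu.hugegraph.util");
while (classes.hasNext()) {
    System.out.println(classes.next().getName());  // fully-qualified class name
}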