Example usage of com.baidu.hugegraph.util.E in project incubator-hugegraph by apache:
class HugeTraverser, method edgesOfVertex.
/**
 * Build and run an edges-of-vertex query for the given source vertex and
 * traversal step, then apply the step's property filtering and super-node
 * skipping.
 *
 * @param source    id of the vertex whose edges are queried
 * @param edgeStep  step holding direction, labels, properties and limit
 * @param mustAllSK true if all step properties are sort keys and can be
 *                  pushed down into the backend query; false to filter
 *                  by properties on the client side
 * @return iterator over the matching edges
 */
private Iterator<Edge> edgesOfVertex(Id source, EdgeStep edgeStep, boolean mustAllSK) {
    Id[] labels = edgeStep.edgeLabels();
    Query query = GraphTransaction.constructEdgesQuery(source, edgeStep.direction(), labels);

    ConditionQuery propsFilter = null;
    if (mustAllSK) {
        // All properties are sort keys: push them into the backend query
        this.fillFilterBySortKeys(query, labels, edgeStep.properties());
    } else {
        // Keep a copy with the property conditions for client-side filtering
        propsFilter = (ConditionQuery) query.copy();
        this.fillFilterByProperties(propsFilter, edgeStep.properties());
    }

    query.capacity(Query.NO_CAPACITY);
    if (edgeStep.limit() != NO_LIMIT) {
        query.limit(edgeStep.limit());
    }

    Iterator<Edge> results = this.graph().edges(query);
    if (propsFilter != null) {
        // Effectively-final reference required for use inside the lambda
        ConditionQuery cq = propsFilter;
        results = new FilterIterator<>(results, edge -> cq.test((HugeEdge) edge));
    }
    return edgeStep.skipSuperNodeIfNeeded(results);
}
Example usage of com.baidu.hugegraph.util.E in project incubator-hugegraph by apache:
class HbaseStore, method truncate.
/**
 * Truncate all tables of this store: first disable every table
 * asynchronously, then truncate them asynchronously, waiting on the
 * returned futures with the remaining portion of the configured timeout.
 * Tables are re-enabled on failure of either phase.
 */
@Override
public void truncate() {
    this.checkOpened();

    // Total time may cost 3 * TRUNCATE_TIMEOUT, due to there are 3 stores
    long timeout = this.sessions.config().get(HbaseOptions.TRUNCATE_TIMEOUT);
    long start = System.currentTimeMillis();

    /*
     * Wait for one async table operation, budgeting the remaining time
     * (timeout is in seconds, elapsed in milliseconds).
     */
    BiFunction<String, Future<Void>, Void> wait = (table, future) -> {
        long elapsed = System.currentTimeMillis() - start;
        long remainingTime = timeout - elapsed / 1000L;
        try {
            return future.get(remainingTime, TimeUnit.SECONDS);
        } catch (Exception e) {
            /*
             * Pass 'e' as the cause instead of flattening it into the
             * message with e.toString(), so the original stack trace is
             * preserved (consistent with the other throws below)
             */
            throw new BackendException("Error when truncating table '%s' of '%s' store", e, table, this.store);
        }
    };

    // Truncate tables
    List<String> tables = this.tableNames();
    Map<String, Future<Void>> futures = new HashMap<>(tables.size());

    try {
        // Disable tables async
        for (String table : tables) {
            futures.put(table, this.sessions.disableTableAsync(table));
        }
        for (Map.Entry<String, Future<Void>> entry : futures.entrySet()) {
            wait.apply(entry.getKey(), entry.getValue());
        }
    } catch (Exception e) {
        this.enableTables();
        throw new BackendException("Failed to disable table for '%s' store", e, this.store);
    }

    try {
        // Truncate tables async (reuses the map; same table keys overwrite)
        for (String table : tables) {
            futures.put(table, this.sessions.truncateTableAsync(table));
        }
        for (Map.Entry<String, Future<Void>> entry : futures.entrySet()) {
            wait.apply(entry.getKey(), entry.getValue());
        }
    } catch (Exception e) {
        this.enableTables();
        throw new BackendException("Failed to truncate table for '%s' store", e, this.store);
    }

    LOG.debug("Store truncated: {}", this.store);
}
Example usage of com.baidu.hugegraph.util.E in project incubator-hugegraph by apache:
class HugeGremlinServer, method start.
/**
 * Start a GremlinServer from the given configuration, scanning graph
 * configurations from {@code graphsDir} and injecting the customized
 * traversal source before startup.
 *
 * @param conf      path to the GremlinServer settings file
 * @param graphsDir directory containing graph configuration files
 * @param hub       event hub passed to the server context
 * @return the started server instance
 * @throws Exception if the settings cannot be read or the server
 *                   fails to start
 */
public static GremlinServer start(String conf, String graphsDir, EventHub hub) throws Exception {
    // Start GremlinServer with inject traversal source
    LOG.info(GremlinServer.getHeader());

    final Settings settings;
    try {
        settings = Settings.read(conf);
    } catch (Exception e) {
        // Fixed grammar of the original message ("Can't found ... or being parsed")
        LOG.error("Can't find the configuration file at '{}' or " + "it can't be parsed properly. [{}]", conf, e.getMessage());
        throw e;
    }

    // Scan graph confs and inject into gremlin server context
    E.checkState(settings.graphs != null, "The GremlinServer's settings.graphs is null");
    settings.graphs.putAll(ConfigUtil.scanGraphsDir(graphsDir));
    LOG.info("Configuring Gremlin Server from {}", conf);

    ContextGremlinServer server = new ContextGremlinServer(settings, hub);
    // Inject customized traversal source
    server.injectTraversalSource();

    server.start().exceptionally(t -> {
        LOG.error("Gremlin Server was unable to start and will " + "shutdown now: {}", t.getMessage());
        server.stop().join();
        throw new HugeException("Failed to start Gremlin Server");
    }).join();

    return server;
}
Example usage of com.baidu.hugegraph.util.E in project incubator-hugegraph by apache:
class GraphTransaction, method joinTxEdges.
/**
 * Join backend query results with the edges pending in this transaction
 * (added/removed/updated), then drop edges that belong to any vertex
 * being removed in this transaction.
 *
 * @param query            the edge query being executed (must be an edge query)
 * @param edges            edges fetched from the backend store
 * @param removingVertices vertices deleted in this tx, keyed by id
 * @return iterator over edges reflecting this tx's pending changes
 */
private Iterator<?> joinTxEdges(Query query, Iterator<HugeEdge> edges, Map<Id, HugeVertex> removingVertices) {
assert query.resultType().isEdge();
// Decides whether a tx-pending edge matches the query (null = excluded)
BiFunction<Query, HugeEdge, HugeEdge> matchTxFunc = (q, e) -> {
assert q.resultType() == HugeType.EDGE;
if (e.expired() && !q.showExpired()) {
// Filter expired edges with TTL
return null;
}
// Filter edges matched conditions; if the edge doesn't match as-is,
// retry with ownership switched (an edge is stored from both endpoints,
// so the query conditions may match only the other direction).
// NOTE: the inner ternary intentionally reassigns 'e' to the switched
// edge so the matching representation is the one returned.
return q.test(e) ? e : q.test(e = e.switchOwner()) ? e : null;
};
// Merge backend results with this tx's added/removed/updated edges
edges = this.joinTxRecords(query, edges, matchTxFunc, this.addedEdges, this.removedEdges, this.updatedEdges);
if (removingVertices.isEmpty()) {
return edges;
}
// Filter edges that belong to deleted vertex
return new FilterIterator<HugeEdge>(edges, edge -> {
for (HugeVertex v : removingVertices.values()) {
if (edge.belongToVertex(v)) {
return false;
}
}
return true;
});
}
Example usage of com.baidu.hugegraph.util.E in project incubator-hugegraph by apache:
class API, method commit.
/**
 * Run {@code callable} and commit the graph transaction, recording metrics
 * and rolling back on failure.
 *
 * @param g        graph whose transaction is committed or rolled back
 * @param callable the work to execute inside the transaction
 * @return the callable's result on successful commit
 */
public static <R> R commit(HugeGraph g, Callable<R> callable) {
    // Rollback helper; a non-null argument is logged as the commit failure
    Consumer<Throwable> rollback = (cause) -> {
        if (cause != null) {
            LOG.error("Failed to commit", cause);
        }
        try {
            g.tx().rollback();
        } catch (Throwable rollbackError) {
            LOG.error("Failed to rollback", rollbackError);
        }
    };

    try {
        R result = callable.call();
        g.tx().commit();
        SUCCEED_METER.mark();
        return result;
    } catch (IllegalArgumentException | NotFoundException | ForbiddenException badRequest) {
        // Client-side errors: roll back quietly (no commit-failure log)
        ILLEGAL_ARG_ERROR_METER.mark();
        rollback.accept(null);
        throw badRequest;
    } catch (RuntimeException expected) {
        EXPECTED_ERROR_METER.mark();
        rollback.accept(expected);
        throw expected;
    } catch (Throwable unknown) {
        UNKNOWN_ERROR_METER.mark();
        rollback.accept(unknown);
        // TODO: throw the origin exception 'e'
        throw new HugeException("Failed to commit", unknown);
    }
}
Aggregations