Example usage of com.baidu.hugegraph.api.filter.DecompressInterceptor.Decompress in project incubator-hugegraph by Apache:
the EdgeAPI class, create method.
@POST
@Timed(name = "batch-create")
@Decompress
@Path("batch")
@Status(Status.CREATED)
@Consumes(APPLICATION_JSON)
@Produces(APPLICATION_JSON_WITH_CHARSET)
@RolesAllowed({ "admin", "$owner=$graph $action=edge_write" })
public String create(@Context HugeConfig config, @Context GraphManager manager, @PathParam("graph") String graph, @QueryParam("check_vertex") @DefaultValue("true") boolean checkVertex, List<JsonEdge> jsonEdges) {
    LOG.debug("Graph [{}] create edges: {}", graph, jsonEdges);
    checkCreatingBody(jsonEdges);
    checkBatchSize(config, jsonEdges);

    HugeGraph hugeGraph = graph(manager, graph);

    // Decide once how endpoint vertices are resolved: verify existence
    // in the graph, or construct a lightweight vertex from id + label.
    TriFunction<HugeGraph, Object, String, Vertex> vertexResolver;
    if (checkVertex) {
        vertexResolver = EdgeAPI::getVertex;
    } else {
        vertexResolver = EdgeAPI::newVertex;
    }

    return this.commit(config, hugeGraph, jsonEdges.size(), () -> {
        List<Id> createdIds = new ArrayList<>(jsonEdges.size());
        for (JsonEdge edge : jsonEdges) {
            /*
             * NOTE: If the query param 'checkVertex' is false,
             * then the label is correct and not matched id,
             * it will be allowed currently
             */
            Vertex source = vertexResolver.apply(hugeGraph, edge.source, edge.sourceLabel);
            Vertex target = vertexResolver.apply(hugeGraph, edge.target, edge.targetLabel);
            Edge created = source.addEdge(edge.label, target, edge.properties());
            createdIds.add((Id) created.id());
        }
        return manager.serializer(hugeGraph).writeIds(createdIds);
    });
}
Example usage of com.baidu.hugegraph.api.filter.DecompressInterceptor.Decompress in project incubator-hugegraph by Apache:
the VertexAPI class, create method.
@POST
@Timed(name = "batch-create")
@Decompress
@Path("batch")
@Status(Status.CREATED)
@Consumes(APPLICATION_JSON)
@Produces(APPLICATION_JSON_WITH_CHARSET)
@RolesAllowed({ "admin", "$owner=$graph $action=vertex_write" })
public String create(@Context HugeConfig config, @Context GraphManager manager, @PathParam("graph") String graph, List<JsonVertex> jsonVertices) {
    LOG.debug("Graph [{}] create vertices: {}", graph, jsonVertices);
    checkCreatingBody(jsonVertices);
    checkBatchSize(config, jsonVertices);

    HugeGraph hugeGraph = graph(manager, graph);

    // Insert the whole batch inside a single commit scope and return
    // the serialized ids of the newly created vertices.
    return this.commit(config, hugeGraph, jsonVertices.size(), () -> {
        List<Id> createdIds = new ArrayList<>(jsonVertices.size());
        for (JsonVertex jsonVertex : jsonVertices) {
            Vertex created = hugeGraph.addVertex(jsonVertex.properties());
            createdIds.add((Id) created.id());
        }
        return manager.serializer(hugeGraph).writeIds(createdIds);
    });
}
Example usage of com.baidu.hugegraph.api.filter.DecompressInterceptor.Decompress in project incubator-hugegraph by Apache:
the EdgeAPI class, update method.
/**
* Batch update steps are same like vertices
*/
/**
 * Batch update steps are same like vertices
 */
@PUT
@Timed(name = "batch-update")
@Decompress
@Path("batch")
@Consumes(APPLICATION_JSON)
@Produces(APPLICATION_JSON_WITH_CHARSET)
@RolesAllowed({ "admin", "$owner=$graph $action=edge_write" })
public String update(@Context HugeConfig config, @Context GraphManager manager, @PathParam("graph") String graph, BatchEdgeRequest req) {
    BatchEdgeRequest.checkUpdate(req);
    LOG.debug("Graph [{}] update edges: {}", graph, req);
    checkUpdatingBody(req.jsonEdges);
    checkBatchSize(config, req.jsonEdges);
    HugeGraph g = graph(manager, graph);
    Map<Id, JsonEdge> map = new HashMap<>(req.jsonEdges.size());
    TriFunction<HugeGraph, Object, String, Vertex> getVertex = req.checkVertex ? EdgeAPI::getVertex : EdgeAPI::newVertex;
    /*
     * FIX: pass the request size as the batch size. The map is only
     * populated inside the deferred callback, so 'map.size()' here was
     * always 0 when commit() evaluated it; use req.jsonEdges.size()
     * for consistency with the batch-create endpoint.
     */
    return this.commit(config, g, req.jsonEdges.size(), () -> {
        // 1.Put all newEdges' properties into map (combine first)
        req.jsonEdges.forEach(newEdge -> {
            // FIX: reuse the already-resolved graph 'g' instead of
            // re-resolving it from the manager on every iteration.
            Id newEdgeId = getEdgeId(g, newEdge);
            JsonEdge oldEdge = map.get(newEdgeId);
            this.updateExistElement(oldEdge, newEdge, req.updateStrategies);
            map.put(newEdgeId, newEdge);
        });
        // 2.Get all oldEdges and update with new ones
        Object[] ids = map.keySet().toArray();
        Iterator<Edge> oldEdges = g.edges(ids);
        oldEdges.forEachRemaining(oldEdge -> {
            JsonEdge newEdge = map.get(oldEdge.id());
            this.updateExistElement(g, oldEdge, newEdge, req.updateStrategies);
        });
        // 3.Add all finalEdges
        List<Edge> edges = new ArrayList<>(map.size());
        map.values().forEach(finalEdge -> {
            Vertex srcVertex = getVertex.apply(g, finalEdge.source, finalEdge.sourceLabel);
            Vertex tgtVertex = getVertex.apply(g, finalEdge.target, finalEdge.targetLabel);
            edges.add(srcVertex.addEdge(finalEdge.label, tgtVertex, finalEdge.properties()));
        });
        // If return ids, the ids.size() maybe different with the origins'
        return manager.serializer(g).writeEdges(edges.iterator(), false);
    });
}
Example usage of com.baidu.hugegraph.api.filter.DecompressInterceptor.Decompress in project incubator-hugegraph by Apache:
the VertexAPI class, update method.
/**
* Batch update steps like:
* 1. Get all newVertices' ID & combine first
* 2. Get all oldVertices & update
* 3. Add the final vertex together
*/
/**
 * Batch update steps like:
 * 1. Get all newVertices' ID & combine first
 * 2. Get all oldVertices & update
 * 3. Add the final vertex together
 */
@PUT
@Timed(name = "batch-update")
@Decompress
@Path("batch")
@Consumes(APPLICATION_JSON)
@Produces(APPLICATION_JSON_WITH_CHARSET)
@RolesAllowed({ "admin", "$owner=$graph $action=vertex_write" })
public String update(@Context HugeConfig config, @Context GraphManager manager, @PathParam("graph") String graph, BatchVertexRequest req) {
    BatchVertexRequest.checkUpdate(req);
    LOG.debug("Graph [{}] update vertices: {}", graph, req);
    checkUpdatingBody(req.jsonVertices);
    checkBatchSize(config, req.jsonVertices);
    HugeGraph g = graph(manager, graph);
    Map<Id, JsonVertex> map = new HashMap<>(req.jsonVertices.size());
    /*
     * FIX: pass the request size as the batch size. The map is only
     * populated inside the deferred callback, so 'map.size()' here was
     * always 0 when commit() evaluated it; use req.jsonVertices.size()
     * for consistency with the batch-create endpoint.
     */
    return this.commit(config, g, req.jsonVertices.size(), () -> {
        /*
         * 1.Put all newVertices' properties into map (combine first)
         * - Consider primary-key & user-define ID mode first
         */
        req.jsonVertices.forEach(newVertex -> {
            Id newVertexId = getVertexId(g, newVertex);
            JsonVertex oldVertex = map.get(newVertexId);
            this.updateExistElement(oldVertex, newVertex, req.updateStrategies);
            map.put(newVertexId, newVertex);
        });
        // 2.Get all oldVertices and update with new vertices
        Object[] ids = map.keySet().toArray();
        Iterator<Vertex> oldVertices = g.vertices(ids);
        oldVertices.forEachRemaining(oldVertex -> {
            JsonVertex newVertex = map.get(oldVertex.id());
            this.updateExistElement(g, oldVertex, newVertex, req.updateStrategies);
        });
        // 3.Add finalVertices and return them
        List<Vertex> vertices = new ArrayList<>(map.size());
        map.values().forEach(finalVertex -> {
            vertices.add(g.addVertex(finalVertex.properties()));
        });
        // If return ids, the ids.size() maybe different with the origins'
        return manager.serializer(g).writeVertices(vertices.iterator(), false);
    });
}
Aggregations