use of com.alibaba.maxgraph.compiler.api.schema.GraphSchema in project GraphScope by alibaba.
The class MaxGraphImpl, method addEdge.
@Override
public Edge addEdge(String label, Vertex src, Vertex dst, Map<String, Object> properties) {
    GraphSchema schema = getSchema();
    int edgeLabelId = schema.getElement(label).getLabelId();
    EdgeKind edgeKind =
            EdgeKind.newBuilder()
                    .setEdgeLabelId(new LabelId(edgeLabelId))
                    .setSrcVertexLabelId(new LabelId(src.id.typeId()))
                    .setDstVertexLabelId(new LabelId(dst.id.typeId()))
                    .build();
    long innerId = ++startEdgeInnerId;
    EdgeId edgeId = new EdgeId(new VertexId(src.id.id()), new VertexId(dst.id.id()), innerId);
    EdgeTarget edgeTarget = new EdgeTarget(edgeKind, edgeId);
    DataRecord dataRecord = new DataRecord(edgeTarget, properties);
    WriteRequest writeRequest = new WriteRequest(OperationType.OVERWRITE_EDGE, dataRecord);
    graphWriter.writeBatch(
            getClass().getCanonicalName(), this.writeSession, Arrays.asList(writeRequest));
    return null;
}
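A minimal calling sketch for the method above. It assumes a connected MaxGraphImpl instance named graph and two previously created Vertex handles alice and bob; those names, the "knows" label and the "since" property are hypothetical and not part of the snippet (java.util.HashMap assumed imported):

// Hypothetical usage; "graph", "alice" and "bob" are assumed to already exist.
Map<String, Object> properties = new HashMap<>();
properties.put("since", 2020);
// addEdge resolves the "knows" label to its id via GraphSchema, assigns an inner edge id,
// and submits an OVERWRITE_EDGE write request. Note it returns null rather than the created Edge.
graph.addEdge("knows", alice, bob, properties);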
use of com.alibaba.maxgraph.compiler.api.schema.GraphSchema in project GraphScope by alibaba.
The class RemoteProxy, method scanEdge.
public Iterator<Edge> scanEdge(Set<String> labelList) {
    Pair<GraphSchema, Long> pair = schemaFetcher.getSchemaSnapshotPair();
    Set<Integer> labelIdList = Sets.newHashSet();
    if (null == labelList || labelList.isEmpty()) {
        labelIdList.add(0);
    } else {
        for (String label : labelList) {
            try {
                labelIdList.add(pair.getLeft().getElement(label).getLabelId());
            } catch (Exception ignored) {
            }
        }
    }
    if (labelIdList.isEmpty()) {
        return new ArrayList<Edge>().iterator();
    }
    List<Iterator<StoreApi.GraphEdgeReponse>> resList = Lists.newArrayList();
    for (int labelId : labelIdList) {
        StoreApi.ScanEdgeRequest.Builder req = StoreApi.ScanEdgeRequest.newBuilder();
        req.setSnapshotId(pair.getRight())
                .setOffset(0)
                .setLimit(Integer.MAX_VALUE)
                .setTypeId(labelId);
        resList.add(stub.withDeadlineAfter(timeout, TimeUnit.SECONDS).scanEdges(req.build()));
    }
    return new IteratorList<>(resList, new EdgeResponseFunction(pair.getLeft(), this.graph));
}
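A hedged usage sketch for scanEdge. The variable name proxy and the edge labels "knows" and "created" are assumptions, not taken from the snippet:

// Hypothetical usage; "proxy" is assumed to be an initialized RemoteProxy.
// Passing null or an empty set scans label id 0; unknown labels are silently skipped.
Iterator<Edge> edges = proxy.scanEdge(Sets.newHashSet("knows", "created"));
while (edges.hasNext()) {
    Edge edge = edges.next();
    System.out.println(edge);
}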
use of com.alibaba.maxgraph.compiler.api.schema.GraphSchema in project GraphScope by alibaba.
The class GraphWriter, method writeBatch.
public void writeBatch(
        String requestId,
        String writeSession,
        List<WriteRequest> writeRequests,
        CompletionCallback<Long> callback) {
    this.pendingWriteCount.incrementAndGet();
    GraphSchema schema = snapshotCache.getSnapshotWithSchema().getGraphDef();
    OperationBatch.Builder batchBuilder = OperationBatch.newBuilder();
    for (WriteRequest writeRequest : writeRequests) {
        OperationType operationType = writeRequest.getOperationType();
        DataRecord dataRecord = writeRequest.getDataRecord();
        switch (operationType) {
            case OVERWRITE_VERTEX:
                addOverwriteVertexOperation(batchBuilder, schema, dataRecord);
                break;
            case UPDATE_VERTEX:
                addUpdateVertexOperation(batchBuilder, schema, dataRecord);
                break;
            case DELETE_VERTEX:
                addDeleteVertexOperation(batchBuilder, schema, dataRecord);
                break;
            case OVERWRITE_EDGE:
                addOverwriteEdgeOperation(batchBuilder, schema, dataRecord);
                break;
            case UPDATE_EDGE:
                addUpdateEdgeOperation(batchBuilder, schema, dataRecord);
                break;
            case DELETE_EDGE:
                addDeleteEdgeOperation(batchBuilder, schema, dataRecord);
                break;
            default:
                throw new IllegalArgumentException(
                        "Invalid operationType [" + operationType + "]");
        }
    }
    OperationBatch operationBatch = batchBuilder.build();
    int writeQueueId = getWriteQueueId(writeSession);
    int ingestorId = this.metaService.getIngestorIdForQueue(writeQueueId);
    long startTimeNano = System.nanoTime();
    this.ingestWriteClients
            .getClient(ingestorId)
            .writeIngestorAsync(
                    requestId,
                    writeQueueId,
                    operationBatch,
                    new CompletionCallback<Long>() {
                        @Override
                        public void onCompleted(Long res) {
                            long writeSnapshotId = res;
                            lastWrittenSnapshotId.updateAndGet(
                                    x -> x < writeSnapshotId ? writeSnapshotId : x);
                            writeRequestsTotal.addAndGet(writeRequests.size());
                            finish();
                            callback.onCompleted(res);
                        }

                        @Override
                        public void onError(Throwable t) {
                            finish();
                            callback.onError(t);
                        }

                        void finish() {
                            long ingestorCompleteTimeNano = System.nanoTime();
                            ingestorBlockTimeNano.addAndGet(
                                    ingestorCompleteTimeNano - startTimeNano);
                            pendingWriteCount.decrementAndGet();
                        }
                    });
}
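A hedged call-site sketch for the asynchronous writeBatch overload above. The variables graphWriter, session and dataRecord, and the request id string, are assumptions; only the WriteRequest constructor, the OperationType constant and the CompletionCallback shape are taken from the snippets on this page:

// Hypothetical usage; "graphWriter", "session" and "dataRecord" are assumed to already exist.
WriteRequest request = new WriteRequest(OperationType.OVERWRITE_VERTEX, dataRecord);
graphWriter.writeBatch(
        "example-request-id",
        session,
        Collections.singletonList(request),
        new CompletionCallback<Long>() {
            @Override
            public void onCompleted(Long snapshotId) {
                // The callback receives the snapshot id at which the batch was ingested.
                System.out.println("written at snapshot " + snapshotId);
            }

            @Override
            public void onError(Throwable t) {
                t.printStackTrace();
            }
        });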
use of com.alibaba.maxgraph.compiler.api.schema.GraphSchema in project GraphScope by alibaba.
The class RemoteTestGraph, method dropData.
private void dropData() {
    GraphSchema schema = sdkClient.dropSchema();
    logger.info("drop schema: " + ((GraphDef) schema).toProto().toString());
}
use of com.alibaba.maxgraph.compiler.api.schema.GraphSchema in project GraphScope by alibaba.
The class MixedTraversalOpProcessor, method doProcessTraversal.
private Long doProcessTraversal(Context context, Object object, Graph graph, long timeout, String queryId, Stopwatch timer) {
    Pair<GraphSchema, Long> snapshotSchema;
    GraphSchema schema;
    Long resultNum = 0L;
    if (object instanceof GraphTraversal.Admin || object instanceof DfsTraversal) {
        QueryStatus queryStatus;
        GraphTraversal.Admin traversal =
                (object instanceof GraphTraversal.Admin)
                        ? GraphTraversal.Admin.class.cast(object)
                        : (DfsTraversal.class.cast(object)).getTraversal();
        String queryString = traversal.toString();
        logger.info("Receive traversal query=>" + queryString);
        if (!traversal.isLocked()) {
            traversal.getStrategies()
                    .removeStrategies(
                            ProfileStrategy.class,
                            MxGraphStepStrategy.class,
                            FilterRankingStrategy.class);
        }
        traversal.applyStrategies();
        NettyVertexRpcProcessor nettyVertexRpcProcessor;
        QueryFlowManager queryFlowManager;
        // Start maintaining query_status as soon as the snapshotId is observed
        synchronized (queryCallbackManager) {
            snapshotSchema = schemaFetcher.getSchemaSnapshotPair();
            queryStatus = queryCallbackManager.beforeExecution(snapshotSchema.getRight());
        }
        schema = snapshotSchema.getLeft();
        LogicalPlanOptimizer planOptimizer =
                new LogicalPlanOptimizer(
                        new OptimizeConfig(),
                        this.globalPullGraphFlag,
                        schema,
                        snapshotSchema.getRight(),
                        this.lambdaEnableFlag);
        final int resultIterationBatchSize =
                (Integer) context.getRequestMessage()
                        .optionalArgs(Tokens.ARGS_BATCH_SIZE)
                        .orElse(this.resultIterationBatchSize);
        nettyVertexRpcProcessor =
                new NettyTraverserVertexProcessor(context, resultIterationBatchSize, false);
        try {
            queryFlowManager =
                    (object instanceof GraphTraversal.Admin)
                            ? planOptimizer.build(GraphTraversal.class.cast(traversal))
                            : planOptimizer.build(DfsTraversal.class.cast(object));
        } catch (IllegalArgumentException iae) {
            if (iae.getMessage().contains("MaxGraphIoStep")) {
                logger.info("do maxgraph io step");
                while (traversal.hasNext()) {
                    logger.info("maxgraph io hasNext");
                }
                nettyVertexRpcProcessor.finish(ResponseStatusCode.SUCCESS);
                return 0L;
            }
            throw iae;
        }
        try {
            boolean isLambdaExisted =
                    TraversalHelper.anyStepRecursively(
                            s -> s instanceof LambdaHolder, (Traversal.Admin<?, ?>) traversal);
            queryFlowManager.getQueryFlow().setFrontId(serverId);
            if (this.lambdaEnableFlag && isLambdaExisted) {
                final ObjectMapper mapper =
                        GraphSONMapper.build()
                                .version(GraphSONVersion.V3_0)
                                .addCustomModule(GraphSONXModuleV3d0.build().create(false))
                                .create()
                                .createMapper();
                Bytecode bytecode =
                        (Bytecode) context.getRequestMessage().getArgs().get(Tokens.ARGS_GREMLIN);
                byte[] bytecodeByte = mapper.writeValueAsBytes(bytecode);
                queryFlowManager.getQueryFlow()
                        .setLambdaExisted(isLambdaExisted)
                        .setBytecode(ByteString.copyFrom(bytecodeByte));
            }
            GremlinResultTransform gremlinResultTransform =
                    new GremlinResultTransform(
                            remoteRpcConnector,
                            nettyVertexRpcProcessor,
                            this.graph,
                            queryFlowManager.getResultValueType(),
                            vertexCacheFlag);
            NettyResultProcessor nettyResultProcessor =
                    new NettyResultProcessor(
                            queryId,
                            traversal.toString(),
                            context,
                            new ExecuteConfig().getBatchQuerySize(),
                            resultIterationBatchSize,
                            false);
            nettyResultProcessor.setSchema(schema);
            nettyResultProcessor.setResultTransform(gremlinResultTransform);
            nettyResultProcessor.setLabelIndexNameList(
                    queryFlowManager.getTreeNodeLabelManager().getUserIndexLabelList());
            TimelyQuery timelyQuery =
                    new TimelyQuery(queryFlowManager, nettyResultProcessor, this.graph);
            Logging.query(
                    this.graphName,
                    FRONTEND,
                    this.serverId,
                    queryId,
                    QueryType.EXECUTE,
                    QueryEvent.PLAN_GENERATED,
                    timer.elapsed(TimeUnit.NANOSECONDS),
                    null,
                    null,
                    "");
            timelyExecutor.execute(timelyQuery, schema, timeout, queryId);
            resultNum = nettyResultProcessor.total();
        } catch (JsonProcessingException e) {
            e.printStackTrace();
        } finally {
            queryCallbackManager.afterExecution(queryStatus);
        }
    } else {
        throw new IllegalArgumentException("Not support to process=>" + object);
    }
    return resultNum;
}
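The snapshot handling above follows a fetch-then-guard pattern: the GraphSchema/snapshot pair is read and beforeExecution is registered inside a single synchronized block, and afterExecution is always paired with it in a finally. A minimal sketch of that pattern, reusing the snippet's field names (queryCallbackManager, schemaFetcher); this is an illustration, not a drop-in replacement:

// Sketch of the snapshot bookkeeping pattern used above.
Pair<GraphSchema, Long> snapshotSchema;
QueryStatus queryStatus;
synchronized (queryCallbackManager) {
    // Fetch the schema snapshot and register the query against that snapshot atomically,
    // so the snapshot cannot be released before the query is tracked.
    snapshotSchema = schemaFetcher.getSchemaSnapshotPair();
    queryStatus = queryCallbackManager.beforeExecution(snapshotSchema.getRight());
}
try {
    // ... build and execute the query plan against snapshotSchema.getLeft() ...
} finally {
    queryCallbackManager.afterExecution(queryStatus);
}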