Usage of com.baidu.hugegraph.loader.struct.GraphStructV1 in the apache/incubator-hugegraph-toolchain project.
The parseV1 method of the MappingUtil class deserializes a v1 mapping file into a GraphStructV1, converts each element struct to the v2 ElementMapping format, groups mappings that read the same file or HDFS source into a single InputStruct, and returns the result as a LoadMapping:
private static LoadMapping parseV1(String json) {
    GraphStructV1 graphStruct = JsonUtil.fromJson(json, GraphStructV1.class);
    // File/HDFS sources sharing a path are merged into one InputStruct;
    // an insertion-ordered map keeps the output order deterministic
    Map<FileSourceKey, InputStruct> fileSourceInputStructs = InsertionOrderUtil.newMap();
    List<InputStruct> jdbcSourceInputStructs = new ArrayList<>();
    for (ElementStructV1 originStruct : graphStruct.structs()) {
        InputSource inputSource = originStruct.input();
        // Convert the v1 element struct to the v2 mapping format
        ElementMapping targetStruct = convertV1ToV2(originStruct);
        SourceType type = inputSource.type();
        if (type == SourceType.FILE || type == SourceType.HDFS) {
            FileSource source = (FileSource) inputSource;
            FileSourceKey key = new FileSourceKey(type, source.path());
            fileSourceInputStructs.compute(key, (k, inputStruct) -> {
                // Create the InputStruct on first sight of this (type, path) key
                if (inputStruct == null) {
                    inputStruct = new InputStruct(null, null);
                    inputStruct.input(source);
                }
                inputStruct.add(targetStruct);
                return inputStruct;
            });
        } else {
            // Each JDBC source gets its own InputStruct
            assert type == SourceType.JDBC;
            InputStruct inputStruct = new InputStruct(null, null);
            inputStruct.input(inputSource);
            inputStruct.add(targetStruct);
            jdbcSourceInputStructs.add(inputStruct);
        }
    }
    // Generate an id for every input mapping: file/HDFS structs first, then JDBC
    List<InputStruct> inputStructs = new ArrayList<>();
    int id = 0;
    for (InputStruct inputStruct : fileSourceInputStructs.values()) {
        inputStruct.id(String.valueOf(++id));
        inputStructs.add(inputStruct);
    }
    for (InputStruct inputStruct : jdbcSourceInputStructs) {
        inputStruct.id(String.valueOf(++id));
        inputStructs.add(inputStruct);
    }
    return new LoadMapping(inputStructs);
}
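The grouping step hinges on java.util.Map.compute: the first mapping seen for a given (type, path) key creates the container, and every later mapping for the same key is appended to it. Below is a minimal, self-contained sketch of the same idiom. The Key and Group types and the sample data are hypothetical stand-ins for FileSourceKey and InputStruct, and LinkedHashMap stands in for whatever insertion-ordered map InsertionOrderUtil.newMap() returns.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class GroupBySourceDemo {
    // Hypothetical stand-in for FileSourceKey: source type plus path
    record Key(String type, String path) {}

    // Hypothetical stand-in for InputStruct: a container of element mappings
    static class Group {
        final List<String> mappings = new ArrayList<>();
    }

    public static void main(String[] args) {
        // LinkedHashMap preserves insertion order, mirroring InsertionOrderUtil.newMap()
        Map<Key, Group> groups = new LinkedHashMap<>();

        String[][] inputs = {
            {"FILE", "/data/persons.csv", "person"},
            {"FILE", "/data/persons.csv", "knows"},   // same path: reuses the group
            {"HDFS", "/data/software.csv", "software"}
        };

        for (String[] in : inputs) {
            Key key = new Key(in[0], in[1]);
            String mapping = in[2];
            groups.compute(key, (k, group) -> {
                if (group == null) {
                    group = new Group(); // first mapping for this source
                }
                group.mappings.add(mapping);
                return group;
            });
        }

        groups.forEach((k, g) -> System.out.println(k + " -> " + g.mappings));
        // Key[type=FILE, path=/data/persons.csv] -> [person, knows]
        // Key[type=HDFS, path=/data/software.csv] -> [software]
    }
}

As in parseV1, two element mappings that read the same file end up in one group, so the loader later scans that file only once while applying both mappings.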