Use of com.baidu.hugegraph.type.define.HugeKeys in project incubator-hugegraph by apache.
Class CassandraTable, method query2Select:
protected List<Select> query2Select(String table, Query query) {
    // Build query
    Selection selection = QueryBuilder.select();

    // Set aggregate
    Aggregate aggregate = query.aggregate();
    if (aggregate != null) {
        if (aggregate.countAll()) {
            selection.countAll();
        } else {
            selection.fcall(aggregate.func().string(), aggregate.column());
        }
    }

    // Set table
    Select select = selection.from(table);

    // NOTE: Cassandra does not support query.offset()
    if (query.offset() != 0) {
        LOG.debug("Query offset is not supported on Cassandra store " +
                  "currently, it will be replaced by [0, offset + limit)");
    }

    // Set order-by
    for (Map.Entry<HugeKeys, Order> order : query.orders().entrySet()) {
        String name = formatKey(order.getKey());
        if (order.getValue() == Order.ASC) {
            select.orderBy(QueryBuilder.asc(name));
        } else {
            assert order.getValue() == Order.DESC;
            select.orderBy(QueryBuilder.desc(name));
        }
    }

    // Is query by id?
    List<Select> ids = this.queryId2Select(query, select);

    if (query.conditionsSize() == 0) {
        // Query only by id
        this.setPageState(query, ids);
        LOG.debug("Query only by id(s): {}", ids);
        return ids;
    } else {
        List<Select> conds = new ArrayList<>(ids.size());
        for (Select id : ids) {
            // Query by condition
            conds.addAll(this.queryCondition2Select(query, id));
        }
        this.setPageState(query, conds);
        LOG.debug("Query by conditions: {}", conds);
        return conds;
    }
}
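For context, here is a minimal sketch of how the QueryBuilder calls used above (select, orderBy, where) compose into a CQL statement. It assumes the DataStax Java driver 3.x API that this class builds on; the table and column names ("edges", "label", "sort_values") are illustrative, not taken from the project.

import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.datastax.driver.core.querybuilder.Select;

public class SelectSketch {
    public static void main(String[] args) {
        // Hypothetical table and column names, for illustration only
        Select select = QueryBuilder.select().from("edges");
        select.where(QueryBuilder.eq("label", 1));
        select.orderBy(QueryBuilder.asc("sort_values"));
        // Prints something like:
        // SELECT * FROM edges WHERE label=1 ORDER BY sort_values ASC;
        System.out.println(select.getQueryString());
    }
}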
Use of com.baidu.hugegraph.type.define.HugeKeys in project incubator-hugegraph by apache.
Class CassandraTable, method queryId2Select:
protected List<Select> queryId2Select(Query query, Select select) {
    // Query by id(s)
    if (query.idsSize() == 0) {
        return ImmutableList.of(select);
    }

    List<HugeKeys> nameParts = this.idColumnName();

    List<List<Object>> ids = new ArrayList<>(query.idsSize());
    for (Id id : query.ids()) {
        List<Object> idParts = this.idColumnValue(id);
        if (nameParts.size() != idParts.size()) {
            throw new NotFoundException("Unsupported ID format: '%s' (should contain %s)",
                                        id, nameParts);
        }
        ids.add(idParts);
    }

    // Query only by partition-key
    if (nameParts.size() == 1) {
        List<Object> idList = new ArrayList<>(ids.size());
        for (List<Object> id : ids) {
            assert id.size() == 1;
            idList.add(id.get(0));
        }
        return this.ids2IdSelects(select, nameParts.get(0), idList);
    }

    /*
     * Query by partition-key + clustering-key
     * NOTE: Error if a multi-column IN clause includes the partition key:
     * error: multi-column relations can only be applied to clustering
     * columns when using: select.where(QueryBuilder.in(names, idList));
     * So we use multi-query instead of IN
     */
    List<Select> selects = new ArrayList<>(ids.size());
    for (List<Object> id : ids) {
        assert nameParts.size() == id.size();
        Select idSelect = cloneSelect(select, this.table());
        /*
         * NOTE: concat with AND relation, like:
         * "pk = id and ck1 = v1 and ck2 = v2"
         */
        for (int i = 0, n = nameParts.size(); i < n; i++) {
            idSelect.where(formatEQ(nameParts.get(i), id.get(i)));
        }
        selects.add(idSelect);
    }
    return selects;
}
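The comment above explains why composite ids cannot use a multi-column IN over the partition key, so the method issues one SELECT per id instead. A hedged sketch of that workaround with the DataStax 3.x QueryBuilder, using hypothetical column names (owner_vertex as partition key, sort_values as clustering key):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.datastax.driver.core.querybuilder.Select;

public class IdSelectsSketch {

    // One SELECT per composite id: "owner_vertex = ? AND sort_values = ?"
    static List<Select> ids2Selects(List<Object[]> ids) {
        List<Select> selects = new ArrayList<>(ids.size());
        for (Object[] id : ids) {
            Select select = QueryBuilder.select().from("edges");
            select.where(QueryBuilder.eq("owner_vertex", id[0]))
                  .and(QueryBuilder.eq("sort_values", id[1]));
            selects.add(select);
        }
        return selects;
    }

    public static void main(String[] args) {
        List<Object[]> ids = Arrays.asList(new Object[]{"v1", "s1"},
                                           new Object[]{"v2", "s2"});
        for (Select select : ids2Selects(ids)) {
            System.out.println(select.getQueryString());
        }
    }
}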
Use of com.baidu.hugegraph.type.define.HugeKeys in project incubator-hugegraph by apache.
Class BinarySerializer, method writeQueryEdgePrefixCondition:
private Query writeQueryEdgePrefixCondition(ConditionQuery cq) {
    int count = 0;
    BytesBuffer buffer = BytesBuffer.allocate(BytesBuffer.BUF_EDGE_ID);
    for (HugeKeys key : EdgeId.KEYS) {
        Object value = cq.condition(key);

        if (value != null) {
            count++;
        } else {
            if (key == HugeKeys.DIRECTION) {
                // Direction is null, set to OUT
                value = Directions.OUT;
            } else {
                break;
            }
        }

        if (key == HugeKeys.OWNER_VERTEX || key == HugeKeys.OTHER_VERTEX) {
            writePartitionedId(HugeType.EDGE, (Id) value, buffer);
        } else if (key == HugeKeys.DIRECTION) {
            byte t = ((Directions) value).type().code();
            buffer.write(t);
        } else if (key == HugeKeys.LABEL) {
            assert value instanceof Id;
            buffer.writeId((Id) value);
        } else if (key == HugeKeys.SORT_VALUES) {
            assert value instanceof String;
            buffer.writeStringWithEnding((String) value);
        } else {
            assert false : key;
        }
    }

    if (count > 0) {
        assert count == cq.conditionsSize();
        return prefixQuery(cq, new BinaryId(buffer.bytes(), null));
    }

    return null;
}
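The loop above writes the longest contiguous prefix of EdgeId.KEYS that the query constrains, stopping at the first missing key (with DIRECTION defaulting to OUT). A self-contained sketch of that idea, using a string buffer and a simplified key enum instead of the project's binary BytesBuffer:

import java.util.EnumMap;
import java.util.Map;

public class EdgePrefixSketch {

    // Simplified stand-in for the fixed EdgeId.KEYS order
    enum Key { OWNER_VERTEX, DIRECTION, LABEL, SORT_VALUES }

    static String prefix(Map<Key, String> conditions) {
        StringBuilder buffer = new StringBuilder();
        for (Key key : Key.values()) {
            String value = conditions.get(key);
            if (value == null) {
                if (key == Key.DIRECTION) {
                    value = "OUT";  // direction not constrained: default to OUT
                } else {
                    break;          // stop at the first unconstrained key
                }
            }
            buffer.append(value).append('|');
        }
        return buffer.toString();
    }

    public static void main(String[] args) {
        Map<Key, String> cq = new EnumMap<>(Key.class);
        cq.put(Key.OWNER_VERTEX, "v1");
        cq.put(Key.LABEL, "knows");
        System.out.println(prefix(cq));  // "v1|OUT|knows|"
    }
}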
Use of com.baidu.hugegraph.type.define.HugeKeys in project incubator-hugegraph by apache.
Class TableSerializer, method writeQueryCondition:
@Override
protected Query writeQueryCondition(Query query) {
    ConditionQuery result = (ConditionQuery) query;
    // No user-prop when serializing
    assert result.allSysprop();
    for (Condition.Relation r : result.relations()) {
        if (!r.value().equals(r.serialValue())) {
            // Has been serialized before (maybe a query is shared multiple times)
            continue;
        }
        HugeKeys key = (HugeKeys) r.key();
        if (r.relation() == Condition.RelationType.IN) {
            E.checkArgument(r.value() instanceof List,
                            "Expect list value for IN condition: %s", r);
            List<?> values = (List<?>) r.value();
            List<Object> serializedValues = new ArrayList<>(values.size());
            for (Object v : values) {
                serializedValues.add(this.serializeValue(key, v));
            }
            r.serialValue(serializedValues);
        } else if (r.relation() == Condition.RelationType.CONTAINS_VALUE &&
                   query.resultType().isGraph()) {
            r.serialValue(this.writeProperty(null, r.value()));
        } else {
            r.serialValue(this.serializeValue(key, r.value()));
        }
    }
    return result;
}
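The IN branch above serializes each element of the value list individually rather than the list as a whole. A minimal, self-contained sketch of that pattern, with a hypothetical serializeValue that just formats the value:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class SerializeInSketch {

    // Hypothetical per-value serializer; the real one depends on the key's column type
    static Object serializeValue(String key, Object value) {
        return String.valueOf(value);
    }

    public static void main(String[] args) {
        List<?> values = Arrays.asList(1, 2, 3);           // values of an IN condition
        List<Object> serialized = new ArrayList<>(values.size());
        for (Object v : values) {
            serialized.add(serializeValue("label", v));    // serialize element by element
        }
        System.out.println(serialized);                    // [1, 2, 3]
    }
}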
Use of com.baidu.hugegraph.type.define.HugeKeys in project incubator-hugegraph by apache.
Class TraversalUtil, method convCompare2SyspropRelation:
private static Relation convCompare2SyspropRelation(HugeGraph graph, HugeType type,
                                                    HasContainer has) {
    BiPredicate<?, ?> bp = has.getPredicate().getBiPredicate();
    assert bp instanceof Compare;

    HugeKeys key = token2HugeKey(has.getKey());
    E.checkNotNull(key, "token key");
    Object value = convSysValueIfNeeded(graph, type, key, has.getValue());

    switch ((Compare) bp) {
        case eq:
            return Condition.eq(key, value);
        case gt:
            return Condition.gt(key, value);
        case gte:
            return Condition.gte(key, value);
        case lt:
            return Condition.lt(key, value);
        case lte:
            return Condition.lte(key, value);
        case neq:
            return Condition.neq(key, value);
        default:
            throw newUnsupportedPredicate(has.getPredicate());
    }
}
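The switch above maps TinkerPop's Compare predicates one-to-one onto HugeGraph Condition factories keyed by a HugeKeys system property. A self-contained sketch of the same mapping, reduced to operator strings so it does not depend on HugeGraph classes:

import org.apache.tinkerpop.gremlin.process.traversal.Compare;

public class CompareSketch {

    // Map each Compare predicate to an operator string (stand-in for the Condition factories)
    static String toOperator(Compare cmp) {
        switch (cmp) {
            case eq:  return "=";
            case gt:  return ">";
            case gte: return ">=";
            case lt:  return "<";
            case lte: return "<=";
            case neq: return "!=";
            default:  throw new IllegalArgumentException("Unsupported compare: " + cmp);
        }
    }

    public static void main(String[] args) {
        System.out.println(toOperator(Compare.gte));  // ">="
    }
}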