Use of com.baidu.hugegraph.backend.store.BackendEntry in project incubator-hugegraph by apache.
The class InMemoryDBTable, method query.
@Override
public Iterator<BackendEntry> query(BackendSession session, Query query) {
    /*
     * Answer a query from the in-memory id->entry map by progressively
     * narrowing the candidate map, then applying offset and limit.
     * Paging is not supported by this backend.
     */
    String page = query.page();
    if (page != null && !page.isEmpty()) {
        throw new NotSupportException("paging by InMemoryDBStore");
    }

    Map<Id, BackendEntry> results = this.store;

    // Narrow by id prefix, if requested
    if (query instanceof IdPrefixQuery) {
        IdPrefixQuery prefixQuery = (IdPrefixQuery) query;
        results = this.queryByIdPrefix(prefixQuery.start(),
                                       prefixQuery.inclusiveStart(),
                                       prefixQuery.prefix(), results);
    }
    // Narrow by id range, if requested
    if (query instanceof IdRangeQuery) {
        IdRangeQuery rangeQuery = (IdRangeQuery) query;
        results = this.queryByIdRange(rangeQuery.start(),
                                      rangeQuery.inclusiveStart(),
                                      rangeQuery.end(),
                                      rangeQuery.inclusiveEnd(), results);
    }

    // Narrow by explicit id(s)
    if (query.idsSize() > 0) {
        results = this.queryById(query.ids(), results);
    }

    // Narrow by condition(s); a scan relation is answered directly
    if (query.conditionsSize() > 0) {
        ConditionQuery condQuery = (ConditionQuery) query;
        if (condQuery.containsScanRelation()) {
            return this.queryByRange(condQuery);
        }
        results = this.queryByFilter(query.conditions(), results);
    }

    Iterator<BackendEntry> iter = results.values().iterator();

    // Skip the portion of the offset not yet consumed by earlier fetches
    long offset = query.offset() - query.actualOffset();
    if (offset >= results.size()) {
        // Everything here is skipped; record the skip and return nothing
        query.goOffset(results.size());
        return QueryResults.emptyIterator();
    }
    if (offset > 0L) {
        query.goOffset(offset);
        iter = this.skipOffset(iter, offset);
    }

    // Trim to the limit when the candidate set exceeds the requested total
    if (!query.noLimit() && query.total() < results.size()) {
        iter = this.dropTails(iter, query.limit());
    }
    return iter;
}
Use of com.baidu.hugegraph.backend.store.BackendEntry in project incubator-hugegraph by apache.
The class SchemaIndexTransaction, method queryByName.
@Watched(prefix = "index")
private QueryResults<BackendEntry> queryByName(ConditionQuery query) {
    // When no name index is needed, delegate to a direct query
    if (!this.needIndexForName()) {
        return super.query(query);
    }

    IndexLabel indexLabel = IndexLabel.label(query.resultType());
    String name = (String) query.condition(HugeKeys.NAME);
    E.checkState(name != null, "The name in condition can't be null " +
                 "when querying schema by name");

    // Build the secondary-index lookup: FIELD_VALUES == name for this label
    ConditionQuery indexQuery = new ConditionQuery(HugeType.SECONDARY_INDEX, query);
    indexQuery.eq(HugeKeys.FIELD_VALUES, name);
    indexQuery.eq(HugeKeys.INDEX_LABEL_ID, indexLabel.id());

    // Collect element ids from the matching index entries
    IdQuery idQuery = new IdQuery(query.resultType(), query);
    Iterator<BackendEntry> iter = super.query(indexQuery).iterator();
    try {
        while (iter.hasNext()) {
            HugeIndex index = this.serializer.readIndex(graph(), indexQuery,
                                                        iter.next());
            idQuery.query(index.elementIds());
            Query.checkForceCapacity(idQuery.idsSize());
        }
    } finally {
        CloseableIterator.closeIterator(iter);
    }

    if (idQuery.ids().isEmpty()) {
        return QueryResults.empty();
    }

    // A schema name is expected to resolve to exactly one id
    assert idQuery.idsSize() == 1 : idQuery.ids();
    if (idQuery.idsSize() > 1) {
        LOG.warn("Multiple ids are found with same name '{}': {}",
                 name, idQuery.ids());
    }
    return super.query(idQuery);
}
Use of com.baidu.hugegraph.backend.store.BackendEntry in project incubator-hugegraph by apache.
The class GraphIndexTransaction, method doIndexQueryOnce.
@Watched(prefix = "index")
private PageIds doIndexQueryOnce(IndexLabel indexLabel, ConditionQuery query) {
// Runs one index query (all results, or one page when query.paging()) and
// collects the element ids referenced by the matching index entries.
// Holds read locks on the index label so it cannot be deleted or rebuilt
// while the query is in flight.
// Query all or one page
Iterator<BackendEntry> entries = null;
LockUtil.Locks locks = new LockUtil.Locks(this.graphName());
try {
locks.lockReads(LockUtil.INDEX_LABEL_DELETE, indexLabel.id());
locks.lockReads(LockUtil.INDEX_LABEL_REBUILD, indexLabel.id());
// Insertion-ordered so result ids come back in index-scan order
Set<Id> ids = InsertionOrderUtil.newSet();
entries = super.query(query).iterator();
while (entries.hasNext()) {
HugeIndex index = this.serializer.readIndex(graph(), query, entries.next());
// Drop (and possibly purge) expired index entries unless expired
// results were explicitly requested
this.removeExpiredIndexIfNeeded(index, query.showExpired());
ids.addAll(index.elementIds());
// Stop as soon as the query limit is satisfied; the capacity check
// below is intentionally skipped on this final iteration
if (query.reachLimit(ids.size())) {
break;
}
Query.checkForceCapacity(ids.size());
this.recordIndexValue(query, index);
}
// If there is no data, the entries is not a Metadatable object
if (ids.isEmpty()) {
return PageIds.EMPTY;
}
// NOTE: Memory backend's iterator is not Metadatable
if (!query.paging()) {
return new PageIds(ids, PageState.EMPTY);
}
// Paging requires the backend iterator to expose its page state
E.checkState(entries instanceof Metadatable, "The entries must be Metadatable when query " + "in paging, but got '%s'", entries.getClass().getName());
return new PageIds(ids, PageInfo.pageState(entries));
} finally {
// Always release locks and close the backend iterator, even on error
locks.unlock();
CloseableIterator.closeIterator(entries);
}
}
Use of com.baidu.hugegraph.backend.store.BackendEntry in project incubator-hugegraph by apache.
The class StoreSerializer, method writeMutation.
/**
 * Serializes a mutation to bytes. Layout per item: action code, entry type
 * code, id bytes, subId (ZERO as placeholder when absent), ttl, column
 * count, then name/value byte pairs — preceded by the total item count.
 */
public static byte[] writeMutation(BackendMutation mutation) {
    BytesBuffer buffer = BytesBuffer.allocate(MUTATION_SIZE);
    // write mutation size
    buffer.writeVInt(mutation.size());

    Iterator<BackendAction> actions = mutation.mutation();
    while (actions.hasNext()) {
        BackendAction action = actions.next();
        // write Action
        buffer.write(action.action().code());

        BackendEntry entry = action.entry();
        // write HugeType
        buffer.write(entry.type().code());
        // write id
        buffer.writeBytes(entry.id().asBytes());

        // write subId; ZERO stands in for "no subId"
        if (entry.subId() == null) {
            buffer.writeId(IdGenerator.ZERO);
        } else {
            buffer.writeId(entry.subId());
        }

        // write ttl
        buffer.writeVLong(entry.ttl());

        // write columns: count followed by name/value pairs
        buffer.writeVInt(entry.columns().size());
        for (BackendColumn column : entry.columns()) {
            buffer.writeBytes(column.name);
            buffer.writeBytes(column.value);
        }
    }
    return buffer.bytes();
}
Use of com.baidu.hugegraph.backend.store.BackendEntry in project incubator-hugegraph by apache.
The class SchemaTransaction, method getAllSchema.
/**
 * Loads and deserializes every schema element of the given type.
 * Null entries from the backend are skipped; the result size is checked
 * against the force capacity on each addition.
 */
protected <T extends SchemaElement> List<T> getAllSchema(HugeType type) {
    Query query = new Query(type);
    List<T> schemas = new ArrayList<>();
    Iterator<BackendEntry> iter = this.query(query).iterator();
    try {
        while (iter.hasNext()) {
            BackendEntry entry = iter.next();
            if (entry == null) {
                // skip holes returned by the backend
                continue;
            }
            schemas.add(this.deserialize(entry, type));
            // guard against unbounded result sets
            Query.checkForceCapacity(schemas.size());
        }
    } finally {
        CloseableIterator.closeIterator(iter);
    }
    return schemas;
}
Aggregations