Use of org.janusgraph.diskstorage.BaseTransaction in project janusgraph by JanusGraph: class LuceneIndex, method queryCount.
@Override
public Long queryCount(IndexQuery query, KeyInformation.IndexRetriever information, BaseTransaction tx) throws BackendException {
    // Construct query
    final String store = query.getStore();
    final LuceneCustomAnalyzer delegatingAnalyzer = delegatingAnalyzerFor(store, information);
    final SearchParams searchParams = convertQuery(query.getCondition(), information.get(store), delegatingAnalyzer);
    try {
        final IndexSearcher searcher = ((Transaction) tx).getSearcher(query.getStore());
        if (searcher == null) {
            // Index does not yet exist
            return 0L;
        }
        Query q = searchParams.getQuery();
        final long time = System.currentTimeMillis();
        // We ignore offset and limit for totals
        final TopDocs docs = searcher.search(q, 1);
        log.debug("Executed query [{}] in {} ms", q, System.currentTimeMillis() - time);
        return docs.totalHits.value;
    } catch (final IOException e) {
        throw new TemporaryBackendException("Could not execute Lucene query", e);
    }
}
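The counting pattern above asks Lucene for a single result and reads the reported total from TopDocs. A minimal standalone sketch of that idea, using only plain Lucene APIs; the class and method names here (HitCountExample, countMatches) are illustrative and not part of JanusGraph:

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;

import java.io.IOException;

public final class HitCountExample {

    // Ask for a single result and read the reported total, mirroring queryCount above.
    public static long countMatches(Directory directory, Query query) throws IOException {
        try (DirectoryReader reader = DirectoryReader.open(directory)) {
            final IndexSearcher searcher = new IndexSearcher(reader);
            final TopDocs docs = searcher.search(query, 1);
            // In Lucene 8+ totalHits.value can be a lower bound (see totalHits.relation);
            // the JanusGraph method above returns the value as-is.
            return docs.totalHits.value;
        }
    }
}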
Use of org.janusgraph.diskstorage.BaseTransaction in project janusgraph by JanusGraph: class LuceneIndex, method totals.
@Override
public Long totals(RawQuery query, KeyInformation.IndexRetriever information, BaseTransaction tx) throws BackendException {
    final Query q;
    try {
        q = getQueryParser(query.getStore(), information).parse(query.getQuery());
    } catch (final ParseException e) {
        throw new PermanentBackendException("Could not parse raw query: " + query.getQuery(), e);
    }
    try {
        final IndexSearcher searcher = ((Transaction) tx).getSearcher(query.getStore());
        if (searcher == null) {
            // Index does not yet exist
            return 0L;
        }
        final long time = System.currentTimeMillis();
        // Lucene doesn't like limits of 0. Also, it doesn't efficiently build a total list.
        query.setLimit(1);
        // We ignore offset and limit for totals
        final TopDocs docs = searcher.search(q, 1);
        log.debug("Executed query [{}] in {} ms", q, System.currentTimeMillis() - time);
        return docs.totalHits.value;
    } catch (final IOException e) {
        throw new TemporaryBackendException("Could not execute Lucene query", e);
    }
}
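Before counting, totals parses the raw query string with Lucene's classic QueryParser. A hedged, standalone sketch of that parse-then-count step; the default field name "body" and the StandardAnalyzer are assumptions for illustration, whereas the real code derives the parser per store via getQueryParser(...):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.Directory;

import java.io.IOException;

public final class RawQueryCountExample {

    // Parse a user-supplied query string and return the reported hit total.
    // "body" is an assumed default field for the parser.
    public static long countRaw(Directory directory, String rawQuery)
            throws IOException, ParseException {
        final Query q = new QueryParser("body", new StandardAnalyzer()).parse(rawQuery);
        try (DirectoryReader reader = DirectoryReader.open(directory)) {
            return new IndexSearcher(reader).search(q, 1).totalHits.value;
        }
    }
}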
Use of org.janusgraph.diskstorage.BaseTransaction in project janusgraph by JanusGraph: class LuceneIndex, method query (RawQuery).
@Override
public Stream<RawQuery.Result<String>> query(RawQuery query, KeyInformation.IndexRetriever information, BaseTransaction tx) throws BackendException {
    final Query q;
    try {
        q = getQueryParser(query.getStore(), information).parse(query.getQuery());
        // Lucene query parser does not take additional parameters so any parameters on the RawQuery are ignored.
    } catch (final ParseException e) {
        throw new PermanentBackendException("Could not parse raw query: " + query.getQuery(), e);
    }
    try {
        final IndexSearcher searcher = ((Transaction) tx).getSearcher(query.getStore());
        if (searcher == null) {
            // Index does not yet exist
            return Collections.unmodifiableList(new ArrayList<RawQuery.Result<String>>()).stream();
        }
        final long time = System.currentTimeMillis();
        // TODO: can we make offset more efficient in Lucene?
        final int offset = query.getOffset();
        int adjustedLimit = query.hasLimit() ? query.getLimit() : Integer.MAX_VALUE - 1;
        if (adjustedLimit < Integer.MAX_VALUE - 1 - offset)
            adjustedLimit += offset;
        else
            adjustedLimit = Integer.MAX_VALUE - 1;
        final TopDocs docs;
        if (query.getOrders().isEmpty()) {
            docs = searcher.search(q, adjustedLimit);
        } else {
            docs = searcher.search(q, adjustedLimit, getSortOrder(query.getOrders(), information.get(query.getStore())));
        }
        log.debug("Executed query [{}] in {} ms", q, System.currentTimeMillis() - time);
        final List<RawQuery.Result<String>> result = new ArrayList<>(docs.scoreDocs.length);
        for (int i = offset; i < docs.scoreDocs.length; i++) {
            final IndexableField field = searcher.doc(docs.scoreDocs[i].doc, FIELDS_TO_LOAD).getField(DOCID);
            result.add(new RawQuery.Result<>(field == null ? null : field.stringValue(), docs.scoreDocs[i].score));
        }
        return result.stream();
    } catch (final IOException e) {
        throw new TemporaryBackendException("Could not execute Lucene query", e);
    }
}
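Offsets are handled by over-fetching: the method asks Lucene for roughly offset + limit documents, then skips the first offset score docs when building results. A minimal, hedged sketch of that pagination pattern with plain Lucene types; the field name "docid" stands in for the DOCID constant and is an assumption:

import org.apache.lucene.document.Document;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public final class OffsetPaginationExample {

    // Fetch offset + limit hits, then discard the first 'offset' of them,
    // mirroring the result loop above that starts at i = offset.
    public static List<String> page(IndexSearcher searcher, Query q, int offset, int limit)
            throws IOException {
        final TopDocs docs = searcher.search(q, offset + limit);
        final List<String> ids = new ArrayList<>();
        for (int i = offset; i < docs.scoreDocs.length; i++) {
            final ScoreDoc hit = docs.scoreDocs[i];
            final Document doc = searcher.doc(hit.doc);
            // "docid" is an assumed stored field name (the snippet uses the DOCID constant).
            ids.add(doc.get("docid"));
        }
        return ids;
    }
}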
Use of org.janusgraph.diskstorage.BaseTransaction in project janusgraph by JanusGraph: class LuceneIndex, method query (IndexQuery).
@Override
public Stream<String> query(IndexQuery query, KeyInformation.IndexRetriever information, BaseTransaction tx) throws BackendException {
    // Construct query
    final String store = query.getStore();
    final LuceneCustomAnalyzer delegatingAnalyzer = delegatingAnalyzerFor(store, information);
    final SearchParams searchParams = convertQuery(query.getCondition(), information.get(store), delegatingAnalyzer);
    try {
        final IndexSearcher searcher = ((Transaction) tx).getSearcher(query.getStore());
        if (searcher == null) {
            // Index does not yet exist
            return Collections.unmodifiableList(new ArrayList<String>()).stream();
        }
        Query q = searchParams.getQuery();
        if (null == q)
            q = new MatchAllDocsQuery();
        final long time = System.currentTimeMillis();
        final TopDocs docs;
        int limit = query.hasLimit() ? query.getLimit() : Integer.MAX_VALUE - 1;
        if (query.getOrder().isEmpty()) {
            docs = searcher.search(q, limit);
        } else {
            docs = searcher.search(q, limit, getSortOrder(query.getOrder(), information.get(store)));
        }
        log.debug("Executed query [{}] in {} ms", q, System.currentTimeMillis() - time);
        final List<String> result = new ArrayList<>(docs.scoreDocs.length);
        for (int i = 0; i < docs.scoreDocs.length; i++) {
            final IndexableField field = searcher.doc(docs.scoreDocs[i].doc, FIELDS_TO_LOAD).getField(DOCID);
            result.add(field == null ? null : field.stringValue());
        }
        return result.stream();
    } catch (final IOException e) {
        throw new TemporaryBackendException("Could not execute Lucene query", e);
    }
}
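Two details in this variant are worth isolating: when no condition translates to a Lucene query it falls back to MatchAllDocsQuery, and when orders are present it passes a Sort built by getSortOrder(...) to the three-argument search overload. A hedged sketch of both ideas with plain Lucene types; the "timestamp" field, its LONG type, and the newestFirst flag are illustrative assumptions:

import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;

import java.io.IOException;

public final class SortedSearchExample {

    // Search with an optional sort, falling back to "match everything" when
    // no query was built, similar to the MatchAllDocsQuery fallback above.
    public static TopDocs search(IndexSearcher searcher, Query q, int limit, boolean newestFirst)
            throws IOException {
        final Query effective = (q != null) ? q : new MatchAllDocsQuery();
        if (!newestFirst) {
            return searcher.search(effective, limit);
        }
        // "timestamp" is an assumed sortable (doc values) long field used only for illustration.
        final Sort sort = new Sort(new SortField("timestamp", SortField.Type.LONG, true));
        return searcher.search(effective, limit, sort);
    }
}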
Use of org.janusgraph.diskstorage.BaseTransaction in project janusgraph by JanusGraph: class LuceneIndex, method mutate.
@Override
public void mutate(Map<String, Map<String, IndexMutation>> mutations, KeyInformation.IndexRetriever information, BaseTransaction tx) throws BackendException {
    final Transaction ltx = (Transaction) tx;
    writerLock.lock();
    try {
        for (final Map.Entry<String, Map<String, IndexMutation>> stores : mutations.entrySet()) {
            mutateStores(stores, information);
        }
        ltx.postCommit();
    } catch (final IOException e) {
        throw new TemporaryBackendException("Could not update Lucene index", e);
    } finally {
        writerLock.unlock();
    }
}
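The mutation path takes a writer lock, applies all store mutations, and only then lets the transaction publish the changes via postCommit(). A simplified, hedged sketch of that lock-then-commit shape using a plain ReentrantLock and Lucene's IndexWriter; the writer field and addDocument loop are illustrative stand-ins, not JanusGraph's mutateStores logic:

import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexWriter;

import java.io.IOException;
import java.util.List;
import java.util.concurrent.locks.ReentrantLock;

public final class GuardedWriteExample {

    private final ReentrantLock writerLock = new ReentrantLock();
    private final IndexWriter writer;

    public GuardedWriteExample(IndexWriter writer) {
        this.writer = writer;
    }

    // Apply a batch of writes under the lock and commit at the end,
    // roughly mirroring mutate(...) -> mutateStores(...) -> postCommit().
    public void applyBatch(List<Document> documents) throws IOException {
        writerLock.lock();
        try {
            for (final Document doc : documents) {
                writer.addDocument(doc);
            }
            writer.commit();
        } finally {
            writerLock.unlock();
        }
    }
}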