use of org.apache.lucene.search.FieldComparator in project jackrabbit by apache.
the class SharedFieldComparatorSource method newComparator.
/**
 * Create a new <code>FieldComparator</code> for an embedded <code>propertyName</code>.
 *
 * @param propertyName the relative path to the property to sort on as returned
 *                     by {@link org.apache.jackrabbit.spi.Path#getString()}.
 * @param numHits      the number of hits to track.
 * @param sortPos      the position of this sort field within the <code>Sort</code>.
 * @param reversed     whether the sort order is reversed.
 * @return a <code>FieldComparator</code>
 * @throws java.io.IOException if an error occurs
 */
@Override
public FieldComparator newComparator(String propertyName, int numHits, int sortPos, boolean reversed) throws IOException {
    PathFactory factory = PathFactoryImpl.getInstance();
    Path path = factory.create(propertyName);
    try {
        SimpleFieldComparator simple = new SimpleFieldComparator(nsMappings.translatePath(path), field, numHits);
        return path.getLength() == 1
                ? simple
                : new CompoundScoreFieldComparator(new FieldComparator[] { simple, new RelPathFieldComparator(path, numHits) }, numHits);
    } catch (IllegalNameException e) {
        throw Util.createIOException(e);
    }
}
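A comparator source like the one above only takes effect once it is attached to a SortField and used in a search. A minimal sketch of that wiring, assuming an IndexSearcher, a Query, and a FieldComparatorSource instance (for example Jackrabbit's SharedFieldComparatorSource) are already available; the property name "jcr:title" and the class name are illustrative, not taken from the snippet:
import java.io.IOException;
import org.apache.lucene.search.FieldComparatorSource;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;

public class CustomSortSketch {

    // Runs the query sorted by the custom comparator source; the field name
    // "jcr:title" is only an example of a relative property path.
    static TopDocs searchSorted(IndexSearcher searcher, Query query,
                                FieldComparatorSource source) throws IOException {
        SortField sortField = new SortField("jcr:title", source, false);
        return searcher.search(query, 10, new Sort(sortField));
    }
}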
use of org.apache.lucene.search.FieldComparator in project lucene-solr by apache.
the class FirstPassGroupingCollector method buildSortedSet.
private void buildSortedSet() throws IOException {
    // Orders the collected groups by the configured sort fields, breaking ties on topDoc.
    final Comparator<CollectedSearchGroup<?>> comparator = new Comparator<CollectedSearchGroup<?>>() {
        @Override
        public int compare(CollectedSearchGroup<?> o1, CollectedSearchGroup<?> o2) {
            for (int compIDX = 0; ; compIDX++) {
                FieldComparator<?> fc = comparators[compIDX];
                final int c = reversed[compIDX] * fc.compare(o1.comparatorSlot, o2.comparatorSlot);
                if (c != 0) {
                    return c;
                } else if (compIDX == compIDXEnd) {
                    return o1.topDoc - o2.topDoc;
                }
            }
        }
    };
    orderedGroups = new TreeSet<>(comparator);
    orderedGroups.addAll(groupMap.values());
    assert orderedGroups.size() > 0;
    // Tell each leaf comparator which slot currently holds the weakest ("bottom") group.
    for (LeafFieldComparator fc : leafComparators) {
        fc.setBottom(orderedGroups.last().comparatorSlot);
    }
}
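The comparator above chains the collector's FieldComparators in sort-field order, applies per-field reversal, and falls back to topDoc so the TreeSet ordering is total. A self-contained sketch of the same pattern in plain Java (the Row type and field names are illustrative, not part of Lucene):
import java.util.Comparator;
import java.util.List;
import java.util.TreeSet;

public class ChainedComparatorSketch {

    record Row(String group, int score, int docId) {}

    public static void main(String[] args) {
        // One comparator per sort key, in priority order.
        Comparator<Row> byGroup = Comparator.comparing(Row::group);
        Comparator<Row> byScore = Comparator.comparingInt(Row::score);
        List<Comparator<Row>> keys = List.of(byGroup, byScore);
        // +1 keeps a key ascending, -1 reverses it (like the reversed[] array above).
        int[] reversed = { 1, -1 };

        Comparator<Row> chained = (a, b) -> {
            for (int i = 0; i < keys.size(); i++) {
                int c = reversed[i] * keys.get(i).compare(a, b);
                if (c != 0) {
                    return c;
                }
            }
            // Stable tie-break, analogous to o1.topDoc - o2.topDoc.
            return a.docId() - b.docId();
        };

        TreeSet<Row> ordered = new TreeSet<>(chained);
        ordered.addAll(List.of(new Row("a", 3, 1), new Row("a", 5, 2), new Row("b", 1, 3)));
        // The last element is the weakest entry, the one handed to setBottom(...) above.
        System.out.println(ordered.last());
    }
}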
use of org.apache.lucene.search.FieldComparator in project crate by crate.
the class SortSymbolVisitor method customSortField.
private SortField customSortField(String name, final Symbol symbol, final SortSymbolContext context, final SortField.Type reducedType, final boolean missingNullValue) {
    InputFactory.Context<? extends LuceneCollectorExpression<?>> inputContext = docInputFactory.getCtx();
    final Input input = inputContext.add(symbol);
    final Collection<? extends LuceneCollectorExpression<?>> expressions = inputContext.expressions();
    return new SortField(name, new IndexFieldData.XFieldComparatorSource() {

        @Override
        public FieldComparator<?> newComparator(String fieldName, int numHits, int sortPos, boolean reversed) throws IOException {
            for (LuceneCollectorExpression collectorExpression : expressions) {
                collectorExpression.startCollect(context.context);
            }
            DataType dataType = symbol.valueType();
            Object missingValue = missingNullValue
                    ? null
                    : SortSymbolVisitor.missingObject(dataType, SortOrder.missing(context.reverseFlag, context.nullFirst), reversed);
            if (context.context.visitor().required()) {
                return new FieldsVisitorInputFieldComparator(numHits, context.context.visitor(), expressions, input, dataType, missingValue);
            } else {
                return new InputFieldComparator(numHits, expressions, input, dataType, missingValue);
            }
        }

        @Override
        public SortField.Type reducedType() {
            return reducedType;
        }
    }, context.reverseFlag);
}
use of org.apache.lucene.search.FieldComparator in project Solbase by Photobucket.
the class SolbaseQueryComponent method process.
/**
 * Actually run the query
 */
@Override
public void process(ResponseBuilder rb) throws IOException {
    SolrQueryRequest req = rb.req;
    SolrQueryResponse rsp = rb.rsp;
    SolrParams params = req.getParams();
    if (!params.getBool(COMPONENT_NAME, true)) {
        return;
    }
    SolrIndexSearcher searcher = req.getSearcher();
    if (rb.getQueryCommand().getOffset() < 0) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'start' parameter cannot be negative");
    }
    // -1 as flag if not set.
    long timeAllowed = (long) params.getInt(CommonParams.TIME_ALLOWED, -1);
    // Optional: This could also be implemented by the top-level searcher sending
    // a filter that lists the ids... that would be transparent to the request
    // handler, but would be more expensive (and would preserve score too if desired).
    String ids = params.get(ShardParams.IDS);
    if (ids != null) {
        List<String> idArr = StrUtils.splitSmart(ids, ",", true);
        int[] luceneIds = new int[idArr.size()];
        int docs = 0;
        for (int i = 0; i < idArr.size(); i++) {
            luceneIds[docs++] = Integer.parseInt(idArr.get(i));
        }
        // we are indexing docId as solr uniq_id. by doing this, we are
        // bound to INTEGER.MAX_VALUE ~= 2 billion; docs is number of docs
        DocListAndSet res = new DocListAndSet();
        res.docList = new DocSlice(0, docs, luceneIds, null, docs, 0, null);
        if (rb.isNeedDocSet()) {
            List<Query> queries = new ArrayList<Query>();
            queries.add(rb.getQuery());
            List<Query> filters = rb.getFilters();
            if (filters != null)
                queries.addAll(filters);
            res.docSet = searcher.getDocSet(queries);
        }
        rb.setResults(res);
        rsp.add("response", rb.getResults().docList);
        return;
    }
    SolrIndexSearcher.QueryCommand cmd = rb.getQueryCommand();
    cmd.setTimeAllowed(timeAllowed);
    SolrIndexSearcher.QueryResult result = new SolrIndexSearcher.QueryResult();
    searcher.search(result, cmd);
    rb.setResult(result);
    rsp.add("response", rb.getResults().docList);
    rsp.getToLog().add("hits", rb.getResults().docList.matches());
    // The query cache doesn't currently store sort field values, and SolrIndexSearcher
    // doesn't currently have an option to return sort field values. Because of this,
    // we take the documents given and re-derive the sort values.
    boolean fsv = req.getParams().getBool(ResponseBuilder.FIELD_SORT_VALUES, false);
    if (fsv) {
        Sort sort = rb.getSortSpec().getSort();
        SortField[] sortFields = sort == null ? new SortField[] { SortField.FIELD_SCORE } : sort.getSort();
        NamedList sortVals = new NamedList(); // order is important for the sort fields
        Field field = new Field("dummy", "", Field.Store.YES, Field.Index.NO); // a dummy Field
        SolrIndexReader reader = searcher.getReader();
        SolrIndexReader[] readers = reader.getLeafReaders();
        SolrIndexReader subReader = reader;
        if (readers.length == 1) {
            // if there is a single segment, use that subReader and avoid looking up each time
            subReader = readers[0];
            readers = null;
        }
        int[] offsets = reader.getLeafOffsets();
        // TODO: need to fetch sort value from collector instead of re-derive lookup from id
        for (SortField sortField : sortFields) {
            int type = sortField.getType();
            if (type == SortField.SCORE || type == SortField.DOC)
                continue;
            FieldComparator comparator = null;
            FieldComparator[] comparators = (readers == null) ? null : new FieldComparator[readers.length];
            String fieldname = sortField.getField();
            FieldType ft = fieldname == null ? null : req.getSchema().getFieldTypeNoEx(fieldname);
            DocSlice docList = (DocSlice) rb.getResults().docList;
            ArrayList<Object> vals = new ArrayList<Object>(docList.size());
            for (int i = docList.offset; i < docList.len; i++) {
                vals.add(new Integer(docList.sorts[i][((EmbeddedSortField) sortField).getFieldNumber() - 1]));
            }
            sortVals.add(fieldname, vals);
        }
        rsp.add("sort_values", sortVals);
    }
    // pre-fetch returned documents
    if (!req.getParams().getBool(ShardParams.IS_SHARD, false) && rb.getResults().docList != null && rb.getResults().docList.size() <= 50) {
        // TODO: this may depend on the highlighter component (or other components?)
        SolrPluginUtils.optimizePreFetchDocs(rb.getResults().docList, rb.getQuery(), req, rsp);
    }
}
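The special branches in this method are driven entirely by request parameters: ShardParams.IDS short-circuits retrieval by document id and returns early, ResponseBuilder.FIELD_SORT_VALUES ("fsv") asks for the re-derived sort values, and ShardParams.IS_SHARD suppresses the document pre-fetch. A minimal sketch of how such shard-style parameters could be assembled; the id values are illustrative:
import org.apache.solr.common.params.ModifiableSolrParams;

public class ShardRequestParamsSketch {

    // Parameters that exercise the sort-value branch: "fsv" (ResponseBuilder.FIELD_SORT_VALUES)
    // requests the re-derived sort values, "isShard" (ShardParams.IS_SHARD) skips the pre-fetch.
    static ModifiableSolrParams sortValueParams() {
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set("q", "*:*");
        params.set("fsv", "true");
        params.set("isShard", "true");
        return params;
    }

    // Parameters that take the early "ids" branch instead: the component returns the listed
    // documents directly and never reaches the sorting code. The ids are illustrative.
    static ModifiableSolrParams idsParams() {
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set("ids", "12,57,103"); // ShardParams.IDS
        return params;
    }
}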