Use of org.apache.lucene.search.MatchNoDocsQuery in project lucene-solr by apache: the class JoinUtil, method createJoinQuery.
/**
* A query time join using global ordinals over a dedicated join field.
*
* This join has certain restrictions and requirements:
* 1) A document can only refer to one other document (but can be referred to by one or more documents).
* 2) Documents on each side of the join must be distinguishable. Typically this can be done by adding an extra field
* that identifies the "from" and "to" side; the fromQuery and toQuery must then take this into account.
* 3) There must be a single sorted doc values join field used by both the "from" and "to" documents. This join field
* should store the join values as UTF-8 strings.
* 4) An ordinal map must be provided that is created on top of the join field.
*
* Note: min and max filtering and the avg score mode will require this join to keep track of the number of times
* a document matches per join value. This will increase the per join cost in terms of execution time and memory.
*
* @param joinField The {@link SortedDocValues} field containing the join values
* @param fromQuery The query containing the actual user query. Also the fromQuery can only match "from" documents.
* @param toQuery The query identifying all documents on the "to" side.
* @param searcher The index searcher used to execute the from query
* @param scoreMode Instructs how scores from the fromQuery are mapped to the returned query
* @param ordinalMap The ordinal map constructed over the joinField. In case of a single segment index, no ordinal map
* needs to be provided.
* @param min Optionally the minimum number of "from" documents that are required to match for a "to" document
* to be a match. The min is inclusive. Setting min to 0 and max to <code>Integer.MAX_VALUE</code>
* disables the min and max "from" documents filtering
* @param max Optionally the maximum number of "from" documents that are allowed to match for a "to" document
* to be a match. The max is inclusive. Setting min to 0 and max to <code>Integer.MAX_VALUE</code>
* disables the min and max "from" documents filtering
* @return a {@link Query} instance that can be used to join documents based on the join field
* @throws IOException If I/O related errors occur
*/
public static Query createJoinQuery(String joinField, Query fromQuery, Query toQuery, IndexSearcher searcher, ScoreMode scoreMode, MultiDocValues.OrdinalMap ordinalMap, int min, int max) throws IOException {
  int numSegments = searcher.getIndexReader().leaves().size();
  final long valueCount;
  if (numSegments == 0) {
    return new MatchNoDocsQuery("JoinUtil.createJoinQuery with no segments");
  } else if (numSegments == 1) {
    // No need to use the ordinal map, because there is just one segment.
    ordinalMap = null;
    LeafReader leafReader = searcher.getIndexReader().leaves().get(0).reader();
    SortedDocValues joinSortedDocValues = leafReader.getSortedDocValues(joinField);
    if (joinSortedDocValues != null) {
      valueCount = joinSortedDocValues.getValueCount();
    } else {
      return new MatchNoDocsQuery("JoinUtil.createJoinQuery: no join values");
    }
  } else {
    if (ordinalMap == null) {
      throw new IllegalArgumentException("OrdinalMap is required, because there is more than 1 segment");
    }
    valueCount = ordinalMap.getValueCount();
  }
  final Query rewrittenFromQuery = searcher.rewrite(fromQuery);
  final Query rewrittenToQuery = searcher.rewrite(toQuery);
  GlobalOrdinalsWithScoreCollector globalOrdinalsWithScoreCollector;
  switch (scoreMode) {
    case Total:
      globalOrdinalsWithScoreCollector = new GlobalOrdinalsWithScoreCollector.Sum(joinField, ordinalMap, valueCount, min, max);
      break;
    case Min:
      globalOrdinalsWithScoreCollector = new GlobalOrdinalsWithScoreCollector.Min(joinField, ordinalMap, valueCount, min, max);
      break;
    case Max:
      globalOrdinalsWithScoreCollector = new GlobalOrdinalsWithScoreCollector.Max(joinField, ordinalMap, valueCount, min, max);
      break;
    case Avg:
      globalOrdinalsWithScoreCollector = new GlobalOrdinalsWithScoreCollector.Avg(joinField, ordinalMap, valueCount, min, max);
      break;
    case None:
      if (min <= 0 && max == Integer.MAX_VALUE) {
        GlobalOrdinalsCollector globalOrdinalsCollector = new GlobalOrdinalsCollector(joinField, ordinalMap, valueCount);
        searcher.search(rewrittenFromQuery, globalOrdinalsCollector);
        return new GlobalOrdinalsQuery(globalOrdinalsCollector.getCollectorOrdinals(), joinField, ordinalMap, rewrittenToQuery, rewrittenFromQuery, searcher.getTopReaderContext().id());
      } else {
        globalOrdinalsWithScoreCollector = new GlobalOrdinalsWithScoreCollector.NoScore(joinField, ordinalMap, valueCount, min, max);
        break;
      }
    default:
      throw new IllegalArgumentException(String.format(Locale.ROOT, "Score mode %s isn't supported.", scoreMode));
  }
  searcher.search(rewrittenFromQuery, globalOrdinalsWithScoreCollector);
  return new GlobalOrdinalsWithScoreQuery(globalOrdinalsWithScoreCollector, scoreMode, joinField, ordinalMap, rewrittenToQuery, rewrittenFromQuery, min, max, searcher.getTopReaderContext().id());
}
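For context, here is a minimal usage sketch of createJoinQuery, not taken from the source above. It assumes an index in which both sides share a SortedDocValues join field named "product_id" and a "type" field distinguishing the "from" (offer) and "to" (product) documents; the field names and the chosen ScoreMode are illustrative assumptions, and the OrdinalMap is built over the join field as the Javadoc requires.

// Hedged sketch; "product_id", "type", "offer" and "product" are hypothetical names.
import java.io.IOException;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.MultiDocValues;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.join.JoinUtil;
import org.apache.lucene.search.join.ScoreMode;
import org.apache.lucene.util.packed.PackedInts;

static Query offersToProductsJoin(IndexSearcher searcher) throws IOException {
  // Build the ordinal map over the join field: one SortedDocValues per segment.
  SortedDocValues[] perSegment = new SortedDocValues[searcher.getIndexReader().leaves().size()];
  for (LeafReaderContext leaf : searcher.getIndexReader().leaves()) {
    perSegment[leaf.ord] = DocValues.getSorted(leaf.reader(), "product_id");
  }
  MultiDocValues.OrdinalMap ordinalMap =
      MultiDocValues.OrdinalMap.build(null, perSegment, PackedInts.DEFAULT);

  return JoinUtil.createJoinQuery(
      "product_id",                               // joinField (SortedDocValues, UTF-8 values)
      new TermQuery(new Term("type", "offer")),   // fromQuery: matches only "from" documents
      new TermQuery(new Term("type", "product")), // toQuery: matches only "to" documents
      searcher,
      ScoreMode.Max,                              // take the highest matching "from" score
      ordinalMap,
      0, Integer.MAX_VALUE);                      // min/max "from" filtering disabled
}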
Use of org.apache.lucene.search.MatchNoDocsQuery in project lucene-solr by apache: the class BinaryPoint, method newSetQuery.
/**
* Create a query matching any of the specified 1D values. This is the points equivalent of {@code TermsQuery}.
*
* @param field field name. must not be {@code null}.
* @param values all values to match
*/
public static Query newSetQuery(String field, byte[]... values) {
  // Make sure all byte[] have the same length
  int bytesPerDim = -1;
  for (byte[] value : values) {
    if (bytesPerDim == -1) {
      bytesPerDim = value.length;
    } else if (value.length != bytesPerDim) {
      throw new IllegalArgumentException("all byte[] must be the same length, but saw " + bytesPerDim + " and " + value.length);
    }
  }
  if (bytesPerDim == -1) {
    // There are no points, and we cannot guess the bytesPerDim here, so we return an equivalent query:
    return new MatchNoDocsQuery("empty BinaryPoint.newSetQuery");
  }
  // Don't unexpectedly change the user's incoming values array:
  byte[][] sortedValues = values.clone();
  Arrays.sort(sortedValues, new Comparator<byte[]>() {
    @Override
    public int compare(byte[] a, byte[] b) {
      return StringHelper.compare(a.length, a, 0, b, 0);
    }
  });
  final BytesRef encoded = new BytesRef(new byte[bytesPerDim]);
  return new PointInSetQuery(field, 1, bytesPerDim, new PointInSetQuery.Stream() {
    int upto;

    @Override
    public BytesRef next() {
      if (upto == sortedValues.length) {
        return null;
      } else {
        encoded.bytes = sortedValues[upto];
        upto++;
        return encoded;
      }
    }
  }) {
    @Override
    protected String toString(byte[] value) {
      return new BytesRef(value).toString();
    }
  };
}
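A short, hedged usage sketch of newSetQuery follows; the field name "binary_id" and the byte values are invented for illustration. Calling the method with no values falls into the MatchNoDocsQuery branch above.

import org.apache.lucene.document.BinaryPoint;
import org.apache.lucene.search.Query;

static Query binaryIdIn() {
  byte[] a = new byte[] { 0x01, 0x02, 0x03, 0x04 };
  byte[] b = new byte[] { 0x0a, 0x0b, 0x0c, 0x0d }; // must have the same length as 'a'
  return BinaryPoint.newSetQuery("binary_id", a, b);
  // BinaryPoint.newSetQuery("binary_id") with no values would return a MatchNoDocsQuery.
}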
Use of org.apache.lucene.search.MatchNoDocsQuery in project lucene-solr by apache: the class TestSolrCoreParser, method testGoodbye.
public void testGoodbye() throws IOException, ParserException {
  final Query query = parseXmlString("<GoodbyeQuery/>");
  assertTrue(query instanceof MatchNoDocsQuery);
}
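The <GoodbyeQuery/> element is handled by a query-builder plugin registered in the test's solrconfig. As a rough, hedged sketch of how such a builder could be written against Lucene's XML query parser interface (the actual test plugin may be wired differently):

import org.apache.lucene.queryparser.xml.ParserException;
import org.apache.lucene.queryparser.xml.QueryBuilder;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.w3c.dom.Element;

// Hypothetical builder: every <GoodbyeQuery/> element becomes a query that matches nothing.
public class GoodbyeQueryBuilder implements QueryBuilder {
  @Override
  public Query getQuery(Element e) throws ParserException {
    return new MatchNoDocsQuery("GoodbyeQuery");
  }
}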
Use of org.apache.lucene.search.MatchNoDocsQuery in project elasticsearch by elastic: the class GeoShapeQueryBuilder, method doToQuery.
@Override
protected Query doToQuery(QueryShardContext context) {
  if (shape == null) {
    throw new UnsupportedOperationException("query must be rewritten first");
  }
  final ShapeBuilder shapeToQuery = shape;
  final MappedFieldType fieldType = context.fieldMapper(fieldName);
  if (fieldType == null) {
    if (ignoreUnmapped) {
      return new MatchNoDocsQuery();
    } else {
      throw new QueryShardException(context, "failed to find geo_shape field [" + fieldName + "]");
    }
  }
  // TODO: This isn't the nicest way to check this
  if (!(fieldType instanceof GeoShapeFieldMapper.GeoShapeFieldType)) {
    throw new QueryShardException(context, "Field [" + fieldName + "] is not a geo_shape");
  }
  final GeoShapeFieldMapper.GeoShapeFieldType shapeFieldType = (GeoShapeFieldMapper.GeoShapeFieldType) fieldType;
  PrefixTreeStrategy strategy = shapeFieldType.defaultStrategy();
  if (this.strategy != null) {
    strategy = shapeFieldType.resolveStrategy(this.strategy);
  }
  Query query;
  if (strategy instanceof RecursivePrefixTreeStrategy && relation == ShapeRelation.DISJOINT) {
    // this strategy doesn't support disjoint anymore: but it did
    // before, including creating lucene fieldcache (!)
    // in this case, execute disjoint as exists && !intersects
    BooleanQuery.Builder bool = new BooleanQuery.Builder();
    Query exists = ExistsQueryBuilder.newFilter(context, fieldName);
    Query intersects = strategy.makeQuery(getArgs(shapeToQuery, ShapeRelation.INTERSECTS));
    bool.add(exists, BooleanClause.Occur.MUST);
    bool.add(intersects, BooleanClause.Occur.MUST_NOT);
    query = new ConstantScoreQuery(bool.build());
  } else {
    query = new ConstantScoreQuery(strategy.makeQuery(getArgs(shapeToQuery, relation)));
  }
  return query;
}
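For reference, a hedged sketch of building this query through the Elasticsearch 5.x Java API, with ignore_unmapped enabled so that an unmapped field takes the MatchNoDocsQuery branch above instead of raising a QueryShardException; the field name "location" and the envelope coordinates are made up.

import java.io.IOException;
import com.vividsolutions.jts.geom.Coordinate;
import org.elasticsearch.common.geo.ShapeRelation;
import org.elasticsearch.common.geo.builders.ShapeBuilders;
import org.elasticsearch.index.query.GeoShapeQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

static GeoShapeQueryBuilder locationWithinEnvelope() throws IOException {
  return QueryBuilders.geoShapeQuery(
          "location",                                             // hypothetical geo_shape field
          ShapeBuilders.newEnvelope(new Coordinate(13.0, 53.0),   // top left
                                    new Coordinate(14.0, 52.0)))  // bottom right
      .relation(ShapeRelation.WITHIN)
      .ignoreUnmapped(true);  // unmapped field -> MatchNoDocsQuery instead of QueryShardException
}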
Use of org.apache.lucene.search.MatchNoDocsQuery in project elasticsearch by elastic: the class HasChildQueryBuilder, method doToQuery.
@Override
protected Query doToQuery(QueryShardContext context) throws IOException {
  Query innerQuery;
  final String[] previousTypes = context.getTypes();
  context.setTypes(type);
  try {
    innerQuery = query.toQuery(context);
  } finally {
    context.setTypes(previousTypes);
  }
  DocumentMapper childDocMapper = context.documentMapper(type);
  if (childDocMapper == null) {
    if (ignoreUnmapped) {
      return new MatchNoDocsQuery();
    } else {
      throw new QueryShardException(context, "[" + NAME + "] no mapping found for type [" + type + "]");
    }
  }
  ParentFieldMapper parentFieldMapper = childDocMapper.parentFieldMapper();
  if (parentFieldMapper.active() == false) {
    throw new QueryShardException(context, "[" + NAME + "] _parent field has no parent type configured");
  }
  String parentType = parentFieldMapper.type();
  DocumentMapper parentDocMapper = context.getMapperService().documentMapper(parentType);
  if (parentDocMapper == null) {
    throw new QueryShardException(context, "[" + NAME + "] Type [" + type + "] points to a non existent parent type [" + parentType + "]");
  }
  // wrap the query with type query
  innerQuery = Queries.filtered(innerQuery, childDocMapper.typeFilter());
  final ParentChildIndexFieldData parentChildIndexFieldData = context.getForField(parentFieldMapper.fieldType());
  return new LateParsingQuery(parentDocMapper.typeFilter(), innerQuery, minChildren(), maxChildren(), parentType, scoreMode, parentChildIndexFieldData, context.getSearchSimilarity());
}
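Similarly, a hedged sketch of the corresponding Elasticsearch 5.x Java API call, with ignore_unmapped set so an unknown child type yields the MatchNoDocsQuery branch rather than a QueryShardException; the child type "comment" and the term query are invented for illustration.

import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.index.query.HasChildQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

static HasChildQueryBuilder parentsWithSpamComment() {
  return QueryBuilders.hasChildQuery(
          "comment",                                  // child type (hypothetical)
          QueryBuilders.termQuery("status", "spam"),  // inner query run against child documents
          ScoreMode.None)                             // how child scores map to the parent
      .ignoreUnmapped(true);  // unknown child type -> MatchNoDocsQuery instead of QueryShardException
}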