Use of org.apache.lucene.queries.mlt.MoreLikeThis in the elasticsearch project (by elastic):
class XMoreLikeThisTests, method testTopN.
/**
 * Verifies that MoreLikeThis selects only the top-N query terms with the
 * highest idf. Docs are indexed so that later-generated terms have strictly
 * decreasing document frequency, hence the last {@code topN} generated terms
 * are the rarest and must be exactly the ones kept by term selection.
 */
public void testTopN() throws Exception {
    int numDocs = 100;
    int topN = 25;

    // add a series of docs with terms of decreasing df
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    for (int i = 0; i < numDocs; i++) {
        addDoc(writer, generateStrSeq(0, i + 1));
    }
    IndexReader reader = writer.getReader();
    writer.close();

    // setup MLT query
    MoreLikeThis mlt = new MoreLikeThis(reader);
    mlt.setAnalyzer(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false));
    mlt.setMaxQueryTerms(topN);
    mlt.setMinDocFreq(1);
    mlt.setMinTermFreq(1);
    mlt.setMinWordLen(1);
    mlt.setFieldNames(new String[] { "text" });

    // perform MLT query over the full term sequence
    // (StringBuilder instead of String += in a loop)
    StringBuilder likeText = new StringBuilder();
    for (String text : generateStrSeq(0, numDocs)) {
        likeText.append(text).append(' ');
    }
    BooleanQuery query = (BooleanQuery) mlt.like("text", new StringReader(likeText.toString()));

    // check best terms are the topN of highest idf
    List<BooleanClause> clauses = query.clauses();
    // fixed message spacing: was "Expected25clauses only!"
    assertEquals("Expected " + topN + " clauses only!", topN, clauses.size());

    Term[] expectedTerms = new Term[topN];
    int idx = 0;
    for (String text : generateStrSeq(numDocs - topN, topN)) {
        expectedTerms[idx++] = new Term("text", text);
    }
    // hoisted out of the loop: the list wrapper was rebuilt per clause
    List<Term> expectedList = Arrays.asList(expectedTerms);
    for (BooleanClause clause : clauses) {
        Term term = ((TermQuery) clause.getQuery()).getTerm();
        assertTrue(expectedList.contains(term));
    }

    // clean up
    reader.close();
    dir.close();
}
Use of org.apache.lucene.queries.mlt.MoreLikeThis in the lucene-solr project (by apache):
class CloudMLTQParser, method parse.
/**
 * Builds a MoreLikeThis query for the document identified by the local
 * param value ({@code v}), fetched via a real-time get. The similarity
 * fields come from the {@code qf} param (with optional per-field boosts)
 * or, when absent, from every stored field of the document that has an
 * explicit analyzer. The source document itself is excluded from results
 * via a MUST_NOT clause on its id.
 *
 * @return the boosted MLT query wrapped to exclude the source document
 * @throws SolrException BAD_REQUEST when the document cannot be fetched,
 *         no similarity field is available, or MLT construction fails
 */
public Query parse() {
    String id = localParams.get(QueryParsing.V);
    // Do a Real Time Get for the document
    SolrDocument doc = getDocument(id);
    if (doc == null) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
            "Error completing MLT request. Could not fetch " + "document with id [" + id + "]");
    }

    String[] qf = localParams.getParams("qf");
    Map<String, Float> boostFields = new HashMap<>();

    MoreLikeThis mlt = new MoreLikeThis(req.getSearcher().getIndexReader());
    mlt.setMinTermFreq(localParams.getInt("mintf", MoreLikeThis.DEFAULT_MIN_TERM_FREQ));
    // NOTE(review): default of 0 differs from MoreLikeThis.DEFAULT_MIN_DOC_FREQ
    // and from SimpleMLTQParser — confirm this asymmetry is intentional.
    mlt.setMinDocFreq(localParams.getInt("mindf", 0));
    mlt.setMinWordLen(localParams.getInt("minwl", MoreLikeThis.DEFAULT_MIN_WORD_LENGTH));
    mlt.setMaxWordLen(localParams.getInt("maxwl", MoreLikeThis.DEFAULT_MAX_WORD_LENGTH));
    mlt.setMaxQueryTerms(localParams.getInt("maxqt", MoreLikeThis.DEFAULT_MAX_QUERY_TERMS));
    mlt.setMaxNumTokensParsed(localParams.getInt("maxntp", MoreLikeThis.DEFAULT_MAX_NUM_TOKENS_PARSED));
    mlt.setMaxDocFreq(localParams.getInt("maxdf", MoreLikeThis.DEFAULT_MAX_DOC_FREQ));

    Boolean boost = localParams.getBool("boost", MoreLikeThis.DEFAULT_BOOST);
    mlt.setBoost(boost);
    mlt.setAnalyzer(req.getSchema().getIndexAnalyzer());

    Map<String, Collection<Object>> filteredDocument = new HashMap<>();
    String[] fieldNames;
    if (qf != null) {
        // raw type fixed: new ArrayList() -> new ArrayList<>()
        ArrayList<String> fields = new ArrayList<>();
        for (String fieldName : qf) {
            if (!StringUtils.isEmpty(fieldName)) {
                String[] strings = splitList.split(fieldName);
                for (String string : strings) {
                    if (!StringUtils.isEmpty(string)) {
                        fields.add(string);
                    }
                }
            }
        }
        // Parse field names and boosts from the fields
        boostFields = SolrPluginUtils.parseFieldBoosts(fields.toArray(new String[0]));
        fieldNames = boostFields.keySet().toArray(new String[0]);
    } else {
        ArrayList<String> fields = new ArrayList<>();
        for (String field : doc.getFieldNames()) {
            // Only use fields that are stored and have an explicit analyzer.
            // This makes sense as the query uses tf/idf/.. for query construction.
            // We might want to relook and change this in the future though.
            SchemaField f = req.getSchema().getFieldOrNull(field);
            if (f != null && f.stored() && f.getType().isExplicitAnalyzer()) {
                fields.add(field);
            }
        }
        fieldNames = fields.toArray(new String[0]);
    }
    if (fieldNames.length < 1) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
            "MoreLikeThis requires at least one similarity field: qf");
    }
    mlt.setFieldNames(fieldNames);

    // Collect the stored values of the selected fields, unwrapping any
    // IndexableField instances to their plain string values.
    for (String field : fieldNames) {
        Collection<Object> fieldValues = doc.getFieldValues(field);
        if (fieldValues != null) {
            Collection<Object> values = new ArrayList<>();
            for (Object val : fieldValues) {
                if (val instanceof IndexableField) {
                    values.add(((IndexableField) val).stringValue());
                } else {
                    values.add(val);
                }
            }
            filteredDocument.put(field, values);
        }
    }

    try {
        Query rawMLTQuery = mlt.like(filteredDocument);
        BooleanQuery boostedMLTQuery = (BooleanQuery) rawMLTQuery;
        if (boost && boostFields.size() > 0) {
            // Re-apply per-field boosts from qf on top of any boost MLT set.
            BooleanQuery.Builder newQ = new BooleanQuery.Builder();
            newQ.setMinimumNumberShouldMatch(boostedMLTQuery.getMinimumNumberShouldMatch());
            for (BooleanClause clause : boostedMLTQuery) {
                Query q = clause.getQuery();
                float originalBoost = 1f;
                if (q instanceof BoostQuery) {
                    BoostQuery bq = (BoostQuery) q;
                    q = bq.getQuery();
                    originalBoost = bq.getBoost();
                }
                Float fieldBoost = boostFields.get(((TermQuery) q).getTerm().field());
                q = (fieldBoost != null) ? new BoostQuery(q, fieldBoost * originalBoost) : clause.getQuery();
                newQ.add(q, clause.getOccur());
            }
            boostedMLTQuery = newQ.build();
        }
        // exclude current document from results
        BooleanQuery.Builder realMLTQuery = new BooleanQuery.Builder();
        realMLTQuery.add(boostedMLTQuery, BooleanClause.Occur.MUST);
        realMLTQuery.add(createIdQuery(req.getSchema().getUniqueKeyField().getName(), id),
            BooleanClause.Occur.MUST_NOT);
        return realMLTQuery.build();
    } catch (IOException e) {
        // was: e.printStackTrace() + cause-less SolrException; chain the cause instead
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Bad Request", e);
    }
}
Use of org.apache.lucene.queries.mlt.MoreLikeThis in the lucene-solr project (by apache):
class SimpleMLTQParser, method parse.
/**
 * Builds a MoreLikeThis query for the document whose unique key equals the
 * local param value ({@code v}). The similarity fields come from the
 * {@code qf} param (with optional per-field boosts) or, when absent, from
 * every indexed, stored, non-numeric field in the schema. The source
 * document itself is excluded from results via a MUST_NOT clause.
 *
 * @return the boosted MLT query wrapped to exclude the source document
 * @throws SolrException BAD_REQUEST when the document cannot be found,
 *         no similarity field is available, or MLT construction fails
 */
public Query parse() {
    String defaultField = req.getSchema().getUniqueKeyField().getName();
    String uniqueValue = localParams.get(QueryParsing.V);
    String[] qf = localParams.getParams("qf");

    SolrIndexSearcher searcher = req.getSearcher();
    Query docIdQuery = createIdQuery(defaultField, uniqueValue);
    Map<String, Float> boostFields = new HashMap<>();

    try {
        TopDocs td = searcher.search(docIdQuery, 1);
        if (td.totalHits != 1) {
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                "Error completing MLT request. Could not fetch " + "document with id [" + uniqueValue + "]");
        }
        ScoreDoc[] scoreDocs = td.scoreDocs;

        MoreLikeThis mlt = new MoreLikeThis(req.getSearcher().getIndexReader());
        mlt.setMinTermFreq(localParams.getInt("mintf", MoreLikeThis.DEFAULT_MIN_TERM_FREQ));
        mlt.setMinDocFreq(localParams.getInt("mindf", MoreLikeThis.DEFAULT_MIN_DOC_FREQ));
        mlt.setMinWordLen(localParams.getInt("minwl", MoreLikeThis.DEFAULT_MIN_WORD_LENGTH));
        mlt.setMaxWordLen(localParams.getInt("maxwl", MoreLikeThis.DEFAULT_MAX_WORD_LENGTH));
        mlt.setMaxQueryTerms(localParams.getInt("maxqt", MoreLikeThis.DEFAULT_MAX_QUERY_TERMS));
        mlt.setMaxNumTokensParsed(localParams.getInt("maxntp", MoreLikeThis.DEFAULT_MAX_NUM_TOKENS_PARSED));
        mlt.setMaxDocFreq(localParams.getInt("maxdf", MoreLikeThis.DEFAULT_MAX_DOC_FREQ));

        Boolean boost = localParams.getBool("boost", false);
        mlt.setBoost(boost);

        String[] fieldNames;
        if (qf != null) {
            ArrayList<String> fields = new ArrayList<>();
            for (String fieldName : qf) {
                if (!StringUtils.isEmpty(fieldName)) {
                    String[] strings = splitList.split(fieldName);
                    for (String string : strings) {
                        if (!StringUtils.isEmpty(string)) {
                            fields.add(string);
                        }
                    }
                }
            }
            // Parse field names and boosts from the fields
            boostFields = SolrPluginUtils.parseFieldBoosts(fields.toArray(new String[0]));
            fieldNames = boostFields.keySet().toArray(new String[0]);
        } else {
            // Fall back to every indexed + stored, non-numeric schema field.
            Map<String, SchemaField> fieldDefinitions = req.getSearcher().getSchema().getFields();
            // raw type fixed: new ArrayList() -> new ArrayList<>()
            ArrayList<String> fields = new ArrayList<>();
            for (String fieldName : fieldDefinitions.keySet()) {
                SchemaField sf = fieldDefinitions.get(fieldName);
                if (sf.indexed() && sf.stored() && sf.getType().getNumberType() == null) {
                    fields.add(fieldName);
                }
            }
            fieldNames = fields.toArray(new String[0]);
        }
        if (fieldNames.length < 1) {
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                "MoreLikeThis requires at least one similarity field: qf");
        }
        mlt.setFieldNames(fieldNames);
        mlt.setAnalyzer(req.getSchema().getIndexAnalyzer());

        Query rawMLTQuery = mlt.like(scoreDocs[0].doc);
        BooleanQuery boostedMLTQuery = (BooleanQuery) rawMLTQuery;
        if (boost && boostFields.size() > 0) {
            // Re-apply per-field boosts from qf on top of any boost MLT set.
            BooleanQuery.Builder newQ = new BooleanQuery.Builder();
            newQ.setMinimumNumberShouldMatch(boostedMLTQuery.getMinimumNumberShouldMatch());
            for (BooleanClause clause : boostedMLTQuery) {
                Query q = clause.getQuery();
                float originalBoost = 1f;
                if (q instanceof BoostQuery) {
                    BoostQuery bq = (BoostQuery) q;
                    q = bq.getQuery();
                    originalBoost = bq.getBoost();
                }
                Float fieldBoost = boostFields.get(((TermQuery) q).getTerm().field());
                q = (fieldBoost != null) ? new BoostQuery(q, fieldBoost * originalBoost) : clause.getQuery();
                newQ.add(q, clause.getOccur());
            }
            boostedMLTQuery = newQ.build();
        }
        // exclude current document from results
        BooleanQuery.Builder realMLTQuery = new BooleanQuery.Builder();
        realMLTQuery.add(boostedMLTQuery, BooleanClause.Occur.MUST);
        realMLTQuery.add(docIdQuery, BooleanClause.Occur.MUST_NOT);
        return realMLTQuery.build();
    } catch (IOException e) {
        // fixed: message had no separator before getMessage(), and the
        // cause was dropped — chain it so the stack trace survives.
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
            "Error completing MLT request: " + e.getMessage(), e);
    }
}
Use of org.apache.lucene.queries.mlt.MoreLikeThis in the jackrabbit-oak project (by apache):
class MoreLikeThisHelper, method getMoreLikeThis.
/**
 * Parses a Solr-style MLT query string ({@code key=value} pairs joined by
 * {@code &}) and builds a MoreLikeThis query over the given reader.
 * Recognized keys: {@code stream.body} (the seed text), {@code mlt.fl}
 * (comma-separated field list) and the numeric/boolean {@code mlt.*}
 * tuning parameters. When the field list starts with the path field, the
 * seed text is treated as a document path and, if that document exists,
 * MLT runs over its stored fields (excluding the path field) instead of
 * the raw text.
 *
 * @param reader the index to build the query against
 * @param analyzer analyzer used by MoreLikeThis to tokenize the seed text
 * @param mltQueryString the {@code key=value&...} MLT parameter string
 * @return the MLT query, or {@code null} when no {@code stream.body} given
 * @throws RuntimeException if the query string cannot be parsed or handled
 */
public static Query getMoreLikeThis(IndexReader reader, Analyzer analyzer, String mltQueryString) {
    Query moreLikeThisQuery = null;
    MoreLikeThis mlt = new MoreLikeThis(reader);
    mlt.setAnalyzer(analyzer);
    try {
        String text = null;
        String[] fields = {};
        for (String param : mltQueryString.split("&")) {
            // NOTE(review): split("=") rejects values containing '=' (length != 2);
            // confirm that is acceptable for stream.body payloads.
            String[] keyValuePair = param.split("=");
            if (keyValuePair.length != 2 || keyValuePair[0] == null || keyValuePair[1] == null) {
                throw new RuntimeException("Unparsable native Lucene MLT query: " + mltQueryString);
            }
            String key = keyValuePair[0];
            String value = keyValuePair[1];
            // if/else-if chain replaced with a string switch for readability
            switch (key) {
                case "stream.body":
                    text = value;
                    break;
                case "mlt.fl":
                    fields = value.split(",");
                    break;
                case "mlt.mindf":
                    mlt.setMinDocFreq(Integer.parseInt(value));
                    break;
                case "mlt.mintf":
                    mlt.setMinTermFreq(Integer.parseInt(value));
                    break;
                case "mlt.boost":
                    mlt.setBoost(Boolean.parseBoolean(value));
                    break;
                case "mlt.qf":
                    mlt.setBoostFactor(Float.parseFloat(value));
                    break;
                case "mlt.maxdf":
                    mlt.setMaxDocFreq(Integer.parseInt(value));
                    break;
                case "mlt.maxdfp":
                    mlt.setMaxDocFreqPct(Integer.parseInt(value));
                    break;
                case "mlt.maxntp":
                    mlt.setMaxNumTokensParsed(Integer.parseInt(value));
                    break;
                case "mlt.maxqt":
                    mlt.setMaxQueryTerms(Integer.parseInt(value));
                    break;
                case "mlt.maxwl":
                    mlt.setMaxWordLen(Integer.parseInt(value));
                    break;
                case "mlt.minwl":
                    mlt.setMinWordLen(Integer.parseInt(value));
                    break;
                default:
                    // unknown keys are silently ignored, as before
                    break;
            }
        }
        if (text != null) {
            // guard added: fields may be empty when mlt.fl was not supplied,
            // in which case fields[0] used to throw ArrayIndexOutOfBounds
            if (fields.length > 0 && FieldNames.PATH.equals(fields[0])) {
                // seed is a document path: look the document up first
                IndexSearcher searcher = new IndexSearcher(reader);
                TermQuery q = new TermQuery(new Term(FieldNames.PATH, text));
                TopDocs top = searcher.search(q, 1);
                if (top.totalHits == 0) {
                    // path not found: fall back to plain text MLT
                    mlt.setFieldNames(fields);
                    moreLikeThisQuery = mlt.like(new StringReader(text), mlt.getFieldNames()[0]);
                } else {
                    // use every stored field of the found doc except the path
                    ScoreDoc d = top.scoreDocs[0];
                    Document doc = reader.document(d.doc);
                    List<String> fieldNames = new ArrayList<String>();
                    for (IndexableField f : doc.getFields()) {
                        if (!FieldNames.PATH.equals(f.name())) {
                            fieldNames.add(f.name());
                        }
                    }
                    String[] docFields = fieldNames.toArray(new String[fieldNames.size()]);
                    mlt.setFieldNames(docFields);
                    moreLikeThisQuery = mlt.like(d.doc);
                }
            } else {
                mlt.setFieldNames(fields);
                moreLikeThisQuery = mlt.like(new StringReader(text), mlt.getFieldNames()[0]);
            }
        }
        return moreLikeThisQuery;
    } catch (Exception e) {
        // fixed: the original threw without the cause, hiding the real error
        throw new RuntimeException("could not handle MLT query " + mltQueryString, e);
    }
}
Aggregations