use of org.apache.lucene.queryParser.QueryParser in project bigbluebutton by bigbluebutton.
the class Search method searchByScore.
public TopDocCollectorSearchResult searchByScore(String queryStr, int startFrom, String operator) {
    try {
        queryStr = queryStr.trim();
        QueryParser parser = new QueryParser("contents", analyzer);
        if (QueryParser.AND_OPERATOR.toString().equalsIgnoreCase(operator)) {
            parser.setDefaultOperator(QueryParser.AND_OPERATOR);
        } else {
            parser.setDefaultOperator(QueryParser.OR_OPERATOR);
        }
        Query query = parser.parse(queryStr);
        TopDocCollector collector = doPagingSearch(query, startFrom);
        return new TopDocCollectorSearchResult(collector, searcher);
    } catch (ParseException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    } catch (IOException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
    return null;
    // System.out.println("Searching for: " + query.toString("contents"));
    // doPagingSearch(in, searcher, query, hitsPerPage, raw, queries == null);
    // }
    // reader.close();
}
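The only difference between the two branches above is the parser's default operator, which decides how bare whitespace between query terms is interpreted. A minimal standalone sketch of that effect, assuming a StandardAnalyzer and the same Lucene 2.x-era org.apache.lucene.queryParser API (the class name and query text here are illustrative only):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;

public class DefaultOperatorDemo {
    public static void main(String[] args) throws ParseException {
        QueryParser parser = new QueryParser("contents", new StandardAnalyzer());
        // With AND as the default operator, every term is required:
        // should print something like "+contents:red +contents:apple".
        parser.setDefaultOperator(QueryParser.AND_OPERATOR);
        System.out.println(parser.parse("red apple"));
        // With OR (the parser's default), any term may match:
        // should print something like "contents:red contents:apple".
        parser.setDefaultOperator(QueryParser.OR_OPERATOR);
        System.out.println(parser.parse("red apple"));
    }
}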
use of org.apache.lucene.queryParser.QueryParser in project bigbluebutton by bigbluebutton.
the class Search method searchBySession.
public TopFieldDocsSearchResult searchBySession(String queryStr, int startFrom, String operator) {
    try {
        queryStr = queryStr.trim();
        QueryParser parser = new QueryParser("contents", analyzer);
        if (QueryParser.AND_OPERATOR.toString().equalsIgnoreCase(operator)) {
            parser.setDefaultOperator(QueryParser.AND_OPERATOR);
        } else {
            parser.setDefaultOperator(QueryParser.OR_OPERATOR);
        }
        Query query = parser.parse(queryStr);
        Sort sort = new Sort("summary", true);
        TopFieldDocs tfd = searcher.search(query, null, startFrom + 10, sort);
        return new TopFieldDocsSearchResult(tfd, searcher);
    } catch (ParseException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    } catch (IOException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
    return null;
}
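searchBySession() asks Lucene for startFrom + 10 sorted hits, so a caller is expected to skip the first startFrom entries and read one ten-hit page. A hedged sketch of that consumption step (a hypothetical helper, not part of the project; it reuses the "summary" sort field and page size from the snippet above):

import java.io.IOException;
import org.apache.lucene.document.Document;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.TopFieldDocs;

// Hypothetical helper: prints one ten-hit page of results sorted (descending) by
// "summary", mirroring the startFrom + 10 window requested by searchBySession().
static void printPage(IndexSearcher searcher, Query query, int startFrom) throws IOException {
    TopFieldDocs tfd = searcher.search(query, null, startFrom + 10, new Sort("summary", true));
    for (int i = startFrom; i < tfd.scoreDocs.length; i++) {
        Document doc = searcher.doc(tfd.scoreDocs[i].doc);
        System.out.println(doc.get("summary"));
    }
}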
use of org.apache.lucene.queryParser.QueryParser in project jforum2 by rafaelsteil.
the class LuceneSearch method performSearch.
private SearchResult performSearch(SearchArgs args, LuceneResultCollector resultCollector, Filter filter) {
    SearchResult result;
    try {
        StringBuffer criteria = new StringBuffer(256);
        this.filterByForum(args, criteria);
        this.filterByKeywords(args, criteria);
        this.filterByDateRange(args, criteria);
        Query query = new QueryParser("", new StandardAnalyzer()).parse(criteria.toString());
        if (logger.isDebugEnabled()) {
            logger.debug("Generated query: " + query);
        }
        Hits hits = filter == null
            ? this.search.search(query, this.getSorter(args))
            : this.search.search(query, filter, this.getSorter(args));
        if (hits != null && hits.length() > 0) {
            result = new SearchResult(resultCollector.collect(args, hits, query), hits.length());
        } else {
            result = new SearchResult(new ArrayList(), 0);
        }
    } catch (Exception e) {
        throw new SearchException(e);
    }
    return result;
}
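The criteria buffer is plain Lucene query syntax assembled from user input, so whatever the user typed eventually reaches parse(). A hedged sketch of the escaping such a keyword filter needs (hypothetical code, not jforum2's actual filterByKeywords implementation):

import org.apache.lucene.queryParser.QueryParser;

// Hypothetical helper: escape reserved characters such as ':', '(' or '*'
// before appending raw keywords, otherwise parse() can throw ParseException.
static void appendKeywords(StringBuffer criteria, String[] keywords) {
    for (int i = 0; i < keywords.length; i++) {
        criteria.append(QueryParser.escape(keywords[i])).append(' ');
    }
}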
use of org.apache.lucene.queryParser.QueryParser in project h2database by h2database.
the class FullTextLucene method search.
/**
 * Do the search.
 *
 * @param conn the database connection
 * @param text the query
 * @param limit the limit
 * @param offset the offset
 * @param data whether the raw data should be returned
 * @return the result set
 */
protected static ResultSet search(Connection conn, String text, int limit, int offset, boolean data) throws SQLException {
    SimpleResultSet result = createResultSet(data);
    if (conn.getMetaData().getURL().startsWith("jdbc:columnlist:")) {
        // this is just to query the result set columns
        return result;
    }
    if (text == null || text.trim().length() == 0) {
        return result;
    }
    try {
        IndexAccess access = getIndexAccess(conn);
        // take a reference as the searcher may change
        IndexSearcher searcher = access.getSearcher();
        try {
            // reuse the same analyzer; it's thread-safe;
            // also allows subclasses to control the analyzer used.
            Analyzer analyzer = access.writer.getAnalyzer();
            QueryParser parser = new QueryParser(Version.LUCENE_30, LUCENE_FIELD_DATA, analyzer);
            Query query = parser.parse(text);
            // Lucene 3 insists on a hard limit and will not provide
            // a total hits value. Take at least 100 which is
            // an optimal limit for Lucene as any more
            // will trigger writing results to disk.
            int maxResults = (limit == 0 ? 100 : limit) + offset;
            TopDocs docs = searcher.search(query, maxResults);
            if (limit == 0) {
                limit = docs.totalHits;
            }
            for (int i = 0, len = docs.scoreDocs.length; i < limit && i + offset < docs.totalHits && i + offset < len; i++) {
                ScoreDoc sd = docs.scoreDocs[i + offset];
                Document doc = searcher.doc(sd.doc);
                float score = sd.score;
                String q = doc.get(LUCENE_FIELD_QUERY);
                if (data) {
                    int idx = q.indexOf(" WHERE ");
                    JdbcConnection c = (JdbcConnection) conn;
                    Session session = (Session) c.getSession();
                    Parser p = new Parser(session);
                    String tab = q.substring(0, idx);
                    ExpressionColumn expr = (ExpressionColumn) p.parseExpression(tab);
                    String schemaName = expr.getOriginalTableAliasName();
                    String tableName = expr.getColumnName();
                    q = q.substring(idx + " WHERE ".length());
                    Object[][] columnData = parseKey(conn, q);
                    result.addRow(schemaName, tableName, columnData[0], columnData[1], score);
                } else {
                    result.addRow(q, score);
                }
            }
        } finally {
            access.returnSearcher(searcher);
        }
    } catch (Exception e) {
        throw convertException(e);
    }
    return result;
}
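In H2 this method backs the FTL_SEARCH SQL function (and FTL_SEARCH_DATA for the data variant), so it is normally reached through JDBC rather than called directly. A usage sketch, assuming the full-text index has already been set up with FullTextLucene.init(conn) and FTL_CREATE_INDEX; the query text is illustrative:

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

static void searchExample(Connection conn) throws SQLException {
    // FTL_SEARCH(text, limit, offset); a limit of 0 means "no explicit limit",
    // which the method above maps to docs.totalHits.
    try (Statement stat = conn.createStatement();
            ResultSet rs = stat.executeQuery("SELECT QUERY, SCORE FROM FTL_SEARCH('lucene', 10, 0)")) {
        while (rs.next()) {
            System.out.println(rs.getString("QUERY") + " score=" + rs.getDouble("SCORE"));
        }
    }
}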
use of org.apache.lucene.queryParser.QueryParser in project Gemma by PavlidisLab.
the class LuceneTest method luceneRamIndexTest.
/**
 * Searching uses a RAM index to deal with queries that use logical operators, though it can often be finicky.
 */
@Test
public void luceneRamIndexTest() throws Exception {
    try (RAMDirectory idx = new RAMDirectory();
            Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_36)) {
        IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_36, analyzer);
        try (IndexWriter writer = new IndexWriter(idx, iwc)) {
            Document doc = new Document();
            Field f = new Field("content", "I have a small braintest", Field.Store.YES, Field.Index.ANALYZED);
            doc.add(f);
            writer.addDocument(doc);
            doc = new Document();
            f = new Field("content", "I have a small braddintest", Field.Store.YES, Field.Index.ANALYZED);
            doc.add(f);
            writer.addDocument(doc);
            doc = new Document();
            f = new Field("content", "I have a small brasaaafintest", Field.Store.YES, Field.Index.ANALYZED);
            doc.add(f);
            writer.addDocument(doc);
            doc = new Document();
            f = new Field("content", "I have a small braidagagntest", Field.Store.YES, Field.Index.ANALYZED);
            doc.add(f);
            writer.addDocument(doc);
        }
        try (IndexReader ir = IndexReader.open(idx);
                IndexSearcher searcher = new IndexSearcher(ir)) {
            TopDocsCollector<ScoreDoc> hc = TopScoreDocCollector.create(1, true);
            QueryParser parser = new QueryParser(Version.LUCENE_36, "content", analyzer);
            Query parsedQuery = parser.parse("braintest");
            searcher.search(parsedQuery, hc);
            TopDocs topDocs = hc.topDocs();
            int hitcount = topDocs.totalHits;
            assertTrue(hitcount > 0);
        }
    }
}
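Since the test's comment is about logical operators, a small follow-on sketch placed inside the same searcher block (same Lucene 3.6 API; assumes JUnit's assertEquals is available) would exercise an explicit OR:

// "braintest OR braddintest" should match exactly two of the four documents indexed above.
TopDocsCollector<ScoreDoc> orCollector = TopScoreDocCollector.create(10, true);
Query orQuery = new QueryParser(Version.LUCENE_36, "content", analyzer).parse("braintest OR braddintest");
searcher.search(orQuery, orCollector);
assertEquals(2, orCollector.topDocs().totalHits);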