Use of org.apache.lucene.analysis.standard.StandardAnalyzer in project lucene-skos by behas.
The class SKOSAnalyzer, method createComponents:
@Override
protected TokenStreamComponents createComponents(String fieldName) {
  if (expansionType.equals(ExpansionType.URI)) {
    // URI expansion: KeywordTokenizer emits the whole field value as a single token.
    final KeywordTokenizer src = new KeywordTokenizer();
    TokenStream tok = new SKOSURIFilter(src, skosEngine, new StandardAnalyzer(), types);
    tok = new LowerCaseFilter(tok);
    return new TokenStreamComponents(src, tok);
  } else {
    // Label expansion: standard tokenization, then SKOS label expansion.
    final StandardTokenizer src = new StandardTokenizer();
    src.setMaxTokenLength(maxTokenLength);
    TokenStream tok = new StandardFilter(src);
    // prior to this we get the classic behavior, StandardFilter does it for us
    tok = new SKOSLabelFilter(tok, skosEngine, new StandardAnalyzer(), bufferSize, types);
    tok = new LowerCaseFilter(tok);
    tok = new StopFilter(tok, stopwords);
    tok = new RemoveDuplicatesTokenFilter(tok);
    return new TokenStreamComponents(src, tok) {
      @Override
      protected void setReader(final Reader reader) throws IOException {
        // Re-apply the token length limit each time the tokenizer is reused.
        src.setMaxTokenLength(maxTokenLength);
        super.setReader(reader);
      }
    };
  }
}
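For reference, a minimal sketch of how an analyzer assembled this way could be inspected, assuming a configured SKOSAnalyzer (or any Analyzer) instance is available; the helper name, field name, and sample text are placeholders, not part of the project:

import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

// Prints every term the analyzer produces for the given text,
// including any expansion terms emitted by the filters above.
static void printTerms(Analyzer analyzer, String field, String text) throws IOException {
  try (TokenStream ts = analyzer.tokenStream(field, text)) {
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
      System.out.println(term.toString());
    }
    ts.end();
  }
}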
Use of org.apache.lucene.analysis.standard.StandardAnalyzer in project lucene-skos by behas.
The class SKOSLabelFilterTest, method queryParserSearch:
@Test
public void queryParserSearch() throws IOException, QueryNodeException {
  Document doc = new Document();
  doc.add(new Field("content", "The quick brown fox jumps over the lazy dog", TextField.TYPE_STORED));
  writer.addDocument(doc);
  searcher = new IndexSearcher(DirectoryReader.open(writer, false));
  // With the SKOS-aware analyzer, the phrase becomes a MultiPhraseQuery that also matches "hops" and "leaps".
  Query query = new StandardQueryParser(skosAnalyzer).parse("\"fox jumps\"", "content");
  assertEquals(1, searcher.search(query, 1).totalHits);
  assertEquals("content:\"fox (jumps hops leaps)\"", query.toString());
  assertEquals("org.apache.lucene.search.MultiPhraseQuery", query.getClass().getName());
  // With a plain StandardAnalyzer, no expansion takes place and a simple PhraseQuery is produced.
  query = new StandardQueryParser(new StandardAnalyzer()).parse("\"fox jumps\"", "content");
  assertEquals(1, searcher.search(query, 1).totalHits);
  assertEquals("content:\"fox jumps\"", query.toString());
  assertEquals("org.apache.lucene.search.PhraseQuery", query.getClass().getName());
}
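The test relies on a writer and skosAnalyzer prepared by the test fixture, which is not shown here. A minimal sketch of such a setup over an in-memory directory follows; which analyzer the real fixture hands to the writer is an assumption:

import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

// Sketch of a fixture: an in-memory index whose writer uses the SKOS analyzer.
// Passing skosAnalyzer here is an assumption; the real fixture may index with a different analyzer.
Directory directory = new RAMDirectory();
IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(skosAnalyzer));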
Use of org.apache.lucene.analysis.standard.StandardAnalyzer in project lucene-skos by behas.
The class SKOSStandardQueryParserTest, method queryParserSearch:
@Test
public void queryParserSearch() throws IOException, QueryNodeException {
  Document doc = new Document();
  doc.add(new Field("content", "The quick brown fox jumps over the lazy dog", TextField.TYPE_STORED));
  writer.addDocument(doc);
  searcher = new IndexSearcher(DirectoryReader.open(writer, false));
  Query query = new SKOSStandardQueryParser(skosAnalyzer).parse("\"fox jumps\"", "content");
  assertEquals(1, searcher.search(query, 1).totalHits);
  assertEquals("content:\"fox (jumps hops leaps)\"", query.toString());
  assertEquals("org.apache.lucene.search.MultiPhraseQuery", query.getClass().getName());
  query = new StandardQueryParser(new StandardAnalyzer()).parse("\"fox jumps\"", "content");
  assertEquals(1, searcher.search(query, 1).totalHits);
  assertEquals("content:\"fox jumps\"", query.toString());
  assertEquals("org.apache.lucene.search.PhraseQuery", query.getClass().getName());
}
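Both tests only assert the hit count and the query's string form. To look at the matching document itself, the TopDocs returned by the searcher could be unpacked along these lines; a sketch reusing the searcher and query from the snippet above:

import org.apache.lucene.document.Document;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;

TopDocs top = searcher.search(query, 1);
for (ScoreDoc sd : top.scoreDocs) {
  Document hit = searcher.doc(sd.doc);    // load the stored fields of the hit
  System.out.println(hit.get("content")); // "The quick brown fox jumps over the lazy dog"
}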
Use of org.apache.lucene.analysis.standard.StandardAnalyzer in project bigbluebutton by bigbluebutton.
The class Search, method startSearch:
public void startSearch() {
  try {
    // Release a stale write lock (if any) before opening the index for reading.
    IndexReader.unlock(FSDirectory.getDirectory(ConfigHandler.indexPath));
    reader = IndexReader.open(ConfigHandler.indexPath);
    searcher = new IndexSearcher(reader);
    analyzer = new StandardAnalyzer();
  } catch (IOException e) {
    // TODO Auto-generated catch block
    e.printStackTrace();
  }
}
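This snippet targets a pre-3.0 Lucene API; IndexReader.open(String) and FSDirectory.getDirectory(String) were later removed. For comparison, a rough sketch of the same setup against a recent Lucene release (error handling omitted; ConfigHandler.indexPath is taken from the snippet above):

import java.nio.file.Paths;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.FSDirectory;

// Modern equivalent: open a directory reader over the index path and build a searcher.
// There is no IndexReader.unlock in current Lucene; write locks are managed by IndexWriter.
FSDirectory dir = FSDirectory.open(Paths.get(ConfigHandler.indexPath));
DirectoryReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
StandardAnalyzer analyzer = new StandardAnalyzer();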
Use of org.apache.lucene.analysis.standard.StandardAnalyzer in project gitblit by gitblit.
The class TicketIndexer, method delete:
/**
 * Delete a ticket from the Lucene index.
 *
 * @param repository
 * @param ticketId
 * @param writer
 * @return true if the ticket was deleted, false if no record was deleted
 * @throws Exception
 */
private boolean delete(String repository, long ticketId, IndexWriter writer) throws Exception {
  StandardAnalyzer analyzer = new StandardAnalyzer();
  QueryParser qp = new QueryParser(Lucene.did.name(), analyzer);
  BooleanQuery query = new BooleanQuery.Builder()
      .add(qp.parse(StringUtils.getSHA1(repository + ticketId)), Occur.MUST)
      .build();
  int numDocsBefore = writer.numDocs();
  writer.deleteDocuments(query);
  writer.commit();
  closeSearcher();
  int numDocsAfter = writer.numDocs();
  if (numDocsBefore == numDocsAfter) {
    log.debug(MessageFormat.format("no records found to delete in {0}", repository));
    return false;
  } else {
    log.debug(MessageFormat.format("deleted {0} records in {1}", numDocsBefore - numDocsAfter, repository));
    return true;
  }
}
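Because the did field holds an exact SHA-1 key, the parse step could in principle be avoided with a term-level delete. A sketch, under the assumption that did is indexed as an untokenized exact value (e.g. a StringField); this is a hypothetical variant, not the project's code:

import org.apache.lucene.index.Term;

// Hypothetical variant: delete by exact term on the document-id field.
// Only valid if the 'did' field is indexed verbatim rather than analyzed.
writer.deleteDocuments(new Term(Lucene.did.name(), StringUtils.getSHA1(repository + ticketId)));
writer.commit();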