Example usage of org.apache.lucene.analysis.standard.StandardAnalyzer in the gitblit project:
class TicketIndexer, method searchFor.
/**
 * Search for tickets matching the query. The returned tickets are
 * shadows of the real ticket, but suitable for a results list.
 *
 * @param repository
 *            optional filter; when non-null, only results from this
 *            repository are returned
 * @param text
 *            the query text; an empty query yields an empty list
 * @param page
 *            1-based page number
 * @param pageSize
 *            maximum number of results per page
 * @return search results
 */
public List<QueryResult> searchFor(RepositoryModel repository, String text, int page, int pageSize) {
	if (StringUtils.isEmpty(text)) {
		return Collections.emptyList();
	}
	Set<QueryResult> results = new LinkedHashSet<QueryResult>();
	// StandardAnalyzer implements Closeable; try-with-resources releases it
	// even when parsing or searching throws (the original leaked it)
	try (StandardAnalyzer analyzer = new StandardAnalyzer()) {
		// search the title, description and content with identical parser settings
		BooleanQuery.Builder bldr = new BooleanQuery.Builder();
		for (Lucene field : new Lucene[] { Lucene.title, Lucene.body, Lucene.content }) {
			QueryParser qp = new QueryParser(field.name(), analyzer);
			qp.setAllowLeadingWildcard(true);
			bldr.add(qp.parse(text), Occur.SHOULD);
		}
		IndexSearcher searcher = getSearcher();
		Query rewrittenQuery = searcher.rewrite(bldr.build());
		log.debug(rewrittenQuery.toString());
		TopScoreDocCollector collector = TopScoreDocCollector.create(5000);
		searcher.search(rewrittenQuery, collector);
		int offset = Math.max(0, (page - 1) * pageSize);
		ScoreDoc[] hits = collector.topDocs(offset, pageSize).scoreDocs;
		for (ScoreDoc hit : hits) {
			Document doc = searcher.doc(hit.doc);
			QueryResult result = docToQueryResult(doc);
			// NOTE(review): the repository filter is applied AFTER pagination,
			// so a filtered page may hold fewer than pageSize results — confirm
			// this is intended before changing it
			if (repository != null && !result.repository.equalsIgnoreCase(repository.name)) {
				continue;
			}
			results.add(result);
		}
	} catch (Exception e) {
		// best-effort search: log and return whatever was collected
		log.error(MessageFormat.format("Exception while searching for {0}", text), e);
	}
	return new ArrayList<QueryResult>(results);
}
Example usage of org.apache.lucene.analysis.standard.StandardAnalyzer in the gitblit project:
class TicketIndexer, method getWriter.
/**
 * Lazily opens (and caches) the index writer for the ticket index.
 * The writer is created with the CREATE_OR_APPEND open mode so an
 * existing index is reused.
 *
 * @return the shared index writer
 * @throws IOException if the index store or writer cannot be opened
 */
private IndexWriter getWriter() throws IOException {
	if (writer == null) {
		indexStore.create();
		Directory directory = FSDirectory.open(indexStore.getPath());
		StandardAnalyzer analyzer = new StandardAnalyzer();
		IndexWriterConfig config = new IndexWriterConfig(analyzer);
		config.setOpenMode(OpenMode.CREATE_OR_APPEND);
		try {
			writer = new IndexWriter(directory, config);
		} catch (IOException e) {
			// don't leak the directory handle when the writer fails to open
			// (the original left it open on this failure path)
			directory.close();
			throw e;
		}
	}
	return writer;
}
Example usage of org.apache.lucene.analysis.standard.StandardAnalyzer in the TextDB project:
class RelationManagerTest, method test1.
/*
 * Test that the information about the "table catalog" itself is stored properly:
 * its directory, its analyzer type, and its schema.
 */
@Test
public void test1() throws Exception {
	String tableCatalogDirectory = relationManager.getTableDirectory(CatalogConstants.TABLE_CATALOG);
	Analyzer tableCatalogLuceneAnalyzer = relationManager.getTableAnalyzer(CatalogConstants.TABLE_CATALOG);
	Schema tableCatalogSchema = relationManager.getTableSchema(CatalogConstants.TABLE_CATALOG);
	// JUnit's Assert.assertEquals takes (expected, actual) — the original had
	// the arguments swapped, which produces misleading failure messages
	Assert.assertEquals(new File(CatalogConstants.TABLE_CATALOG_DIRECTORY).getCanonicalPath(), tableCatalogDirectory);
	Assert.assertTrue(tableCatalogLuceneAnalyzer instanceof StandardAnalyzer);
	Assert.assertEquals(Utils.getSchemaWithID(CatalogConstants.TABLE_CATALOG_SCHEMA), tableCatalogSchema);
}
Example usage of org.apache.lucene.analysis.standard.StandardAnalyzer in the Apache Geode project:
class LuceneServiceImpl, method createIndex.
/**
 * Creates a Lucene index on the given region using per-field analyzers.
 * Every key of {@code fieldAnalyzers} becomes an indexed field; fields not
 * covered by the map fall back to a StandardAnalyzer via the wrapper.
 *
 * @param indexName the name of the index to create
 * @param regionPath the region to index
 * @param fieldAnalyzers map of field name to analyzer; must be non-null and non-empty
 * @throws IllegalArgumentException if no fields are given
 */
public void createIndex(String indexName, String regionPath, Map<String, Analyzer> fieldAnalyzers) {
	if (fieldAnalyzers == null || fieldAnalyzers.isEmpty()) {
		throw new IllegalArgumentException("At least one field must be indexed");
	}
	Analyzer analyzer = new PerFieldAnalyzerWrapper(new StandardAnalyzer(), fieldAnalyzers);
	// toArray(String[]) already returns String[]; the original's explicit
	// (String[]) cast was redundant
	String[] fields = fieldAnalyzers.keySet().toArray(new String[0]);
	createIndex(indexName, regionPath, analyzer, fieldAnalyzers, fields);
}
Example usage of org.apache.lucene.analysis.standard.StandardAnalyzer in the Apache Geode project:
class LuceneIndexCreation, method beforeCreate.
/**
 * Creates the configured Lucene index when the owning region is created.
 * Duplicate index creation is tolerated: an existing identical index is
 * logged and ignored rather than treated as an error.
 */
@Override
public void beforeCreate(Extensible<Region<?, ?>> source, Cache cache) {
	LuceneServiceImpl luceneService = (LuceneServiceImpl) LuceneServiceProvider.get(cache);
	// choose a plain StandardAnalyzer unless per-field analyzers were configured
	Analyzer indexAnalyzer;
	if (this.fieldAnalyzers == null) {
		indexAnalyzer = new StandardAnalyzer();
	} else {
		indexAnalyzer = new PerFieldAnalyzerWrapper(new StandardAnalyzer(), this.fieldAnalyzers);
	}
	try {
		luceneService.createIndex(getName(), getRegionPath(), indexAnalyzer, this.fieldAnalyzers, getFieldNames());
	} catch (LuceneIndexExistsException e) {
		logger.info(LocalizedStrings.LuceneIndexCreation_IGNORING_DUPLICATE_INDEX_CREATION_0_ON_REGION_1.toLocalizedString(e.getIndexName(), e.getRegionPath()));
	}
}
Aggregations