Use of org.apache.lucene.analysis.standard.StandardAnalyzer in the elasticsearch project (by elastic):
class AbstractFieldDataTestCase, method setup.
@Before
public void setup() throws Exception {
    // Field data on string fields is only allowed on 2.x-created indices,
    // so pick a random version from that range.
    Version indexVersion = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_0);
    Settings indexSettings = Settings.builder()
            .put(IndexMetaData.SETTING_VERSION_CREATED, indexVersion)
            .build();
    indexService = createIndex("test", indexSettings);
    mapperService = indexService.mapperService();
    ifdService = indexService.fieldData();
    indicesFieldDataCache = getInstanceFromNode(IndicesService.class).getIndicesFieldDataCache();
    // LogByteSizeMergePolicy keeps segments in insertion order, preserving doc IDs.
    IndexWriterConfig writerConfig = new IndexWriterConfig(new StandardAnalyzer())
            .setMergePolicy(new LogByteSizeMergePolicy());
    writer = new IndexWriter(new RAMDirectory(), writerConfig);
}
Use of org.apache.lucene.analysis.standard.StandardAnalyzer in the gitblit project (by gitblit):
class TicketIndexer, method queryFor.
/**
 * Searches the ticket index for tickets matching the query. The returned
 * tickets are shadows of the real tickets, but suitable for a results list.
 *
 * @param queryText the Lucene query to execute
 * @param page one-based page number of results
 * @param pageSize number of results per page; a non-positive value returns up to 5000 hits
 * @param sortBy name of the field to sort on, or null to sort by creation date
 * @param desc true to sort in descending order
 * @return the matching tickets, or an empty list if the query is empty or fails
 */
public List<QueryResult> queryFor(String queryText, int page, int pageSize, String sortBy, boolean desc) {
    if (StringUtils.isEmpty(queryText)) {
        return Collections.emptyList();
    }
    Set<QueryResult> matches = new LinkedHashSet<QueryResult>();
    StandardAnalyzer analyzer = new StandardAnalyzer();
    try {
        Query parsed = new QueryParser(Lucene.content.name(), analyzer).parse(queryText);
        IndexSearcher searcher = getSearcher();
        Query rewritten = searcher.rewrite(parsed);
        log.debug(rewritten.toString());
        // Default to creation-date ordering when no explicit sort field is given.
        Sort sort = (sortBy == null)
                ? new Sort(Lucene.created.asSortField(desc))
                : new Sort(Lucene.fromString(sortBy).asSortField(desc));
        int maxSize = 5000;
        TopFieldDocs docs = searcher.search(rewritten, maxSize, sort, false, false);
        int size = (pageSize <= 0) ? maxSize : pageSize;
        int offset = Math.max(0, (page - 1) * size);
        for (ScoreDoc hit : subset(docs.scoreDocs, offset, size)) {
            Document doc = searcher.doc(hit.doc);
            QueryResult match = docToQueryResult(doc);
            match.docId = hit.doc;
            match.totalResults = docs.totalHits;
            matches.add(match);
        }
    } catch (Exception e) {
        log.error(MessageFormat.format("Exception while searching for {0}", queryText), e);
    }
    return new ArrayList<QueryResult>(matches);
}
Use of org.apache.lucene.analysis.standard.StandardAnalyzer in the gitblit project (by gitblit):
class TicketIndexer, method deleteAll.
/**
 * Deletes all tickets for the repository from the index.
 *
 * @param repository the repository whose tickets should be removed
 * @return true if any records were deleted
 */
public boolean deleteAll(RepositoryModel repository) {
    try {
        IndexWriter writer = getWriter();
        QueryParser parser = new QueryParser(Lucene.rid.name(), new StandardAnalyzer());
        BooleanQuery query = new BooleanQuery.Builder()
                .add(parser.parse(repository.getRID()), Occur.MUST)
                .build();
        // Compare the doc count before and after the delete to tell whether
        // anything actually matched.
        int countBefore = writer.numDocs();
        writer.deleteDocuments(query);
        writer.commit();
        closeSearcher();
        int countAfter = writer.numDocs();
        if (countBefore == countAfter) {
            log.debug(MessageFormat.format("no records found to delete in {0}", repository));
            return false;
        }
        log.debug(MessageFormat.format("deleted {0} records in {1}", countBefore - countAfter, repository));
        return true;
    } catch (Exception e) {
        log.error("error", e);
    }
    return false;
}
Use of org.apache.lucene.analysis.standard.StandardAnalyzer in the gitblit project (by gitblit):
class LuceneService, method getIndexWriter.
/**
 * Gets a cached index writer for the repository, creating the index and
 * opening a new writer on first use.
 *
 * @param repository the repository name
 * @return an IndexWriter for the repository's index
 * @throws IOException if the index store or writer can not be opened
 */
private IndexWriter getIndexWriter(String repository) throws IOException {
    IndexWriter cached = writers.get(repository);
    if (cached != null) {
        return cached;
    }
    File repositoryFolder = FileKey.resolve(new File(repositoriesFolder, repository), FS.DETECTED);
    LuceneRepoIndexStore indexStore = new LuceneRepoIndexStore(repositoryFolder, INDEX_VERSION);
    // Ensure the on-disk index structure exists before opening it.
    indexStore.create();
    Directory directory = FSDirectory.open(indexStore.getPath());
    IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
    config.setOpenMode(OpenMode.CREATE_OR_APPEND);
    IndexWriter indexWriter = new IndexWriter(directory, config);
    writers.put(repository, indexWriter);
    return indexWriter;
}
Use of org.apache.lucene.analysis.standard.StandardAnalyzer in the gitblit project (by gitblit):
class LuceneService, method deleteBlob.
/**
 * Deletes a blob from the specified branch of the repository index.
 *
 * @param repositoryName the repository whose index should be updated
 * @param branch the branch containing the blob
 * @param path the path of the blob within the branch
 * @return true if deleted, false if no record was deleted
 * @throws Exception if the query can not be parsed or the index updated
 */
public boolean deleteBlob(String repositoryName, String branch, String path) throws Exception {
    // NOTE(review): the first MessageFormat pass injects the field names and
    // uses '{'…'}' quoting to leave {0}/{1}/{2} placeholders for the second pass,
    // which fills in the object type, branch, and path.
    String pattern = MessageFormat.format("{0}:'{'0} AND {1}:\"'{'1'}'\" AND {2}:\"'{'2'}'\"", FIELD_OBJECT_TYPE, FIELD_BRANCH, FIELD_PATH);
    String q = MessageFormat.format(pattern, SearchObjectType.blob.name(), branch, path);
    QueryParser parser = new QueryParser(FIELD_SUMMARY, new StandardAnalyzer());
    BooleanQuery query = new BooleanQuery.Builder()
            .add(parser.parse(q), Occur.MUST)
            .build();
    IndexWriter writer = getIndexWriter(repositoryName);
    // Compare doc counts around the delete to report whether anything matched.
    int countBefore = writer.numDocs();
    writer.deleteDocuments(query);
    writer.commit();
    int countAfter = writer.numDocs();
    if (countBefore == countAfter) {
        logger.debug(MessageFormat.format("no records found to delete {0}", query.toString()));
        return false;
    }
    logger.debug(MessageFormat.format("deleted {0} records with {1}", countBefore - countAfter, query.toString()));
    return true;
}
Aggregations