Use of org.apache.lucene.index.IndexReader in the elastic/elasticsearch project:
class ParentFieldSubFetchPhaseTests, method testGetParentId.
/**
 * Indexes a single document whose parent-field doc value is "1", then verifies
 * that {@code ParentFieldSubFetchPhase.getParentId} reads that value back from
 * the first leaf reader.
 *
 * @throws Exception on any index I/O failure
 */
public void testGetParentId() throws Exception {
    ParentFieldMapper fieldMapper = createParentFieldMapper();
    // try-with-resources: the original closed these manually, leaking them if
    // addDocument, open, or the assertion threw.
    try (Directory directory = newDirectory()) {
        try (IndexWriter indexWriter = new IndexWriter(directory, newIndexWriterConfig())) {
            Document document = new Document();
            document.add(new SortedDocValuesField(fieldMapper.fieldType().name(), new BytesRef("1")));
            indexWriter.addDocument(document);
        } // writer must be closed before DirectoryReader.open sees the commit
        try (IndexReader indexReader = DirectoryReader.open(directory)) {
            String id = ParentFieldSubFetchPhase.getParentId(fieldMapper, indexReader.leaves().get(0).reader(), 0);
            assertEquals("1", id);
        }
    }
}
Use of org.apache.lucene.index.IndexReader in the elastic/elasticsearch project:
class QueryPhaseTests, method countTestCase.
/**
 * Builds a small randomized index (optionally with deleted documents) and runs
 * the query-level {@code countTestCase(Query, IndexReader, boolean)} overload
 * against several query shapes to verify count-optimization behavior.
 *
 * @param withDeletions whether to tag some documents and delete them, so the
 *                      reader contains deletions
 * @throws Exception on any index I/O failure
 */
private void countTestCase(boolean withDeletions) throws Exception {
    // try-with-resources: the original closed reader/w/dir manually and leaked
    // them whenever an inner countTestCase assertion failed. Nesting preserves
    // the original close order (reader, then writer, then directory).
    try (Directory dir = newDirectory();
         RandomIndexWriter w = new RandomIndexWriter(random(), dir,
             newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE))) {
        final int numDocs = scaledRandomIntBetween(100, 200);
        for (int i = 0; i < numDocs; ++i) {
            Document doc = new Document();
            if (randomBoolean()) {
                doc.add(new StringField("foo", "bar", Store.NO));
            }
            if (randomBoolean()) {
                doc.add(new StringField("foo", "baz", Store.NO));
            }
            // "i == 0" guarantees at least one deletable doc exists.
            if (withDeletions && (rarely() || i == 0)) {
                doc.add(new StringField("delete", "yes", Store.NO));
            }
            w.addDocument(doc);
        }
        if (withDeletions) {
            w.deleteDocuments(new Term("delete", "yes"));
        }
        try (IndexReader reader = w.getReader()) {
            Query matchAll = new MatchAllDocsQuery();
            Query matchAllCsq = new ConstantScoreQuery(matchAll);
            Query tq = new TermQuery(new Term("foo", "bar"));
            Query tCsq = new ConstantScoreQuery(tq);
            BooleanQuery bq = new BooleanQuery.Builder()
                .add(matchAll, Occur.SHOULD)
                .add(tq, Occur.MUST)
                .build();
            // Third argument: whether an exact count shortcut is NOT expected.
            countTestCase(matchAll, reader, false);
            countTestCase(matchAllCsq, reader, false);
            countTestCase(tq, reader, withDeletions);
            countTestCase(tCsq, reader, withDeletions);
            countTestCase(bq, reader, true);
        }
    }
}
Use of org.apache.lucene.index.IndexReader in the neo4j/neo4j project:
class FullTxData, method searcher.
/**
 * Returns an {@link IndexSearcher} over this transaction's index data,
 * refreshing it only when the data has been modified AND the caller allows a
 * refresh. The cached searcher is reused otherwise.
 *
 * NOTE(review): this method mutates shared fields (reader, searcher, modified)
 * and appears to assume external synchronization by the caller — confirm
 * against FullTxData's locking before changing statement order.
 */
private IndexSearcher searcher(boolean allowRefreshSearcher) {
    // Fast path: reuse the cached searcher if nothing changed, or if the
    // caller explicitly forbids refreshing even though data was modified.
    if (this.searcher != null && (!modified || !allowRefreshSearcher)) {
        return this.searcher;
    }
    try {
        // First call opens a reader from the writer (near-real-time view);
        // later calls reopen only if the index actually changed.
        IndexReader newReader = this.reader == null ? DirectoryReader.open(this.writer) : DirectoryReader.openIfChanged((DirectoryReader) this.reader);
        // openIfChanged returns null when nothing changed — keep the old searcher.
        if (newReader == null) {
            return this.searcher;
        }
        // Swap in the new reader/searcher, closing the old ones first.
        LuceneUtil.close(reader);
        this.reader = newReader;
        LuceneUtil.close(searcher);
        searcher = new IndexSearcher(reader);
    } catch (IOException e) {
        throw new RuntimeException(e);
    } finally {
        // Clear the dirty flag only when a refresh was permitted; otherwise a
        // later refreshing call must still see the pending modification.
        if (allowRefreshSearcher) {
            this.modified = false;
        }
    }
    return this.searcher;
}
Use of org.apache.lucene.index.IndexReader in the TextDB/textdb project:
class WordCountIndexSource, method computeWordCount.
/**
 * Scans the table's Lucene term vectors for the configured attribute,
 * accumulates per-term total frequencies, and initializes
 * {@code sortedWordCountMap}/{@code wordCountIterator} sorted by descending count.
 *
 * @throws TextDBException wrapping any underlying IOException
 */
private void computeWordCount() throws TextDBException {
    try {
        HashMap<String, Integer> wordCountMap = new HashMap<>();
        DataReader dataReader = RelationManager.getRelationManager().getTableDataReader(predicate.getTableName(), new MatchAllDocsQuery());
        dataReader.open();
        IndexReader luceneIndexReader = dataReader.getLuceneIndexReader();
        // NOTE(review): iterating doc ids [0, numDocs()) assumes no deletions;
        // with deletions, maxDoc() plus a liveDocs check would be required —
        // confirm whether this table can contain deleted docs.
        for (int i = 0; i < luceneIndexReader.numDocs(); i++) {
            Terms termVector = luceneIndexReader.getTermVector(i, predicate.getAttribute());
            // getTermVector returns null when the doc has no term vector for
            // this field; the original dereferenced it unconditionally (NPE).
            if (termVector == null) {
                continue;
            }
            TermsEnum termsEnum = termVector.iterator();
            while (termsEnum.next() != null) {
                // merge() replaces the original double-lookup get()/put() idiom.
                wordCountMap.merge(termsEnum.term().utf8ToString(), (int) termsEnum.totalTermFreq(), Integer::sum);
            }
        }
        luceneIndexReader.close();
        dataReader.close();
        // Sort entries by descending frequency.
        sortedWordCountMap = wordCountMap.entrySet().stream()
            .sorted((e1, e2) -> e2.getValue().compareTo(e1.getValue()))
            .collect(Collectors.toList());
        wordCountIterator = sortedWordCountMap.iterator();
    } catch (IOException e) {
        throw new DataFlowException(e);
    }
}
Use of org.apache.lucene.index.IndexReader in the JetBrains/intellij-community project:
class Maven2ServerIndexerImpl, method processArtifacts.
/**
 * Streams all live artifact coordinates from the given index to the processor
 * in chunks of up to 10000, skipping deleted documents and entries whose
 * coordinate string is missing or malformed.
 *
 * @param indexId   identifier of the Maven index to scan
 * @param processor callback receiving batches of MavenId results
 * @throws MavenServerIndexerException wrapping any underlying failure
 */
public void processArtifacts(int indexId, MavenServerIndicesProcessor processor) throws MavenServerIndexerException {
    try {
        final int CHUNK_SIZE = 10000;
        IndexReader r = getIndex(indexId).getIndexReader();
        int total = r.numDocs();
        List<MavenId> result = new ArrayList<MavenId>(Math.min(CHUNK_SIZE, total));
        for (int i = 0; i < total; i++) {
            if (r.isDeleted(i))
                continue;
            Document doc = r.document(i);
            String uinfo = doc.get(SEARCH_TERM_COORDINATES);
            if (uinfo == null)
                continue;
            // Coordinate format: groupId|artifactId|version|...
            List<String> parts = StringUtil.split(uinfo, "|");
            // Guard: a malformed uinfo with fewer than 3 parts used to throw
            // IndexOutOfBoundsException, aborting the whole scan via the broad
            // catch below. Skip such entries instead.
            if (parts.size() < 3)
                continue;
            String groupId = parts.get(0);
            String artifactId = parts.get(1);
            String version = parts.get(2);
            if (groupId == null || artifactId == null || version == null)
                continue;
            result.add(new MavenId(groupId, artifactId, version));
            // Flush a full chunk to the processor and start a new batch.
            if (result.size() == CHUNK_SIZE) {
                processor.processArtifacts(result);
                result.clear();
            }
        }
        // Flush the final partial chunk, if any.
        if (!result.isEmpty()) {
            processor.processArtifacts(result);
        }
    } catch (Exception e) {
        throw new MavenServerIndexerException(wrapException(e));
    }
}
Aggregations