Use of org.apache.lucene.index.LeafReaderContext in project elasticsearch by elastic.
From the class PercolateQuery, method createWeight.
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
    final Weight verifiedMatchesWeight = verifiedMatchesQuery.createWeight(searcher, false);
    final Weight candidateMatchesWeight = candidateMatchesQuery.createWeight(searcher, false);
    return new Weight(this) {

        @Override
        public void extractTerms(Set<Term> set) {
        }

        @Override
        public Explanation explain(LeafReaderContext leafReaderContext, int docId) throws IOException {
            Scorer scorer = scorer(leafReaderContext);
            if (scorer != null) {
                TwoPhaseIterator twoPhaseIterator = scorer.twoPhaseIterator();
                int result = twoPhaseIterator.approximation().advance(docId);
                if (result == docId) {
                    if (twoPhaseIterator.matches()) {
                        if (needsScores) {
                            CheckedFunction<Integer, Query, IOException> percolatorQueries = queryStore.getQueries(leafReaderContext);
                            Query query = percolatorQueries.apply(docId);
                            Explanation detail = percolatorIndexSearcher.explain(query, 0);
                            return Explanation.match(scorer.score(), "PercolateQuery", detail);
                        } else {
                            return Explanation.match(scorer.score(), "PercolateQuery");
                        }
                    }
                }
            }
            return Explanation.noMatch("PercolateQuery");
        }

        @Override
        public float getValueForNormalization() throws IOException {
            return candidateMatchesWeight.getValueForNormalization();
        }

        @Override
        public void normalize(float v, float v1) {
            candidateMatchesWeight.normalize(v, v1);
        }

        @Override
        public Scorer scorer(LeafReaderContext leafReaderContext) throws IOException {
            final Scorer approximation = candidateMatchesWeight.scorer(leafReaderContext);
            if (approximation == null) {
                return null;
            }
            final CheckedFunction<Integer, Query, IOException> queries = queryStore.getQueries(leafReaderContext);
            if (needsScores) {
                return new BaseScorer(this, approximation, queries, percolatorIndexSearcher) {

                    float score;

                    @Override
                    boolean matchDocId(int docId) throws IOException {
                        Query query = percolatorQueries.apply(docId);
                        if (query != null) {
                            TopDocs topDocs = percolatorIndexSearcher.search(query, 1);
                            if (topDocs.totalHits > 0) {
                                score = topDocs.scoreDocs[0].score;
                                return true;
                            } else {
                                return false;
                            }
                        } else {
                            return false;
                        }
                    }

                    @Override
                    public float score() throws IOException {
                        return score;
                    }
                };
            } else {
                Scorer verifiedDocsScorer = verifiedMatchesWeight.scorer(leafReaderContext);
                Bits verifiedDocsBits = Lucene.asSequentialAccessBits(leafReaderContext.reader().maxDoc(), verifiedDocsScorer);
                return new BaseScorer(this, approximation, queries, percolatorIndexSearcher) {

                    @Override
                    public float score() throws IOException {
                        return 0f;
                    }

                    @Override
                    boolean matchDocId(int docId) throws IOException {
                        // Skip the expensive MemoryIndex verification when the doc is already a verified match.
                        if (verifiedDocsBits.get(docId)) {
                            return true;
                        }
                        Query query = percolatorQueries.apply(docId);
                        return query != null && Lucene.exists(percolatorIndexSearcher, query);
                    }
                };
            }
        }
    };
}
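The Weight above builds two kinds of scorers over each LeafReaderContext: when scores are needed, every candidate document is re-run against its percolator query via percolatorIndexSearcher and the top hit's score is kept; otherwise verified matches are accepted outright and only the remaining candidates pay for the MemoryIndex check. The following is a minimal sketch of how a caller typically consumes such a two-phase scorer; the loop is standard Lucene usage and is not part of PercolateQuery, and "scorer" is assumed to be the Scorer produced above for one leaf.

// Hedged sketch: driving a two-phase scorer returned by Weight#scorer.
TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
DocIdSetIterator approximation = twoPhase.approximation();
for (int doc = approximation.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = approximation.nextDoc()) {
    if (twoPhase.matches()) {
        // doc survived the expensive verification; scorer.score() is now valid
        float score = scorer.score();
    }
}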
Use of org.apache.lucene.index.LeafReaderContext in project elasticsearch by elastic.
From the class ParentChildFieldDataTests, method testThreads.
public void testThreads() throws Exception {
    final ParentChildIndexFieldData indexFieldData = getForField(childType);
    final DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId(new Index("test", ""), 0));
    final IndexParentChildFieldData global = indexFieldData.loadGlobal(reader);
    final AtomicReference<Exception> error = new AtomicReference<>();
    final int numThreads = scaledRandomIntBetween(3, 8);
    final Thread[] threads = new Thread[numThreads];
    final CountDownLatch latch = new CountDownLatch(1);
    final Map<Object, BytesRef[]> expected = new HashMap<>();
    for (LeafReaderContext context : reader.leaves()) {
        AtomicParentChildFieldData leafData = global.load(context);
        SortedDocValues parentIds = leafData.getOrdinalsValues(parentType);
        final BytesRef[] ids = new BytesRef[parentIds.getValueCount()];
        for (int j = 0; j < parentIds.getValueCount(); ++j) {
            final BytesRef id = parentIds.lookupOrd(j);
            if (id != null) {
                ids[j] = BytesRef.deepCopyOf(id);
            }
        }
        expected.put(context.reader().getCoreCacheKey(), ids);
    }
    for (int i = 0; i < numThreads; ++i) {
        threads[i] = new Thread() {
            @Override
            public void run() {
                try {
                    latch.await();
                    for (int i = 0; i < 100000; ++i) {
                        for (LeafReaderContext context : reader.leaves()) {
                            AtomicParentChildFieldData leafData = global.load(context);
                            SortedDocValues parentIds = leafData.getOrdinalsValues(parentType);
                            final BytesRef[] expectedIds = expected.get(context.reader().getCoreCacheKey());
                            for (int j = 0; j < parentIds.getValueCount(); ++j) {
                                final BytesRef id = parentIds.lookupOrd(j);
                                assertEquals(expectedIds[j], id);
                            }
                        }
                    }
                } catch (Exception e) {
                    error.compareAndSet(null, e);
                }
            }
        };
        threads[i].start();
    }
    latch.countDown();
    for (Thread thread : threads) {
        thread.join();
    }
    if (error.get() != null) {
        throw error.get();
    }
}
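The test keys its expected ordinals by context.reader().getCoreCacheKey(), which identifies the underlying segment core and therefore stays stable no matter which wrapped reader a thread happens to see. Related to the same per-leaf bookkeeping, the sketch below shows the standard Lucene pattern for mapping a top-level docID back to its leaf; it is generic usage, not part of this test, and topLevelDocId is a placeholder name.

// Hedged sketch: resolving a top-level docID to its LeafReaderContext via ReaderUtil.
List<LeafReaderContext> leaves = reader.leaves();
int leafIndex = ReaderUtil.subIndex(topLevelDocId, leaves);
LeafReaderContext leaf = leaves.get(leafIndex);
int localDocId = topLevelDocId - leaf.docBase; // docID within that segment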
Use of org.apache.lucene.index.LeafReaderContext in project elasticsearch by elastic.
From the class AbstractFieldDataImplTestCase, method testDeletedDocs.
public void testDeletedDocs() throws Exception {
    add2SingleValuedDocumentsAndDeleteOneOfThem();
    IndexFieldData indexFieldData = getForField("value");
    List<LeafReaderContext> readerContexts = refreshReader();
    for (LeafReaderContext readerContext : readerContexts) {
        AtomicFieldData fieldData = indexFieldData.load(readerContext);
        SortedBinaryDocValues values = fieldData.getBytesValues();
        for (int i = 0; i < readerContext.reader().maxDoc(); ++i) {
            values.setDocument(i);
            assertThat(values.count(), greaterThanOrEqualTo(1));
        }
    }
}
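Per-segment field data is built over every docID up to maxDoc(), deleted or not, which is why the assertion above holds even for the deleted document. Callers that must ignore deleted documents usually consult the leaf's live-docs bitset; the following is a hedged sketch of that pattern using the plain Lucene API, not code from this test.

// Hedged sketch: skipping deleted documents with the leaf's live-docs bitset.
Bits liveDocs = readerContext.reader().getLiveDocs(); // null when the segment has no deletions
for (int i = 0; i < readerContext.reader().maxDoc(); ++i) {
    if (liveDocs != null && !liveDocs.get(i)) {
        continue; // document i is deleted
    }
    values.setDocument(i);
    // process the live document's values
}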
Use of org.apache.lucene.index.LeafReaderContext in project elasticsearch by elastic.
From the class GeoFieldDataTests, method testSingleValueAllSet.
@Override
public void testSingleValueAllSet() throws Exception {
    fillSingleValueAllSet();
    IndexFieldData indexFieldData = getForField("value");
    List<LeafReaderContext> readerContexts = refreshReader();
    for (LeafReaderContext readerContext : readerContexts) {
        AtomicFieldData fieldData = indexFieldData.load(readerContext);
        assertThat(fieldData.ramBytesUsed(), greaterThanOrEqualTo(minRamBytesUsed()));
        MultiGeoPointValues fieldValues = ((AbstractAtomicGeoPointFieldData) fieldData).getGeoPointValues();
        assertValues(fieldValues, 0);
        assertValues(fieldValues, 1);
        assertValues(fieldValues, 2);
    }
}
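assertValues is a helper from the test's base class; the sketch below is a hedged illustration of what reading MultiGeoPointValues for a single document looks like in the setDocument/count/valueAt style of the field data API used above, not a copy of the helper itself.

// Hedged sketch: per-document access to MultiGeoPointValues for doc 0.
fieldValues.setDocument(0);
int count = fieldValues.count(); // number of geo points stored for doc 0
for (int i = 0; i < count; i++) {
    GeoPoint point = fieldValues.valueAt(i);
    double lat = point.lat();
    double lon = point.lon();
}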
Use of org.apache.lucene.index.LeafReaderContext in project elasticsearch by elastic.
From the class GeoFieldDataTests, method testSingleValueWithMissing.
@Override
public void testSingleValueWithMissing() throws Exception {
    fillSingleValueWithMissing();
    IndexFieldData indexFieldData = getForField("value");
    List<LeafReaderContext> readerContexts = refreshReader();
    for (LeafReaderContext readerContext : readerContexts) {
        AtomicFieldData fieldData = indexFieldData.load(readerContext);
        assertThat(fieldData.ramBytesUsed(), greaterThanOrEqualTo(minRamBytesUsed()));
        MultiGeoPointValues fieldValues = ((AbstractAtomicGeoPointFieldData) fieldData).getGeoPointValues();
        assertValues(fieldValues, 0);
        assertMissing(fieldValues, 1);
        assertValues(fieldValues, 2);
    }
}
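For the document left without a value, assertMissing presumably checks that the field data exposes nothing for it; a minimal sketch of that check, under the assumption that "missing" reduces to a zero value count for the document:

// Hedged sketch of what assertMissing is assumed to verify for doc 1.
fieldValues.setDocument(1);
assertEquals(0, fieldValues.count()); // a missing document exposes zero geo points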