Use of org.apache.lucene.index.LeafReaderContext in project lucene-solr by apache.
From class DocSetUtil, method createDocSet.
public static DocSet createDocSet(SolrIndexSearcher searcher, Term term) throws IOException {
  // use the raw reader to avoid extra wrapping overhead
  DirectoryReader reader = searcher.getRawReader();
  int maxDoc = searcher.getIndexReader().maxDoc();
  int smallSetSize = smallSetSize(maxDoc);
  String field = term.field();
  BytesRef termVal = term.bytes();
  int maxCount = 0;
  int firstReader = -1;
  List<LeafReaderContext> leaves = reader.leaves();
  // an array indexed by leaf ord costs slightly more to scan than a list of hits,
  // but avoids per-leaf allocations
  PostingsEnum[] postList = new PostingsEnum[leaves.size()];
  for (LeafReaderContext ctx : leaves) {
    assert leaves.get(ctx.ord) == ctx;
    LeafReader r = ctx.reader();
    Fields f = r.fields();
    Terms t = f.terms(field);
    // field is missing in this segment
    if (t == null) continue;
    TermsEnum te = t.iterator();
    if (te.seekExact(termVal)) {
      maxCount += te.docFreq();
      postList[ctx.ord] = te.postings(null, PostingsEnum.NONE);
      if (firstReader < 0) firstReader = ctx.ord;
    }
  }
  DocSet answer;
  if (maxCount == 0) {
    answer = DocSet.EMPTY;
  } else if (maxCount <= smallSetSize) {
    answer = createSmallSet(leaves, postList, maxCount, firstReader);
  } else {
    answer = createBigSet(leaves, postList, maxDoc, firstReader);
  }
  return DocSetUtil.getDocSet(answer, searcher);
}
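For orientation, a typical call site hands this helper a live searcher and a term and gets back a (possibly cached) DocSet. A minimal sketch, assuming a SolrIndexSearcher obtained from a real core (the field and value here are hypothetical):

import java.io.IOException;

import org.apache.lucene.index.Term;
import org.apache.solr.search.DocSet;
import org.apache.solr.search.DocSetUtil;
import org.apache.solr.search.SolrIndexSearcher;

public class TermDocSetExample {
  // Returns how many documents contain the exact term cat:electronics.
  // 'searcher' is assumed to come from a live core, e.g. req.getSearcher().
  static int countTermDocs(SolrIndexSearcher searcher) throws IOException {
    DocSet docs = DocSetUtil.createDocSet(searcher, new Term("cat", "electronics"));
    return docs.size();
  }
}

Whether a sorted-int set or a bitset comes back is decided by the smallSetSize(maxDoc) threshold above; callers only ever see the DocSet interface.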
Use of org.apache.lucene.index.LeafReaderContext in project lucene-solr by apache.
From class TrieFloatField, method getSingleValueSource.
@Override
protected ValueSource getSingleValueSource(SortedSetSelector.Type choice, SchemaField f) {
  return new SortedSetFieldSource(f.getName(), choice) {
    @Override
    public FunctionValues getValues(Map context, LeafReaderContext readerContext) throws IOException {
      // capture 'this' so the nested anonymous class can pass it to the FloatDocValues constructor
      SortedSetFieldSource thisAsSortedSetFieldSource = this;
      SortedSetDocValues sortedSet = DocValues.getSortedSet(readerContext.reader(), field);
      SortedDocValues view = SortedSetSelector.wrap(sortedSet, selector);
      return new FloatDocValues(thisAsSortedSetFieldSource) {
        private int lastDocID;

        private boolean setDoc(int docID) throws IOException {
          if (docID < lastDocID) {
            throw new IllegalArgumentException("docs out of order: lastDocID=" + lastDocID + " docID=" + docID);
          }
          lastDocID = docID; // remember the position so out-of-order access is actually detected
          if (docID > view.docID()) {
            return docID == view.advance(docID);
          } else {
            return docID == view.docID();
          }
        }

        @Override
        public float floatVal(int doc) throws IOException {
          if (setDoc(doc)) {
            BytesRef bytes = view.binaryValue();
            assert bytes.length > 0;
            return NumericUtils.sortableIntToFloat(LegacyNumericUtils.prefixCodedToInt(bytes));
          } else {
            return 0F;
          }
        }

        @Override
        public boolean exists(int doc) throws IOException {
          return setDoc(doc);
        }

        @Override
        public ValueFiller getValueFiller() {
          return new ValueFiller() {
            private final MutableValueFloat mval = new MutableValueFloat();

            @Override
            public MutableValue getValue() {
              return mval;
            }

            @Override
            public void fillValue(int doc) throws IOException {
              if (setDoc(doc)) {
                mval.exists = true;
                mval.value = NumericUtils.sortableIntToFloat(LegacyNumericUtils.prefixCodedToInt(view.binaryValue()));
              } else {
                mval.exists = false;
                mval.value = 0F;
              }
            }
          };
        }
      };
    }
  };
}
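To see why setDoc insists on ascending docIDs, here is a minimal sketch of how a consumer typically drives a ValueSource like the one above: one leaf at a time, documents in increasing order (a bare context map is assumed to be enough for a simple source like this one):

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;

public class ValueSourceScanExample {
  // Prints the float value of 'vs' for every document that has one.
  static void dumpValues(ValueSource vs, IndexReader reader) throws IOException {
    Map context = new HashMap();  // the ValueSource API of this era uses a raw Map
    for (LeafReaderContext leaf : reader.leaves()) {
      FunctionValues values = vs.getValues(context, leaf);
      for (int doc = 0; doc < leaf.reader().maxDoc(); doc++) {  // ascending, as setDoc requires
        if (values.exists(doc)) {
          System.out.println((leaf.docBase + doc) + " -> " + values.floatVal(doc));
        }
      }
    }
  }
}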
Use of org.apache.lucene.index.LeafReaderContext in project lucene-solr by apache.
From class ConcurrentSortedSetDocValuesFacetCounts, method countAll.
/** Does all the "real work" of tallying up the counts. */
private final void countAll() throws IOException, InterruptedException {
  MultiDocValues.OrdinalMap ordinalMap;
  // the ordinal map is assumed to match the leaves of this reader
  if (dv instanceof MultiDocValues.MultiSortedSetDocValues) {
    ordinalMap = ((MultiSortedSetDocValues) dv).mapping;
  } else {
    ordinalMap = null;
  }
  List<Future<Void>> results = new ArrayList<>();
  // fan out: one counting task per leaf (segment)
  for (LeafReaderContext context : state.getReader().leaves()) {
    results.add(exec.submit(new CountOneSegment(context.reader(), null, ordinalMap, context.ord)));
  }
  // join all tasks, unwrapping and rethrowing the first failure
  for (Future<Void> result : results) {
    try {
      result.get();
    } catch (ExecutionException ee) {
      // Theoretically cause can be null; guard against that.
      Throwable cause = ee.getCause();
      throw IOUtils.rethrowAlways(cause != null ? cause : ee);
    }
  }
}
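The shape of countAll, one task per leaf joined through Future.get, generalizes to any per-segment computation. A minimal sketch of the same fan-out using only the JDK executor (the pool size is an arbitrary choice, and the per-leaf work merely re-derives reader.numDocs() for illustration):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;

public class PerLeafFanOutExample {
  static long countLiveDocs(IndexReader reader) throws InterruptedException, ExecutionException {
    ExecutorService exec = Executors.newFixedThreadPool(4);
    try {
      List<Future<Integer>> results = new ArrayList<>();
      for (LeafReaderContext ctx : reader.leaves()) {
        results.add(exec.submit(() -> ctx.reader().numDocs()));  // one task per segment
      }
      long total = 0;
      for (Future<Integer> f : results) {
        total += f.get();  // join; rethrows any task failure as ExecutionException
      }
      return total;
    } finally {
      exec.shutdown();
    }
  }
}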
Use of org.apache.lucene.index.LeafReaderContext in project lucene-solr by apache.
From class TaxonomyMergeUtils, method merge.
/**
* Merges the given taxonomy and index directories and commits the changes to
* the given writers.
*/
public static void merge(Directory srcIndexDir, Directory srcTaxoDir, OrdinalMap map,
                         IndexWriter destIndexWriter, DirectoryTaxonomyWriter destTaxoWriter,
                         FacetsConfig srcConfig) throws IOException {
  // merge the taxonomies
  destTaxoWriter.addTaxonomy(srcTaxoDir, map);
  int[] ordinalMap = map.getMap();
  DirectoryReader reader = DirectoryReader.open(srcIndexDir);
  try {
    List<LeafReaderContext> leaves = reader.leaves();
    int numReaders = leaves.size();
    CodecReader[] wrappedLeaves = new CodecReader[numReaders];
    // rewrap each source leaf so its facet ordinals point into the merged taxonomy
    for (int i = 0; i < numReaders; i++) {
      wrappedLeaves[i] = SlowCodecReaderWrapper.wrap(new OrdinalMappingLeafReader(leaves.get(i).reader(), ordinalMap, srcConfig));
    }
    destIndexWriter.addIndexes(wrappedLeaves);
    // commit changes to taxonomy and index respectively.
    destTaxoWriter.commit();
    destIndexWriter.commit();
  } finally {
    reader.close();
  }
}
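A hedged sketch of how merge might be driven, assuming four on-disk directories (all paths hypothetical) and an in-heap ordinal map; the writers are closed after merge has already committed them:

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.facet.FacetsConfig;
import org.apache.lucene.facet.taxonomy.TaxonomyMergeUtils;
import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class TaxonomyMergeExample {
  public static void main(String[] args) throws IOException {
    Directory srcIndex = FSDirectory.open(Paths.get("src/index"));  // hypothetical paths
    Directory srcTaxo = FSDirectory.open(Paths.get("src/taxo"));
    Directory dstIndex = FSDirectory.open(Paths.get("dst/index"));
    Directory dstTaxo = FSDirectory.open(Paths.get("dst/taxo"));
    try (IndexWriter destIndexWriter = new IndexWriter(dstIndex, new IndexWriterConfig());
         DirectoryTaxonomyWriter destTaxoWriter = new DirectoryTaxonomyWriter(dstTaxo)) {
      TaxonomyMergeUtils.merge(srcIndex, srcTaxo,
          new DirectoryTaxonomyWriter.MemoryOrdinalMap(),  // keeps old->new ordinals in heap
          destIndexWriter, destTaxoWriter, new FacetsConfig());
    }
  }
}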
Use of org.apache.lucene.index.LeafReaderContext in project lucene-solr by apache.
From class DrillSidewaysQuery, method createWeight.
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
  final Weight baseWeight = baseQuery.createWeight(searcher, needsScores, boost);
  final Weight[] drillDowns = new Weight[drillDownQueries.length];
  for (int dim = 0; dim < drillDownQueries.length; dim++) {
    drillDowns[dim] = searcher.createNormalizedWeight(drillDownQueries[dim], false);
  }
  return new Weight(DrillSidewaysQuery.this) {
    @Override
    public void extractTerms(Set<Term> terms) {
    }

    @Override
    public Explanation explain(LeafReaderContext context, int doc) throws IOException {
      return baseWeight.explain(context, doc);
    }

    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      // We can only run as a top scorer:
      throw new UnsupportedOperationException();
    }

    @Override
    public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
      Scorer baseScorer = baseWeight.scorer(context);
      DrillSidewaysScorer.DocsAndCost[] dims = new DrillSidewaysScorer.DocsAndCost[drillDowns.length];
      int nullCount = 0;
      for (int dim = 0; dim < dims.length; dim++) {
        Scorer scorer = drillDowns[dim].scorer(context);
        if (scorer == null) {
          // a dimension that matches nothing still participates, with an empty iterator
          nullCount++;
          scorer = new ConstantScoreScorer(drillDowns[dim], 0f, DocIdSetIterator.empty());
        }
        dims[dim] = new DrillSidewaysScorer.DocsAndCost(scorer, drillSidewaysCollectors[dim]);
      }
      // If more than one dimension has no matches in this segment, neither a full
      // drill-down hit nor a one-dimension-off near-miss is possible, so skip it:
      if (nullCount > 1) {
        return null;
      }
      // Sort drill-downs by most restrictive first:
      Arrays.sort(dims, new Comparator<DrillSidewaysScorer.DocsAndCost>() {
        @Override
        public int compare(DocsAndCost o1, DocsAndCost o2) {
          return Long.compare(o1.approximation.cost(), o2.approximation.cost());
        }
      });
      if (baseScorer == null) {
        return null;
      }
      return new DrillSidewaysScorer(context, baseScorer, drillDownCollector, dims, scoreSubDocsAtOnce);
    }
  };
}
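This Weight is not normally built by hand; the DrillSideways facade creates the DrillSidewaysQuery internally. A minimal sketch of the usual entry point, assuming a live searcher, taxonomy reader, and facets config (the dimension and value are made up):

import java.io.IOException;

import org.apache.lucene.facet.DrillDownQuery;
import org.apache.lucene.facet.DrillSideways;
import org.apache.lucene.facet.FacetsConfig;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;

public class DrillSidewaysExample {
  static void facetWithSideways(IndexSearcher searcher, TaxonomyReader taxoReader,
                                FacetsConfig config) throws IOException {
    // Drill down on one dimension; sideways counts for that dimension are computed
    // as if its own filter were not applied.
    DrillDownQuery q = new DrillDownQuery(config, new MatchAllDocsQuery());
    q.add("Color", "red");  // hypothetical dimension/value
    DrillSideways ds = new DrillSideways(searcher, config, taxoReader);
    DrillSideways.DrillSidewaysResult result = ds.search(q, 10);
    System.out.println(result.facets.getTopChildren(10, "Color"));
  }
}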