Example use of org.apache.lucene.index.LeafReaderContext in the Apache lucene-solr project.
From class ExportWriter, method writeDoc:
/**
 * Writes a single sorted document to the output, clearing its bit in the
 * per-segment "pending" bit set and delegating each field to its FieldWriter.
 */
protected void writeDoc(SortDoc sortDoc, List<LeafReaderContext> leaves, EntryWriter ew) throws IOException {
  final int segmentOrd = sortDoc.ord;
  // Mark this document as consumed for its segment.
  sets[segmentOrd].clear(sortDoc.docId);
  final LeafReaderContext leaf = leaves.get(segmentOrd);
  int written = 0;
  for (FieldWriter writer : fieldWriters) {
    // The index passed in is the count of fields actually emitted so far;
    // it only advances when the writer produced output for this doc.
    boolean wrote = writer.write(sortDoc.docId, leaf.reader(), ew, written);
    if (wrote) {
      written++;
    }
  }
}
Example use of org.apache.lucene.index.LeafReaderContext in the Apache lucene-solr project.
From class ValueSourceAugmenter, method transform:
/**
 * Evaluates the value source for the given global docid and attaches the
 * resulting value to the SolrDocument.
 */
@Override
public void transform(SolrDocument doc, int docid, float score) {
  try {
    // TODO: calculate this stuff just once across diff functions
    // Locate the leaf (segment) containing this global docid.
    final int leafIdx = ReaderUtil.subIndex(docid, readerContexts);
    final LeafReaderContext leaf = readerContexts.get(leafIdx);
    FunctionValues functionValues = valueSource.getValues(fcontext, leaf);
    // Convert the global docid to a segment-local id before lookup.
    final int segLocalId = docid - leaf.docBase;
    setValue(doc, functionValues.objectVal(segLocalId));
  } catch (IOException e) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "exception at docid " + docid + " for valuesource " + valueSource, e);
  }
}
Example use of org.apache.lucene.index.LeafReaderContext in the Apache lucene-solr project.
From class SolrDocumentFetcher, method decorateDocValueFields:
/**
 * This will fetch and add the docValues fields to a given SolrDocument/SolrInputDocument.
 *
 * @param doc
 *          A SolrDocument or SolrInputDocument instance where docValues will be added
 * @param docid
 *          The lucene docid of the document to be populated
 * @param fields
 *          The list of docValues fields to be decorated
 * @throws IOException if reading docValues from the index fails
 */
public void decorateDocValueFields(@SuppressWarnings("rawtypes") SolrDocumentBase doc, int docid, Set<String> fields) throws IOException {
  // Resolve the leaf (segment) containing the global docid and the doc's segment-local id.
  final List<LeafReaderContext> leafContexts = searcher.getLeafContexts();
  final int subIndex = ReaderUtil.subIndex(docid, leafContexts);
  final int localId = docid - leafContexts.get(subIndex).docBase;
  final LeafReader leafReader = leafContexts.get(subIndex).reader();
  for (String fieldName : fields) {
    final SchemaField schemaField = searcher.getSchema().getFieldOrNull(fieldName);
    // Skip fields that aren't docValues-capable or are already populated on the doc.
    if (schemaField == null || !schemaField.hasDocValues() || doc.containsKey(fieldName)) {
      log.warn("Couldn't decorate docValues for field: [{}], schemaField: [{}]", fieldName, schemaField);
      continue;
    }
    FieldInfo fi = searcher.getFieldInfos().fieldInfo(fieldName);
    if (fi == null) {
      // Searcher doesn't have info about this field, hence ignore it.
      continue;
    }
    final DocValuesType dvType = fi.getDocValuesType();
    switch (dvType) {
      case NUMERIC:
        final NumericDocValues ndv = leafReader.getNumericDocValues(fieldName);
        if (ndv == null) {
          continue;
        }
        Long val;
        if (ndv.advanceExact(localId)) {
          val = ndv.longValue();
        } else {
          continue;
        }
        // The raw long must be decoded according to the schema's declared number type.
        Object newVal = val;
        if (schemaField.getType().isPointField()) {
          // TODO: Maybe merge PointField with TrieFields here
          NumberType type = schemaField.getType().getNumberType();
          switch (type) {
            case INTEGER:
              newVal = val.intValue();
              break;
            case LONG:
              newVal = val.longValue();
              break;
            case FLOAT:
              newVal = Float.intBitsToFloat(val.intValue());
              break;
            case DOUBLE:
              newVal = Double.longBitsToDouble(val);
              break;
            case DATE:
              newVal = new Date(val);
              break;
            default:
              throw new AssertionError("Unexpected PointType: " + type);
          }
        } else {
          if (schemaField.getType() instanceof TrieIntField) {
            newVal = val.intValue();
          } else if (schemaField.getType() instanceof TrieFloatField) {
            newVal = Float.intBitsToFloat(val.intValue());
          } else if (schemaField.getType() instanceof TrieDoubleField) {
            newVal = Double.longBitsToDouble(val);
          } else if (schemaField.getType() instanceof TrieDateField) {
            newVal = new Date(val);
          } else if (schemaField.getType() instanceof EnumField) {
            newVal = ((EnumField) schemaField.getType()).intValueToStringValue(val.intValue());
          }
        }
        doc.addField(fieldName, newVal);
        break;
      case BINARY:
        BinaryDocValues bdv = leafReader.getBinaryDocValues(fieldName);
        if (bdv == null) {
          continue;
        }
        BytesRef value;
        if (bdv.advanceExact(localId)) {
          // Deep copy: the BytesRef returned by the iterator is reused internally.
          value = BytesRef.deepCopyOf(bdv.binaryValue());
        } else {
          continue;
        }
        doc.addField(fieldName, value);
        break;
      case SORTED:
        SortedDocValues sdv = leafReader.getSortedDocValues(fieldName);
        if (sdv == null) {
          continue;
        }
        if (sdv.advanceExact(localId)) {
          final BytesRef bRef = sdv.binaryValue();
          // Special handling for Boolean fields since they're stored as 'T' and 'F'.
          if (schemaField.getType() instanceof BoolField) {
            doc.addField(fieldName, schemaField.getType().toObject(schemaField, bRef));
          } else {
            doc.addField(fieldName, bRef.utf8ToString());
          }
        }
        break;
      case SORTED_NUMERIC:
        final SortedNumericDocValues numericDv = leafReader.getSortedNumericDocValues(fieldName);
        NumberType type = schemaField.getType().getNumberType();
        if (numericDv != null) {
          if (numericDv.advance(localId) == localId) {
            final List<Object> outValues = new ArrayList<Object>(numericDv.docValueCount());
            for (int i = 0; i < numericDv.docValueCount(); i++) {
              long number = numericDv.nextValue();
              switch (type) {
                case INTEGER:
                  outValues.add((int) number);
                  break;
                case LONG:
                  outValues.add(number);
                  break;
                case FLOAT:
                  outValues.add(NumericUtils.sortableIntToFloat((int) number));
                  break;
                case DOUBLE:
                  outValues.add(NumericUtils.sortableLongToDouble(number));
                  break;
                case DATE:
                  outValues.add(new Date(number));
                  break;
                default:
                  throw new AssertionError("Unexpected PointType: " + type);
              }
            }
            assert outValues.size() > 0;
            doc.addField(fieldName, outValues);
          }
        }
        // FIX: break was missing here, causing fall-through into SORTED_SET,
        // which would then try to read sorted-set docValues for a numeric field.
        break;
      case SORTED_SET:
        final SortedSetDocValues values = leafReader.getSortedSetDocValues(fieldName);
        if (values != null && values.getValueCount() > 0) {
          if (values.advance(localId) == localId) {
            final List<Object> outValues = new LinkedList<>();
            for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values.nextOrd()) {
              final BytesRef term = values.lookupOrd(ord);
              outValues.add(schemaField.getType().toObject(schemaField, term));
            }
            assert outValues.size() > 0;
            doc.addField(fieldName, outValues);
          }
        }
        // FIX: break was missing here too (fall-through into NONE was harmless but misleading).
        break;
      case NONE:
        break;
    }
  }
}
Example use of org.apache.lucene.index.LeafReaderContext in the Apache lucene-solr project.
From class SolrIndexSearcher, method getDocSet:
/**
 * Returns the set of document ids matching all queries. This method is cache-aware and attempts to retrieve the
 * answer from the cache if possible. If the answer was not cached, it may have been inserted into the cache as a
 * result of this call. This method can handle negative queries.
 * <p>
 * The DocSet returned should <b>not</b> be modified.
 */
public DocSet getDocSet(List<Query> queries) throws IOException {
// If any query is a ScoreFilter, delegate to the score-aware variant instead.
if (queries != null) {
for (Query q : queries) {
if (q instanceof ScoreFilter) {
return getDocSetScore(queries);
}
}
}
// getProcessedFilter may answer directly from the filter cache.
ProcessedFilter pf = getProcessedFilter(null, queries);
if (pf.answer != null)
return pf.answer;
// Otherwise collect matching docids segment by segment into a DocSet.
DocSetCollector setCollector = new DocSetCollector(maxDoc());
Collector collector = setCollector;
// Post-filters (e.g. expensive custom filters) wrap the collector chain.
if (pf.postFilter != null) {
pf.postFilter.setLastDelegate(collector);
collector = pf.postFilter;
}
for (final LeafReaderContext leaf : leafContexts) {
final LeafReader reader = leaf.reader();
Bits liveDocs = reader.getLiveDocs();
DocIdSet idSet = null;
if (pf.filter != null) {
idSet = pf.filter.getDocIdSet(leaf, liveDocs);
// A null DocIdSet means no docs match in this segment.
if (idSet == null)
continue;
}
DocIdSetIterator idIter = null;
if (idSet != null) {
idIter = idSet.iterator();
if (idIter == null)
continue;
// no need to check liveDocs
if (!pf.hasDeletedDocs)
liveDocs = null;
}
final LeafCollector leafCollector = collector.getLeafCollector(leaf);
int max = reader.maxDoc();
if (idIter == null) {
// No filter: collect every live doc in the segment.
for (int docid = 0; docid < max; docid++) {
if (liveDocs != null && !liveDocs.get(docid))
continue;
leafCollector.collect(docid);
}
} else {
// Filter present: walk the iterator, optionally re-checking liveDocs
// (only needed when the filter wasn't already liveDocs-aware).
if (liveDocs != null) {
for (int docid = -1; (docid = idIter.advance(docid + 1)) < max; ) {
if (liveDocs.get(docid))
leafCollector.collect(docid);
}
} else {
for (int docid = -1; (docid = idIter.advance(docid + 1)) < max; ) {
leafCollector.collect(docid);
}
}
}
}
// Give delegating collectors (post-filters) a chance to flush buffered docs.
if (collector instanceof DelegatingCollector) {
((DelegatingCollector) collector).finish();
}
return DocSetUtil.getDocSet(setCollector, this);
}
Example use of org.apache.lucene.index.LeafReaderContext in the Apache lucene-solr project.
From class SolrIndexSearcher, method lookupId:
/**
 * lookup the docid by the unique key field, and return the id *within* the leaf reader in the low 32 bits, and the
 * index of the leaf reader in the high 32 bits. -1 is returned if not found.
 *
 * @lucene.internal
 */
public long lookupId(BytesRef idBytes) throws IOException {
  final String uniqueKeyField = schema.getUniqueKeyField().getName();
  int leafIndex = -1;
  for (final LeafReaderContext leaf : leafContexts) {
    leafIndex++;
    final LeafReader reader = leaf.reader();
    final Terms terms = reader.terms(uniqueKeyField);
    if (terms == null) {
      continue;
    }
    final TermsEnum termsEnum = terms.iterator();
    if (!termsEnum.seekExact(idBytes)) {
      continue;
    }
    // Scores aren't needed, so request the cheapest postings; wrap with
    // liveDocs filtering so deleted docs are skipped.
    PostingsEnum postings = termsEnum.postings(null, PostingsEnum.NONE);
    postings = BitsFilteredPostingsEnum.wrap(postings, reader.getLiveDocs());
    final int segDocId = postings.nextDoc();
    if (segDocId == DocIdSetIterator.NO_MORE_DOCS) {
      // Term exists but only on deleted docs in this segment; keep searching.
      continue;
    }
    // Unique key: at most one live doc may carry this term.
    assert postings.nextDoc() == DocIdSetIterator.NO_MORE_DOCS;
    return (((long) leafIndex) << 32) | segDocId;
  }
  return -1;
}
Aggregations