Use of org.opensearch.search.aggregations.LeafBucketCollector in project OpenSearch by opensearch-project.
Class ParentJoinAggregator, method getLeafCollector:
@Override
public final LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException {
    if (valuesSource == null) {
        return LeafBucketCollector.NO_OP_COLLECTOR;
    }
    final SortedSetDocValues globalOrdinals = valuesSource.globalOrdinalsValues(ctx);
    final Bits parentDocs = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), inFilter.scorerSupplier(ctx));
    return new LeafBucketCollector() {
        @Override
        public void collect(int docId, long owningBucketOrd) throws IOException {
            if (parentDocs.get(docId) && globalOrdinals.advanceExact(docId)) {
                int globalOrdinal = (int) globalOrdinals.nextOrd();
                assert globalOrdinal != -1 && globalOrdinals.nextOrd() == SortedSetDocValues.NO_MORE_ORDS;
                collectionStrategy.add(owningBucketOrd, globalOrdinal);
            }
        }
    };
}
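The collector returned above does not pull documents itself; the aggregation framework walks the segment's matching documents in docID order and calls collect for each one. Below is a minimal sketch of such a driving loop, using only the Lucene and OpenSearch types already shown in these snippets; the helper class and method names are hypothetical, not OpenSearch code.

import java.io.IOException;

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.Bits;
import org.opensearch.search.aggregations.LeafBucketCollector;

// Hypothetical helper, not part of OpenSearch: drives a per-segment LeafBucketCollector
// the way the snippets in this section do, visiting live documents in docID order.
final class LeafCollectorDriver {
    static void collectLiveDocs(DocIdSetIterator matching, Bits liveDocs,
                                LeafBucketCollector leaf, long owningBucketOrd) throws IOException {
        for (int doc = matching.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = matching.nextDoc()) {
            // skip deleted documents, as processBucket and processLeafFromQuery do further down
            if (liveDocs == null || liveDocs.get(doc)) {
                leaf.collect(doc, owningBucketOrd);
            }
        }
    }
}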
Use of org.opensearch.search.aggregations.LeafBucketCollector in project OpenSearch by opensearch-project.
Class BinaryValuesSource, method getLeafCollector:
@Override
LeafBucketCollector getLeafCollector(LeafReaderContext context, LeafBucketCollector next) throws IOException {
    final SortedBinaryDocValues dvs = docValuesFunc.apply(context);
    return new LeafBucketCollector() {
        @Override
        public void collect(int doc, long bucket) throws IOException {
            if (dvs.advanceExact(doc)) {
                int num = dvs.docValueCount();
                for (int i = 0; i < num; i++) {
                    currentValue = dvs.nextValue();
                    next.collect(doc, bucket);
                }
            } else if (missingBucket) {
                currentValue = null;
                next.collect(doc, bucket);
            }
        }
    };
}
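BinaryValuesSource buckets nothing itself: it publishes the value in currentValue and forwards the document to the next collector once per value of a multi-valued field, or exactly once with a null value when missingBucket is set. A minimal sketch of a terminal collector that makes this fan-out visible by counting the forwarded calls; the class is hypothetical and uses no OpenSearch internals beyond LeafBucketCollector itself.

import java.io.IOException;

import org.opensearch.search.aggregations.LeafBucketCollector;

// Hypothetical terminal collector, not part of OpenSearch: counts how many (doc, value)
// pairs the chain above forwards, i.e. one collect() call per value of the field.
final class CountingLeafCollector extends LeafBucketCollector {
    long forwardedCalls = 0;

    @Override
    public void collect(int doc, long bucket) throws IOException {
        forwardedCalls++;
    }
}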
Use of org.opensearch.search.aggregations.LeafBucketCollector in project OpenSearch by opensearch-project.
Class SortedDocsProducer, method processBucket:
/**
 * Visits all non-deleted documents in <code>iterator</code> and fills the provided <code>queue</code>
 * with the top composite buckets extracted from the collection.
 * Documents that contain a top composite bucket are added to the provided <code>builder</code> if it is not null.
 *
 * Returns true if the queue is full and the current <code>leadSourceBucket</code> did not produce any competitive
 * composite buckets.
 */
protected boolean processBucket(
    CompositeValuesCollectorQueue queue,
    LeafReaderContext context,
    DocIdSetIterator iterator,
    Comparable<?> leadSourceBucket,
    @Nullable DocIdSetBuilder builder
) throws IOException {
    final int[] topCompositeCollected = new int[1];
    final boolean[] hasCollected = new boolean[1];
    final LeafBucketCollector queueCollector = new LeafBucketCollector() {
        int lastDoc = -1;

        // we need to add the matching document in the builder
        // so we build a bulk adder from the approximate cost of the iterator
        // and rebuild the adder during the collection if needed
        int remainingBits = (int) Math.min(iterator.cost(), Integer.MAX_VALUE);
        DocIdSetBuilder.BulkAdder adder = builder == null ? null : builder.grow(remainingBits);

        @Override
        public void collect(int doc, long bucket) throws IOException {
            hasCollected[0] = true;
            if (queue.addIfCompetitive()) {
                topCompositeCollected[0]++;
                if (adder != null && doc != lastDoc) {
                    if (remainingBits == 0) {
                        // the cost approximation was lower than the real size, we need to grow the adder
                        // by some numbers (128) to ensure that we can add the extra documents
                        adder = builder.grow(128);
                        remainingBits = 128;
                    }
                    adder.add(doc);
                    remainingBits--;
                    lastDoc = doc;
                }
            }
        }
    };
    final Bits liveDocs = context.reader().getLiveDocs();
    final LeafBucketCollector collector = queue.getLeafCollector(leadSourceBucket, context, queueCollector);
    while (iterator.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
        if (liveDocs == null || liveDocs.get(iterator.docID())) {
            collector.collect(iterator.docID());
        }
    }
    if (queue.isFull() && hasCollected[0] && topCompositeCollected[0] == 0) {
        return true;
    }
    return false;
}
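The subtlest part of processBucket is the DocIdSetBuilder handling: grow() is sized from the iterator's cost() estimate, the adder is re-grown in chunks of 128 when that estimate turns out to be too low, and the doc != lastDoc check keeps a document from being added twice when several composite buckets match it. A standalone sketch of the same grow()/BulkAdder pattern follows; the class and method names are made up for illustration.

import java.io.IOException;

import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.util.DocIdSetBuilder;

// Standalone illustration of the grow()/BulkAdder pattern used by processBucket:
// grow() reserves room for an estimated number of docs and can be called again
// later if the estimate was too small.
final class DocIdSetBuilderExample {
    static DocIdSet collectEvenDocs(int maxDoc) throws IOException {
        DocIdSetBuilder builder = new DocIdSetBuilder(maxDoc);
        DocIdSetBuilder.BulkAdder adder = builder.grow(maxDoc / 2 + 1); // estimated number of matches
        for (int doc = 0; doc < maxDoc; doc += 2) {
            adder.add(doc); // record a matching document
        }
        return builder.build();
    }
}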
Use of org.opensearch.search.aggregations.LeafBucketCollector in project OpenSearch by opensearch-project.
Class LongValuesSource, method getLeafCollector:
@Override
LeafBucketCollector getLeafCollector(LeafReaderContext context, LeafBucketCollector next) throws IOException {
    final SortedNumericDocValues dvs = docValuesFunc.apply(context);
    return new LeafBucketCollector() {
        @Override
        public void collect(int doc, long bucket) throws IOException {
            if (dvs.advanceExact(doc)) {
                int num = dvs.docValueCount();
                for (int i = 0; i < num; i++) {
                    currentValue = dvs.nextValue();
                    missingCurrentValue = false;
                    next.collect(doc, bucket);
                }
            } else if (missingBucket) {
                missingCurrentValue = true;
                next.collect(doc, bucket);
            }
        }
    };
}
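LongValuesSource follows the same shape as BinaryValuesSource above, with a missingCurrentValue flag instead of a null sentinel. What neither snippet shows is how the per-source collectors are combined: each source wraps the next, so the innermost collector only runs after every source has published its current value. In the snippets before and after this one, that wiring goes through CompositeValuesCollectorQueue.getLeafCollector; the sketch below rebuilds the idea by hand, with a hypothetical Source interface standing in for the package-private getLeafCollector methods shown above.

import java.io.IOException;

import org.apache.lucene.index.LeafReaderContext;
import org.opensearch.search.aggregations.LeafBucketCollector;

// Hypothetical sketch, not OpenSearch code: builds a chain of per-source collectors
// from the innermost (last source) to the outermost (first source), so the first
// source sees each document before delegating to the rest of the chain.
final class CollectorChainSketch {
    interface Source {
        // mirrors the package-private getLeafCollector(LeafReaderContext, LeafBucketCollector) above
        LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector next) throws IOException;
    }

    static LeafBucketCollector chain(LeafReaderContext ctx, LeafBucketCollector last, Source... sources) throws IOException {
        LeafBucketCollector collector = last;
        for (int i = sources.length - 1; i >= 0; i--) {
            collector = sources[i].getLeafCollector(ctx, collector);
        }
        return collector;
    }
}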
Use of org.opensearch.search.aggregations.LeafBucketCollector in project OpenSearch by opensearch-project.
Class CompositeAggregator, method processLeafFromQuery:
private void processLeafFromQuery(LeafReaderContext ctx, Sort indexSortPrefix) throws IOException {
    DocValueFormat[] formats = new DocValueFormat[indexSortPrefix.getSort().length];
    for (int i = 0; i < formats.length; i++) {
        formats[i] = sources[i].format;
    }
    FieldDoc fieldDoc = SearchAfterBuilder.buildFieldDoc(
        new SortAndFormats(indexSortPrefix, formats),
        Arrays.copyOfRange(rawAfterKey.values(), 0, formats.length)
    );
    if (indexSortPrefix.getSort().length < sources.length) {
        // include all docs that belong to the partial bucket
        fieldDoc.doc = -1;
    }
    BooleanQuery newQuery = new BooleanQuery.Builder()
        .add(context.query(), BooleanClause.Occur.MUST)
        .add(new SearchAfterSortedDocQuery(applySortFieldRounding(indexSortPrefix), fieldDoc), BooleanClause.Occur.FILTER)
        .build();
    Weight weight = context.searcher().createWeight(context.searcher().rewrite(newQuery), ScoreMode.COMPLETE_NO_SCORES, 1f);
    Scorer scorer = weight.scorer(ctx);
    if (scorer != null) {
        DocIdSetIterator docIt = scorer.iterator();
        final LeafBucketCollector inner = queue.getLeafCollector(ctx, getFirstPassCollector(docIdSetBuilder, indexSortPrefix.getSort().length));
        inner.setScorer(scorer);
        final Bits liveDocs = ctx.reader().getLiveDocs();
        while (docIt.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
            if (liveDocs == null || liveDocs.get(docIt.docID())) {
                inner.collect(docIt.docID());
            }
        }
    }
}
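In the query composition above, the original query is added as a MUST clause while the SearchAfterSortedDocQuery is added as a FILTER clause: the filter restricts matching to documents after the previous composite key without contributing to scoring, and setting fieldDoc.doc to -1 lets the doc-id tiebreaker accept every document with equal prefix values, so all documents of the partially consumed bucket stay in range. A tiny standalone sketch of the same MUST-plus-FILTER composition follows; the helper class and method names are made up.

import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;

// Hypothetical helper, not OpenSearch code: combines the user query (MUST, still drives
// matching and scoring) with a restriction query (FILTER, matches without scoring),
// mirroring how processLeafFromQuery composes its search-after query.
final class QueryCompositionExample {
    static Query restrict(Query original, Query restriction) {
        return new BooleanQuery.Builder()
            .add(original, BooleanClause.Occur.MUST)
            .add(restriction, BooleanClause.Occur.FILTER)
            .build();
    }
}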