
Example 1 with SetBackedScalingCuckooFilter

Use of org.opensearch.common.util.SetBackedScalingCuckooFilter in project OpenSearch by opensearch-project.

From class InternalMappedRareTerms, method reduce:

@Override
public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
    Map<Object, List<B>> buckets = new HashMap<>();
    InternalRareTerms<A, B> referenceTerms = null;
    SetBackedScalingCuckooFilter filter = null;
    for (InternalAggregation aggregation : aggregations) {
        // Unmapped rare terms don't have a cuckoo filter so we skip all of this work
        // and save some type casting headaches later.
        if (aggregation.isMapped() == false) {
            continue;
        }
        @SuppressWarnings("unchecked") InternalRareTerms<A, B> terms = (InternalRareTerms<A, B>) aggregation;
        if (referenceTerms == null && aggregation.getClass().equals(UnmappedRareTerms.class) == false) {
            referenceTerms = terms;
        }
        if (referenceTerms != null
            && referenceTerms.getClass().equals(terms.getClass()) == false
            && terms.getClass().equals(UnmappedRareTerms.class) == false) {
            // control gets into this loop when the same field name against which the query is executed
            // is of different types in different indices.
            throw new AggregationExecutionException(
                "Merging/Reducing the aggregations failed when computing the aggregation ["
                    + referenceTerms.getName()
                    + "] because the field you gave in the aggregation query existed as two different "
                    + "types in two different indices"
            );
        }
        for (B bucket : terms.getBuckets()) {
            List<B> bucketList = buckets.computeIfAbsent(bucket.getKey(), k -> new ArrayList<>());
            bucketList.add(bucket);
        }
        SetBackedScalingCuckooFilter otherFilter = ((InternalMappedRareTerms) aggregation).getFilter();
        if (filter == null) {
            filter = new SetBackedScalingCuckooFilter(otherFilter);
        } else {
            filter.merge(otherFilter);
        }
    }
    final List<B> rare = new ArrayList<>();
    for (List<B> sameTermBuckets : buckets.values()) {
        final B b = reduceBucket(sameTermBuckets, reduceContext);
        if (b.getDocCount() <= maxDocCount && containsTerm(filter, b) == false) {
            rare.add(b);
            reduceContext.consumeBucketsAndMaybeBreak(1);
        } else if (b.getDocCount() > maxDocCount) {
            // this term has gone over threshold while merging, so add it to the filter.
            // Note this may happen during incremental reductions too
            addToFilter(filter, b);
        }
    }
    CollectionUtil.introSort(rare, order.comparator());
    return createWithFilter(name, rare, filter);
}
Also used: HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), List (java.util.List), InternalAggregation (org.opensearch.search.aggregations.InternalAggregation), SetBackedScalingCuckooFilter (org.opensearch.common.util.SetBackedScalingCuckooFilter), AggregationExecutionException (org.opensearch.search.aggregations.AggregationExecutionException)
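
The reduce above folds per-shard filters with a copy-then-merge pattern: the first mapped filter is copied so that no shard's instance is mutated, and every later filter is merged into that copy. A minimal standalone sketch of the same pattern, using only the copy constructor and merge call visible above (FilterMergeSketch and mergeAll are illustrative names, not OpenSearch API):

import java.util.List;

import org.opensearch.common.util.SetBackedScalingCuckooFilter;

// Illustrative helper mirroring the copy-then-merge loop in reduce above.
final class FilterMergeSketch {
    static SetBackedScalingCuckooFilter mergeAll(List<SetBackedScalingCuckooFilter> shardFilters) {
        SetBackedScalingCuckooFilter merged = null;
        for (SetBackedScalingCuckooFilter shardFilter : shardFilters) {
            if (merged == null) {
                // Copy the first filter so the shard's own instance is never mutated.
                merged = new SetBackedScalingCuckooFilter(shardFilter);
            } else {
                merged.merge(shardFilter);
            }
        }
        return merged;
    }
}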

Example 2 with SetBackedScalingCuckooFilter

Use of org.opensearch.common.util.SetBackedScalingCuckooFilter in project OpenSearch by opensearch-project.

From class StringRareTermsAggregator, method buildAggregations:

@Override
public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
    /*
     * Collect the list of buckets, populate the filter with terms
     * that are too frequent, and figure out how to merge sub-buckets.
     */
    StringRareTerms.Bucket[][] rarestPerOrd = new StringRareTerms.Bucket[owningBucketOrds.length][];
    SetBackedScalingCuckooFilter[] filters = new SetBackedScalingCuckooFilter[owningBucketOrds.length];
    long keepCount = 0;
    long[] mergeMap = new long[(int) bucketOrds.size()];
    Arrays.fill(mergeMap, -1);
    long offset = 0;
    for (int owningOrdIdx = 0; owningOrdIdx < owningBucketOrds.length; owningOrdIdx++) {
        try (BytesRefHash bucketsInThisOwningBucketToCollect = new BytesRefHash(1, context.bigArrays())) {
            filters[owningOrdIdx] = newFilter();
            List<StringRareTerms.Bucket> builtBuckets = new ArrayList<>();
            BytesKeyedBucketOrds.BucketOrdsEnum collectedBuckets = bucketOrds.ordsEnum(owningBucketOrds[owningOrdIdx]);
            BytesRef scratch = new BytesRef();
            while (collectedBuckets.next()) {
                collectedBuckets.readValue(scratch);
                long docCount = bucketDocCount(collectedBuckets.ord());
                // if the key is below threshold, reinsert into the new ords
                if (docCount <= maxDocCount) {
                    StringRareTerms.Bucket bucket = new StringRareTerms.Bucket(BytesRef.deepCopyOf(scratch), docCount, null, format);
                    bucket.bucketOrd = offset + bucketsInThisOwningBucketToCollect.add(scratch);
                    mergeMap[(int) collectedBuckets.ord()] = bucket.bucketOrd;
                    builtBuckets.add(bucket);
                    keepCount++;
                } else {
                    filters[owningOrdIdx].add(scratch);
                }
            }
            rarestPerOrd[owningOrdIdx] = builtBuckets.toArray(new StringRareTerms.Bucket[0]);
            offset += bucketsInThisOwningBucketToCollect.size();
        }
    }
    /*
     * Only merge/delete the ordinals if we have actually deleted one,
     * to save on some redundant work.
     */
    if (keepCount != mergeMap.length) {
        mergeBuckets(mergeMap, offset);
        if (deferringCollector != null) {
            deferringCollector.mergeBuckets(mergeMap);
        }
    }
    /*
     * Now build the results!
     */
    buildSubAggsForAllBuckets(rarestPerOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs);
    InternalAggregation[] result = new InternalAggregation[owningBucketOrds.length];
    for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) {
        Arrays.sort(rarestPerOrd[ordIdx], ORDER.comparator());
        result[ordIdx] = new StringRareTerms(name, ORDER, metadata(), format, Arrays.asList(rarestPerOrd[ordIdx]), maxDocCount, filters[ordIdx]);
    }
    return result;
}
Also used: ArrayList (java.util.ArrayList), InternalAggregation (org.opensearch.search.aggregations.InternalAggregation), SetBackedScalingCuckooFilter (org.opensearch.common.util.SetBackedScalingCuckooFilter), BytesRefHash (org.opensearch.common.util.BytesRefHash), BytesRef (org.apache.lucene.util.BytesRef)
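
The heart of buildAggregations is a per-term threshold split: a bucket whose document count is at or below maxDocCount stays a rare-term candidate, while anything above it goes into the shard's cuckoo filter so that later reductions can reject terms that were rare here but frequent on another shard. A simplified sketch of that split over a plain count map, away from the BucketOrds machinery (ThresholdSplitSketch and splitRare are hypothetical names):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.opensearch.common.util.SetBackedScalingCuckooFilter;

// Hypothetical helper mirroring the keep-or-filter split in buildAggregations above.
final class ThresholdSplitSketch {
    static List<Long> splitRare(Map<Long, Long> docCounts, long maxDocCount, SetBackedScalingCuckooFilter filter) {
        List<Long> rareCandidates = new ArrayList<>();
        for (Map.Entry<Long, Long> entry : docCounts.entrySet()) {
            if (entry.getValue() <= maxDocCount) {
                rareCandidates.add(entry.getKey());  // still rare on this shard: keep as a candidate
            } else {
                filter.add(entry.getKey());          // too frequent: record in the filter for later reductions
            }
        }
        return rareCandidates;
    }
}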

Example 3 with SetBackedScalingCuckooFilter

Use of org.opensearch.common.util.SetBackedScalingCuckooFilter in project OpenSearch by opensearch-project.

From class LongRareTermsTests, method createTestInstance:

@Override
protected InternalRareTerms<?, ?> createTestInstance(String name, Map<String, Object> metadata, InternalAggregations aggregations, long maxDocCount) {
    BucketOrder order = BucketOrder.count(false);
    DocValueFormat format = randomNumericDocValueFormat();
    List<LongRareTerms.Bucket> buckets = new ArrayList<>();
    final int numBuckets = randomNumberOfBuckets();
    for (int i = 0; i < numBuckets; ++i) {
        long term = randomLong();
        int docCount = randomIntBetween(1, 100);
        buckets.add(new LongRareTerms.Bucket(term, docCount, aggregations, format));
    }
    SetBackedScalingCuckooFilter filter = new SetBackedScalingCuckooFilter(1000, Randomness.get(), 0.01);
    return new LongRareTerms(name, order, metadata, format, buckets, maxDocCount, filter);
}
Also used: BucketOrder (org.opensearch.search.aggregations.BucketOrder), SetBackedScalingCuckooFilter (org.opensearch.common.util.SetBackedScalingCuckooFilter), DocValueFormat (org.opensearch.search.DocValueFormat), ArrayList (java.util.ArrayList)
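
For context on the constructor used here: 1000 is the threshold of distinct values the filter keeps in an exact backing set before scaling into an approximate cuckoo filter, Randomness.get() supplies the random source for hashing, and 0.01 is the target false-positive rate after conversion. A hedged usage sketch; mightContain is the membership check the rare-terms reduce path relies on, but treat the exact overloads as an assumption:

import org.opensearch.common.Randomness;
import org.opensearch.common.util.SetBackedScalingCuckooFilter;

final class FilterUsageSketch {
    public static void main(String[] args) {
        // Exact set up to 1000 distinct values, then an approximate filter with ~1% false positives.
        SetBackedScalingCuckooFilter filter = new SetBackedScalingCuckooFilter(1000, Randomness.get(), 0.01);
        filter.add(42L);
        System.out.println(filter.mightContain(42L));  // true: no false negatives for added values
        System.out.println(filter.mightContain(7L));   // usually false; false positives possible once scaled
    }
}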

Example 4 with SetBackedScalingCuckooFilter

Use of org.opensearch.common.util.SetBackedScalingCuckooFilter in project OpenSearch by opensearch-project.

From class LongRareTermsAggregator, method buildAggregations:

@Override
public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
    /*
     * Collect the list of buckets, populate the filter with terms
     * that are too frequent, and figure out how to merge sub-buckets.
     */
    LongRareTerms.Bucket[][] rarestPerOrd = new LongRareTerms.Bucket[owningBucketOrds.length][];
    SetBackedScalingCuckooFilter[] filters = new SetBackedScalingCuckooFilter[owningBucketOrds.length];
    long keepCount = 0;
    long[] mergeMap = new long[(int) bucketOrds.size()];
    Arrays.fill(mergeMap, -1);
    long offset = 0;
    for (int owningOrdIdx = 0; owningOrdIdx < owningBucketOrds.length; owningOrdIdx++) {
        try (LongHash bucketsInThisOwningBucketToCollect = new LongHash(1, context.bigArrays())) {
            filters[owningOrdIdx] = newFilter();
            List<LongRareTerms.Bucket> builtBuckets = new ArrayList<>();
            LongKeyedBucketOrds.BucketOrdsEnum collectedBuckets = bucketOrds.ordsEnum(owningBucketOrds[owningOrdIdx]);
            while (collectedBuckets.next()) {
                long docCount = bucketDocCount(collectedBuckets.ord());
                // if the key is below threshold, reinsert into the new ords
                if (docCount <= maxDocCount) {
                    LongRareTerms.Bucket bucket = new LongRareTerms.Bucket(collectedBuckets.value(), docCount, null, format);
                    bucket.bucketOrd = offset + bucketsInThisOwningBucketToCollect.add(collectedBuckets.value());
                    mergeMap[(int) collectedBuckets.ord()] = bucket.bucketOrd;
                    builtBuckets.add(bucket);
                    keepCount++;
                } else {
                    filters[owningOrdIdx].add(collectedBuckets.value());
                }
            }
            rarestPerOrd[owningOrdIdx] = builtBuckets.toArray(new LongRareTerms.Bucket[0]);
            offset += bucketsInThisOwningBucketToCollect.size();
        }
    }
    /*
     * Only merge/delete the ordinals if we have actually deleted one,
     * to save on some redundant work.
     */
    if (keepCount != mergeMap.length) {
        mergeBuckets(mergeMap, offset);
        if (deferringCollector != null) {
            deferringCollector.mergeBuckets(mergeMap);
        }
    }
    /*
     * Now build the results!
     */
    buildSubAggsForAllBuckets(rarestPerOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs);
    InternalAggregation[] result = new InternalAggregation[owningBucketOrds.length];
    for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) {
        Arrays.sort(rarestPerOrd[ordIdx], ORDER.comparator());
        result[ordIdx] = new LongRareTerms(name, ORDER, metadata(), format, Arrays.asList(rarestPerOrd[ordIdx]), maxDocCount, filters[ordIdx]);
    }
    return result;
}
Also used: LongHash (org.opensearch.common.util.LongHash), ArrayList (java.util.ArrayList), InternalAggregation (org.opensearch.search.aggregations.InternalAggregation), SetBackedScalingCuckooFilter (org.opensearch.common.util.SetBackedScalingCuckooFilter)
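
This method is the numeric twin of Example 2: LongHash and LongKeyedBucketOrds stand in for BytesRefHash and BytesKeyedBucketOrds, and because bucket keys arrive as primitive longs there is no scratch BytesRef to deep-copy; the keep-or-filter split, merge-map rewrite, and sub-aggregation wiring are otherwise identical.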

Example 5 with SetBackedScalingCuckooFilter

Use of org.opensearch.common.util.SetBackedScalingCuckooFilter in project OpenSearch by opensearch-project.

From class AbstractRareTermsAggregator, method newFilter:

protected SetBackedScalingCuckooFilter newFilter() {
    SetBackedScalingCuckooFilter filter = new SetBackedScalingCuckooFilter(10000, new Random(filterSeed), precision);
    filter.registerBreaker(this::addRequestCircuitBreakerBytes);
    return filter;
}
Also used: Random (java.util.Random), SetBackedScalingCuckooFilter (org.opensearch.common.util.SetBackedScalingCuckooFilter)
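
registerBreaker gives the filter a callback through which it reports the bytes it allocates as it scales; in the example above that callback is the aggregator's request circuit breaker. A minimal sketch with a hypothetical stand-in for addRequestCircuitBreakerBytes, so the reporting can be observed outside the breaker framework:

import java.util.Random;
import java.util.concurrent.atomic.AtomicLong;

import org.opensearch.common.util.SetBackedScalingCuckooFilter;

final class BreakerSketch {
    public static void main(String[] args) {
        // Hypothetical accounting target standing in for addRequestCircuitBreakerBytes.
        AtomicLong trackedBytes = new AtomicLong();
        SetBackedScalingCuckooFilter filter = new SetBackedScalingCuckooFilter(10_000, new Random(0L), 0.01);
        // The filter invokes the callback with byte deltas as its internal structures grow.
        filter.registerBreaker(trackedBytes::addAndGet);
        for (long i = 0; i < 20_000; i++) {
            filter.add(i);  // crossing the 10_000 threshold converts the exact set into a cuckoo filter
        }
        System.out.println("bytes reported to breaker: " + trackedBytes.get());
    }
}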

Aggregations

SetBackedScalingCuckooFilter (org.opensearch.common.util.SetBackedScalingCuckooFilter): 6
ArrayList (java.util.ArrayList): 5
InternalAggregation (org.opensearch.search.aggregations.InternalAggregation): 3
BytesRef (org.apache.lucene.util.BytesRef): 2
DocValueFormat (org.opensearch.search.DocValueFormat): 2
BucketOrder (org.opensearch.search.aggregations.BucketOrder): 2
HashMap (java.util.HashMap): 1
HashSet (java.util.HashSet): 1
List (java.util.List): 1
Random (java.util.Random): 1
BytesRefHash (org.opensearch.common.util.BytesRefHash): 1
LongHash (org.opensearch.common.util.LongHash): 1
AggregationExecutionException (org.opensearch.search.aggregations.AggregationExecutionException): 1