Use of org.elasticsearch.search.aggregations.InternalAggregation in project elasticsearch by elastic.
The class InternalGeoCentroid, method doReduce.
@Override
public InternalGeoCentroid doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
    double lonSum = Double.NaN;
    double latSum = Double.NaN;
    int totalCount = 0;
    for (InternalAggregation aggregation : aggregations) {
        InternalGeoCentroid centroidAgg = (InternalGeoCentroid) aggregation;
        if (centroidAgg.count > 0) {
            totalCount += centroidAgg.count;
            if (Double.isNaN(lonSum)) {
                lonSum = centroidAgg.count * centroidAgg.centroid.getLon();
                latSum = centroidAgg.count * centroidAgg.centroid.getLat();
            } else {
                lonSum += (centroidAgg.count * centroidAgg.centroid.getLon());
                latSum += (centroidAgg.count * centroidAgg.centroid.getLat());
            }
        }
    }
    final GeoPoint result = (Double.isNaN(lonSum)) ? null : new GeoPoint(latSum / totalCount, lonSum / totalCount);
    return new InternalGeoCentroid(name, result, totalCount, pipelineAggregators(), getMetaData());
}
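The reduce step is a doc-count-weighted average of the per-shard centroids; lonSum doubles as a sentinel, staying NaN until the first non-empty centroid is seen so that an all-empty reduce yields a null result. A minimal, self-contained sketch of the same arithmetic (the Centroid record is a hypothetical stand-in, not an Elasticsearch type):

record Centroid(double lat, double lon, long count) {}

static Centroid merge(java.util.List<Centroid> shardCentroids) {
    double latSum = 0;
    double lonSum = 0;
    long total = 0;
    for (Centroid c : shardCentroids) {
        if (c.count() > 0) {
            latSum += c.count() * c.lat();   // weight each shard centroid by its doc count
            lonSum += c.count() * c.lon();
            total += c.count();
        }
    }
    // no documents on any shard => no centroid
    return total == 0 ? null : new Centroid(latSum / total, lonSum / total, total);
}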
Use of org.elasticsearch.search.aggregations.InternalAggregation in project elasticsearch by elastic.
The class SearchModule, method registerAggregation.
private void registerAggregation(AggregationSpec spec) {
    if (false == transportClient) {
        namedXContents.add(new NamedXContentRegistry.Entry(BaseAggregationBuilder.class, spec.getName(), (p, c) -> {
            AggregatorFactories.AggParseContext context = (AggregatorFactories.AggParseContext) c;
            return spec.getParser().parse(context.name, context.queryParseContext);
        }));
    }
    namedWriteables.add(new NamedWriteableRegistry.Entry(AggregationBuilder.class, spec.getName().getPreferredName(), spec.getReader()));
    // one entry per internal result type, keyed by its writeable name
    for (Map.Entry<String, Writeable.Reader<? extends InternalAggregation>> t : spec.getResultReaders().entrySet()) {
        String writeableName = t.getKey();
        Writeable.Reader<? extends InternalAggregation> internalReader = t.getValue();
        namedWriteables.add(new NamedWriteableRegistry.Entry(InternalAggregation.class, writeableName, internalReader));
    }
}
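The AggregationSpec consumed here is normally supplied by a SearchPlugin, which pairs the aggregation name with a builder reader, a parser, and one reader per internal result type (the readers that feed the loop above). A hedged sketch of that plugin side, assuming placeholder classes MyAggregationBuilder / InternalMyAggregation and the 5.x-style parser method reference; imports are omitted and the exact signatures vary between versions:

@Override
public List<SearchPlugin.AggregationSpec> getAggregations() {
    // name, stream reader for the builder, parser, plus a reader for the internal result
    return Collections.singletonList(
        new SearchPlugin.AggregationSpec(MyAggregationBuilder.NAME, MyAggregationBuilder::new, MyAggregationBuilder::parse)
            .addResultReader(InternalMyAggregation::new));
}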
Use of org.elasticsearch.search.aggregations.InternalAggregation in project elasticsearch by elastic.
The class InternalDateHistogram, method reduceBuckets.
private List<Bucket> reduceBuckets(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
    final PriorityQueue<IteratorAndCurrent> pq = new PriorityQueue<IteratorAndCurrent>(aggregations.size()) {
        @Override
        protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent b) {
            return a.current.key < b.current.key;
        }
    };
    for (InternalAggregation aggregation : aggregations) {
        InternalDateHistogram histogram = (InternalDateHistogram) aggregation;
        if (histogram.buckets.isEmpty() == false) {
            pq.add(new IteratorAndCurrent(histogram.buckets.iterator()));
        }
    }
    List<Bucket> reducedBuckets = new ArrayList<>();
    if (pq.size() > 0) {
        // list of buckets coming from different shards that have the same key
        List<Bucket> currentBuckets = new ArrayList<>();
        double key = pq.top().current.key;
        do {
            final IteratorAndCurrent top = pq.top();
            if (top.current.key != key) {
                // the key changes, reduce what we already buffered and reset the buffer for current buckets
                final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext);
                if (reduced.getDocCount() >= minDocCount || reduceContext.isFinalReduce() == false) {
                    reducedBuckets.add(reduced);
                }
                currentBuckets.clear();
                key = top.current.key;
            }
            currentBuckets.add(top.current);
            if (top.iterator.hasNext()) {
                final Bucket next = top.iterator.next();
                assert next.key > top.current.key : "shards must return data sorted by key";
                top.current = next;
                pq.updateTop();
            } else {
                pq.pop();
            }
        } while (pq.size() > 0);
        if (currentBuckets.isEmpty() == false) {
            final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext);
            if (reduced.getDocCount() >= minDocCount || reduceContext.isFinalReduce() == false) {
                reducedBuckets.add(reduced);
            }
        }
    }
    return reducedBuckets;
}
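The Lucene PriorityQueue drives a classic k-way merge: every shard returns its buckets sorted by key, so buckets with the same key surface consecutively and can be reduced as a group. A self-contained sketch of the same idea using java.util.PriorityQueue and plain (key, docCount) pairs, summing doc counts where the real code calls Bucket.reduce (java.util imports omitted):

record KeyedCount(long key, long docCount) {}
record Cursor(Iterator<KeyedCount> it, KeyedCount current) {}

static List<KeyedCount> merge(List<List<KeyedCount>> shards) {
    // one cursor per non-empty shard list, ordered by the key of its current element
    PriorityQueue<Cursor> pq = new PriorityQueue<>(Comparator.comparingLong((Cursor c) -> c.current().key()));
    for (List<KeyedCount> shard : shards) {
        Iterator<KeyedCount> it = shard.iterator();
        if (it.hasNext()) {
            pq.add(new Cursor(it, it.next()));
        }
    }
    List<KeyedCount> reduced = new ArrayList<>();
    long currentKey = 0;
    long currentCount = 0;
    boolean buffering = false;
    while (!pq.isEmpty()) {
        Cursor top = pq.poll();                    // java.util.PriorityQueue has no updateTop(), so poll and re-add
        KeyedCount bucket = top.current();
        if (buffering && bucket.key() != currentKey) {
            reduced.add(new KeyedCount(currentKey, currentCount));   // the key changed: flush the buffered bucket
            currentCount = 0;
        }
        currentKey = bucket.key();
        currentCount += bucket.docCount();
        buffering = true;
        if (top.it().hasNext()) {
            pq.add(new Cursor(top.it(), top.it().next()));           // advance this shard's cursor
        }
    }
    if (buffering) {
        reduced.add(new KeyedCount(currentKey, currentCount));
    }
    return reduced;
}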
Use of org.elasticsearch.search.aggregations.InternalAggregation in project elasticsearch by elastic.
The class InternalFilters, method doReduce.
@Override
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
    List<List<InternalBucket>> bucketsList = null;
    for (InternalAggregation aggregation : aggregations) {
        InternalFilters filters = (InternalFilters) aggregation;
        if (bucketsList == null) {
            bucketsList = new ArrayList<>(filters.buckets.size());
            for (InternalBucket bucket : filters.buckets) {
                List<InternalBucket> sameRangeList = new ArrayList<>(aggregations.size());
                sameRangeList.add(bucket);
                bucketsList.add(sameRangeList);
            }
        } else {
            int i = 0;
            for (InternalBucket bucket : filters.buckets) {
                bucketsList.get(i++).add(bucket);
            }
        }
    }
    InternalFilters reduced = new InternalFilters(name, new ArrayList<InternalBucket>(bucketsList.size()), keyed, pipelineAggregators(), getMetaData());
    for (List<InternalBucket> sameRangeList : bucketsList) {
        reduced.buckets.add((sameRangeList.get(0)).reduce(sameRangeList, reduceContext));
    }
    return reduced;
}
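Every shard returns its filter buckets in the same order, so the merge is positional: bucket i of each shard belongs to filter i, and each column is then reduced into a single bucket. A minimal sketch of that regrouping with plain doc counts standing in for InternalBucket:

static List<Long> reduceByPosition(List<List<Long>> perShardBucketCounts) {
    List<Long> reduced = null;
    for (List<Long> shard : perShardBucketCounts) {
        if (reduced == null) {
            reduced = new ArrayList<>(shard);                   // the first shard seeds one entry per filter
        } else {
            for (int i = 0; i < shard.size(); i++) {
                reduced.set(i, reduced.get(i) + shard.get(i));  // same position => same filter
            }
        }
    }
    return reduced == null ? List.of() : reduced;
}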
Use of org.elasticsearch.search.aggregations.InternalAggregation in project elasticsearch by elastic.
The class SearchPhaseController, method reduceAggs.
private InternalAggregations reduceAggs(List<InternalAggregations> aggregationsList, List<SiblingPipelineAggregator> pipelineAggregators, ReduceContext reduceContext) {
    InternalAggregations aggregations = InternalAggregations.reduce(aggregationsList, reduceContext);
    if (pipelineAggregators != null) {
        List<InternalAggregation> newAggs = StreamSupport.stream(aggregations.spliterator(), false).map((p) -> (InternalAggregation) p).collect(Collectors.toList());
        for (SiblingPipelineAggregator pipelineAggregator : pipelineAggregators) {
            InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(newAggs), reduceContext);
            newAggs.add(newAgg);
        }
        return new InternalAggregations(newAggs);
    }
    return aggregations;
}
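The shape here is: fully reduce the per-shard aggregations first, then let each sibling pipeline aggregator compute a new aggregation from the reduced set and append it, so later pipelines also see the output of earlier ones. A toy sketch of that accumulation with plain doubles and java.util.function.Function standing in for the pipeline aggregators:

static List<Double> reduceThenRunPipelines(List<Double> reduced, List<Function<List<Double>, Double>> pipelines) {
    List<Double> results = new ArrayList<>(reduced);
    for (Function<List<Double>, Double> pipeline : pipelines) {
        results.add(pipeline.apply(results));   // the appended value is visible to the next pipeline
    }
    return results;
}

// usage: a "max" pipeline followed by an "avg over everything seen so far" pipeline
List<Double> out = reduceThenRunPipelines(List.of(1.0, 4.0, 2.0),
        List.of(xs -> Collections.max(xs),
                xs -> xs.stream().mapToDouble(Double::doubleValue).average().orElse(0)));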