Use of org.elasticsearch.search.aggregations.InternalAggregation in project elasticsearch by elastic.
Class InternalExtendedStats, method doReduce.
@Override
public InternalExtendedStats doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
    double sumOfSqrs = 0;
    for (InternalAggregation aggregation : aggregations) {
        InternalExtendedStats stats = (InternalExtendedStats) aggregation;
        if (stats.sigma != sigma) {
            throw new IllegalStateException("Cannot reduce other stats aggregations that have a different sigma");
        }
        sumOfSqrs += stats.getSumOfSquares();
    }
    final InternalStats stats = super.doReduce(aggregations, reduceContext);
    return new InternalExtendedStats(name, stats.getCount(), stats.getSum(), stats.getMin(), stats.getMax(), sumOfSqrs, sigma, format,
            pipelineAggregators(), getMetaData());
}
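The only extra state merged here is sumOfSqrs; count, sum, min, and max are delegated to super.doReduce. Keeping the sum of squares is enough because variance and the sigma-scaled standard deviation bounds can be derived from the merged partials. A minimal sketch of those derivations (illustrative helper methods using the population-variance identity, not code from the class itself):

// Illustrative helpers, not part of InternalExtendedStats.
// Population variance from the merged partials: E[x^2] - (E[x])^2.
static double variance(long count, double sum, double sumOfSqrs) {
    return (sumOfSqrs - ((sum * sum) / count)) / count;
}

// Upper or lower bound at avg +/- sigma standard deviations.
static double stdDeviationBound(long count, double sum, double sumOfSqrs, double sigma, boolean upper) {
    double avg = sum / count;
    double stdDev = Math.sqrt(variance(count, sum, sumOfSqrs));
    return upper ? avg + (stdDev * sigma) : avg - (stdDev * sigma);
}

This is also why reducing partials with different sigma values is rejected above: the bounds of the merged result would not correspond to any single requested sigma.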
Use of org.elasticsearch.search.aggregations.InternalAggregation in project elasticsearch by elastic.
Class CumulativeSumPipelineAggregator, method reduce.
@Override
public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
    MultiBucketsAggregation histo = (MultiBucketsAggregation) aggregation;
    List<? extends Bucket> buckets = histo.getBuckets();
    HistogramFactory factory = (HistogramFactory) histo;
    List<Bucket> newBuckets = new ArrayList<>();
    double sum = 0;
    for (Bucket bucket : buckets) {
        // INSERT_ZEROS means empty buckets resolve to 0.0, so the running sum never sees null
        Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], GapPolicy.INSERT_ZEROS);
        sum += thisBucketValue;
        List<InternalAggregation> aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map((p) -> {
            return (InternalAggregation) p;
        }).collect(Collectors.toList());
        // Attach the running total to the bucket alongside its existing sub-aggregations
        aggs.add(new InternalSimpleValue(name(), sum, formatter, new ArrayList<PipelineAggregator>(), metaData()));
        Bucket newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), new InternalAggregations(aggs));
        newBuckets.add(newBucket);
    }
    return factory.createAggregation(newBuckets);
}
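Stripped of the bucket and aggregation plumbing, the transformation is a prefix sum over the per-bucket metric values. A standalone sketch of just that arithmetic (plain Java, not Elasticsearch types):

// Illustration of the running-sum transformation applied above.
// Empty buckets contribute 0.0, matching GapPolicy.INSERT_ZEROS.
static double[] cumulativeSum(double[] bucketValues) {
    double[] out = new double[bucketValues.length];
    double runningSum = 0;
    for (int i = 0; i < bucketValues.length; i++) {
        runningSum += bucketValues[i];
        out[i] = runningSum;
    }
    return out;
}

For example, cumulativeSum(new double[] { 3, 0, 5 }) yields [3.0, 3.0, 8.0], which is the value each per-bucket InternalSimpleValue would carry.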
Use of org.elasticsearch.search.aggregations.InternalAggregation in project elasticsearch by elastic.
Class DerivativePipelineAggregator, method reduce.
@Override
public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
    MultiBucketsAggregation histo = (MultiBucketsAggregation) aggregation;
    List<? extends Bucket> buckets = histo.getBuckets();
    HistogramFactory factory = (HistogramFactory) histo;
    List<Bucket> newBuckets = new ArrayList<>();
    Number lastBucketKey = null;
    Double lastBucketValue = null;
    for (Bucket bucket : buckets) {
        Number thisBucketKey = factory.getKey(bucket);
        Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy);
        if (lastBucketValue != null && thisBucketValue != null) {
            // Raw change since the previous bucket; optionally paired with the key distance in xAxisUnits
            double gradient = thisBucketValue - lastBucketValue;
            double xDiff = -1;
            if (xAxisUnits != null) {
                xDiff = (thisBucketKey.doubleValue() - lastBucketKey.doubleValue()) / xAxisUnits;
            }
            final List<InternalAggregation> aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map((p) -> {
                return (InternalAggregation) p;
            }).collect(Collectors.toList());
            aggs.add(new InternalDerivative(name(), gradient, xDiff, formatter, new ArrayList<PipelineAggregator>(), metaData()));
            Bucket newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), new InternalAggregations(aggs));
            newBuckets.add(newBucket);
        } else {
            // First bucket, or a gap the gap policy did not fill: nothing to diff against, keep it unchanged
            newBuckets.add(bucket);
        }
        lastBucketKey = thisBucketKey;
        lastBucketValue = thisBucketValue;
    }
    return factory.createAggregation(newBuckets);
}
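For each bucket after the first, the aggregator records the raw change from the previous bucket and, when a unit is configured, the key distance expressed in that unit; the normalized rate is then the gradient divided by that distance. A standalone sketch of the per-bucket arithmetic (illustrative helper, not the aggregator's API; it assumes the normalized value is gradient / xDiff):

// Returns null when there is nothing to diff against, the raw delta when no unit
// is configured, and a rate per xAxisUnits of key distance otherwise.
static Double derivative(Double lastValue, Double thisValue,
                         double lastKey, double thisKey, Double xAxisUnits) {
    if (lastValue == null || thisValue == null) {
        return null; // first bucket, or a gap the gap policy left as null
    }
    double gradient = thisValue - lastValue;
    if (xAxisUnits == null) {
        return gradient;                             // raw per-bucket change
    }
    double xDiff = (thisKey - lastKey) / xAxisUnits; // key distance in the requested unit
    return gradient / xDiff;                         // change per unit, e.g. per second
}

With a date histogram of hourly buckets and xAxisUnits of one minute, a gradient of 120 over one bucket becomes a normalized rate of 2 per minute.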
Use of org.elasticsearch.search.aggregations.InternalAggregation in project elasticsearch by elastic.
Class MovAvgPipelineAggregator, method reduce.
@Override
public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
    MultiBucketsAggregation histo = (MultiBucketsAggregation) aggregation;
    List<? extends Bucket> buckets = histo.getBuckets();
    HistogramFactory factory = (HistogramFactory) histo;
    List<Bucket> newBuckets = new ArrayList<>();
    EvictingQueue<Double> values = new EvictingQueue<>(this.window);
    Number lastValidKey = 0;
    int lastValidPosition = 0;
    int counter = 0;
    // Do we need to fit the model parameters to the data?
    if (minimize) {
        assert (model.canBeMinimized());
        model = minimize(buckets, histo, model);
    }
    for (Bucket bucket : buckets) {
        Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy);
        // Default is to reuse existing bucket. Simplifies the rest of the logic,
        // since we only change newBucket if we can add to it
        Bucket newBucket = bucket;
        if (!(thisBucketValue == null || thisBucketValue.equals(Double.NaN))) {
            // Some models (e.g. HoltWinters) have certain preconditions that must be met
            if (model.hasValue(values.size())) {
                double movavg = model.next(values);
                List<InternalAggregation> aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map((p) -> {
                    return (InternalAggregation) p;
                }).collect(Collectors.toList());
                aggs.add(new InternalSimpleValue(name(), movavg, formatter, new ArrayList<PipelineAggregator>(), metaData()));
                newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), new InternalAggregations(aggs));
            }
            if (predict > 0) {
                lastValidKey = factory.getKey(bucket);
                lastValidPosition = counter;
            }
            values.offer(thisBucketValue);
        }
        counter += 1;
        newBuckets.add(newBucket);
    }
    if (buckets.size() > 0 && predict > 0) {
        double[] predictions = model.predict(values, predict);
        for (int i = 0; i < predictions.length; i++) {
            List<InternalAggregation> aggs;
            Number newKey = factory.nextKey(lastValidKey);
            if (lastValidPosition + i + 1 < newBuckets.size()) {
                Bucket bucket = newBuckets.get(lastValidPosition + i + 1);
                // Get the existing aggs in the bucket so we don't clobber data
                aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map((p) -> {
                    return (InternalAggregation) p;
                }).collect(Collectors.toList());
                aggs.add(new InternalSimpleValue(name(), predictions[i], formatter, new ArrayList<PipelineAggregator>(), metaData()));
                Bucket newBucket = factory.createBucket(newKey, 0, new InternalAggregations(aggs));
                // Overwrite the existing bucket with the new version
                newBuckets.set(lastValidPosition + i + 1, newBucket);
            } else {
                // Not seen before, create fresh
                aggs = new ArrayList<>();
                aggs.add(new InternalSimpleValue(name(), predictions[i], formatter, new ArrayList<PipelineAggregator>(), metaData()));
                Bucket newBucket = factory.createBucket(newKey, 0, new InternalAggregations(aggs));
                // Since this is a new bucket, simply append it
                newBuckets.add(newBucket);
            }
            lastValidKey = newKey;
        }
    }
    return factory.createAggregation(newBuckets);
}
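Each bucket value is pushed into a fixed-size EvictingQueue, and the model's estimate for a bucket is computed from whatever is in the window before that bucket's own value is offered; prediction then appends or overwrites buckets after the last one that contributed a value. The sketch below reproduces the window mechanics with a plain unweighted moving average, using an ArrayDeque in place of the EvictingQueue (illustrative only, not one of the shipped MovAvgModel implementations):

import java.util.ArrayDeque;
import java.util.Deque;

// Simple moving average over a sliding window of at most `window` values.
static double[] simpleMovingAverage(double[] bucketValues, int window) {
    Deque<Double> values = new ArrayDeque<>();
    double[] out = new double[bucketValues.length];
    for (int i = 0; i < bucketValues.length; i++) {
        // The estimate for bucket i comes from the window *before* bucket i is added,
        // mirroring how model.next(values) runs before values.offer(thisBucketValue).
        out[i] = values.stream().mapToDouble(Double::doubleValue).average().orElse(Double.NaN);
        if (values.size() == window) {
            values.removeFirst(); // evict the oldest value, as the EvictingQueue does implicitly
        }
        values.addLast(bucketValues[i]);
    }
    return out;
}

The first bucket gets NaN here because the window is still empty, which parallels the model.hasValue(values.size()) precondition check above.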
Use of org.elasticsearch.search.aggregations.InternalAggregation in project elasticsearch by elastic.
Class InternalStats, method doReduce.
@Override
public InternalStats doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
    long count = 0;
    double min = Double.POSITIVE_INFINITY;
    double max = Double.NEGATIVE_INFINITY;
    double sum = 0;
    for (InternalAggregation aggregation : aggregations) {
        InternalStats stats = (InternalStats) aggregation;
        count += stats.getCount();
        min = Math.min(min, stats.getMin());
        max = Math.max(max, stats.getMax());
        sum += stats.getSum();
    }
    return new InternalStats(name, count, sum, min, max, format, pipelineAggregators(), getMetaData());
}
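The merge works because each partial combines associatively: counts and sums add, while min and max fold with Math.min and Math.max, seeded with positive and negative infinity as their identity elements; the average is never merged directly but derived afterwards from sum and count. A standalone sketch of the same merge (hypothetical PartialStats holder, not the Elasticsearch class):

// Illustrative partial-stats holder; merging any number of partials in any
// grouping yields the same result, which is what shard-level reduction relies on.
static final class PartialStats {
    final long count; final double sum; final double min; final double max;
    PartialStats(long count, double sum, double min, double max) {
        this.count = count; this.sum = sum; this.min = min; this.max = max;
    }
    PartialStats merge(PartialStats other) {
        return new PartialStats(count + other.count, sum + other.sum,
                Math.min(min, other.min), Math.max(max, other.max));
    }
    double avg() { return count == 0 ? Double.NaN : sum / count; }
}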