Search in sources :

Example 1 with Bucket

Use of org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket in project elasticsearch by elastic.

From the class BucketScriptPipelineAggregator, method reduce:

@Override
public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
    InternalMultiBucketAggregation<InternalMultiBucketAggregation, InternalMultiBucketAggregation.InternalBucket> originalAgg = (InternalMultiBucketAggregation<InternalMultiBucketAggregation, InternalMultiBucketAggregation.InternalBucket>) aggregation;
    List<? extends Bucket> buckets = originalAgg.getBuckets();
    CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS);
    List newBuckets = new ArrayList<>();
    for (Bucket bucket : buckets) {
        Map<String, Object> vars = new HashMap<>();
        if (script.getParams() != null) {
            vars.putAll(script.getParams());
        }
        boolean skipBucket = false;
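        // Resolve each buckets_path into a script variable for this bucket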
        for (Map.Entry<String, String> entry : bucketsPathsMap.entrySet()) {
            String varName = entry.getKey();
            String bucketsPath = entry.getValue();
            Double value = resolveBucketValue(originalAgg, bucket, bucketsPath, gapPolicy);
            if (GapPolicy.SKIP == gapPolicy && (value == null || Double.isNaN(value))) {
                skipBucket = true;
                break;
            }
            vars.put(varName, value);
        }
        if (skipBucket) {
            newBuckets.add(bucket);
        } else {
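            // Run the compiled script against the resolved variables and attach the result to the bucket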
            ExecutableScript executableScript = reduceContext.scriptService().executable(compiledScript, vars);
            Object returned = executableScript.run();
            if (returned == null) {
                newBuckets.add(bucket);
            } else {
                if (!(returned instanceof Number)) {
                    throw new AggregationExecutionException("series_arithmetic script for reducer [" + name() + "] must return a Number");
                }
                final List<InternalAggregation> aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map((p) -> {
                    return (InternalAggregation) p;
                }).collect(Collectors.toList());
                aggs.add(new InternalSimpleValue(name(), ((Number) returned).doubleValue(), formatter, new ArrayList<>(), metaData()));
                InternalMultiBucketAggregation.InternalBucket newBucket = originalAgg.createBucket(new InternalAggregations(aggs), (InternalMultiBucketAggregation.InternalBucket) bucket);
                newBuckets.add(newBucket);
            }
        }
    }
    return originalAgg.create(newBuckets);
}
Also used : CompiledScript(org.elasticsearch.script.CompiledScript) StreamOutput(org.elasticsearch.common.io.stream.StreamOutput) HashMap(java.util.HashMap) DocValueFormat(org.elasticsearch.search.DocValueFormat) ScriptContext(org.elasticsearch.script.ScriptContext) ArrayList(java.util.ArrayList) InternalMultiBucketAggregation(org.elasticsearch.search.aggregations.InternalMultiBucketAggregation) BucketHelpers.resolveBucketValue(org.elasticsearch.search.aggregations.pipeline.BucketHelpers.resolveBucketValue) InternalAggregations(org.elasticsearch.search.aggregations.InternalAggregations) Bucket(org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket) Map(java.util.Map) GapPolicy(org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy) StreamSupport(java.util.stream.StreamSupport) AggregationExecutionException(org.elasticsearch.search.aggregations.AggregationExecutionException) Script(org.elasticsearch.script.Script) PipelineAggregator(org.elasticsearch.search.aggregations.pipeline.PipelineAggregator) IOException(java.io.IOException) Collectors(java.util.stream.Collectors) InternalAggregation(org.elasticsearch.search.aggregations.InternalAggregation) ReduceContext(org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext) List(java.util.List) InternalSimpleValue(org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue) StreamInput(org.elasticsearch.common.io.stream.StreamInput) ExecutableScript(org.elasticsearch.script.ExecutableScript) Collections(java.util.Collections)
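
For orientation, here is a rough sketch (not taken from the Elasticsearch sources) of a request whose reduce phase would run the method above, written against the 5.x transport client Java API. The index "sales", the field names, the aggregation names and the painless expression are illustrative placeholders.

import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders;
import org.elasticsearch.search.aggregations.pipeline.SimpleValue;

public class BucketScriptUsageSketch {

    static void profitPerMonth(Client client) {
        // Map script variable names to buckets_path expressions (same shape as bucketsPathsMap above)
        Map<String, String> bucketsPaths = new HashMap<>();
        bucketsPaths.put("total", "total_sales");
        bucketsPaths.put("cost", "total_cost");

        SearchResponse response = client.prepareSearch("sales")
                .setSize(0)
                .addAggregation(AggregationBuilders.dateHistogram("by_month")
                        .field("date")
                        .dateHistogramInterval(DateHistogramInterval.MONTH)
                        .subAggregation(AggregationBuilders.sum("total_sales").field("total"))
                        .subAggregation(AggregationBuilders.sum("total_cost").field("cost"))
                        // bucket_script is reduced by BucketScriptPipelineAggregator.reduce(...)
                        .subAggregation(PipelineAggregatorBuilders.bucketScript(
                                "profit", bucketsPaths, new Script("params.total - params.cost"))))
                .get();

        // Each histogram bucket now carries an extra simple value named "profit"
        MultiBucketsAggregation byMonth = response.getAggregations().get("by_month");
        for (Bucket bucket : byMonth.getBuckets()) {
            SimpleValue profit = bucket.getAggregations().get("profit");
            System.out.println(bucket.getKeyAsString() + " -> " + profit.value());
        }
    }
}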

Example 2 with Bucket

Use of org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket in project elasticsearch by elastic.

From the class CumulativeSumPipelineAggregator, method reduce:

@Override
public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
    MultiBucketsAggregation histo = (MultiBucketsAggregation) aggregation;
    List<? extends Bucket> buckets = histo.getBuckets();
    HistogramFactory factory = (HistogramFactory) histo;
    List<Bucket> newBuckets = new ArrayList<>();
    double sum = 0;
    for (Bucket bucket : buckets) {
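        // INSERT_ZEROS guarantees a non-null value here, so the running sum never sees a gap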
        Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], GapPolicy.INSERT_ZEROS);
        sum += thisBucketValue;
        List<InternalAggregation> aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map((p) -> {
            return (InternalAggregation) p;
        }).collect(Collectors.toList());
        aggs.add(new InternalSimpleValue(name(), sum, formatter, new ArrayList<PipelineAggregator>(), metaData()));
        Bucket newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), new InternalAggregations(aggs));
        newBuckets.add(newBucket);
    }
    return factory.createAggregation(newBuckets);
}
Also used : StreamOutput(org.elasticsearch.common.io.stream.StreamOutput) PipelineAggregator(org.elasticsearch.search.aggregations.pipeline.PipelineAggregator) IOException(java.io.IOException) DocValueFormat(org.elasticsearch.search.DocValueFormat) Collectors(java.util.stream.Collectors) ArrayList(java.util.ArrayList) InternalAggregation(org.elasticsearch.search.aggregations.InternalAggregation) ReduceContext(org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext) List(java.util.List) BucketHelpers.resolveBucketValue(org.elasticsearch.search.aggregations.pipeline.BucketHelpers.resolveBucketValue) InternalAggregations(org.elasticsearch.search.aggregations.InternalAggregations) MultiBucketsAggregation(org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation) Bucket(org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket) InternalSimpleValue(org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue) StreamInput(org.elasticsearch.common.io.stream.StreamInput) Map(java.util.Map) GapPolicy(org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy) StreamSupport(java.util.stream.StreamSupport) HistogramFactory(org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory)
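
A hedged sketch (not from the Elasticsearch sources) of a cumulative_sum request that this reduce method would process, using the 5.x transport client API; index, field and aggregation names are placeholders.

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders;
import org.elasticsearch.search.aggregations.pipeline.SimpleValue;

public class CumulativeSumUsageSketch {

    static void runningTotal(Client client) {
        SearchResponse response = client.prepareSearch("sales")
                .setSize(0)
                .addAggregation(AggregationBuilders.dateHistogram("by_day")
                        .field("date")
                        .dateHistogramInterval(DateHistogramInterval.DAY)
                        .subAggregation(AggregationBuilders.sum("daily_sales").field("total"))
                        // cumulative_sum is reduced by CumulativeSumPipelineAggregator.reduce(...)
                        .subAggregation(PipelineAggregatorBuilders.cumulativeSum("sales_to_date", "daily_sales")))
                .get();

        MultiBucketsAggregation byDay = response.getAggregations().get("by_day");
        for (Bucket bucket : byDay.getBuckets()) {
            SimpleValue runningSum = bucket.getAggregations().get("sales_to_date");
            System.out.println(bucket.getKeyAsString() + " -> " + runningSum.value());
        }
    }
}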

Example 3 with Bucket

Use of org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket in project elasticsearch by elastic.

From the class DerivativePipelineAggregator, method reduce:

@Override
public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
    MultiBucketsAggregation histo = (MultiBucketsAggregation) aggregation;
    List<? extends Bucket> buckets = histo.getBuckets();
    HistogramFactory factory = (HistogramFactory) histo;
    List<Bucket> newBuckets = new ArrayList<>();
    Number lastBucketKey = null;
    Double lastBucketValue = null;
    for (Bucket bucket : buckets) {
        Number thisBucketKey = factory.getKey(bucket);
        Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy);
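        // A derivative needs both the current and the previous bucket value; the first bucket is passed through unchanged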
        if (lastBucketValue != null && thisBucketValue != null) {
            double gradient = thisBucketValue - lastBucketValue;
            double xDiff = -1;
            if (xAxisUnits != null) {
                xDiff = (thisBucketKey.doubleValue() - lastBucketKey.doubleValue()) / xAxisUnits;
            }
            final List<InternalAggregation> aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map((p) -> {
                return (InternalAggregation) p;
            }).collect(Collectors.toList());
            aggs.add(new InternalDerivative(name(), gradient, xDiff, formatter, new ArrayList<PipelineAggregator>(), metaData()));
            Bucket newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), new InternalAggregations(aggs));
            newBuckets.add(newBucket);
        } else {
            newBuckets.add(bucket);
        }
        lastBucketKey = thisBucketKey;
        lastBucketValue = thisBucketValue;
    }
    return factory.createAggregation(newBuckets);
}
Also used : StreamOutput(org.elasticsearch.common.io.stream.StreamOutput) PipelineAggregator(org.elasticsearch.search.aggregations.pipeline.PipelineAggregator) IOException(java.io.IOException) DocValueFormat(org.elasticsearch.search.DocValueFormat) Collectors(java.util.stream.Collectors) ArrayList(java.util.ArrayList) InternalAggregation(org.elasticsearch.search.aggregations.InternalAggregation) ReduceContext(org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext) List(java.util.List) BucketHelpers.resolveBucketValue(org.elasticsearch.search.aggregations.pipeline.BucketHelpers.resolveBucketValue) InternalAggregations(org.elasticsearch.search.aggregations.InternalAggregations) MultiBucketsAggregation(org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation) Bucket(org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket) StreamInput(org.elasticsearch.common.io.stream.StreamInput) Map(java.util.Map) GapPolicy(org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy) StreamSupport(java.util.stream.StreamSupport) HistogramFactory(org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory)
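
A hedged sketch (not part of the Elasticsearch sources) of a derivative request that would exercise this reduce method, against the 5.x transport client API; index, field and aggregation names are placeholders.

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders;
import org.elasticsearch.search.aggregations.pipeline.SimpleValue;

public class DerivativeUsageSketch {

    static void dailyChange(Client client) {
        SearchResponse response = client.prepareSearch("sales")
                .setSize(0)
                .addAggregation(AggregationBuilders.dateHistogram("by_day")
                        .field("date")
                        .dateHistogramInterval(DateHistogramInterval.DAY)
                        .subAggregation(AggregationBuilders.sum("daily_sales").field("total"))
                        // derivative is reduced by DerivativePipelineAggregator.reduce(...)
                        .subAggregation(PipelineAggregatorBuilders.derivative("sales_deriv", "daily_sales")))
                .get();

        MultiBucketsAggregation byDay = response.getAggregations().get("by_day");
        for (Bucket bucket : byDay.getBuckets()) {
            // The first bucket has no predecessor, so reduce() passed it through without a derivative
            SimpleValue deriv = bucket.getAggregations().get("sales_deriv");
            if (deriv != null) {
                System.out.println(bucket.getKeyAsString() + " -> " + deriv.value());
            }
        }
    }
}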

Example 4 with Bucket

Use of org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket in project elasticsearch by elastic.

From the class MovAvgPipelineAggregator, method reduce:

@Override
public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
    MultiBucketsAggregation histo = (MultiBucketsAggregation) aggregation;
    List<? extends Bucket> buckets = histo.getBuckets();
    HistogramFactory factory = (HistogramFactory) histo;
    List<Bucket> newBuckets = new ArrayList<>();
    EvictingQueue<Double> values = new EvictingQueue<>(this.window);
    Number lastValidKey = 0;
    int lastValidPosition = 0;
    int counter = 0;
    // Do we need to fit the model parameters to the data?
    if (minimize) {
        assert (model.canBeMinimized());
        model = minimize(buckets, histo, model);
    }
    for (Bucket bucket : buckets) {
        Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy);
        // Default is to reuse existing bucket.  Simplifies the rest of the logic,
        // since we only change newBucket if we can add to it
        Bucket newBucket = bucket;
        if (!(thisBucketValue == null || thisBucketValue.equals(Double.NaN))) {
            // Some models (e.g. HoltWinters) have certain preconditions that must be met
            if (model.hasValue(values.size())) {
                double movavg = model.next(values);
                List<InternalAggregation> aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map((p) -> {
                    return (InternalAggregation) p;
                }).collect(Collectors.toList());
                aggs.add(new InternalSimpleValue(name(), movavg, formatter, new ArrayList<PipelineAggregator>(), metaData()));
                newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), new InternalAggregations(aggs));
            }
            if (predict > 0) {
                lastValidKey = factory.getKey(bucket);
                lastValidPosition = counter;
            }
            values.offer(thisBucketValue);
        }
        counter += 1;
        newBuckets.add(newBucket);
    }
    if (buckets.size() > 0 && predict > 0) {
        double[] predictions = model.predict(values, predict);
        for (int i = 0; i < predictions.length; i++) {
            List<InternalAggregation> aggs;
            Number newKey = factory.nextKey(lastValidKey);
            if (lastValidPosition + i + 1 < newBuckets.size()) {
                Bucket bucket = newBuckets.get(lastValidPosition + i + 1);
                // Get the existing aggs in the bucket so we don't clobber data
                aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map((p) -> {
                    return (InternalAggregation) p;
                }).collect(Collectors.toList());
                aggs.add(new InternalSimpleValue(name(), predictions[i], formatter, new ArrayList<PipelineAggregator>(), metaData()));
                Bucket newBucket = factory.createBucket(newKey, 0, new InternalAggregations(aggs));
                // Overwrite the existing bucket with the new version
                newBuckets.set(lastValidPosition + i + 1, newBucket);
            } else {
                // Not seen before, create fresh
                aggs = new ArrayList<>();
                aggs.add(new InternalSimpleValue(name(), predictions[i], formatter, new ArrayList<PipelineAggregator>(), metaData()));
                Bucket newBucket = factory.createBucket(newKey, 0, new InternalAggregations(aggs));
                // Since this is a new bucket, simply append it
                newBuckets.add(newBucket);
            }
            lastValidKey = newKey;
        }
    }
    return factory.createAggregation(newBuckets);
}
Also used : StreamOutput(org.elasticsearch.common.io.stream.StreamOutput) PipelineAggregator(org.elasticsearch.search.aggregations.pipeline.PipelineAggregator) ListIterator(java.util.ListIterator) IOException(java.io.IOException) DocValueFormat(org.elasticsearch.search.DocValueFormat) MovAvgModel(org.elasticsearch.search.aggregations.pipeline.movavg.models.MovAvgModel) Collectors(java.util.stream.Collectors) EvictingQueue(org.elasticsearch.common.collect.EvictingQueue) ArrayList(java.util.ArrayList) InternalAggregation(org.elasticsearch.search.aggregations.InternalAggregation) ReduceContext(org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext) List(java.util.List) BucketHelpers.resolveBucketValue(org.elasticsearch.search.aggregations.pipeline.BucketHelpers.resolveBucketValue) InternalAggregations(org.elasticsearch.search.aggregations.InternalAggregations) MultiBucketsAggregation(org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation) Bucket(org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket) InternalSimpleValue(org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue) StreamInput(org.elasticsearch.common.io.stream.StreamInput) Map(java.util.Map) GapPolicy(org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy) StreamSupport(java.util.stream.StreamSupport) HistogramFactory(org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory)
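
A hedged sketch (not part of the Elasticsearch sources) of a moving_avg request that would run this reduce method, against the 5.x transport client API. Index, field and aggregation names are placeholders, and the window(...) and predict(...) builder calls are assumed from that API; the window feeds the EvictingQueue above and predict triggers the prediction branch.

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders;
import org.elasticsearch.search.aggregations.pipeline.SimpleValue;

public class MovAvgUsageSketch {

    static void smoothedSales(Client client) {
        SearchResponse response = client.prepareSearch("sales")
                .setSize(0)
                .addAggregation(AggregationBuilders.dateHistogram("by_day")
                        .field("date")
                        .dateHistogramInterval(DateHistogramInterval.DAY)
                        .subAggregation(AggregationBuilders.sum("daily_sales").field("total"))
                        // moving_avg is reduced by MovAvgPipelineAggregator.reduce(...)
                        .subAggregation(PipelineAggregatorBuilders.movingAvg("sales_mavg", "daily_sales")
                                .window(7)
                                .predict(3)))
                .get();

        MultiBucketsAggregation byDay = response.getAggregations().get("by_day");
        for (Bucket bucket : byDay.getBuckets()) {
            // Early buckets may not carry a moving average until the model has enough values
            SimpleValue mavg = bucket.getAggregations().get("sales_mavg");
            System.out.println(bucket.getKeyAsString() + " doc_count=" + bucket.getDocCount()
                    + (mavg == null ? "" : " mavg=" + mavg.value()));
        }
    }
}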

Example 5 with Bucket

Use of org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket in project metasfresh-webui-api by metasfresh.

From the class KPIDataLoader, method loadData:

private void loadData(final KPIDataResult.Builder data, final TimeRange timeRange) {
    logger.trace("Loading data for {}", timeRange);
    // 
    // Create query evaluation context
    final Evaluatee evalCtx = Evaluatees.mapBuilder().put("MainFromMillis", data.getRange().getFromMillis()).put("MainToMillis", data.getRange().getToMillis()).put("FromMillis", timeRange.getFromMillis()).put("ToMillis", timeRange.getToMillis()).build().andComposeWith(Evaluatees.ofCtx(Env.getCtx()));
    // 
    // Resolve esQuery's variables
    final IStringExpression esQuery = kpi.getESQuery();
    final String esQueryParsed = esQuery.evaluate(evalCtx, OnVariableNotFound.Preserve);
    // 
    // Execute the query
    final SearchResponse response;
    try {
        logger.trace("Executing: \n{}", esQueryParsed);
        response = elasticsearchClient.prepareSearch(kpi.getESSearchIndex()).setTypes(kpi.getESSearchTypes()).setSource(esQueryParsed).get();
        logger.trace("Got response: \n{}", response);
    } catch (final NoNodeAvailableException e) {
        // Elasticsearch transport error => nothing to do about it
        throw e;
    } catch (final Exception e) {
        throw new AdempiereException("Failed executing query for " + this + ": " + e.getLocalizedMessage() + "\nQuery: " + esQueryParsed, e);
    }
    // Fetch data
    try {
        final List<Aggregation> aggregations = response.getAggregations().asList();
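        // Dispatch on aggregation type: multi-bucket aggregations vs. single-value metrics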
        for (final Aggregation agg : aggregations) {
            if (agg instanceof MultiBucketsAggregation) {
                final String aggName = agg.getName();
                final MultiBucketsAggregation multiBucketsAggregation = (MultiBucketsAggregation) agg;
                for (final Bucket bucket : multiBucketsAggregation.getBuckets()) {
                    final Object key = dataSetValueKeyExtractor.apply(bucket, timeRange);
                    for (final KPIField field : kpi.getFields()) {
                        final Object value = field.getBucketValueExtractor().extractValue(aggName, bucket);
                        final Object jsonValue = formatValue(field, value);
                        if (jsonValue == null) {
                            continue;
                        }
                        final String fieldName = fieldNameExtractor.apply(field, timeRange);
                        data.putValue(aggName, key, fieldName, jsonValue);
                    }
                    // 
                    // Make sure the groupByField's value is present in our dataSet value.
                    // If it does not exist, we can use the key as its value.
                    final KPIField groupByField = kpi.getGroupByFieldOrNull();
                    if (groupByField != null) {
                        data.putValueIfAbsent(aggName, key, groupByField.getFieldName(), key);
                    }
                }
            } else if (agg instanceof NumericMetricsAggregation.SingleValue) {
                final NumericMetricsAggregation.SingleValue singleValueAggregation = (NumericMetricsAggregation.SingleValue) agg;
                // N/A
                final String key = "NO_KEY";
                for (final KPIField field : kpi.getFields()) {
                    final Object value;
                    if ("value".equals(field.getESPathAsString())) {
                        value = singleValueAggregation.value();
                    } else {
                        throw new IllegalStateException("Only ES path ending with 'value' allowed for field: " + field);
                    }
                    final Object jsonValue = field.convertValueToJson(value);
                    data.putValue(agg.getName(), key, field.getFieldName(), jsonValue);
                }
            } else {
                new AdempiereException("Aggregation type not supported: " + agg.getClass()).throwIfDeveloperModeOrLogWarningElse(logger);
            }
        }
    } catch (final Exception e) {
        throw new AdempiereException(e.getLocalizedMessage() + "\n KPI: " + this + "\n Query: " + esQueryParsed + "\n Response: " + response, e);
    }
}
Also used : NumericMetricsAggregation(org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation) NoNodeAvailableException(org.elasticsearch.client.transport.NoNodeAvailableException) AdempiereException(org.adempiere.exceptions.AdempiereException) Evaluatee(org.compiere.util.Evaluatee) SearchResponse(org.elasticsearch.action.search.SearchResponse) MultiBucketsAggregation(org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation) Aggregation(org.elasticsearch.search.aggregations.Aggregation) Bucket(org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket) IStringExpression(org.adempiere.ad.expression.api.IStringExpression)
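
A stripped-down sketch (not part of the metasfresh sources) of the same dispatch pattern used in loadData: bucketed aggregations are walked bucket by bucket, single-value metrics are read directly. The index name and the raw JSON query are placeholders, and the client calls mirror the ones used above.

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.search.aggregations.Aggregation;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;
import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation;

public class KpiResponseWalkerSketch {

    static void walk(Client client, String esQueryJson) {
        SearchResponse response = client.prepareSearch("kpi-index")
                .setSource(esQueryJson)
                .get();

        for (Aggregation agg : response.getAggregations()) {
            if (agg instanceof MultiBucketsAggregation) {
                // One data point per bucket, keyed by the bucket key
                for (Bucket bucket : ((MultiBucketsAggregation) agg).getBuckets()) {
                    System.out.println(agg.getName() + " / " + bucket.getKeyAsString()
                            + " -> doc_count=" + bucket.getDocCount());
                }
            } else if (agg instanceof NumericMetricsAggregation.SingleValue) {
                // Single-value metrics (avg, sum, ...) expose exactly one number
                double value = ((NumericMetricsAggregation.SingleValue) agg).value();
                System.out.println(agg.getName() + " -> " + value);
            } else {
                System.out.println("Unsupported aggregation type: " + agg.getClass().getName());
            }
        }
    }
}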

Aggregations

Bucket (org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket) 16
List (java.util.List) 8
Map (java.util.Map) 8
ArrayList (java.util.ArrayList) 7
InternalAggregation (org.elasticsearch.search.aggregations.InternalAggregation) 7
MultiBucketsAggregation (org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation) 7
FacetValue (io.vertigo.dynamo.collections.model.FacetValue) 6
IOException (java.io.IOException) 6
LinkedHashMap (java.util.LinkedHashMap) 6
Collectors (java.util.stream.Collectors) 6
StreamSupport (java.util.stream.StreamSupport) 6
StreamInput (org.elasticsearch.common.io.stream.StreamInput) 6
ReduceContext (org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext) 6
InternalAggregations (org.elasticsearch.search.aggregations.InternalAggregations) 6
StreamOutput (org.elasticsearch.common.io.stream.StreamOutput) 5
DocValueFormat (org.elasticsearch.search.DocValueFormat) 5
GapPolicy (org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy) 5
BucketHelpers.resolveBucketValue (org.elasticsearch.search.aggregations.pipeline.BucketHelpers.resolveBucketValue) 5
PipelineAggregator (org.elasticsearch.search.aggregations.pipeline.PipelineAggregator) 5
MessageText (io.vertigo.core.locale.MessageText) 4