Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.search.aggregations.metrics.Min in project graylog2-server by Graylog2.
The class ESPivot, method extractEffectiveTimeRange:
private AbsoluteRange extractEffectiveTimeRange(SearchResponse queryResult, Query query, Pivot pivot) {
    final Min min = queryResult.getAggregations().get("timestamp-min");
    final Double from = min.getValue();
    final Max max = queryResult.getAggregations().get("timestamp-max");
    final Double to = max.getValue();
    final TimeRange pivotRange = query.effectiveTimeRange(pivot);
    // use the observed min/max timestamps only for an all-messages range that produced results;
    // otherwise fall back to the query's effective time range for this pivot
    return AbsoluteRange.create(
            isAllMessagesTimeRange(pivotRange) && from != 0
                    ? new DateTime(from.longValue(), DateTimeZone.UTC)
                    : pivotRange.getFrom(),
            isAllMessagesTimeRange(pivotRange) && to != 0
                    ? new DateTime(to.longValue(), DateTimeZone.UTC)
                    : pivotRange.getTo());
}
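For context, the "timestamp-min" and "timestamp-max" aggregations read here are registered by doGenerateQueryPart, shown further below. A minimal sketch of the same read-side pattern in isolation; the helper name and fallback policy are illustrative, not from the Graylog sources:

// Hypothetical helper: read the minimum timestamp out of a SearchResponse,
// falling back when the aggregation saw no matching documents.
static DateTime minTimestampOrDefault(SearchResponse response, DateTime fallback) {
    final Min min = response.getAggregations().get("timestamp-min");
    final double value = min.getValue();
    // Min.getValue() yields Double.POSITIVE_INFINITY when nothing matched;
    // extractEffectiveTimeRange guards with != 0, the infinity check is the
    // other common guard (see the SonarQube example below).
    if (Double.isInfinite(value) || value == 0) {
        return fallback;
    }
    return new DateTime((long) value, DateTimeZone.UTC);
}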
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.search.aggregations.metrics.Min in project sonarqube by SonarSource.
The class IssueIndex, method getMinCreatedAt:
private OptionalLong getMinCreatedAt(AllFilters filters) {
    String facetNameAndField = CREATED_AT.getFieldName();
    SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().size(0);
    BoolQueryBuilder esFilter = boolQuery();
    filters.stream().filter(Objects::nonNull).forEach(esFilter::must);
    if (esFilter.hasClauses()) {
        sourceBuilder.query(QueryBuilders.boolQuery().filter(esFilter));
    }
    sourceBuilder.aggregation(AggregationBuilders.min(facetNameAndField).field(facetNameAndField));
    SearchRequest request = EsClient.prepareSearch(TYPE_ISSUE.getMainType()).source(sourceBuilder);
    Min minValue = client.search(request).getAggregations().get(facetNameAndField);
    double actualValue = minValue.getValue();
    // the min aggregation reports Double.POSITIVE_INFINITY when no documents matched
    if (Double.isInfinite(actualValue)) {
        return OptionalLong.empty();
    }
    return OptionalLong.of((long) actualValue);
}
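The OptionalLong return value lets a caller distinguish "no issues matched at all" from a real timestamp. A hypothetical caller, with the bucketing logic elided:

// Hypothetical usage: only compute date buckets when at least one issue exists.
OptionalLong minCreatedAt = getMinCreatedAt(allFilters);
if (minCreatedAt.isPresent()) {
    long oldestIssueMs = minCreatedAt.getAsLong();
    // ... derive the first histogram bucket boundary from oldestIssueMs ...
}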
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.search.aggregations.metrics.Min in project graylog2-server by Graylog2.
The class ESPivot, method doGenerateQueryPart:
@Override
public void doGenerateQueryPart(SearchJob job, Query query, Pivot pivot, ESGeneratedQueryContext queryContext) {
    LOG.debug("Generating aggregation for {}", pivot);
    final SearchSourceBuilder searchSourceBuilder = queryContext.searchSourceBuilder(pivot);
    final Map<Object, Object> contextMap = queryContext.contextMap();
    final AggTypes aggTypes = new AggTypes();
    contextMap.put(pivot.id(), aggTypes);
    // holds the initial level aggregation to be added to the query
    AggregationBuilder topLevelAggregation = null;
    // holds the last complete bucket aggregation into which subsequent buckets get added
    AggregationBuilder previousAggregation = null;
    // add global rollup series if those were requested
    if (pivot.rollup()) {
        seriesStream(pivot, queryContext, "global rollup").forEach(searchSourceBuilder::aggregation);
    }
    final Iterator<BucketSpec> rowBuckets = pivot.rowGroups().iterator();
    while (rowBuckets.hasNext()) {
        final BucketSpec bucketSpec = rowBuckets.next();
        final String name = queryContext.nextName();
        LOG.debug("Creating row group aggregation '{}' as {}", bucketSpec.type(), name);
        final ESPivotBucketSpecHandler<? extends PivotSpec, ? extends Aggregation> handler = bucketHandlers.get(bucketSpec.type());
        if (handler == null) {
            throw new IllegalArgumentException("Unknown row_group type " + bucketSpec.type());
        }
        final Optional<AggregationBuilder> generatedAggregation = handler.createAggregation(name, pivot, bucketSpec, this, queryContext, query);
        if (generatedAggregation.isPresent()) {
            final AggregationBuilder aggregationBuilder = generatedAggregation.get();
            if (topLevelAggregation == null) {
                topLevelAggregation = aggregationBuilder;
            }
            // always insert the series for the final row group, or for each one if explicit rollup was requested
            if (!rowBuckets.hasNext() || pivot.rollup()) {
                seriesStream(pivot, queryContext, !rowBuckets.hasNext() ? "leaf row" : "row rollup").forEach(aggregationBuilder::subAggregation);
            }
            if (previousAggregation != null) {
                previousAggregation.subAggregation(aggregationBuilder);
            } else {
                searchSourceBuilder.aggregation(aggregationBuilder);
            }
            previousAggregation = aggregationBuilder;
        }
    }
    final Iterator<BucketSpec> colBuckets = pivot.columnGroups().iterator();
    while (colBuckets.hasNext()) {
        final BucketSpec bucketSpec = colBuckets.next();
        final String name = queryContext.nextName();
        LOG.debug("Creating column group aggregation '{}' as {}", bucketSpec.type(), name);
        final ESPivotBucketSpecHandler<? extends PivotSpec, ? extends Aggregation> handler = bucketHandlers.get(bucketSpec.type());
        if (handler == null) {
            throw new IllegalArgumentException("Unknown column_group type " + bucketSpec.type());
        }
        final Optional<AggregationBuilder> generatedAggregation = handler.createAggregation(name, pivot, bucketSpec, this, queryContext, query);
        if (generatedAggregation.isPresent()) {
            final AggregationBuilder aggregationBuilder = generatedAggregation.get();
            // always insert the series for the final column group, or for each one if explicit rollup was requested
            if (!colBuckets.hasNext() || pivot.rollup()) {
                seriesStream(pivot, queryContext, !colBuckets.hasNext() ? "leaf column" : "column rollup").forEach(aggregationBuilder::subAggregation);
            }
            if (previousAggregation != null) {
                previousAggregation.subAggregation(aggregationBuilder);
            } else {
                searchSourceBuilder.aggregation(aggregationBuilder);
            }
            previousAggregation = aggregationBuilder;
        }
    }
    // the min/max timestamp pair is always added, so extractEffectiveTimeRange above can report the effective range
    final MinAggregationBuilder startTimestamp = AggregationBuilders.min("timestamp-min").field("timestamp");
    final MaxAggregationBuilder endTimestamp = AggregationBuilders.max("timestamp-max").field("timestamp");
    searchSourceBuilder.aggregation(startTimestamp);
    searchSourceBuilder.aggregation(endTimestamp);
    if (topLevelAggregation == null) {
        LOG.debug("No aggregations generated for {}", pivot);
    }
}
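To make the row/column chaining concrete: each bucket level nests under the previous one via subAggregation, and the series metrics land on the innermost bucket. A hand-built equivalent for one row group and one column group; the field names, the avg series, and the "agg-N" names (normally produced by queryContext.nextName()) are illustrative:

// Illustrative shape of the generated request for one row and one column group.
TermsAggregationBuilder rows = AggregationBuilders.terms("agg-0").field("source");
TermsAggregationBuilder columns = AggregationBuilders.terms("agg-1").field("http_method");
// leaf column: the series metrics are attached to the innermost bucket
columns.subAggregation(AggregationBuilders.avg("agg-2").field("took_ms"));
rows.subAggregation(columns);
searchSourceBuilder.aggregation(rows);
// the timestamp min/max pair is always added alongside the bucket tree
searchSourceBuilder.aggregation(AggregationBuilders.min("timestamp-min").field("timestamp"));
searchSourceBuilder.aggregation(AggregationBuilders.max("timestamp-max").field("timestamp"));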
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.search.aggregations.metrics.Min in project graylog2-server by Graylog2.
The class IndicesAdapterES7, method indexRangeStatsOfIndex:
@Override
public IndexRangeStats indexRangeStatsOfIndex(String index) {
    final FilterAggregationBuilder builder = AggregationBuilders.filter("agg", QueryBuilders.existsQuery(Message.FIELD_TIMESTAMP))
            .subAggregation(AggregationBuilders.min("ts_min").field(Message.FIELD_TIMESTAMP))
            .subAggregation(AggregationBuilders.max("ts_max").field(Message.FIELD_TIMESTAMP))
            .subAggregation(AggregationBuilders.terms("streams").size(Integer.MAX_VALUE).field(Message.FIELD_STREAMS));
    final SearchSourceBuilder query = SearchSourceBuilder.searchSource().aggregation(builder).size(0);
    final SearchRequest request = new SearchRequest()
            .source(query)
            .indices(index)
            .searchType(SearchType.DFS_QUERY_THEN_FETCH)
            .indicesOptions(IndicesOptions.lenientExpandOpen());
    final SearchResponse result = client.execute((c, requestOptions) -> c.search(request, requestOptions), "Couldn't build index range of index " + index);
    if (result.getTotalShards() == 0 || result.getAggregations() == null) {
        throw new IndexNotFoundException("Couldn't build index range of index " + index + " because it doesn't exist.");
    }
    final Filter f = result.getAggregations().get("agg");
    if (f == null) {
        throw new IndexNotFoundException("Couldn't build index range of index " + index + " because it doesn't exist.");
    } else if (f.getDocCount() == 0L) {
        LOG.debug("No documents with attribute \"timestamp\" found in index <{}>", index);
        return IndexRangeStats.EMPTY;
    }
    final Min minAgg = f.getAggregations().get("ts_min");
    final long minUnixTime = (long) minAgg.getValue();
    final DateTime min = new DateTime(minUnixTime, DateTimeZone.UTC);
    final Max maxAgg = f.getAggregations().get("ts_max");
    final long maxUnixTime = (long) maxAgg.getValue();
    final DateTime max = new DateTime(maxUnixTime, DateTimeZone.UTC);
    // make sure we return an empty list, so we can differentiate between old indices that don't have this information
    // and newer ones that simply have no streams.
    final Terms streams = f.getAggregations().get("streams");
    final List<String> streamIds = streams.getBuckets().stream()
            .map(MultiBucketsAggregation.Bucket::getKeyAsString)
            .collect(toList());
    return IndexRangeStats.create(min, max, streamIds);
}
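A hypothetical caller sees two distinct outcomes: an existing index with no timestamped documents yields IndexRangeStats.EMPTY, while a missing index surfaces as IndexNotFoundException. The index name and logging policy here are illustrative:

// Hypothetical usage of indexRangeStatsOfIndex.
try {
    IndexRangeStats stats = indicesAdapter.indexRangeStatsOfIndex("graylog_0");
    if (IndexRangeStats.EMPTY.equals(stats)) {
        LOG.info("Index has no documents with a timestamp field");
    }
} catch (IndexNotFoundException e) {
    LOG.warn("Index does not exist, skipping range calculation", e);
}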
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.search.aggregations.metrics.Min in project graylog2-server by Graylog2.
The class TestMultisearchResponse, method getDefaultNamedXContents:
private static List<NamedXContentRegistry.Entry> getDefaultNamedXContents() {
    Map<String, ContextParser<Object, ? extends Aggregation>> map = new HashMap<>();
    map.put("cardinality", (p, c) -> ParsedCardinality.fromXContent(p, (String) c));
    map.put("percentiles_bucket", (p, c) -> ParsedPercentilesBucket.fromXContent(p, (String) c));
    map.put("median_absolute_deviation", (p, c) -> ParsedMedianAbsoluteDeviation.fromXContent(p, (String) c));
    map.put("min", (p, c) -> ParsedMin.fromXContent(p, (String) c));
    map.put("max", (p, c) -> ParsedMax.fromXContent(p, (String) c));
    map.put("sum", (p, c) -> ParsedSum.fromXContent(p, (String) c));
    map.put("avg", (p, c) -> ParsedAvg.fromXContent(p, (String) c));
    map.put("weighted_avg", (p, c) -> ParsedWeightedAvg.fromXContent(p, (String) c));
    map.put("value_count", (p, c) -> ParsedValueCount.fromXContent(p, (String) c));
    map.put("simple_value", (p, c) -> ParsedSimpleValue.fromXContent(p, (String) c));
    map.put("derivative", (p, c) -> ParsedDerivative.fromXContent(p, (String) c));
    map.put("bucket_metric_value", (p, c) -> ParsedBucketMetricValue.fromXContent(p, (String) c));
    map.put("stats", (p, c) -> ParsedStats.fromXContent(p, (String) c));
    map.put("stats_bucket", (p, c) -> ParsedStatsBucket.fromXContent(p, (String) c));
    map.put("extended_stats", (p, c) -> ParsedExtendedStats.fromXContent(p, (String) c));
    map.put("extended_stats_bucket", (p, c) -> ParsedExtendedStatsBucket.fromXContent(p, (String) c));
    map.put("histogram", (p, c) -> ParsedHistogram.fromXContent(p, (String) c));
    map.put("date_histogram", (p, c) -> ParsedDateHistogram.fromXContent(p, (String) c));
    map.put("auto_date_histogram", (p, c) -> ParsedAutoDateHistogram.fromXContent(p, (String) c));
    map.put("sterms", (p, c) -> ParsedStringTerms.fromXContent(p, (String) c));
    map.put("lterms", (p, c) -> ParsedLongTerms.fromXContent(p, (String) c));
    map.put("dterms", (p, c) -> ParsedDoubleTerms.fromXContent(p, (String) c));
    map.put("missing", (p, c) -> ParsedMissing.fromXContent(p, (String) c));
    map.put("nested", (p, c) -> ParsedNested.fromXContent(p, (String) c));
    map.put("reverse_nested", (p, c) -> ParsedReverseNested.fromXContent(p, (String) c));
    map.put("global", (p, c) -> ParsedGlobal.fromXContent(p, (String) c));
    map.put("filter", (p, c) -> ParsedFilter.fromXContent(p, (String) c));
    map.put("sampler", (p, c) -> ParsedSampler.fromXContent(p, (String) c));
    map.put("range", (p, c) -> ParsedRange.fromXContent(p, (String) c));
    map.put("date_range", (p, c) -> ParsedDateRange.fromXContent(p, (String) c));
    map.put("filters", (p, c) -> ParsedFilters.fromXContent(p, (String) c));
    map.put("top_hits", (p, c) -> ParsedTopHits.fromXContent(p, (String) c));
    map.put("composite", (p, c) -> ParsedComposite.fromXContent(p, (String) c));
    List<NamedXContentRegistry.Entry> entries = map.entrySet().stream()
            .map((entry) -> new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(entry.getKey()), entry.getValue()))
            .collect(Collectors.toList());
    entries.add(new NamedXContentRegistry.Entry(Suggest.Suggestion.class, new ParseField("term"), (parser, context) -> TermSuggestion.fromXContent(parser, (String) context)));
    entries.add(new NamedXContentRegistry.Entry(Suggest.Suggestion.class, new ParseField("phrase"), (parser, context) -> PhraseSuggestion.fromXContent(parser, (String) context)));
    entries.add(new NamedXContentRegistry.Entry(Suggest.Suggestion.class, new ParseField("completion"), (parser, context) -> CompletionSuggestion.fromXContent(parser, (String) context)));
    return entries;
}
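These entries are what allow the response-parsing code to map a serialized aggregation type such as "min" back to its ParsedMin representation. A minimal sketch of wiring them into a parser, assuming json is a String holding a search response that was serialized with typed aggregation keys (e.g. "min#timestamp-min"):

// Build a registry from the entries above and parse a canned response with it.
NamedXContentRegistry registry = new NamedXContentRegistry(getDefaultNamedXContents());
try (XContentParser parser = XContentType.JSON.xContent()
        .createParser(registry, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
    SearchResponse response = SearchResponse.fromXContent(parser);
}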