Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.search.aggregations.Aggregations in project graylog2-server by Graylog2.
The class ESPivotTest, method searchResultForAllMessagesIncludesPivotTimerangeForNoDocuments:
@Test
public void searchResultForAllMessagesIncludesPivotTimerangeForNoDocuments() throws InvalidRangeParametersException {
    DateTimeUtils.setCurrentMillisFixed(1578584665408L);

    final long documentCount = 0;
    returnDocumentCount(queryResult, documentCount);
    final Aggregations mockMetricAggregation = createTimestampRangeAggregations(0d, 0d);
    when(queryResult.getAggregations()).thenReturn(mockMetricAggregation);
    when(query.effectiveTimeRange(pivot)).thenReturn(RelativeRange.create(0));

    final SearchType.Result result = this.esPivot.doExtractResult(job, query, pivot, queryResult, aggregations, queryContext);

    final PivotResult pivotResult = (PivotResult) result;
    assertThat(pivotResult.effectiveTimerange()).isEqualTo(AbsoluteRange.create(
            DateTime.parse("1970-01-01T00:00:00.000Z"),
            DateTime.parse("2020-01-09T15:44:25.408Z")));
}
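The expected range follows directly from the fixed clock: RelativeRange.create(0) means "all messages", so the effective timerange starts at the Unix epoch and ends at "now", which the test pins to 1578584665408 ms. A minimal sketch verifying that conversion (using Joda-Time, as Graylog does; the class name is just for illustration):

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

public class FixedClockCheck {
    public static void main(String[] args) {
        // the millisecond value the test pins as "now" via DateTimeUtils.setCurrentMillisFixed
        final DateTime now = new DateTime(1578584665408L, DateTimeZone.UTC);
        System.out.println(now); // 2020-01-09T15:44:25.408Z, the upper bound asserted above
    }
}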
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.search.aggregations.Aggregations in project graylog2-server by Graylog2.
The class ESPivot, method processRows:
/*
 * Results from Elasticsearch are nested, so we need to recurse into the aggregation tree; but our result is a table,
 * thus we need to keep track of the current row keys manually.
 */
private void processRows(PivotResult.Builder resultBuilder,
                         SearchResponse searchResult,
                         ESGeneratedQueryContext queryContext,
                         Pivot pivot,
                         List<BucketSpec> remainingRows,
                         ArrayDeque<String> rowKeys,
                         HasAggregations aggregation) {
    if (remainingRows.isEmpty()) {
        // this is the last row group, so we need to fork into the columns if they exist.
        // being here also means that `rowKeys` contains the maximum number of parts, one for each combination of row bucket keys.
        // we will always add the series for this bucket, because that's the entire point of row groups
        final PivotResult.Row.Builder rowBuilder = PivotResult.Row.builder().key(ImmutableList.copyOf(rowKeys));
        // do the same for columns as we did for the rows
        processColumns(rowBuilder, searchResult, queryContext, pivot, pivot.columnGroups(), new ArrayDeque<>(), aggregation);
        // columnKeys is empty, because this is a rollup per row bucket, thus for all columns in that bucket (in other words, it's not a leaf!)
        if (pivot.rollup()) {
            processSeries(rowBuilder, searchResult, queryContext, pivot, new ArrayDeque<>(), aggregation, true, "row-leaf");
        }
        resultBuilder.addRow(rowBuilder.source("leaf").build());
    } else {
        // this is not a leaf for the rows, so we add its key to the rowKeys and descend into the aggregation tree.
        // afterwards we'll check whether we need to add a rollup for intermediate buckets; not all clients need them,
        // so they can request to not calculate them
        final BucketSpec currentBucket = remainingRows.get(0);
        // this handler should never be missing, because we used it above to generate the query;
        // if it is missing for some weird reason, it's ok to fail hard here
        final ESPivotBucketSpecHandler<? extends PivotSpec, ? extends Aggregation> handler = bucketHandlers.get(currentBucket.type());
        final Aggregation aggregationResult = handler.extractAggregationFromResult(pivot, currentBucket, aggregation, queryContext);
        final Stream<ESPivotBucketSpecHandler.Bucket> bucketStream = handler.handleResult(pivot, currentBucket, searchResult, aggregationResult, this, queryContext);
        // for each bucket, recurse and eventually collect all the row keys; once we reach a leaf, we'll end up in the other if branch above
        bucketStream.forEach(bucket -> {
            // push the bucket's key and use its aggregation as the new source for sub-aggregations
            rowKeys.addLast(bucket.key());
            processRows(resultBuilder, searchResult, queryContext, pivot, tail(remainingRows), rowKeys, bucket.aggregation());
            rowKeys.removeLast();
        });
        // also add the series for this row key if the client wants rollups
        if (pivot.rollup()) {
            final PivotResult.Row.Builder rowBuilder = PivotResult.Row.builder().key(ImmutableList.copyOf(rowKeys));
            // columnKeys is empty, because this is a rollup per row bucket, thus for all columns in that bucket (in other words, it's not a leaf!)
            processSeries(rowBuilder, searchResult, queryContext, pivot, new ArrayDeque<>(), aggregation, true, "row-inner");
            resultBuilder.addRow(rowBuilder.source("non-leaf").build());
        }
    }
}
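The ArrayDeque push/recurse/pop pattern is what flattens the nested aggregation tree into table rows: every path from the root to a leaf bucket becomes one row key. A standalone sketch of the same idea on plain nested maps (all names here are illustrative, not part of the Graylog API):

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class TreeToRows {
    // recurse into a nested "bucket tree", collecting the key path of each leaf as one row
    static void collectRows(Map<String, Object> node, ArrayDeque<String> keys, List<List<String>> rows) {
        if (node.isEmpty()) {
            rows.add(List.copyOf(keys)); // leaf: the accumulated keys form one table row
            return;
        }
        for (Map.Entry<String, Object> bucket : node.entrySet()) {
            keys.addLast(bucket.getKey());             // push this bucket's key
            @SuppressWarnings("unchecked")
            Map<String, Object> child = (Map<String, Object>) bucket.getValue();
            collectRows(child, keys, rows);            // descend into the sub-buckets
            keys.removeLast();                         // pop on the way back up
        }
    }

    public static void main(String[] args) {
        Map<String, Object> tree = Map.of(
                "nginx", Map.of("GET", Map.of(), "POST", Map.of()),
                "tomcat", Map.of("GET", Map.of()));
        List<List<String>> rows = new ArrayList<>();
        collectRows(tree, new ArrayDeque<>(), rows);
        System.out.println(rows); // e.g. [[nginx, GET], [nginx, POST], [tomcat, GET]]
    }
}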
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.search.aggregations.Aggregations in project graylog2-server by Graylog2.
The class ESPivot, method processColumns:
private void processColumns(PivotResult.Row.Builder rowBuilder,
                            SearchResponse searchResult,
                            ESGeneratedQueryContext queryContext,
                            Pivot pivot,
                            List<BucketSpec> remainingColumns,
                            ArrayDeque<String> columnKeys,
                            HasAggregations aggregation) {
    if (remainingColumns.isEmpty()) {
        // only add the series if we actually have column keys; the empty-key case is already covered
        // by the row-level rollup, and emitting it again here would end up with duplicate data entries
        if (!columnKeys.isEmpty()) {
            processSeries(rowBuilder, searchResult, queryContext, pivot, columnKeys, aggregation, false, "col-leaf");
        }
    } else {
        // for a non-leaf column group, we need to recurse further into the aggregation tree,
        // and if rollup was requested we'll add intermediate series according to the column keys
        final BucketSpec currentBucket = remainingColumns.get(0);
        // this handler should never be missing, because we used it above to generate the query;
        // if it is missing for some weird reason, it's ok to fail hard here
        final ESPivotBucketSpecHandler<? extends PivotSpec, ? extends Aggregation> handler = bucketHandlers.get(currentBucket.type());
        final Aggregation aggregationResult = handler.extractAggregationFromResult(pivot, currentBucket, aggregation, queryContext);
        final Stream<ESPivotBucketSpecHandler.Bucket> bucketStream = handler.handleResult(pivot, currentBucket, searchResult, aggregationResult, this, queryContext);
        // for each bucket, recurse and eventually collect all the column keys; once we reach a leaf, we'll end up in the other if branch above
        bucketStream.forEach(bucket -> {
            // push the bucket's key and use its aggregation as the new source for sub-aggregations
            columnKeys.addLast(bucket.key());
            processColumns(rowBuilder, searchResult, queryContext, pivot, tail(remainingColumns), columnKeys, bucket.aggregation());
            columnKeys.removeLast();
        });
        // don't add the empty column key rollup, because that's not the correct bucket here; it's being done in the row-leaf code
        if (pivot.rollup() && !columnKeys.isEmpty()) {
            // columnKeys is not empty, because this is a rollup per column in a row
            processSeries(rowBuilder, searchResult, queryContext, pivot, columnKeys, aggregation, true, "col-inner");
        }
    }
}
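Both methods consume exactly one BucketSpec per recursion step via tail(), a helper not shown in these snippets. Presumably it just returns the list without its first element, along these lines (a sketch, not the actual Graylog implementation):

// returns the given list without its first element, so each recursive call
// sees only the remaining bucket specs
private static <T> List<T> tail(List<T> list) {
    if (list.isEmpty()) {
        throw new IllegalArgumentException("list must not be empty");
    }
    return list.subList(1, list.size());
}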
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.search.aggregations.Aggregations in project graylog2-server by Graylog2.
The class ESCountHandler, method doCreateAggregation:
@Nonnull
@Override
public Optional<AggregationBuilder> doCreateAggregation(String name, Pivot pivot, Count count, ESPivot searchTypeHandler, ESGeneratedQueryContext queryContext) {
    final String field = count.field();
    if (field == null) {
        // doc_count is always present in elasticsearch's bucket aggregations, no need to add it
        return Optional.empty();
    } else {
        // the request was for a field count, so we have to add a value_count sub-aggregation
        final ValueCountAggregationBuilder value = AggregationBuilders.count(name).field(field);
        record(queryContext, pivot, count, name, ValueCount.class);
        return Optional.of(value);
    }
}
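The distinction here is between Elasticsearch's implicit doc_count, which every bucket carries for free, and an explicit value_count metric, which only counts documents where the field is actually present. A minimal sketch of what the field-count branch adds to the request (the aggregation and field names are just examples; Graylog itself uses the shaded equivalents of these classes):

import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder;

// count("my-count") names the aggregation; field("source") makes it a value_count
final ValueCountAggregationBuilder count = AggregationBuilders.count("my-count").field("source");
// serializes into the request as: "my-count": { "value_count": { "field": "source" } }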
Use of org.elasticsearch.search.aggregations.Aggregations in project snow-owl by b2ihealthcare.
The class EsDocumentSearcher, method aggregate:
@Override
public <T> Aggregation<T> aggregate(AggregationBuilder<T> aggregation) throws IOException {
    final String aggregationName = aggregation.getName();
    final EsClient client = admin.client();
    final DocumentMapping mapping = admin.mappings().getMapping(aggregation.getFrom());

    final EsQueryBuilder esQueryBuilder = new EsQueryBuilder(mapping, admin.settings(), admin.log());
    final QueryBuilder esQuery = esQueryBuilder.build(aggregation.getQuery());

    final SearchRequest req = new SearchRequest(admin.getTypeIndex(mapping));
    final SearchSourceBuilder reqSource = req.source()
            .query(esQuery)
            .size(0)
            .trackScores(false)
            .trackTotalHitsUpTo(Integer.MAX_VALUE);

    // field selection
    final boolean fetchSource = applySourceFiltering(aggregation.getFields(), mapping, reqSource);
    reqSource.aggregation(toEsAggregation(mapping, aggregation, fetchSource));

    SearchResponse response = null;
    try {
        response = client.search(req);
    } catch (Exception e) {
        admin.log().error("Couldn't execute aggregation", e);
        throw new IndexException("Couldn't execute aggregation: " + e.getMessage(), null);
    }

    ImmutableMap.Builder<Object, Bucket<T>> buckets = ImmutableMap.builder();
    Aggregations topLevelAggregations = response.getAggregations();
    Nested nested = topLevelAggregations.get(nestedAggName(aggregation));
    Terms aggregationResult;
    if (nested != null) {
        aggregationResult = nested.getAggregations().get(aggregationName);
    } else {
        aggregationResult = topLevelAggregations.get(aggregationName);
    }

    for (org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket bucket : aggregationResult.getBuckets()) {
        final TopHits topHits;
        if (nested != null) {
            final ReverseNested reverseNested = bucket.getAggregations().get(reverseNestedAggName(aggregation));
            topHits = reverseNested.getAggregations().get(topHitsAggName(aggregation));
        } else {
            topHits = bucket.getAggregations().get(topHitsAggName(aggregation));
        }
        Hits<T> hits;
        if (topHits != null) {
            hits = toHits(aggregation.getSelect(), List.of(aggregation.getFrom()), aggregation.getFields(), fetchSource,
                    aggregation.getBucketHitsLimit(), (int) bucket.getDocCount(), null, topHits.getHits());
        } else {
            hits = new Hits<>(Collections.emptyList(), null, aggregation.getBucketHitsLimit(), (int) bucket.getDocCount());
        }
        buckets.put(bucket.getKey(), new Bucket<>(bucket.getKey(), hits));
    }

    return new Aggregation<>(aggregationName, buckets.build());
}
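The read side above implies a specific shape for the request that toEsAggregation builds: an optional nested wrapper, a terms aggregation carrying the caller's aggregation name, and a top_hits per bucket (behind a reverse_nested in the nested case). A sketch of that tree using the stock Elasticsearch builders; the concrete names and fields are illustrative, since nestedAggName, reverseNestedAggName and topHitsAggName derive them internally:

import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.AggregationBuilders;

public class AggregationShape {
    // nested case: nested > terms > reverse_nested > top_hits
    static AggregationBuilder nestedShape() {
        return AggregationBuilders.nested("agg-nested", "items")
                .subAggregation(AggregationBuilders.terms("my-agg").field("items.groupField")
                        .subAggregation(AggregationBuilders.reverseNested("agg-reverse-nested")
                                .subAggregation(AggregationBuilders.topHits("agg-top-hits").size(10))));
    }

    // non-nested case: terms > top_hits
    static AggregationBuilder topLevelShape() {
        return AggregationBuilders.terms("my-agg").field("groupField")
                .subAggregation(AggregationBuilders.topHits("agg-top-hits").size(10));
    }
}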