Use of org.elasticsearch.search.aggregations.Aggregation in project graylog2-server by Graylog2.
The class ESPivot, method processRows.
/*
 * Results from Elasticsearch are nested, so we need to recurse into the aggregation tree; but our result is a
 * table, so we need to keep track of the current row keys manually.
 */
private void processRows(PivotResult.Builder resultBuilder,
                         SearchResponse searchResult,
                         ESGeneratedQueryContext queryContext,
                         Pivot pivot,
                         List<BucketSpec> remainingRows,
                         ArrayDeque<String> rowKeys,
                         HasAggregations aggregation) {
    if (remainingRows.isEmpty()) {
        // this is the last row group, so we need to fork into the columns if they exist.
        // being here also means that `rowKeys` contains the maximum number of parts, one for each combination of row bucket keys.
        // we will always add the series for this bucket, because that's the entire point of row groups.
        final PivotResult.Row.Builder rowBuilder = PivotResult.Row.builder().key(ImmutableList.copyOf(rowKeys));
        // do the same for the columns as we did for the rows
        processColumns(rowBuilder, searchResult, queryContext, pivot, pivot.columnGroups(), new ArrayDeque<>(), aggregation);
        // columnKeys is empty, because this is a rollup per row bucket, i.e. over all columns in that bucket (in other words, it's not a column leaf!)
        if (pivot.rollup()) {
            processSeries(rowBuilder, searchResult, queryContext, pivot, new ArrayDeque<>(), aggregation, true, "row-leaf");
        }
        resultBuilder.addRow(rowBuilder.source("leaf").build());
    } else {
        // this is not a leaf for the rows, so we add its key to rowKeys and descend into the aggregation tree.
        // afterwards we check whether we need to add a rollup for intermediate buckets; not all clients need them,
        // so they can request that they not be calculated.
        final BucketSpec currentBucket = remainingRows.get(0);
        // this handler should never be missing, because we used it above to generate the query.
        // if it is missing for some weird reason, it's ok to fail hard here.
        final ESPivotBucketSpecHandler<? extends PivotSpec, ? extends Aggregation> handler = bucketHandlers.get(currentBucket.type());
        final Aggregation aggregationResult = handler.extractAggregationFromResult(pivot, currentBucket, aggregation, queryContext);
        final Stream<ESPivotBucketSpecHandler.Bucket> bucketStream = handler.handleResult(pivot, currentBucket, searchResult, aggregationResult, this, queryContext);
        // for each bucket, recurse and eventually collect all the row keys. once we reach a leaf, we end up in the other branch above.
        bucketStream.forEach(bucket -> {
            // push the bucket's key and use its aggregation as the new source for sub-aggregations
            rowKeys.addLast(bucket.key());
            processRows(resultBuilder, searchResult, queryContext, pivot, tail(remainingRows), rowKeys, bucket.aggregation());
            rowKeys.removeLast();
        });
        // also add the series for this row key if the client wants rollups
        if (pivot.rollup()) {
            final PivotResult.Row.Builder rowBuilder = PivotResult.Row.builder().key(ImmutableList.copyOf(rowKeys));
            // columnKeys is empty, because this is a rollup per row bucket, i.e. over all columns in that bucket (in other words, it's not a leaf!)
            processSeries(rowBuilder, searchResult, queryContext, pivot, new ArrayDeque<>(), aggregation, true, "row-inner");
            resultBuilder.addRow(rowBuilder.source("non-leaf").build());
        }
    }
}
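The recursion consumes one bucket spec per level through a tail helper that is not shown in this snippet. A minimal sketch of what it presumably does (the name tail comes from the call sites above; the body is an assumption, not the project's actual implementation):

// assumed implementation of the tail() helper used above: the list without
// its first element, so each recursive call consumes one bucket spec until
// remainingRows / remainingColumns is empty
private static <T> List<T> tail(List<T> list) {
    if (list.isEmpty()) {
        return list;
    }
    return list.subList(1, list.size());
}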
Use of org.elasticsearch.search.aggregations.Aggregation in project graylog2-server by Graylog2.
The class ESPivot, method processColumns.
private void processColumns(PivotResult.Row.Builder rowBuilder,
                            SearchResponse searchResult,
                            ESGeneratedQueryContext queryContext,
                            Pivot pivot,
                            List<BucketSpec> remainingColumns,
                            ArrayDeque<String> columnKeys,
                            HasAggregations aggregation) {
    if (remainingColumns.isEmpty()) {
        // this is the leaf of the column groups: add the series for the complete column key.
        // skip the empty column key (i.e. the rollup-only case without column groups), because those series
        // are already added in the row-leaf code and we would otherwise end up with duplicate data entries.
        if (!columnKeys.isEmpty()) {
            processSeries(rowBuilder, searchResult, queryContext, pivot, columnKeys, aggregation, false, "col-leaf");
        }
    } else {
        // for a non-leaf column group, we need to recurse further into the aggregation tree,
        // and if rollup was requested we add intermediate series according to the column keys.
        final BucketSpec currentBucket = remainingColumns.get(0);
        // this handler should never be missing, because we used it above to generate the query.
        // if it is missing for some weird reason, it's ok to fail hard here.
        final ESPivotBucketSpecHandler<? extends PivotSpec, ? extends Aggregation> handler = bucketHandlers.get(currentBucket.type());
        final Aggregation aggregationResult = handler.extractAggregationFromResult(pivot, currentBucket, aggregation, queryContext);
        final Stream<ESPivotBucketSpecHandler.Bucket> bucketStream = handler.handleResult(pivot, currentBucket, searchResult, aggregationResult, this, queryContext);
        // for each bucket, recurse and eventually collect all the column keys. once we reach a leaf, we end up in the other branch above.
        bucketStream.forEach(bucket -> {
            // push the bucket's key and use its aggregation as the new source for sub-aggregations
            columnKeys.addLast(bucket.key());
            processColumns(rowBuilder, searchResult, queryContext, pivot, tail(remainingColumns), columnKeys, bucket.aggregation());
            columnKeys.removeLast();
        });
        // don't add the empty-column-key rollup here; that's not the correct bucket, it's handled in the row-leaf code
        if (pivot.rollup() && !columnKeys.isEmpty()) {
            // columnKeys is not empty, because this is a rollup per column within a row
            processSeries(rowBuilder, searchResult, queryContext, pivot, columnKeys, aggregation, true, "col-inner");
        }
    }
}
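Both methods rely on the same push/recurse/pop pattern to build composite keys with a single reusable deque. A self-contained illustration of that traversal, with made-up bucket keys standing in for real Elasticsearch buckets:

import java.util.ArrayDeque;
import java.util.List;

public class KeyTrackingDemo {
    public static void main(String[] args) {
        // two "bucket levels", analogous to two row groups in a pivot
        walk(List.of(List.of("GET", "POST"), List.of("200", "500")), new ArrayDeque<>());
    }

    private static void walk(List<List<String>> levels, ArrayDeque<String> keys) {
        if (levels.isEmpty()) {
            // leaf: the deque now holds one key per level, e.g. GET/200
            System.out.println(String.join("/", keys));
            return;
        }
        for (String bucketKey : levels.get(0)) {
            keys.addLast(bucketKey);                      // push the current bucket's key
            walk(levels.subList(1, levels.size()), keys); // recurse into the "sub-aggregation"
            keys.removeLast();                            // pop on the way back up
        }
    }
}

Running it prints GET/200, GET/500, POST/200, POST/500: every combination of bucket keys, exactly like the row and column key arrays in the pivot result.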
Use of org.elasticsearch.search.aggregations.Aggregation in project sonarqube by SonarSource.
The class Facets, method processSubAggregations.
private void processSubAggregations(HasAggregations aggregation) {
    if (Filter.class.isAssignableFrom(aggregation.getClass())) {
        Filter filter = (Filter) aggregation;
        if (filter.getName().startsWith(NO_DATA_PREFIX)) {
            // strip the prefix to recover the facet name and record the count of documents without data
            LinkedHashMap<String, Long> facet = getOrCreateFacet(filter.getName().replaceFirst(NO_DATA_PREFIX, ""));
            facet.put("NO_DATA", filter.getDocCount());
        }
    }
    for (Aggregation sub : aggregation.getAggregations()) {
        processAggregation(sub);
    }
}
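getOrCreateFacet is not shown here. Assuming the facets live in a LinkedHashMap keyed by facet name (a guess based on the usage above, not SonarQube's actual field), it could look like:

// hypothetical backing field and helper; the real Facets class may differ
private final LinkedHashMap<String, LinkedHashMap<String, Long>> facetsByName = new LinkedHashMap<>();

private LinkedHashMap<String, Long> getOrCreateFacet(String name) {
    return facetsByName.computeIfAbsent(name, key -> new LinkedHashMap<>());
}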
Use of org.elasticsearch.search.aggregations.Aggregation in project metron by apache.
The class ElasticsearchSearchDao, method getFacetCounts.
private Map<String, Map<String, Long>> getFacetCounts(List<String> fields, Aggregations aggregations, Map<String, FieldType> commonColumnMetadata) {
    Map<String, Map<String, Long>> fieldCounts = new HashMap<>();
    for (String field : fields) {
        Map<String, Long> valueCounts = new HashMap<>();
        if (aggregations != null) {
            Aggregation aggregation = aggregations.get(getFacetAggregationName(field));
            if (aggregation instanceof Terms) {
                Terms terms = (Terms) aggregation;
                terms.getBuckets().forEach(bucket ->
                        valueCounts.put(formatKey(bucket.getKey(), commonColumnMetadata.get(field)), bucket.getDocCount()));
            }
        }
        fieldCounts.put(field, valueCounts);
    }
    return fieldCounts;
}
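For the lookup by getFacetAggregationName(field) to succeed, the request side has to register one terms aggregation per facet field under the same name. A sketch of what that pairing might look like (addFacetAggregations is a hypothetical helper, not Metron's code; the builder calls are the standard Elasticsearch Java API):

import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import java.util.List;

private void addFacetAggregations(SearchSourceBuilder searchBuilder, List<String> fields) {
    for (String field : fields) {
        // one terms aggregation per field, named so getFacetCounts() can find it again
        searchBuilder.aggregation(AggregationBuilders.terms(getFacetAggregationName(field)).field(field));
    }
}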
Use of org.elasticsearch.search.aggregations.Aggregation in project pancm_project by xuwujing.
The class EsAggregationSearchTest, method agg.
private static void agg(List<Map<String, Object>> list, Aggregations aggregations) {
    aggregations.forEach(aggregation -> {
        String name = aggregation.getName();
        // this test assumes every aggregation at this level is a terms aggregation
        Terms genders = (Terms) aggregation;
        for (Terms.Bucket entry : genders.getBuckets()) {
            String key = entry.getKey().toString();
            long t = entry.getDocCount();
            Map<String, Object> map = new HashMap<>();
            map.put(name, key);
            map.put(name + "_" + "count", t);
            // check whether this bucket contains nested sub-aggregations
            List<Aggregation> list2 = entry.getAggregations().asList();
            if (list2.isEmpty()) {
                list.add(map);
            } else {
                // descend into the nested aggregations; only leaf levels are added to the result
                agg(list, entry.getAggregations());
            }
        }
    });
    System.out.println(list);
}
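A minimal usage sketch, assuming the high-level REST client and a terms aggregation already attached to the request (client, searchRequest, and the sample output are placeholders, not part of the original test):

SearchResponse response = client.search(searchRequest, RequestOptions.DEFAULT);
List<Map<String, Object>> list = new ArrayList<>();
agg(list, response.getAggregations());
// list now holds one map per leaf bucket, e.g. {gender=male, gender_count=42}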