Use of org.opensearch.search.aggregations.metrics.SumAggregationBuilder in project fess by codelibs.
In class EsAbstractConditionAggregation, method regSumA:
protected SumAggregationBuilder regSumA(String name, String field) {
    SumAggregationBuilder builder = AggregationBuilders.sum(name).field(field);
    regA(builder); // register the builder so it is attached to the pending search request
    return builder;
}
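For context, a hedged sketch of how a concrete condition-aggregation class might call this helper (the class, method, and field names below are hypothetical, not from the fess source; regA is assumed to collect builders for the search request, as the method above suggests):

// Hypothetical caller; "price" and "price_sum" are illustrative names only.
public class DocCA extends EsAbstractConditionAggregation {
    public void sumOfPrice() {
        // builds AggregationBuilders.sum("price_sum").field("price") and registers it
        regSumA("price_sum", "price");
    }
}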
Use of org.opensearch.search.aggregations.metrics.SumAggregationBuilder in project OpenSearch by opensearch-project.
In class MaxBucketIT, method testFieldIsntWrittenOutTwice:
/**
* https://github.com/elastic/elasticsearch/issues/33514
*
* This bug manifests as the max_bucket agg ("peak") being added to the response twice, because
* the pipeline agg is run twice. This produces invalid JSON and breaks conversion to maps.
* The bug was caused by an UnmappedTerms being chosen as the first reduction target. UnmappedTerms
* delegated reduction to the first non-unmapped agg, which would reduce and run pipeline aggs. But then
* execution returns to the UnmappedTerms and _it_ runs pipelines as well, doubling up on the values.
*
* Applies to any pipeline agg, not just max.
*/
public void testFieldIsntWrittenOutTwice() throws Exception {
    // An additional index with no mapped fields (or potentially an empty shard) is needed
    // to trigger this, so that there is an UnmappedTerms in the list to reduce.
    createIndex("foo_1");
    XContentBuilder builder = jsonBuilder().startObject()
        .startObject("properties")
            .startObject("@timestamp").field("type", "date").endObject()
            .startObject("license")
                .startObject("properties")
                    .startObject("count").field("type", "long").endObject()
                    .startObject("partnumber")
                        .field("type", "text")
                        .startObject("fields")
                            .startObject("keyword").field("type", "keyword").field("ignore_above", 256).endObject()
                        .endObject()
                    .endObject()
                .endObject()
            .endObject()
        .endObject()
    .endObject();
    assertAcked(client().admin().indices().prepareCreate("foo_2").addMapping("doc", builder).get());
    XContentBuilder docBuilder = jsonBuilder().startObject()
        .startObject("license").field("partnumber", "foobar").field("count", 2).endObject()
        .field("@timestamp", "2018-07-08T08:07:00.599Z")
    .endObject();
    client().prepareIndex("foo_2").setSource(docBuilder).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get();
    client().admin().indices().prepareRefresh().get(); // execute the refresh; the bare builder was never run
    TermsAggregationBuilder groupByLicenseAgg = AggregationBuilders.terms("group_by_license_partnumber").field("license.partnumber.keyword");
    MaxBucketPipelineAggregationBuilder peakPipelineAggBuilder = PipelineAggregatorBuilders.maxBucket("peak", "licenses_per_day>total_licenses");
    SumAggregationBuilder sumAggBuilder = AggregationBuilders.sum("total_licenses").field("license.count");
    DateHistogramAggregationBuilder licensePerDayBuilder = AggregationBuilders.dateHistogram("licenses_per_day").field("@timestamp").dateHistogramInterval(DateHistogramInterval.DAY);
    licensePerDayBuilder.subAggregation(sumAggBuilder);
    groupByLicenseAgg.subAggregation(licensePerDayBuilder);
    groupByLicenseAgg.subAggregation(peakPipelineAggBuilder);
    SearchResponse response = client().prepareSearch("foo_*").setSize(0).addAggregation(groupByLicenseAgg).get();
    // With the bug, "peak" is serialized twice per bucket and the round-trip below fails.
    BytesReference bytes = XContentHelper.toXContent(response, XContentType.JSON, false);
    XContentHelper.convertToMap(bytes, false, XContentType.JSON);
}
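The final convertToMap call is the real assertion: duplicate "peak" keys make the JSON unparseable, so reaching the end of the method means each bucket carried the pipeline result exactly once. A hedged sketch of how the parsed map could be inspected further (the extra assertion below is not in the original test; convertToMap returns a Tuple whose second element is the parsed map):

Tuple<XContentType, Map<String, Object>> parsed = XContentHelper.convertToMap(bytes, false, XContentType.JSON);
Map<String, Object> map = parsed.v2();
// Getting here at all means the JSON was well-formed, i.e. "peak" appeared only once per bucket.
assertNotNull(map.get("aggregations"));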
Use of org.opensearch.search.aggregations.metrics.SumAggregationBuilder in project OpenSearch by opensearch-project.
In class NestedAggregatorTests, method testOrphanedDocs:
public void testOrphanedDocs() throws IOException {
    int numRootDocs = randomIntBetween(1, 20);
    int expectedNestedDocs = 0;
    double expectedSum = 0;
    try (Directory directory = newDirectory()) {
        try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) {
            for (int i = 0; i < numRootDocs; i++) {
                List<Document> documents = new ArrayList<>();
                int numNestedDocs = randomIntBetween(0, 20);
                expectedSum += generateSumDocs(documents, numNestedDocs, i, NESTED_OBJECT, VALUE_FIELD_NAME);
                expectedNestedDocs += numNestedDocs;
                Document document = new Document();
                document.add(new Field(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(i)), IdFieldMapper.Defaults.FIELD_TYPE));
                document.add(sequenceIDFields.primaryTerm);
                documents.add(document);
                iw.addDocuments(documents);
            }
            // add some random nested docs that don't belong to any root document
            List<Document> documents = new ArrayList<>();
            int numOrphanedDocs = randomIntBetween(0, 20);
            generateSumDocs(documents, numOrphanedDocs, 1234, "foo", VALUE_FIELD_NAME);
            iw.addDocuments(documents);
            iw.commit();
        }
        try (IndexReader indexReader = wrapInMockESDirectoryReader(DirectoryReader.open(directory))) {
            NestedAggregationBuilder nestedBuilder = new NestedAggregationBuilder(NESTED_AGG, NESTED_OBJECT);
            SumAggregationBuilder sumAgg = new SumAggregationBuilder(SUM_AGG_NAME).field(VALUE_FIELD_NAME);
            nestedBuilder.subAggregation(sumAgg);
            MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(VALUE_FIELD_NAME, NumberFieldMapper.NumberType.LONG);
            InternalNested nested = searchAndReduce(newSearcher(indexReader, false, true), new MatchAllDocsQuery(), nestedBuilder, fieldType);
            // the orphaned docs must not be counted: only nested docs under a real root contribute
            assertEquals(expectedNestedDocs, nested.getDocCount());
            assertEquals(NESTED_AGG, nested.getName());
            InternalSum sum = (InternalSum) ((InternalAggregation) nested).getProperty(SUM_AGG_NAME);
            assertEquals(SUM_AGG_NAME, sum.getName());
            assertEquals(expectedSum, sum.getValue(), Double.MIN_VALUE);
        }
    }
}
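generateSumDocs is referenced above but not shown in this excerpt. A hedged reconstruction consistent with the assertions (each generated Lucene document is tagged as a nested doc under the given path and carries one numeric doc value; the returned total feeds expectedSum; the exact path-marking field varies across versions):

// Sketch only: assumes NestedPathFieldMapper-style path tagging (OpenSearch 2.x-era API).
private double generateSumDocs(List<Document> documents, int numNestedDocs, int id, String path, String fieldName) {
    double sum = 0;
    for (int nested = 0; nested < numNestedDocs; nested++) {
        Document document = new Document();
        // nested docs share the root's id but use the unstored nested id field type
        document.add(new Field(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(id)), IdFieldMapper.Defaults.NESTED_FIELD_TYPE));
        document.add(new Field(NestedPathFieldMapper.NAME, path, NestedPathFieldMapper.Defaults.FIELD_TYPE));
        long value = randomIntBetween(0, 10_000);
        document.add(new SortedNumericDocValuesField(fieldName, value));
        documents.add(document);
        sum += value;
    }
    return sum;
}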