Use of org.opensearch.search.aggregations.AggregationExecutionException in project OpenSearch by opensearch-project.
From the class WeightedAvgAggregatorTests, method testMultiWeight:
public void testMultiWeight() throws IOException {
    MultiValuesSourceFieldConfig valueConfig = new MultiValuesSourceFieldConfig.Builder().setFieldName("value_field").build();
    MultiValuesSourceFieldConfig weightConfig = new MultiValuesSourceFieldConfig.Builder().setFieldName("weight_field").build();
    WeightedAvgAggregationBuilder aggregationBuilder = new WeightedAvgAggregationBuilder("_name").value(valueConfig).weight(weightConfig);
    // Each document below indexes two values into "weight_field"; weighted_avg accepts
    // only a single weight per document, so the aggregation is expected to fail.
    AggregationExecutionException e = expectThrows(AggregationExecutionException.class, () -> testCase(new MatchAllDocsQuery(), aggregationBuilder, iw -> {
        iw.addDocument(Arrays.asList(new SortedNumericDocValuesField("value_field", 2), new SortedNumericDocValuesField("weight_field", 2), new SortedNumericDocValuesField("weight_field", 3)));
        iw.addDocument(Arrays.asList(new SortedNumericDocValuesField("value_field", 3), new SortedNumericDocValuesField("weight_field", 3), new SortedNumericDocValuesField("weight_field", 4)));
        iw.addDocument(Arrays.asList(new SortedNumericDocValuesField("value_field", 4), new SortedNumericDocValuesField("weight_field", 4), new SortedNumericDocValuesField("weight_field", 5)));
    }, avg -> {
        fail("Should have thrown exception");
    }));
    assertThat(e.getMessage(), containsString("Encountered more than one weight for a single document. "
        + "Use a script to combine multiple weights-per-doc into a single value."));
}
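The error message itself points at the remedy: supply the weight through a script that collapses the multiple per-document values into one. A minimal sketch of that approach, assuming a Painless script and reusing the test's field names; the averaging logic here is one illustrative way to combine the weights, not the only valid one:

    // Hedged sketch: combine multiple weights-per-doc into a single value via a
    // script. Assumes "weight_field" is never empty for matching documents.
    Script weightScript = new Script(
        ScriptType.INLINE,
        "painless",
        "double sum = 0; for (def w : doc['weight_field']) { sum += w; } return sum / doc['weight_field'].size();",
        Collections.emptyMap()
    );
    MultiValuesSourceFieldConfig scriptedWeight = new MultiValuesSourceFieldConfig.Builder()
        .setScript(weightScript)
        .build();
    WeightedAvgAggregationBuilder fixed = new WeightedAvgAggregationBuilder("_name")
        .value(new MultiValuesSourceFieldConfig.Builder().setFieldName("value_field").build())
        .weight(scriptedWeight);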
Use of org.opensearch.search.aggregations.AggregationExecutionException in project OpenSearch by opensearch-project.
From the class ParentAggregatorFactory, method doCreateInternal:
@Override
protected Aggregator doCreateInternal(SearchContext searchContext, Aggregator children, CardinalityUpperBound cardinality, Map<String, Object> metadata) throws IOException {
    ValuesSource rawValuesSource = config.getValuesSource();
    // The child->parent join is resolved through global ordinals, so any
    // values source that is not WithOrdinals is rejected up front.
    if (rawValuesSource instanceof WithOrdinals == false) {
        throw new AggregationExecutionException("ValuesSource type " + rawValuesSource.toString() + " is not supported for aggregation " + this.name());
    }
    WithOrdinals valuesSource = (WithOrdinals) rawValuesSource;
    long maxOrd = valuesSource.globalMaxOrd(searchContext.searcher());
    return new ChildrenToParentAggregator(name, factories, searchContext, children, childFilter, parentFilter, valuesSource, maxOrd, cardinality, metadata);
}
Use of org.opensearch.search.aggregations.AggregationExecutionException in project OpenSearch by opensearch-project.
From the class ChildrenAggregatorFactory, method doCreateInternal:
@Override
protected Aggregator doCreateInternal(SearchContext searchContext, Aggregator parent, CardinalityUpperBound cardinality, Map<String, Object> metadata) throws IOException {
    ValuesSource rawValuesSource = config.getValuesSource();
    // Same guard as the parent factory: the parent->child join needs a
    // values source backed by global ordinals.
    if (rawValuesSource instanceof WithOrdinals == false) {
        throw new AggregationExecutionException("ValuesSource type " + rawValuesSource.toString() + " is not supported for aggregation " + this.name());
    }
    WithOrdinals valuesSource = (WithOrdinals) rawValuesSource;
    long maxOrd = valuesSource.globalMaxOrd(searchContext.searcher());
    return new ParentToChildrenAggregator(name, factories, searchContext, parent, childFilter, parentFilter, valuesSource, maxOrd, cardinality, metadata);
}
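Both factories enforce the same invariant: the values source over the join field must expose global ordinals, which is what a join field mapping produces. A minimal usage sketch, assuming a hypothetical question/answer join relation and the builders from the parent-join module:

    // Hedged sketch: children/parent aggregations over a join field.
    // "to_answers", "to_questions", and the "answer" relation name are
    // hypothetical; both builders resolve to a WithOrdinals values source,
    // satisfying the instanceof check in doCreateInternal above.
    ChildrenAggregationBuilder byAnswers = JoinAggregationBuilders.children("to_answers", "answer");
    ParentAggregationBuilder byQuestions = JoinAggregationBuilders.parent("to_questions", "answer");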
Use of org.opensearch.search.aggregations.AggregationExecutionException in project OpenSearch by opensearch-project.
From the class InternalTerms, method reduce:
public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
    long sumDocCountError = 0;
    long otherDocCount = 0;
    InternalTerms<A, B> referenceTerms = null;
    for (InternalAggregation aggregation : aggregations) {
        @SuppressWarnings("unchecked")
        InternalTerms<A, B> terms = (InternalTerms<A, B>) aggregation;
        if (referenceTerms == null && aggregation.getClass().equals(UnmappedTerms.class) == false) {
            referenceTerms = terms;
        }
        if (referenceTerms != null
            && referenceTerms.getClass().equals(terms.getClass()) == false
            && terms.getClass().equals(UnmappedTerms.class) == false) {
            // The aggregated field is of different types in different indices.
            throw new AggregationExecutionException(
                "Merging/Reducing the aggregations failed when computing the aggregation ["
                    + referenceTerms.getName()
                    + "] because the field you gave in the aggregation query existed as two different "
                    + "types in two different indices"
            );
        }
        otherDocCount += terms.getSumOfOtherDocCounts();
        final long thisAggDocCountError = getDocCountError(terms);
        if (sumDocCountError != -1) {
            if (thisAggDocCountError == -1) {
                sumDocCountError = -1;
            } else {
                sumDocCountError += thisAggDocCountError;
            }
        }
        setDocCountError(thisAggDocCountError);
        for (B bucket : terms.getBuckets()) {
            // If there is already a doc count error for this bucket, subtract
            // this agg's doc count error from it to make the new value for the
            // bucket. This means that when the final error for the bucket is
            // calculated below we account for the existing error calculated in
            // a previous reduce. Note that if the error is unbounded (-1) this
            // will be fixed later in this method.
            bucket.docCountError -= thisAggDocCountError;
        }
    }
    final List<B> reducedBuckets;
    /*
     * Buckets returned by a partial reduce or a shard response are sorted by key since {@link LegacyESVersion#V_7_10_0}.
     * That allows us to perform a merge sort when reducing multiple aggregations together.
     * For backward compatibility, we disable the merge sort and use {@link InternalTerms#reduceLegacy} if any of
     * the provided aggregations use a different {@link InternalTerms#reduceOrder}.
     */
    BucketOrder thisReduceOrder = getReduceOrder(aggregations);
    if (isKeyOrder(thisReduceOrder)) {
        // extract the primary sort in case this is a compound order.
        thisReduceOrder = InternalOrder.key(isKeyAsc(thisReduceOrder));
        reducedBuckets = reduceMergeSort(aggregations, thisReduceOrder, reduceContext);
    } else {
        reducedBuckets = reduceLegacy(aggregations, reduceContext);
    }
    final B[] list;
    if (reduceContext.isFinalReduce()) {
        final int size = Math.min(requiredSize, reducedBuckets.size());
        // final comparator
        final BucketPriorityQueue<B> ordered = new BucketPriorityQueue<>(size, order.comparator());
        for (B bucket : reducedBuckets) {
            if (sumDocCountError == -1) {
                bucket.docCountError = -1;
            } else {
                bucket.docCountError += sumDocCountError;
            }
            if (bucket.docCount >= minDocCount) {
                B removed = ordered.insertWithOverflow(bucket);
                if (removed != null) {
                    otherDocCount += removed.getDocCount();
                    reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(removed));
                } else {
                    reduceContext.consumeBucketsAndMaybeBreak(1);
                }
            } else {
                reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(bucket));
            }
        }
        list = createBucketsArray(ordered.size());
        for (int i = ordered.size() - 1; i >= 0; i--) {
            list[i] = ordered.pop();
        }
    } else {
        // we can prune the list on partial reduce if the aggregation is ordered by key
        // and not filtered (minDocCount == 0)
        int size = isKeyOrder(order) && minDocCount == 0 ? Math.min(requiredSize, reducedBuckets.size()) : reducedBuckets.size();
        list = createBucketsArray(size);
        for (int i = 0; i < size; i++) {
            reduceContext.consumeBucketsAndMaybeBreak(1);
            list[i] = reducedBuckets.get(i);
            if (sumDocCountError == -1) {
                list[i].docCountError = -1;
            } else {
                list[i].docCountError += sumDocCountError;
            }
        }
    }
    long docCountError;
    if (sumDocCountError == -1) {
        docCountError = -1;
    } else {
        docCountError = aggregations.size() == 1 ? 0 : sumDocCountError;
    }
    return create(name, Arrays.asList(list), reduceContext.isFinalReduce() ? order : thisReduceOrder, docCountError, otherDocCount);
}
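The error bookkeeping is the subtle part of this method: each bucket first has its owning shard's error subtracted (inside the per-aggregation loop), and the summed error across all shards is added back after merging. The net effect is that a bucket's final error is the sum of the errors of the shards that did NOT report it, since those are the shards that might have missed some of its documents. A small standalone sketch of that arithmetic, with made-up per-shard errors:

    // Hedged sketch of the doc-count-error arithmetic in reduce(); the shard
    // errors and counts are hypothetical.
    long[] shardErrors = { 3, 5 };     // worst-case missed doc count per shard
    long sumDocCountError = 3 + 5;     // 8, accumulated across both shards
    // A bucket returned only by shard 0, with no error from a prior reduce:
    long bucketError = 0;
    bucketError -= shardErrors[0];     // subtract the owning shard's error: -3
    bucketError += sumDocCountError;   // add the total back after merging:  5
    // Result: 5, exactly the error of shard 1, the shard that did not report
    // the bucket and could have missed up to 5 of its documents.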
Use of org.opensearch.search.aggregations.AggregationExecutionException in project OpenSearch by opensearch-project.
From the class ReverseNestedAggregationBuilder, method doBuild:
@Override
protected AggregatorFactory doBuild(QueryShardContext queryShardContext, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException {
    if (findNestedAggregatorFactory(parent) == null) {
        throw new IllegalArgumentException("Reverse nested aggregation [" + name + "] can only be used inside a [nested] aggregation");
    }
    ObjectMapper parentObjectMapper = null;
    if (path != null) {
        parentObjectMapper = queryShardContext.getObjectMapper(path);
        if (parentObjectMapper == null) {
            // The path does not exist in the mappings: build an unmapped factory.
            return new ReverseNestedAggregatorFactory(name, true, null, queryShardContext, parent, subFactoriesBuilder, metadata);
        }
        if (parentObjectMapper.nested().isNested() == false) {
            throw new AggregationExecutionException("[reverse_nested] nested path [" + path + "] is not nested");
        }
    }
    NestedScope nestedScope = queryShardContext.nestedScope();
    try {
        nestedScope.nextLevel(parentObjectMapper);
        return new ReverseNestedAggregatorFactory(name, false, parentObjectMapper, queryShardContext, parent, subFactoriesBuilder, metadata);
    } finally {
        nestedScope.previousLevel();
    }
}
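The two failure modes above are easy to trigger: a reverse_nested placed outside any nested aggregation throws IllegalArgumentException, and a path that exists but is not mapped as nested throws AggregationExecutionException. A minimal valid composition, assuming a hypothetical index where "comments" is mapped as a nested object:

    // Hedged sketch: valid reverse_nested placement. The names "to_comments",
    // "by_author", "back_to_post", and the "comments" mapping are hypothetical.
    NestedAggregationBuilder nested = AggregationBuilders.nested("to_comments", "comments")
        .subAggregation(
            AggregationBuilders.terms("by_author").field("comments.author")
                // reverse_nested jumps back up to the root document level
                .subAggregation(AggregationBuilders.reverseNested("back_to_post"))
        );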