use of org.opensearch.search.aggregations.pipeline.PipelineAggregator in project OpenSearch by opensearch-project.
the class InternalFilterTests, method testReducePipelinesReducesBucketPipelines:
public void testReducePipelinesReducesBucketPipelines() {
    /*
     * Tests that the bucket's pipelines are reduced by creating a mock
     * pipeline that replaces the sub-aggregation "inner" with "dummy".
     */
    InternalFilter dummy = createTestInstance();
    InternalFilter inner = createTestInstance();
    InternalAggregations sub = InternalAggregations.from(Collections.singletonList(inner));
    InternalFilter test = createTestInstance("test", randomNonNegativeLong(), sub, emptyMap());
    PipelineAggregator mockPipeline = new PipelineAggregator(null, null, null) {
        @Override
        public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
            return dummy;
        }
    };
    PipelineTree tree = new PipelineTree(
        org.opensearch.common.collect.Map.of(inner.getName(), new PipelineTree(emptyMap(), singletonList(mockPipeline))),
        emptyList()
    );
    InternalFilter reduced = (InternalFilter) test.reducePipelines(test, emptyReduceContextBuilder().forFinalReduction(), tree);
    assertThat(reduced.getAggregations().get(dummy.getName()), sameInstance(dummy));
}
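The shape of the PipelineTree is the key detail here: its first constructor argument maps each sub-aggregation name to the subtree of pipelines that runs inside that aggregation, and its second argument lists the sibling pipelines that run at the current level. A minimal sketch of that structure, where innerPipeline and siblingPipeline are hypothetical placeholders:

// Sketch only: innerPipeline and siblingPipeline are hypothetical placeholders.
PipelineTree tree = new PipelineTree(
    Collections.singletonMap(
        "inner",                                                      // sub-aggregation name
        new PipelineTree(emptyMap(), singletonList(innerPipeline))),  // pipelines under "inner"
    singletonList(siblingPipeline));                                  // siblings at this level

The test above exercises only the first argument: the mock pipeline is registered under the inner aggregation's name, so reducePipelines applies it inside the bucket.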
use of org.opensearch.search.aggregations.pipeline.PipelineAggregator in project OpenSearch by opensearch-project.
the class InternalAggregations, method topLevelReduce:
/**
 * Begins the reduction process. This should be the entry point for the "first" reduction, e.g. called by
 * SearchPhaseController or anywhere else that wants to initiate a reduction. It _should not_ be called
 * as an intermediate reduction step (e.g. in the middle of an aggregation tree).
 *
 * This method first reduces the aggregations and, if this is the final reduce, then reduces the pipeline
 * aggregations as well (both embedded parent/sibling pipelines and top-level sibling pipelines).
 */
public static InternalAggregations topLevelReduce(List<InternalAggregations> aggregationsList, ReduceContext context) {
    InternalAggregations reduced = reduce(
        aggregationsList,
        context,
        reducedAggregations -> new InternalAggregations(reducedAggregations, context.pipelineTreeForBwcSerialization())
    );
    if (reduced == null) {
        return null;
    }
    if (context.isFinalReduce()) {
        List<InternalAggregation> reducedInternalAggs = reduced.getInternalAggregations();
        reducedInternalAggs = reducedInternalAggs.stream()
            .map(agg -> agg.reducePipelines(agg, context, context.pipelineTreeRoot().subTree(agg.getName())))
            .collect(Collectors.toList());
        for (PipelineAggregator pipelineAggregator : context.pipelineTreeRoot().aggregators()) {
            SiblingPipelineAggregator sib = (SiblingPipelineAggregator) pipelineAggregator;
            InternalAggregation newAgg = sib.doReduce(from(reducedInternalAggs), context);
            reducedInternalAggs.add(newAgg);
        }
        return from(reducedInternalAggs);
    }
    return reduced;
}
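As a hedged sketch of how a caller might drive this entry point: assuming shardResults holds one InternalAggregations per shard, and bigArrays, scriptService, bucketConsumer, and pipelineTree are available in the calling code (all of these names are placeholders, not from the source), the final reduction would look roughly like:

// Minimal sketch; every identifier except the OpenSearch types is assumed.
InternalAggregation.ReduceContext finalContext = InternalAggregation.ReduceContext.forFinalReduction(
    bigArrays, scriptService, bucketConsumer, pipelineTree);
InternalAggregations merged = InternalAggregations.topLevelReduce(shardResults, finalContext);

Because a context built with forFinalReduction reports isFinalReduce() as true, this single call also runs the parent and sibling pipelines, which is exactly the branch shown in the method body above.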
use of org.opensearch.search.aggregations.pipeline.PipelineAggregator in project OpenSearch by opensearch-project.
the class AggregatorTestCase, method searchAndReduce:
/**
 * Collects all documents that match the provided {@link Query} and
 * returns the reduced {@link InternalAggregation}.
 * <p>
 * Half the time it aggregates each leaf individually and reduces all
 * results together. The other half of the time it aggregates across the
 * entire index at once and runs a final reduction on the single resulting agg.
 */
protected <A extends InternalAggregation, C extends Aggregator> A searchAndReduce(
    IndexSettings indexSettings,
    IndexSearcher searcher,
    Query query,
    AggregationBuilder builder,
    int maxBucket,
    MappedFieldType... fieldTypes
) throws IOException {
    final IndexReaderContext ctx = searcher.getTopReaderContext();
    final PipelineTree pipelines = builder.buildPipelineTree();
    List<InternalAggregation> aggs = new ArrayList<>();
    Query rewritten = searcher.rewrite(query);
    MultiBucketConsumer bucketConsumer = new MultiBucketConsumer(maxBucket, new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST));
    C root = createAggregator(query, builder, searcher, bucketConsumer, fieldTypes);
    if (randomBoolean() && searcher.getIndexReader().leaves().size() > 0) {
        assertThat(ctx, instanceOf(CompositeReaderContext.class));
        final CompositeReaderContext compCTX = (CompositeReaderContext) ctx;
        final int size = compCTX.leaves().size();
        final ShardSearcher[] subSearchers = new ShardSearcher[size];
        for (int searcherIDX = 0; searcherIDX < subSearchers.length; searcherIDX++) {
            final LeafReaderContext leave = compCTX.leaves().get(searcherIDX);
            subSearchers[searcherIDX] = new ShardSearcher(leave, compCTX);
        }
        for (ShardSearcher subSearcher : subSearchers) {
            MultiBucketConsumer shardBucketConsumer = new MultiBucketConsumer(maxBucket, new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST));
            C a = createAggregator(query, builder, subSearcher, indexSettings, shardBucketConsumer, fieldTypes);
            a.preCollection();
            Weight weight = subSearcher.createWeight(rewritten, ScoreMode.COMPLETE, 1f);
            subSearcher.search(weight, a);
            a.postCollection();
            aggs.add(a.buildTopLevel());
        }
    } else {
        root.preCollection();
        searcher.search(rewritten, root);
        root.postCollection();
        aggs.add(root.buildTopLevel());
    }
    if (randomBoolean() && aggs.size() > 1) {
        // sometimes do an incremental reduce
        int toReduceSize = aggs.size();
        Collections.shuffle(aggs, random());
        int r = randomIntBetween(1, toReduceSize);
        List<InternalAggregation> toReduce = aggs.subList(0, r);
        InternalAggregation.ReduceContext context = InternalAggregation.ReduceContext.forPartialReduction(
            root.context().bigArrays(),
            getMockScriptService(),
            () -> PipelineAggregator.PipelineTree.EMPTY
        );
        A reduced = (A) aggs.get(0).reduce(toReduce, context);
        aggs = new ArrayList<>(aggs.subList(r, toReduceSize));
        aggs.add(reduced);
    }
    // now do the final reduce
    MultiBucketConsumer reduceBucketConsumer = new MultiBucketConsumer(maxBucket, new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST));
    InternalAggregation.ReduceContext context = InternalAggregation.ReduceContext.forFinalReduction(
        root.context().bigArrays(),
        getMockScriptService(),
        reduceBucketConsumer,
        pipelines
    );
    @SuppressWarnings("unchecked")
    A internalAgg = (A) aggs.get(0).reduce(aggs, context);
    // materialize any parent pipelines
    internalAgg = (A) internalAgg.reducePipelines(internalAgg, context, pipelines);
    // materialize any sibling pipelines at top level
    for (PipelineAggregator pipelineAggregator : pipelines.aggregators()) {
        internalAgg = (A) pipelineAggregator.reduce(internalAgg, context);
    }
    doAssertReducedMultiBucketConsumer(internalAgg, reduceBucketConsumer);
    return internalAgg;
}
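A typical invocation from a concrete aggregator test might look like the following hedged sketch; the filter aggregation, term query, bucket limit, and keywordFieldType are illustrative placeholders rather than code from the source:

// Hypothetical usage inside an AggregatorTestCase subclass.
InternalFilter result = searchAndReduce(
    indexSettings,
    indexSearcher,
    new MatchAllDocsQuery(),
    AggregationBuilders.filter("f", QueryBuilders.termQuery("field", "value")),
    10_000,              // maxBucket
    keywordFieldType);   // MappedFieldType for "field"

The randomBoolean() branches mean the same test sometimes exercises per-leaf collection plus an incremental reduce and sometimes a single whole-index pass, which is what the Javadoc's "half the time" remark refers to.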
use of org.opensearch.search.aggregations.pipeline.PipelineAggregator in project OpenSearch by opensearch-project.
the class AggregatorFactoriesTests, method testBuildPipelineTreeResolvesPipelineOrder:
public void testBuildPipelineTreeResolvesPipelineOrder() {
    AggregatorFactories.Builder builder = new AggregatorFactories.Builder();
    builder.addPipelineAggregator(PipelineAggregatorBuilders.avgBucket("bar", "foo"));
    builder.addPipelineAggregator(PipelineAggregatorBuilders.avgBucket("foo", "real"));
    builder.addAggregator(AggregationBuilders.avg("real").field("target"));
    PipelineTree tree = builder.buildPipelineTree();
    assertThat(tree.aggregators().stream().map(PipelineAggregator::name).collect(toList()), equalTo(Arrays.asList("foo", "bar")));
}
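The assertion is the interesting part: although "bar" is registered before "foo", the resolved tree lists "foo" first, because "bar" reads its input from "foo" via its buckets path. In other words, buildPipelineTree orders sibling pipelines by their data dependencies rather than by the order in which they were added to the builder, so each pipeline only runs after the aggregation it reads from has been reduced.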
use of org.opensearch.search.aggregations.pipeline.PipelineAggregator in project OpenSearch by opensearch-project.
the class InternalFiltersTests, method testReducePipelinesReducesBucketPipelines:
public void testReducePipelinesReducesBucketPipelines() {
    /*
     * Tests that each bucket's pipelines are reduced by creating a mock
     * pipeline that replaces the sub-aggregation "inner" with "dummy".
     */
    InternalFilters dummy = createTestInstance();
    InternalFilters inner = createTestInstance();
    InternalAggregations sub = InternalAggregations.from(Collections.singletonList(inner));
    InternalFilters test = createTestInstance("test", emptyMap(), sub);
    PipelineAggregator mockPipeline = new PipelineAggregator(null, null, null) {
        @Override
        public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
            return dummy;
        }
    };
    PipelineTree tree = new PipelineTree(
        org.opensearch.common.collect.Map.of(inner.getName(), new PipelineTree(emptyMap(), singletonList(mockPipeline))),
        emptyList()
    );
    InternalFilters reduced = (InternalFilters) test.reducePipelines(test, emptyReduceContextBuilder().forFinalReduction(), tree);
    for (InternalFilters.InternalBucket bucket : reduced.getBuckets()) {
        assertThat(bucket.getAggregations().get(dummy.getName()), sameInstance(dummy));
    }
}
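This is the multi-bucket counterpart of the InternalFilterTests case earlier in this section: an InternalFilters aggregation carries several buckets, so the assertion loops over every bucket and verifies that each one's "inner" sub-aggregation has been replaced by the same dummy instance.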