Use of org.opensearch.common.util.MockPageCacheRecycler in project OpenSearch by opensearch-project.
From the class InternalVariableWidthHistogramTests, method testMultipleShardsReduce:
public void testMultipleShardsReduce() {
    InternalVariableWidthHistogram dummy_histogram = createEmptyTestInstance();
    List<InternalVariableWidthHistogram.Bucket> buckets1 = new ArrayList<>();
    for (long value : new long[] { 1, 5, 6, 10 }) {
        InternalVariableWidthHistogram.Bucket.BucketBounds bounds =
            new InternalVariableWidthHistogram.Bucket.BucketBounds(value, value + 1);
        InternalVariableWidthHistogram.Bucket bucket =
            new InternalVariableWidthHistogram.Bucket(value, bounds, 1, format, InternalAggregations.EMPTY);
        buckets1.add(bucket);
    }
    List<InternalVariableWidthHistogram.Bucket> buckets2 = new ArrayList<>();
    for (long value : new long[] { 2, 3, 6, 7 }) {
        InternalVariableWidthHistogram.Bucket.BucketBounds bounds =
            new InternalVariableWidthHistogram.Bucket.BucketBounds(value, value + 1);
        InternalVariableWidthHistogram.Bucket bucket =
            new InternalVariableWidthHistogram.Bucket(value, bounds, 1, format, InternalAggregations.EMPTY);
        buckets2.add(bucket);
    }
    List<InternalVariableWidthHistogram.Bucket> buckets3 = new ArrayList<>();
    for (long value : new long[] { 0, 2, 12 }) {
        InternalVariableWidthHistogram.Bucket.BucketBounds bounds =
            new InternalVariableWidthHistogram.Bucket.BucketBounds(value, value + 1);
        InternalVariableWidthHistogram.Bucket bucket =
            new InternalVariableWidthHistogram.Bucket(value, bounds, 1, format, InternalAggregations.EMPTY);
        buckets3.add(bucket);
    }
    InternalVariableWidthHistogram histogram1 = dummy_histogram.create(buckets1);
    InternalVariableWidthHistogram histogram2 = dummy_histogram.create(buckets2);
    InternalVariableWidthHistogram histogram3 = dummy_histogram.create(buckets3);
    MockBigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
    ScriptService mockScriptService = mockScriptService();
    MultiBucketConsumerService.MultiBucketConsumer bucketConsumer = new MultiBucketConsumerService.MultiBucketConsumer(
        DEFAULT_MAX_BUCKETS,
        new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST)
    );
    InternalAggregation.ReduceContext context = InternalAggregation.ReduceContext.forFinalReduction(
        bigArrays,
        mockScriptService,
        bucketConsumer,
        PipelineAggregator.PipelineTree.EMPTY
    );
    ArrayList<InternalAggregation> aggs = new ArrayList<>();
    aggs.add(histogram1);
    aggs.add(histogram2);
    aggs.add(histogram3);
    List<InternalVariableWidthHistogram.Bucket> reduced_buckets =
        ((InternalVariableWidthHistogram) histogram1.reduce(aggs, context)).getBuckets();
    // Final clusters should be [ (0, 1, 2, 2, 3), (5, 6, 6, 7), (10, 12) ]
    // Final centroids (bucket keys) should be [ 1.6, 6, 11 ],
    // e.g. (0 + 1 + 2 + 2 + 3) / 5 = 1.6 for the first cluster
    // Final bucket minimums should be [ 0, 5, 10 ]
    double double_error = 1d / 10000d;
    assertEquals(0d, reduced_buckets.get(0).min(), double_error);
    assertEquals(1.6d, (double) reduced_buckets.get(0).getKey(), double_error);
    assertEquals(5, reduced_buckets.get(0).getDocCount());
    assertEquals(5d, reduced_buckets.get(1).min(), double_error);
    assertEquals(6d, (double) reduced_buckets.get(1).getKey(), double_error);
    assertEquals(4, reduced_buckets.get(1).getDocCount());
    assertEquals(10d, reduced_buckets.get(2).min(), double_error);
    assertEquals(11d, (double) reduced_buckets.get(2).getKey(), double_error);
    assertEquals(2, reduced_buckets.get(2).getDocCount());
}
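The aggregation tests on this page share the same setup: a MockBigArrays backed by a MockPageCacheRecycler and a NoneCircuitBreakerService, wrapped into a final-reduction ReduceContext. A minimal sketch of that pattern factored into a helper follows; the helper name is illustrative, while the individual calls are exactly the ones used in the test above.

// Illustrative helper, not part of the OpenSearch sources; it only factors out
// the ReduceContext construction used in the test above.
private static InternalAggregation.ReduceContext newFinalReduceContext(ScriptService scriptService) {
    // Test-only BigArrays backed by a page recycler that tracks allocations so leaks can be detected.
    MockBigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
    // Bucket consumer that enforces the maximum bucket count during reduction.
    MultiBucketConsumerService.MultiBucketConsumer bucketConsumer = new MultiBucketConsumerService.MultiBucketConsumer(
        DEFAULT_MAX_BUCKETS,
        new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST)
    );
    return InternalAggregation.ReduceContext.forFinalReduction(
        bigArrays,
        scriptService,
        bucketConsumer,
        PipelineAggregator.PipelineTree.EMPTY
    );
}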
Use of org.opensearch.common.util.MockPageCacheRecycler in project OpenSearch by opensearch-project.
From the class TermsAggregatorTests, method testMixLongAndDouble:
public void testMixLongAndDouble() throws Exception {
    for (TermsAggregatorFactory.ExecutionMode executionMode : TermsAggregatorFactory.ExecutionMode.values()) {
        TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name").userValueTypeHint(ValueType.LONG)
            .executionHint(executionMode.toString())
            .field("number")
            .order(BucketOrder.key(true));
        List<InternalAggregation> aggs = new ArrayList<>();
        int numLongs = randomIntBetween(1, 3);
        for (int i = 0; i < numLongs; i++) {
            final Directory dir;
            try (IndexReader reader = createIndexWithLongs()) {
                dir = ((DirectoryReader) reader).directory();
                IndexSearcher searcher = new IndexSearcher(reader);
                MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType("number", NumberFieldMapper.NumberType.LONG);
                aggs.add(buildInternalAggregation(aggregationBuilder, fieldType, searcher));
            }
            dir.close();
        }
        int numDoubles = randomIntBetween(1, 3);
        for (int i = 0; i < numDoubles; i++) {
            final Directory dir;
            try (IndexReader reader = createIndexWithDoubles()) {
                dir = ((DirectoryReader) reader).directory();
                IndexSearcher searcher = new IndexSearcher(reader);
                MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType("number", NumberFieldMapper.NumberType.DOUBLE);
                aggs.add(buildInternalAggregation(aggregationBuilder, fieldType, searcher));
            }
            dir.close();
        }
        InternalAggregation.ReduceContext ctx = InternalAggregation.ReduceContext.forFinalReduction(
            new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()),
            null,
            b -> {},
            PipelineTree.EMPTY
        );
        for (InternalAggregation internalAgg : aggs) {
            InternalAggregation mergedAggs = internalAgg.reduce(aggs, ctx);
            assertTrue(mergedAggs instanceof DoubleTerms);
            long expected = numLongs + numDoubles;
            List<? extends Terms.Bucket> buckets = ((DoubleTerms) mergedAggs).getBuckets();
            assertEquals(4, buckets.size());
            assertEquals("1.0", buckets.get(0).getKeyAsString());
            assertEquals(expected, buckets.get(0).getDocCount());
            assertEquals("10.0", buckets.get(1).getKeyAsString());
            assertEquals(expected * 2, buckets.get(1).getDocCount());
            assertEquals("100.0", buckets.get(2).getKeyAsString());
            assertEquals(expected * 2, buckets.get(2).getDocCount());
            assertEquals("1000.0", buckets.get(3).getKeyAsString());
            assertEquals(expected, buckets.get(3).getDocCount());
        }
    }
}
Use of org.opensearch.common.util.MockPageCacheRecycler in project OpenSearch by opensearch-project.
From the class InternalCardinalityTests, method createTestInstance:
@Override
protected InternalCardinality createTestInstance(String name, Map<String, Object> metadata) {
    HyperLogLogPlusPlus hllpp = new HyperLogLogPlusPlus(
        p,
        new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()),
        1
    );
    algos.add(hllpp);
    for (int i = 0; i < 100; i++) {
        hllpp.collect(0, BitMixer.mix64(randomIntBetween(1, 100)));
    }
    return new InternalCardinality(name, hllpp, metadata);
}
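Because MockBigArrays tracks every page it hands out, the HyperLogLogPlusPlus sketches collected in algos are normally released once the test finishes so the leak checks pass. A hedged sketch of such a cleanup hook follows; the method name is illustrative and assumes the surrounding test class keeps the algos list shown above.

// Illustrative cleanup, not copied from the OpenSearch sources.
@After
public void releaseAlgos() {
    // Releasing the sketches returns their pages to the mock recycler.
    Releasables.close(algos);
    algos.clear();
}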
Use of org.opensearch.common.util.MockPageCacheRecycler in project OpenSearch by opensearch-project.
From the class InternalAggregationTestCase, method testReduceRandom:
public void testReduceRandom() throws IOException {
    String name = randomAlphaOfLength(5);
    int size = between(1, 200);
    List<T> inputs = randomResultsToReduce(name, size);
    assertThat(inputs, hasSize(size));
    List<InternalAggregation> toReduce = new ArrayList<>();
    toReduce.addAll(inputs);
    // Sort aggs so that unmapped come last. This mimics the behavior of InternalAggregations.reduce()
    inputs.sort(INTERNAL_AGG_COMPARATOR);
    ScriptService mockScriptService = mockScriptService();
    MockBigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
    if (randomBoolean() && toReduce.size() > 1) {
        // sometimes do a partial reduce
        Collections.shuffle(toReduce, random());
        int r = randomIntBetween(1, inputs.size());
        List<InternalAggregation> toPartialReduce = toReduce.subList(0, r);
        // Sort aggs so that unmapped come last. This mimics the behavior of InternalAggregations.reduce()
        toPartialReduce.sort(INTERNAL_AGG_COMPARATOR);
        InternalAggregation.ReduceContext context = InternalAggregation.ReduceContext.forPartialReduction(
            bigArrays, mockScriptService, () -> PipelineAggregator.PipelineTree.EMPTY);
        @SuppressWarnings("unchecked")
        T reduced = (T) toPartialReduce.get(0).reduce(toPartialReduce, context);
        int initialBucketCount = 0;
        for (InternalAggregation internalAggregation : toPartialReduce) {
            initialBucketCount += countInnerBucket(internalAggregation);
        }
        int reducedBucketCount = countInnerBucket(reduced);
        // check that a non-final reduction never adds buckets
        assertThat(reducedBucketCount, lessThanOrEqualTo(initialBucketCount));
        /*
         * Sometimes serialize and deserialize the partially reduced result to
         * simulate the compaction that we attempt after a partial reduce, and
         * to simulate cross-cluster search.
         */
        if (randomBoolean()) {
            reduced = copyNamedWriteable(reduced, getNamedWriteableRegistry(), categoryClass());
        }
        toReduce = new ArrayList<>(toReduce.subList(r, inputs.size()));
        toReduce.add(reduced);
    }
    MultiBucketConsumer bucketConsumer = new MultiBucketConsumer(
        DEFAULT_MAX_BUCKETS, new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST));
    InternalAggregation.ReduceContext context = InternalAggregation.ReduceContext.forFinalReduction(
        bigArrays, mockScriptService, bucketConsumer, PipelineTree.EMPTY);
    @SuppressWarnings("unchecked")
    T reduced = (T) inputs.get(0).reduce(toReduce, context);
    doAssertReducedMultiBucketConsumer(reduced, bucketConsumer);
    assertReduced(reduced, inputs);
}
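For context, a concrete subclass supplies the random shard-level inputs and the final assertion that testReduceRandom drives. Below is a hedged sketch of what an InternalMax-style subclass might look like; the class name is illustrative, the InternalMax constructor signature is assumed from the OpenSearch metrics package, and the other abstract hooks a real subclass must implement are omitted.

// Illustrative sketch only, not a copy of an actual OpenSearch test class.
public class InternalMaxReduceTests extends InternalAggregationTestCase<InternalMax> {

    @Override
    protected InternalMax createTestInstance(String name, Map<String, Object> metadata) {
        // One shard-level max with a random value.
        return new InternalMax(name, randomDouble(), DocValueFormat.RAW, metadata);
    }

    @Override
    protected void assertReduced(InternalMax reduced, List<InternalMax> inputs) {
        // The reduced value must be the maximum across all shard-level results.
        double expected = inputs.stream().mapToDouble(InternalMax::getValue).max().getAsDouble();
        assertEquals(expected, reduced.getValue(), 0d);
    }
}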
Use of org.opensearch.common.util.MockPageCacheRecycler in project OpenSearch by opensearch-project.
From the class MockTransportService, method newMockTransport:
public static MockNioTransport newMockTransport(Settings settings, Version version, ThreadPool threadPool) {
    settings = Settings.builder().put(TransportSettings.PORT.getKey(), OpenSearchTestCase.getPortRange()).put(settings).build();
    NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables());
    return new MockNioTransport(settings, version, threadPool, new NetworkService(Collections.emptyList()),
        new MockPageCacheRecycler(settings), namedWriteableRegistry, new NoneCircuitBreakerService());
}
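A hedged usage sketch follows, assuming the standard OpenSearch test thread pool helpers (TestThreadPool and ThreadPool.terminate); the pool name and timeout are illustrative.

// Illustrative only: build a MockNioTransport backed by MockPageCacheRecycler for a transport test.
ThreadPool threadPool = new TestThreadPool("mock-transport-example");
try {
    MockNioTransport transport = MockTransportService.newMockTransport(Settings.EMPTY, Version.CURRENT, threadPool);
    // ... start the transport or wrap it in a MockTransportService for the test ...
} finally {
    ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
}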