Use of org.elasticsearch.search.DocValueFormat in project elasticsearch by elastic.
The class SignificantStringTermsTests, method createTestInstance:
@Override
protected InternalSignificantTerms createTestInstance(String name, List<PipelineAggregator> pipelineAggregators,
        Map<String, Object> metaData) {
    DocValueFormat format = DocValueFormat.RAW;
    int requiredSize = randomIntBetween(1, 5);
    int shardSize = requiredSize + 2;
    final int numBuckets = randomInt(shardSize);
    long globalSubsetSize = 0;
    long globalSupersetSize = 0;
    List<SignificantStringTerms.Bucket> buckets = new ArrayList<>(numBuckets);
    Set<BytesRef> terms = new HashSet<>();
    for (int i = 0; i < numBuckets; ++i) {
        // Draw a term that has not been used for a previous bucket.
        BytesRef term = randomValueOtherThanMany(b -> terms.add(b) == false,
                () -> new BytesRef(randomAsciiOfLength(10)));
        int subsetDf = randomIntBetween(1, 10);
        int supersetDf = randomIntBetween(subsetDf, 20);
        int supersetSize = randomIntBetween(supersetDf, 30);
        globalSubsetSize += subsetDf;
        globalSupersetSize += supersetSize;
        // EMPTY refers to InternalAggregations.EMPTY (the bucket carries no sub-aggregations).
        buckets.add(new SignificantStringTerms.Bucket(term, subsetDf, subsetDf, supersetDf, supersetSize, EMPTY, format));
    }
    // significanceHeuristic is a field of the enclosing test class, not a local variable of this method.
    return new SignificantStringTerms(name, requiredSize, 1L, pipelineAggregators, metaData, format, globalSubsetSize,
            globalSupersetSize, significanceHeuristic, buckets);
}
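The unique-term generation above combines randomValueOtherThanMany (a helper from the ESTestCase base class) with a Set whose add() both records the candidate and reports whether it was a duplicate. A minimal, hypothetical sketch of that dedup idiom, not the Elasticsearch helper itself:

    // Keep drawing candidates from the supplier until the predicate stops flagging them as bad
    // (here, "bad" means "already produced for an earlier bucket").
    static <T> T randomValueOtherThanMany(java.util.function.Predicate<T> isBadValue,
            java.util.function.Supplier<T> supplier) {
        T candidate;
        do {
            candidate = supplier.get();
        } while (isBadValue.test(candidate));
        return candidate;
    }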
The class DoubleTermsTests, method createTestInstance:
@Override
protected InternalTerms<?, ?> createTestInstance(String name, List<PipelineAggregator> pipelineAggregators,
        Map<String, Object> metaData) {
    Terms.Order order = Terms.Order.count(false);
    long minDocCount = 1;
    int requiredSize = 3;
    int shardSize = requiredSize + 2;
    DocValueFormat format = DocValueFormat.RAW;
    boolean showTermDocCountError = false;
    long docCountError = -1;
    long otherDocCount = 0;
    List<DoubleTerms.Bucket> buckets = new ArrayList<>();
    final int numBuckets = randomInt(shardSize);
    Set<Double> terms = new HashSet<>();
    for (int i = 0; i < numBuckets; ++i) {
        double term = randomValueOtherThanMany(d -> terms.add(d) == false, random()::nextDouble);
        int docCount = randomIntBetween(1, 100);
        buckets.add(new DoubleTerms.Bucket(term, docCount, InternalAggregations.EMPTY, showTermDocCountError,
                docCountError, format));
    }
    return new DoubleTerms(name, order, requiredSize, minDocCount, pipelineAggregators, metaData, format, shardSize,
            showTermDocCountError, otherDocCount, buckets, docCountError);
}
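Terms.Order.count(false) asks for buckets ordered by descending doc count. A plain-Java reading of that ordering for the buckets built above, assuming only that DoubleTerms.Bucket exposes its count via getDocCount(); this is a sketch, not the Elasticsearch comparator:

    // Highest doc count first, as implied by Terms.Order.count(false).
    java.util.Comparator<DoubleTerms.Bucket> byCountDesc =
            java.util.Comparator.comparingLong(DoubleTerms.Bucket::getDocCount).reversed();
    buckets.sort(byCountDesc);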
The class LongTermsTests, method createTestInstance:
@Override
protected InternalTerms<?, ?> createTestInstance(String name, List<PipelineAggregator> pipelineAggregators,
        Map<String, Object> metaData) {
    Terms.Order order = Terms.Order.count(false);
    long minDocCount = 1;
    int requiredSize = 3;
    int shardSize = requiredSize + 2;
    DocValueFormat format = DocValueFormat.RAW;
    boolean showTermDocCountError = false;
    long docCountError = -1;
    long otherDocCount = 0;
    List<LongTerms.Bucket> buckets = new ArrayList<>();
    final int numBuckets = randomInt(shardSize);
    Set<Long> terms = new HashSet<>();
    for (int i = 0; i < numBuckets; ++i) {
        long term = randomValueOtherThanMany(l -> terms.add(l) == false, random()::nextLong);
        int docCount = randomIntBetween(1, 100);
        buckets.add(new LongTerms.Bucket(term, docCount, InternalAggregations.EMPTY, showTermDocCountError,
                docCountError, format));
    }
    return new LongTerms(name, order, requiredSize, minDocCount, pipelineAggregators, metaData, format, shardSize,
            showTermDocCountError, otherDocCount, buckets, docCountError);
}
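The LongTerms variant differs from the DoubleTerms one only in the key type and in the candidate supplier: random()::nextLong is a method reference used as the Supplier<Long> handed to randomValueOtherThanMany. A minimal plain-Java sketch of that pattern (hypothetical variable names, nothing Elasticsearch-specific):

    java.util.Random random = new java.util.Random();
    // A bound method reference acts as a Supplier: each get() draws a fresh candidate key.
    java.util.function.Supplier<Long> candidateKeys = random::nextLong;
    long firstCandidate = candidateKeys.get();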
The class InternalDateHistogramTests, method createTestInstance:
@Override
protected InternalDateHistogram createTestInstance(String name, List<PipelineAggregator> pipelineAggregators,
        Map<String, Object> metaData) {
    boolean keyed = randomBoolean();
    DocValueFormat format = DocValueFormat.RAW;
    int nbBuckets = randomInt(10);
    List<InternalDateHistogram.Bucket> buckets = new ArrayList<>(nbBuckets);
    long startingDate = System.currentTimeMillis();
    long interval = randomIntBetween(1, 3);
    long intervalMillis = randomFrom(timeValueSeconds(interval), timeValueMinutes(interval),
            timeValueHours(interval)).getMillis();
    for (int i = 0; i < nbBuckets; i++) {
        long key = startingDate + (intervalMillis * i);
        buckets.add(i, new InternalDateHistogram.Bucket(key, randomIntBetween(1, 100), keyed, format,
                InternalAggregations.EMPTY));
    }
    InternalOrder order = (InternalOrder) randomFrom(InternalHistogram.Order.KEY_ASC, InternalHistogram.Order.KEY_DESC);
    return new InternalDateHistogram(name, buckets, order, 1, 0L, null, format, keyed, pipelineAggregators, metaData);
}
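The interval is drawn as a TimeValue and converted to milliseconds before being folded into the bucket keys. A small example of that conversion and the resulting key spacing, assuming the statically imported timeValue* helpers are the factories on org.elasticsearch.common.unit.TimeValue (as elsewhere in the Elasticsearch code base):

    long startingDate = System.currentTimeMillis();
    long intervalMillis = TimeValue.timeValueMinutes(2).getMillis(); // 2 minutes = 120000 ms
    // Keys are evenly spaced: key(i) = startingDate + intervalMillis * i.
    long keyOfThirdBucket = startingDate + intervalMillis * 2;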
The class InternalHistogramTests, method createTestInstance:
@Override
protected InternalHistogram createTestInstance(String name, List<PipelineAggregator> pipelineAggregators,
        Map<String, Object> metaData) {
    final boolean keyed = randomBoolean();
    final DocValueFormat format = DocValueFormat.RAW;
    final int base = randomInt(50) - 30;
    final int numBuckets = randomInt(10);
    final int interval = randomIntBetween(1, 3);
    List<InternalHistogram.Bucket> buckets = new ArrayList<>();
    for (int i = 0; i < numBuckets; ++i) {
        final int docCount = TestUtil.nextInt(random(), 1, 50);
        buckets.add(new InternalHistogram.Bucket(base + i * interval, docCount, keyed, format,
                InternalAggregations.EMPTY));
    }
    return new InternalHistogram(name, buckets, (InternalOrder) InternalHistogram.Order.KEY_ASC, 1, null, format,
            keyed, pipelineAggregators, metaData);
}
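Because each key is computed as base + i * interval, the buckets come out evenly spaced and already in ascending key order, which is consistent with the fixed InternalHistogram.Order.KEY_ASC passed to the constructor. A minimal arithmetic sketch with hypothetical concrete values (no Elasticsearch types involved):

    int base = -10;   // randomInt(50) - 30 may well be negative
    int interval = 3; // randomIntBetween(1, 3)
    double[] keys = new double[4];
    for (int i = 0; i < keys.length; i++) {
        keys[i] = base + i * interval; // -10.0, -7.0, -4.0, -1.0: ascending, evenly spaced
    }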