Search in sources:

Example 1 with DocValueFormat

Use of org.elasticsearch.search.DocValueFormat in the elasticsearch project by elastic.

From the class SignificantStringTermsTests, method createTestInstance:

@Override
protected InternalSignificantTerms createTestInstance(String name, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {
    DocValueFormat format = DocValueFormat.RAW;
    int requiredSize = randomIntBetween(1, 5);
    int shardSize = requiredSize + 2;
    final int numBuckets = randomInt(shardSize);
    long globalSubsetSize = 0;
    long globalSupersetSize = 0;
    List<SignificantStringTerms.Bucket> buckets = new ArrayList<>(numBuckets);
    Set<BytesRef> terms = new HashSet<>();
    for (int i = 0; i < numBuckets; ++i) {
        BytesRef term = randomValueOtherThanMany(b -> terms.add(b) == false, () -> new BytesRef(randomAsciiOfLength(10)));
        int subsetDf = randomIntBetween(1, 10);
        int supersetDf = randomIntBetween(subsetDf, 20);
        int supersetSize = randomIntBetween(supersetDf, 30);
        globalSubsetSize += subsetDf;
        globalSupersetSize += supersetSize;
        buckets.add(new SignificantStringTerms.Bucket(term, subsetDf, subsetDf, supersetDf, supersetSize, InternalAggregations.EMPTY, format));
    }
    return new SignificantStringTerms(name, requiredSize, 1L, pipelineAggregators, metaData, format, globalSubsetSize, globalSupersetSize, significanceHeuristic, buckets);
}
Also used: DocValueFormat (org.elasticsearch.search.DocValueFormat), ArrayList (java.util.ArrayList), BytesRef (org.apache.lucene.util.BytesRef), HashSet (java.util.HashSet)
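
This test, like the other examples on this page, passes DocValueFormat.RAW, the pass-through format: numeric bucket keys stay numeric and BytesRef keys are decoded to their UTF-8 string form when rendered. A minimal sketch of that behaviour in isolation, assuming the 5.x org.elasticsearch.search.DocValueFormat interface (with its format(long) and format(BytesRef) overloads) is on the classpath; this is an illustration, not code from the project:

import org.apache.lucene.util.BytesRef;
import org.elasticsearch.search.DocValueFormat;

public class RawFormatSketch {
    public static void main(String[] args) {
        // RAW leaves the numeric key untouched and turns the BytesRef term into a plain String.
        Object numericKey = DocValueFormat.RAW.format(42L);
        Object termKey = DocValueFormat.RAW.format(new BytesRef("user_42"));
        System.out.println(numericKey + " / " + termKey);
    }
}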

Example 2 with DocValueFormat

Use of org.elasticsearch.search.DocValueFormat in the elasticsearch project by elastic.

From the class DoubleTermsTests, method createTestInstance:

@Override
protected InternalTerms<?, ?> createTestInstance(String name, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {
    Terms.Order order = Terms.Order.count(false);
    long minDocCount = 1;
    int requiredSize = 3;
    int shardSize = requiredSize + 2;
    DocValueFormat format = DocValueFormat.RAW;
    boolean showTermDocCountError = false;
    long docCountError = -1;
    long otherDocCount = 0;
    List<DoubleTerms.Bucket> buckets = new ArrayList<>();
    final int numBuckets = randomInt(shardSize);
    Set<Double> terms = new HashSet<>();
    for (int i = 0; i < numBuckets; ++i) {
        double term = randomValueOtherThanMany(d -> terms.add(d) == false, random()::nextDouble);
        int docCount = randomIntBetween(1, 100);
        buckets.add(new DoubleTerms.Bucket(term, docCount, InternalAggregations.EMPTY, showTermDocCountError, docCountError, format));
    }
    return new DoubleTerms(name, order, requiredSize, minDocCount, pipelineAggregators, metaData, format, shardSize, showTermDocCountError, otherDocCount, buckets, docCountError);
}
Also used: DocValueFormat (org.elasticsearch.search.DocValueFormat), ArrayList (java.util.ArrayList), HashSet (java.util.HashSet)
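
Terms.Order.count(false) selects descending doc-count ordering, and requiredSize caps how many buckets the final response keeps (shardSize = requiredSize + 2 is the larger per-shard overrequest). A self-contained plain-Java sketch of that sort-and-truncate step, using a hypothetical Bucket class rather than the aggregation framework's actual reduce path:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class TopBucketsSketch {
    // Hypothetical stand-in for a terms bucket: just a key and a doc count.
    static class Bucket {
        final double key;
        final long docCount;
        Bucket(double key, long docCount) { this.key = key; this.docCount = docCount; }
        @Override public String toString() { return key + ":" + docCount; }
    }

    static List<Bucket> topByDocCountDesc(List<Bucket> buckets, int requiredSize) {
        List<Bucket> sorted = new ArrayList<>(buckets);
        // Descending doc count, i.e. what Terms.Order.count(false) asks for.
        sorted.sort(Comparator.comparingLong((Bucket b) -> b.docCount).reversed());
        return sorted.subList(0, Math.min(requiredSize, sorted.size()));
    }

    public static void main(String[] args) {
        List<Bucket> shardBuckets = Arrays.asList(
                new Bucket(1.5, 7), new Bucket(2.5, 42), new Bucket(3.5, 11));
        System.out.println(topByDocCountDesc(shardBuckets, 2)); // [2.5:42, 3.5:11]
    }
}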

Example 3 with DocValueFormat

Use of org.elasticsearch.search.DocValueFormat in the elasticsearch project by elastic.

From the class LongTermsTests, method createTestInstance:

@Override
protected InternalTerms<?, ?> createTestInstance(String name, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {
    Terms.Order order = Terms.Order.count(false);
    long minDocCount = 1;
    int requiredSize = 3;
    int shardSize = requiredSize + 2;
    DocValueFormat format = DocValueFormat.RAW;
    boolean showTermDocCountError = false;
    long docCountError = -1;
    long otherDocCount = 0;
    List<LongTerms.Bucket> buckets = new ArrayList<>();
    final int numBuckets = randomInt(shardSize);
    Set<Long> terms = new HashSet<>();
    for (int i = 0; i < numBuckets; ++i) {
        long term = randomValueOtherThanMany(l -> terms.add(l) == false, random()::nextLong);
        int docCount = randomIntBetween(1, 100);
        buckets.add(new LongTerms.Bucket(term, docCount, InternalAggregations.EMPTY, showTermDocCountError, docCountError, format));
    }
    return new LongTerms(name, order, requiredSize, minDocCount, pipelineAggregators, metaData, format, shardSize, showTermDocCountError, otherDocCount, buckets, docCountError);
}
Also used: DocValueFormat (org.elasticsearch.search.DocValueFormat), ArrayList (java.util.ArrayList), HashSet (java.util.HashSet)
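
The randomValueOtherThanMany(l -> terms.add(l) == false, random()::nextLong) idiom works because Set.add returns false for a value that is already present, so candidates are redrawn until a fresh term comes back. A stripped-down sketch of that rejection loop outside the ESTestCase helpers (uniqueLong is a hypothetical name, not an Elasticsearch API):

import java.util.HashSet;
import java.util.Random;
import java.util.Set;

public class UniqueTermSketch {
    // Draw random longs until one is not already in 'seen'; Set.add doubles as the uniqueness check.
    static long uniqueLong(Random random, Set<Long> seen) {
        long candidate;
        do {
            candidate = random.nextLong();
        } while (seen.add(candidate) == false); // false means duplicate, so draw again
        return candidate;
    }

    public static void main(String[] args) {
        Random random = new Random(42);
        Set<Long> terms = new HashSet<>();
        for (int i = 0; i < 5; i++) {
            System.out.println(uniqueLong(random, terms));
        }
    }
}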

Example 4 with DocValueFormat

Use of org.elasticsearch.search.DocValueFormat in the elasticsearch project by elastic.

From the class InternalDateHistogramTests, method createTestInstance:

@Override
protected InternalDateHistogram createTestInstance(String name, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {
    boolean keyed = randomBoolean();
    DocValueFormat format = DocValueFormat.RAW;
    int nbBuckets = randomInt(10);
    List<InternalDateHistogram.Bucket> buckets = new ArrayList<>(nbBuckets);
    long startingDate = System.currentTimeMillis();
    long interval = randomIntBetween(1, 3);
    long intervalMillis = randomFrom(timeValueSeconds(interval), timeValueMinutes(interval), timeValueHours(interval)).getMillis();
    for (int i = 0; i < nbBuckets; i++) {
        long key = startingDate + (intervalMillis * i);
        buckets.add(i, new InternalDateHistogram.Bucket(key, randomIntBetween(1, 100), keyed, format, InternalAggregations.EMPTY));
    }
    InternalOrder order = (InternalOrder) randomFrom(InternalHistogram.Order.KEY_ASC, InternalHistogram.Order.KEY_DESC);
    return new InternalDateHistogram(name, buckets, order, 1, 0L, null, format, keyed, pipelineAggregators, metaData);
}
Also used: DocValueFormat (org.elasticsearch.search.DocValueFormat), ArrayList (java.util.ArrayList)
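
The date-histogram bucket keys here are plain epoch milliseconds: each key is startingDate plus i times the randomly chosen interval converted to milliseconds. A small self-contained sketch of that key arithmetic, rendering keys with java.time instead of a DocValueFormat (an illustration only, not Elasticsearch code):

import java.time.Instant;
import java.util.concurrent.TimeUnit;

public class DateHistogramKeySketch {
    public static void main(String[] args) {
        long startingDate = System.currentTimeMillis();
        long intervalMillis = TimeUnit.MINUTES.toMillis(2); // one of the second/minute/hour choices the test randomizes over
        for (int i = 0; i < 3; i++) {
            long key = startingDate + intervalMillis * i; // same arithmetic as the test's bucket keys
            System.out.println(key + " -> " + Instant.ofEpochMilli(key));
        }
    }
}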

Example 5 with DocValueFormat

Use of org.elasticsearch.search.DocValueFormat in the elasticsearch project by elastic.

From the class InternalHistogramTests, method createTestInstance:

@Override
protected InternalHistogram createTestInstance(String name, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {
    final boolean keyed = randomBoolean();
    final DocValueFormat format = DocValueFormat.RAW;
    final int base = randomInt(50) - 30;
    final int numBuckets = randomInt(10);
    final int interval = randomIntBetween(1, 3);
    List<InternalHistogram.Bucket> buckets = new ArrayList<>();
    for (int i = 0; i < numBuckets; ++i) {
        final int docCount = TestUtil.nextInt(random(), 1, 50);
        buckets.add(new InternalHistogram.Bucket(base + i * interval, docCount, keyed, format, InternalAggregations.EMPTY));
    }
    return new InternalHistogram(name, buckets, (InternalOrder) InternalHistogram.Order.KEY_ASC, 1, null, format, keyed, pipelineAggregators, metaData);
}
Also used: DocValueFormat (org.elasticsearch.search.DocValueFormat), ArrayList (java.util.ArrayList)
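
The numeric-histogram test builds keys directly as base + i * interval; in a real aggregation each document value is snapped to such a key by rounding down to its interval boundary. A plain-Java sketch of that bucketing arithmetic (a simplified illustration that ignores the histogram's offset parameter, not the aggregator's actual code path):

public class HistogramKeySketch {
    // Round a value down to the lower edge of its fixed-width bucket.
    static double bucketKey(double value, double interval) {
        return Math.floor(value / interval) * interval;
    }

    public static void main(String[] args) {
        double interval = 3.0;
        for (double value : new double[] { -4.2, 0.0, 2.9, 7.5 }) {
            System.out.println(value + " -> bucket " + bucketKey(value, interval));
        }
    }
}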

Aggregations

Classes used together with DocValueFormat across the 18 indexed examples, with the number of examples each appears in:

DocValueFormat (org.elasticsearch.search.DocValueFormat): 18
ArrayList (java.util.ArrayList): 10
HashSet (java.util.HashSet): 5
FieldDoc (org.apache.lucene.search.FieldDoc): 2
ScoreDoc (org.apache.lucene.search.ScoreDoc): 2
Sort (org.apache.lucene.search.Sort): 2
SortField (org.apache.lucene.search.SortField): 2
TopDocs (org.apache.lucene.search.TopDocs): 2
BytesRef (org.apache.lucene.util.BytesRef): 2
IOException (java.io.IOException): 1
UncheckedIOException (java.io.UncheckedIOException): 1
AbstractList (java.util.AbstractList): 1
Callable (java.util.concurrent.Callable): 1
AtomicReference (java.util.concurrent.atomic.AtomicReference): 1
DoubleHistogram (org.HdrHistogram.DoubleHistogram): 1
LeafReaderContext (org.apache.lucene.index.LeafReaderContext): 1
Term (org.apache.lucene.index.Term): 1
MinDocQuery (org.apache.lucene.queries.MinDocQuery): 1
BooleanQuery (org.apache.lucene.search.BooleanQuery): 1
Collector (org.apache.lucene.search.Collector): 1