Example use of org.apache.lucene.document.LegacyIntField from the elastic/elasticsearch project: class SimpleLuceneTests, method testNumericTermDocsFreqs.
/**
 * A test just to verify that term freqs are not stored for numeric fields. <tt>int1</tt> is not storing termFreq
 * and <tt>int2</tt> does.
 */
public void testNumericTermDocsFreqs() throws Exception {
    // try-with-resources: the original leaked the Directory (never closed) and closed
    // reader/writer manually; this guarantees cleanup even when an assertion fails.
    try (Directory dir = new RAMDirectory();
            IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER))) {
        Document doc = new Document();

        // "int1" is first added with TYPE_NOT_STORED, which does not index term freqs.
        FieldType type = LegacyIntField.TYPE_NOT_STORED;
        LegacyIntField field = new LegacyIntField("int1", 1, type);
        doc.add(field);

        // "int1" is deliberately added a second time with DOCS_AND_FREQS; the assertion
        // below (freq() == 1) verifies that freqs still end up unavailable for "int1".
        type = new FieldType(LegacyIntField.TYPE_NOT_STORED);
        type.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
        type.freeze();
        field = new LegacyIntField("int1", 1, type);
        doc.add(field);

        // "int2" is only ever indexed with DOCS_AND_FREQS and is added twice, so its
        // in-document term frequency of 2 is recorded.
        field = new LegacyIntField("int2", 1, type);
        doc.add(field);
        field = new LegacyIntField("int2", 1, type);
        doc.add(field);

        indexWriter.addDocument(doc);

        try (IndexReader reader = DirectoryReader.open(indexWriter)) {
            LeafReader atomicReader = SlowCompositeReaderWrapper.wrap(reader);

            // For "int1" freqs were not stored, so freq() falls back to 1.
            Terms terms = atomicReader.terms("int1");
            TermsEnum termsEnum = terms.iterator();
            termsEnum.next();
            PostingsEnum termDocs = termsEnum.postings(null);
            assertThat(termDocs.nextDoc(), equalTo(0));
            assertThat(termDocs.docID(), equalTo(0));
            assertThat(termDocs.freq(), equalTo(1));

            // For "int2" freqs were stored: the term occurs twice in the document.
            terms = atomicReader.terms("int2");
            termsEnum = terms.iterator();
            termsEnum.next();
            termDocs = termsEnum.postings(termDocs);
            assertThat(termDocs.nextDoc(), equalTo(0));
            assertThat(termDocs.docID(), equalTo(0));
            assertThat(termDocs.freq(), equalTo(2));
        }
    }
}
Example use of org.apache.lucene.document.LegacyIntField from the elastic/elasticsearch project: class SimpleLuceneTests, method testSimpleNumericOps.
/**
 * Verifies that a stored {@code LegacyIntField} round-trips through the index: the stored
 * value can be read back after a lookup by id, and the document can also be found by
 * querying the legacy prefix-coded form of the numeric term.
 */
public void testSimpleNumericOps() throws Exception {
    // try-with-resources: the original never closed the IndexReader or the Directory.
    try (Directory dir = new RAMDirectory();
            IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER))) {
        Document document = new Document();
        document.add(new TextField("_id", "1", Field.Store.YES));
        document.add(new LegacyIntField("test", 2, LegacyIntField.TYPE_STORED));
        indexWriter.addDocument(document);

        try (IndexReader reader = DirectoryReader.open(indexWriter)) {
            IndexSearcher searcher = new IndexSearcher(reader);

            // Look the document up by id and check the stored numeric value.
            TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
            Document doc = searcher.doc(topDocs.scoreDocs[0].doc);
            IndexableField f = doc.getField("test");
            assertThat(f.stringValue(), equalTo("2"));

            // Legacy numeric fields index prefix-coded terms; query via the encoded
            // form of the int value 2 (shift 0 = the full-precision term).
            BytesRefBuilder bytes = new BytesRefBuilder();
            LegacyNumericUtils.intToPrefixCoded(2, 0, bytes);
            topDocs = searcher.search(new TermQuery(new Term("test", bytes.get())), 1);
            doc = searcher.doc(topDocs.scoreDocs[0].doc);
            f = doc.getField("test");
            assertThat(f.stringValue(), equalTo("2"));
        }
    }
}
Example use of org.apache.lucene.document.LegacyIntField from the elastic/elasticsearch project: class NestedChildrenFilterTests, method testNestedChildrenFilter.
/**
 * Indexes random parent/child doc blocks and checks that {@code NestedChildrenQuery}
 * matches exactly the number of child docs recorded on each parent.
 */
public void testNestedChildrenFilter() throws Exception {
    int numParentDocs = scaledRandomIntBetween(0, 32);
    int maxChildDocsPerParent = scaledRandomIntBetween(8, 16);

    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    for (int i = 0; i < numParentDocs; i++) {
        // Child docs precede their parent within the same block (addDocuments).
        int numChildDocs = scaledRandomIntBetween(0, maxChildDocsPerParent);
        List<Document> docs = new ArrayList<>(numChildDocs + 1);
        for (int j = 0; j < numChildDocs; j++) {
            Document childDoc = new Document();
            childDoc.add(new StringField("type", "child", Field.Store.NO));
            docs.add(childDoc);
        }
        Document parentDoc = new Document();
        parentDoc.add(new StringField("type", "parent", Field.Store.NO));
        // Store the expected child count on the parent so it can be read back per hit.
        parentDoc.add(new LegacyIntField("num_child_docs", numChildDocs, Field.Store.YES));
        docs.add(parentDoc);
        writer.addDocuments(docs);
    }
    IndexReader reader = writer.getReader();
    writer.close();

    IndexSearcher searcher = new IndexSearcher(reader);
    FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
    BitSetProducer parentFilter = new QueryBitSetProducer(new TermQuery(new Term("type", "parent")));
    Query childFilter = new TermQuery(new Term("type", "child"));
    int checkedParents = 0;
    final Weight parentsWeight = searcher.createNormalizedWeight(new TermQuery(new Term("type", "parent")), false);
    for (LeafReaderContext leaf : reader.leaves()) {
        // Weight.scorer returns null when no doc in this segment matches the parent
        // query; the original dereferenced it unconditionally and could NPE here.
        org.apache.lucene.search.Scorer parentScorer = parentsWeight.scorer(leaf);
        if (parentScorer == null) {
            continue;
        }
        DocIdSetIterator parents = parentScorer.iterator();
        for (int parentDoc = parents.nextDoc(); parentDoc != DocIdSetIterator.NO_MORE_DOCS; parentDoc = parents.nextDoc()) {
            int expectedChildDocs = leaf.reader().document(parentDoc).getField("num_child_docs").numericValue().intValue();
            hitContext.reset(null, leaf, parentDoc, searcher);
            NestedChildrenQuery nestedChildrenFilter = new NestedChildrenQuery(parentFilter, childFilter, hitContext);
            TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
            searcher.search(new ConstantScoreQuery(nestedChildrenFilter), totalHitCountCollector);
            assertThat(totalHitCountCollector.getTotalHits(), equalTo(expectedChildDocs));
            checkedParents++;
        }
    }
    // Every parent doc must have been visited exactly once.
    assertThat(checkedParents, equalTo(numParentDocs));
    reader.close();
    dir.close();
}
Aggregations