Use of org.apache.lucene.document.BinaryDocValuesField in project lucene-solr by apache.
The class BaseDocValuesFormatTestCase, method testVeryLargeButLegalBytes.
public void testVeryLargeButLegalBytes() throws IOException {
  Analyzer analyzer = new MockAnalyzer(random());
  Directory directory = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(analyzer);
  conf.setMergePolicy(newLogMergePolicy());
  RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
  Document doc = new Document();
  byte[] bytes = new byte[32766];
  BytesRef b = new BytesRef(bytes);
  random().nextBytes(bytes);
  doc.add(new BinaryDocValuesField("dv", b));
  iwriter.addDocument(doc);
  iwriter.close();
  // Now search the index:
  // read-only=true
  IndexReader ireader = DirectoryReader.open(directory);
  assert ireader.leaves().size() == 1;
  BinaryDocValues dv = ireader.leaves().get(0).reader().getBinaryDocValues("dv");
  assertEquals(0, dv.nextDoc());
  assertEquals(new BytesRef(bytes), dv.binaryValue());
  ireader.close();
  directory.close();
}
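For the same round trip outside the test framework, here is a minimal sketch (not from lucene-solr itself) that writes one 32766-byte binary doc value with a plain IndexWriter and reads it back through the Lucene 7-style iterator API; the helper name, the Directory argument, and the use of StandardAnalyzer are illustrative assumptions.

// Minimal sketch (illustrative, not from the test suite): index one document with a
// large-but-legal binary doc value and iterate it back per leaf.
// Assumed imports: org.apache.lucene.analysis.standard.StandardAnalyzer,
// org.apache.lucene.document.*, org.apache.lucene.index.*,
// org.apache.lucene.search.DocIdSetIterator, org.apache.lucene.store.Directory,
// org.apache.lucene.util.BytesRef, java.io.IOException, java.util.Random.
static void writeAndReadLargeBinaryValue(Directory directory) throws IOException {
  byte[] bytes = new byte[32766];
  new Random().nextBytes(bytes);
  try (IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(new StandardAnalyzer()))) {
    Document doc = new Document();
    doc.add(new BinaryDocValuesField("dv", new BytesRef(bytes)));
    writer.addDocument(doc);
  }
  try (DirectoryReader reader = DirectoryReader.open(directory)) {
    for (LeafReaderContext leaf : reader.leaves()) {
      BinaryDocValues dv = leaf.reader().getBinaryDocValues("dv");
      if (dv == null) {
        continue; // no document in this leaf has a value for "dv"
      }
      // The iterator visits only documents that have a value for the field.
      for (int doc = dv.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = dv.nextDoc()) {
        BytesRef value = dv.binaryValue();
        System.out.println("doc " + doc + " carries " + value.length + " bytes");
      }
    }
  }
}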
Use of org.apache.lucene.document.BinaryDocValuesField in project lucene-solr by apache.
The class BaseDocValuesFormatTestCase, method testTwoFieldsMixed.
public void testTwoFieldsMixed() throws IOException {
  Directory directory = newDirectory();
  RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory);
  Document doc = new Document();
  String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
  String text = "This is the text to be indexed. " + longTerm;
  doc.add(newTextField("fieldname", text, Field.Store.YES));
  doc.add(new NumericDocValuesField("dv1", 5));
  doc.add(new BinaryDocValuesField("dv2", new BytesRef("hello world")));
  iwriter.addDocument(doc);
  iwriter.close();
  // Now search the index:
  // read-only=true
  IndexReader ireader = DirectoryReader.open(directory);
  IndexSearcher isearcher = new IndexSearcher(ireader);
  assertEquals(1, isearcher.search(new TermQuery(new Term("fieldname", longTerm)), 1).totalHits);
  Query query = new TermQuery(new Term("fieldname", "text"));
  TopDocs hits = isearcher.search(query, 1);
  assertEquals(1, hits.totalHits);
  // Iterate through the results:
  for (int i = 0; i < hits.scoreDocs.length; i++) {
    int docID = hits.scoreDocs[i].doc;
    Document hitDoc = isearcher.doc(docID);
    assertEquals(text, hitDoc.get("fieldname"));
    assert ireader.leaves().size() == 1;
    NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv1");
    assertEquals(docID, dv.advance(docID));
    assertEquals(5, dv.longValue());
    BinaryDocValues dv2 = ireader.leaves().get(0).reader().getBinaryDocValues("dv2");
    assertEquals(docID, dv2.advance(docID));
    assertEquals(new BytesRef("hello world"), dv2.binaryValue());
  }
  ireader.close();
  directory.close();
}
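The per-hit pattern in the loop above, fetching a leaf reader and advancing the doc-values iterator to the hit's doc id, looks roughly like the following outside the test framework. This is a hedged sketch: the helper name is made up, the field names mirror the test, and it assumes the same imports as the earlier sketch plus java.util.List, org.apache.lucene.index.ReaderUtil and org.apache.lucene.search.*.

// Sketch (illustrative): resolve "dv1"/"dv2" for each search hit. Doc-values
// iterators are forward-only, so a fresh iterator is fetched per hit to keep
// the example simple; production code would group hits by segment.
static void printDocValuesForHits(IndexSearcher searcher, Query query) throws IOException {
  TopDocs hits = searcher.search(query, 10);
  List<LeafReaderContext> leaves = searcher.getIndexReader().leaves();
  for (ScoreDoc sd : hits.scoreDocs) {
    // Map the top-level doc id to its segment and a segment-local doc id.
    LeafReaderContext leaf = leaves.get(ReaderUtil.subIndex(sd.doc, leaves));
    int segmentDoc = sd.doc - leaf.docBase;
    NumericDocValues dv1 = leaf.reader().getNumericDocValues("dv1");
    if (dv1 != null && dv1.advanceExact(segmentDoc)) {
      System.out.println("dv1 = " + dv1.longValue());
    }
    BinaryDocValues dv2 = leaf.reader().getBinaryDocValues("dv2");
    if (dv2 != null && dv2.advanceExact(segmentDoc)) {
      System.out.println("dv2 = " + dv2.binaryValue().utf8ToString());
    }
  }
}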
Use of org.apache.lucene.document.BinaryDocValuesField in project lucene-solr by apache.
The class BaseDocValuesFormatTestCase, method testTwoBytesOneMissingWithMerging.
public void testTwoBytesOneMissingWithMerging() throws IOException {
  Directory directory = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(null);
  conf.setMergePolicy(newLogMergePolicy());
  RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf);
  Document doc = new Document();
  doc.add(new StringField("id", "0", Field.Store.YES));
  doc.add(new BinaryDocValuesField("dv1", new BytesRef()));
  iw.addDocument(doc);
  iw.commit();
  doc = new Document();
  doc.add(new StringField("id", "1", Field.Store.YES));
  iw.addDocument(doc);
  iw.forceMerge(1);
  iw.close();
  IndexReader ir = DirectoryReader.open(directory);
  assertEquals(1, ir.leaves().size());
  LeafReader ar = ir.leaves().get(0).reader();
  BinaryDocValues dv = ar.getBinaryDocValues("dv1");
  assertEquals(0, dv.nextDoc());
  assertEquals(new BytesRef(), dv.binaryValue());
  assertEquals(NO_MORE_DOCS, dv.nextDoc());
  ir.close();
  directory.close();
}
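The test relies on nextDoc() skipping the merged-in document that has no value; when the doc id is already known, advanceExact reports presence directly. A small sketch of that pattern, under the same import assumptions as above (the helper name is made up):

// Sketch (illustrative): advanceExact returns false for documents that have no
// value in the field, which is how the "missing" document above can be detected.
static void printPresentAndMissing(LeafReader reader) throws IOException {
  BinaryDocValues dv = reader.getBinaryDocValues("dv1");
  for (int doc = 0; doc < reader.maxDoc(); doc++) {
    if (dv != null && dv.advanceExact(doc)) {
      System.out.println("doc " + doc + ": " + dv.binaryValue().utf8ToString());
    } else {
      System.out.println("doc " + doc + ": no value for dv1");
    }
  }
}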
Use of org.apache.lucene.document.BinaryDocValuesField in project lucene-solr by apache.
The class TestDocValues, method testBinaryField.
/** Field with binary docvalues. */
public void testBinaryField() throws Exception {
  Directory dir = newDirectory();
  IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
  Document doc = new Document();
  doc.add(new BinaryDocValuesField("foo", new BytesRef("bar")));
  iw.addDocument(doc);
  DirectoryReader dr = DirectoryReader.open(iw);
  LeafReader r = getOnlyLeafReader(dr);
  // ok
  assertNotNull(DocValues.getBinary(r, "foo"));
  // errors
  expectThrows(IllegalStateException.class, () -> {
    DocValues.getNumeric(r, "foo");
  });
  expectThrows(IllegalStateException.class, () -> {
    DocValues.getSorted(r, "foo");
  });
  expectThrows(IllegalStateException.class, () -> {
    DocValues.getSortedSet(r, "foo");
  });
  expectThrows(IllegalStateException.class, () -> {
    DocValues.getSortedNumeric(r, "foo");
  });
  dr.close();
  iw.close();
  dir.close();
}
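DocValues.getBinary and its siblings throw IllegalStateException when the field was indexed with a different doc-values type, which is what the expectThrows calls above verify. If throwing is not desired, the field's type can be checked first through FieldInfos; a hedged sketch (the helper name is made up, same imports assumed plus org.apache.lucene.index.FieldInfo and DocValuesType):

// Sketch (illustrative): look up the doc-values type before asking for a typed view,
// instead of catching IllegalStateException.
static BinaryDocValues binaryOrNull(LeafReader reader, String field) throws IOException {
  FieldInfo fi = reader.getFieldInfos().fieldInfo(field);
  if (fi != null && fi.getDocValuesType() == DocValuesType.BINARY) {
    return DocValues.getBinary(reader, field);
  }
  return null; // field is absent or carries a different doc-values type
}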
Use of org.apache.lucene.document.BinaryDocValuesField in project lucene-solr by apache.
The class TestDocValuesIndexing, method testAddBinaryTwice.
public void testAddBinaryTwice() throws IOException {
  Analyzer analyzer = new MockAnalyzer(random());
  Directory directory = newDirectory();
  // we don't use RandomIndexWriter because it might add more docvalues than we expect!
  IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
  iwc.setMergePolicy(newLogMergePolicy());
  IndexWriter iwriter = new IndexWriter(directory, iwc);
  Document doc = new Document();
  doc.add(new BinaryDocValuesField("dv", new BytesRef("foo!")));
  iwriter.addDocument(doc);
  doc.add(new BinaryDocValuesField("dv", new BytesRef("bar!")));
  expectThrows(IllegalArgumentException.class, () -> {
    iwriter.addDocument(doc);
  });
  IndexReader ir = iwriter.getReader();
  assertEquals(1, ir.numDocs());
  ir.close();
  iwriter.close();
  directory.close();
}
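A document may carry at most one value per binary doc-values field, hence the IllegalArgumentException above. When a value needs to change after indexing, IndexWriter.updateBinaryDocValue is the supported route; a hedged sketch (the "id"/"dv" field names and the helper are illustrative, same imports assumed plus org.apache.lucene.index.Term):

// Sketch (illustrative): replace a binary doc value in place rather than adding
// the field to the same document twice.
static void replaceBinaryValue(IndexWriter writer) throws IOException {
  Document doc = new Document();
  doc.add(new StringField("id", "1", Field.Store.NO));
  doc.add(new BinaryDocValuesField("dv", new BytesRef("foo!")));
  writer.addDocument(doc);
  // Later: update "dv" for every document matching the term id:1.
  writer.updateBinaryDocValue(new Term("id", "1"), "dv", new BytesRef("bar!"));
  writer.commit();
}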