Use of org.apache.lucene.document.BinaryDocValuesField in project lucene-solr by apache.
From the class TestBinaryDocValuesUpdates, the method testUpdateSameDocMultipleTimes:
public void testUpdateSameDocMultipleTimes() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  IndexWriter writer = new IndexWriter(dir, conf);
  Document doc = new Document();
  doc.add(new StringField("key", "doc", Store.NO));
  doc.add(new BinaryDocValuesField("bdv", toBytes(5L)));
  // flushed document
  writer.addDocument(doc);
  writer.commit();
  // in-memory document
  writer.addDocument(doc);
  // update existing field
  writer.updateBinaryDocValue(new Term("key", "doc"), "bdv", toBytes(17L));
  // update existing field 2nd time in this commit
  writer.updateBinaryDocValue(new Term("key", "doc"), "bdv", toBytes(3L));
  writer.close();
  final DirectoryReader reader = DirectoryReader.open(dir);
  BinaryDocValues bdv = MultiDocValues.getBinaryValues(reader, "bdv");
  for (int i = 0; i < reader.maxDoc(); i++) {
    assertEquals(i, bdv.nextDoc());
    assertEquals(3, getValue(bdv)); // the last update in the commit wins, for both the flushed and the in-memory doc
  }
  reader.close();
  dir.close();
}
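All of these snippets rely on two helpers defined elsewhere in TestBinaryDocValuesUpdates but not shown on this page: toBytes(long) packs a long into a BytesRef, and getValue(BinaryDocValues) decodes the value of the document the iterator is currently positioned on. Here is a minimal matching pair, sketched with a plain vlong encoding; the exact byte layout in the test class is an assumption, but any encode/decode pair that agree with each other would make the tests read the same way:

  static BytesRef toBytes(long value) {
    BytesRef bytes = new BytesRef(10); // a vlong occupies at most 10 bytes
    while ((value & ~0x7FL) != 0L) {
      bytes.bytes[bytes.length++] = (byte) ((value & 0x7FL) | 0x80L);
      value >>>= 7;
    }
    bytes.bytes[bytes.length++] = (byte) value;
    return bytes;
  }

  static long getValue(BinaryDocValues bdv) throws IOException {
    BytesRef bytes = bdv.binaryValue(); // value of the current doc; position with nextDoc()/advance() first
    int idx = bytes.offset;
    byte b = bytes.bytes[idx++];
    long value = b & 0x7FL;
    for (int shift = 7; (b & 0x80L) != 0; shift += 7) {
      b = bytes.bytes[idx++];
      value |= (b & 0x7FL) << shift;
    }
    return value;
  }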
Use of org.apache.lucene.document.BinaryDocValuesField in project lucene-solr by apache.
From the class TestBinaryDocValuesUpdates, the method testMultipleDocValuesTypes:
public void testMultipleDocValuesTypes() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  conf.setMaxBufferedDocs(10); // prevent merges
  IndexWriter writer = new IndexWriter(dir, conf);
  for (int i = 0; i < 4; i++) {
    Document doc = new Document();
    doc.add(new StringField("dvUpdateKey", "dv", Store.NO));
    doc.add(new NumericDocValuesField("ndv", i));
    doc.add(new BinaryDocValuesField("bdv", new BytesRef(Integer.toString(i))));
    doc.add(new SortedDocValuesField("sdv", new BytesRef(Integer.toString(i))));
    doc.add(new SortedSetDocValuesField("ssdv", new BytesRef(Integer.toString(i))));
    doc.add(new SortedSetDocValuesField("ssdv", new BytesRef(Integer.toString(i * 2))));
    writer.addDocument(doc);
  }
  writer.commit();
  // update all docs' bdv field
  writer.updateBinaryDocValue(new Term("dvUpdateKey", "dv"), "bdv", toBytes(17L));
  writer.close();
  final DirectoryReader reader = DirectoryReader.open(dir);
  LeafReader r = reader.leaves().get(0).reader();
  NumericDocValues ndv = r.getNumericDocValues("ndv");
  BinaryDocValues bdv = r.getBinaryDocValues("bdv");
  SortedDocValues sdv = r.getSortedDocValues("sdv");
  SortedSetDocValues ssdv = r.getSortedSetDocValues("ssdv");
  for (int i = 0; i < r.maxDoc(); i++) {
    assertEquals(i, ndv.nextDoc());
    assertEquals(i, ndv.longValue());
    assertEquals(i, bdv.nextDoc());
    assertEquals(17, getValue(bdv));
    assertEquals(i, sdv.nextDoc());
    BytesRef term = sdv.binaryValue();
    assertEquals(new BytesRef(Integer.toString(i)), term);
    assertEquals(i, ssdv.nextDoc());
    long ord = ssdv.nextOrd();
    term = ssdv.lookupOrd(ord);
    assertEquals(i, Integer.parseInt(term.utf8ToString()));
    // For the i=0 case, we added the same value twice, which was dedup'd by IndexWriter so it has only one value:
    if (i != 0) {
      ord = ssdv.nextOrd();
      term = ssdv.lookupOrd(ord);
      assertEquals(i * 2, Integer.parseInt(term.utf8ToString()));
    }
    assertEquals(SortedSetDocValues.NO_MORE_ORDS, ssdv.nextOrd());
  }
  reader.close();
  dir.close();
}
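The per-document loop over ssdv.nextOrd() until NO_MORE_ORDS is the standard SortedSetDocValues iteration idiom in this Lucene version. Factored into a small hypothetical helper for readability (the method name is ours; the API calls are not):

  static List<String> valuesOfCurrentDoc(SortedSetDocValues ssdv) throws IOException {
    List<String> values = new ArrayList<>();
    long ord;
    while ((ord = ssdv.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
      values.add(ssdv.lookupOrd(ord).utf8ToString()); // ords resolve to terms in sorted order
    }
    return values;
  }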
Use of org.apache.lucene.document.BinaryDocValuesField in project lucene-solr by apache.
From the class TestBinaryDocValuesUpdates, the method testUpdateAllDeletedSegment:
public void testUpdateAllDeletedSegment() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  IndexWriter writer = new IndexWriter(dir, conf);
  Document doc = new Document();
  doc.add(new StringField("id", "doc", Store.NO));
  doc.add(new BinaryDocValuesField("f1", toBytes(1L)));
  writer.addDocument(doc);
  writer.addDocument(doc);
  writer.commit();
  // delete all docs in the first segment
  writer.deleteDocuments(new Term("id", "doc"));
  writer.addDocument(doc);
  writer.updateBinaryDocValue(new Term("id", "doc"), "f1", toBytes(2L));
  writer.close();
  DirectoryReader reader = DirectoryReader.open(dir);
  assertEquals(1, reader.leaves().size());
  BinaryDocValues bdv = reader.leaves().get(0).reader().getBinaryDocValues("f1");
  assertEquals(0, bdv.nextDoc());
  assertEquals(2L, getValue(bdv));
  reader.close();
  dir.close();
}
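Two things are being exercised here: closing the writer drops the first segment because every document in it is deleted (hence the single leaf), and the update still reaches the third document even though it was only buffered in memory when updateBinaryDocValue ran. An equivalent check on the counts, assuming the same reader as above:

  assertEquals(1, reader.maxDoc());  // the fully deleted segment was pruned on close
  assertEquals(1, reader.numDocs()); // only the re-added document remains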
Use of org.apache.lucene.document.BinaryDocValuesField in project lucene-solr by apache.
From the class TestBinaryDocValuesUpdates, the method testManyReopensAndFields:
public void testManyReopensAndFields() throws Exception {
  Directory dir = newDirectory();
  final Random random = random();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random));
  LogMergePolicy lmp = newLogMergePolicy();
  lmp.setMergeFactor(3); // merge often
  conf.setMergePolicy(lmp);
  IndexWriter writer = new IndexWriter(dir, conf);
  final boolean isNRT = random.nextBoolean();
  DirectoryReader reader;
  if (isNRT) {
    reader = DirectoryReader.open(writer);
  } else {
    writer.commit();
    reader = DirectoryReader.open(dir);
  }
  //System.out.println("TEST: isNRT=" + isNRT);
  final int numFields = random.nextInt(4) + 3; // 3-7
  final long[] fieldValues = new long[numFields];
  for (int i = 0; i < fieldValues.length; i++) {
    fieldValues[i] = 1;
  }
  int numRounds = atLeast(15);
  int docID = 0;
  for (int i = 0; i < numRounds; i++) {
    int numDocs = atLeast(5);
    //System.out.println("[" + Thread.currentThread().getName() + "]: round=" + i + ", numDocs=" + numDocs);
    for (int j = 0; j < numDocs; j++) {
      Document doc = new Document();
      doc.add(new StringField("id", "doc-" + docID, Store.NO));
      doc.add(new StringField("key", "all", Store.NO)); // update key
      // add all fields with their current value
      for (int f = 0; f < fieldValues.length; f++) {
        doc.add(new BinaryDocValuesField("f" + f, toBytes(fieldValues[f])));
      }
      writer.addDocument(doc);
      ++docID;
    }
    int fieldIdx = random.nextInt(fieldValues.length);
    String updateField = "f" + fieldIdx;
    // System.out.println("[" + Thread.currentThread().getName() + "]: updated field '" + updateField + "' to value " + fieldValues[fieldIdx]);
    writer.updateBinaryDocValue(new Term("key", "all"), updateField, toBytes(++fieldValues[fieldIdx]));
    if (random.nextDouble() < 0.2) {
      // might also delete an already deleted document, ok!
      int deleteDoc = random.nextInt(docID);
      writer.deleteDocuments(new Term("id", "doc-" + deleteDoc));
      // System.out.println("[" + Thread.currentThread().getName() + "]: deleted document: doc-" + deleteDoc);
    }
    // verify reader
    if (!isNRT) {
      writer.commit();
    }
    // System.out.println("[" + Thread.currentThread().getName() + "]: reopen reader: " + reader);
    DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
    assertNotNull(newReader);
    reader.close();
    reader = newReader;
    // System.out.println("[" + Thread.currentThread().getName() + "]: reopened reader: " + reader);
    assertTrue(reader.numDocs() > 0); // we delete at most one document per round
    for (LeafReaderContext context : reader.leaves()) {
      LeafReader r = context.reader();
      // System.out.println(((SegmentReader) r).getSegmentName());
      Bits liveDocs = r.getLiveDocs();
      for (int field = 0; field < fieldValues.length; field++) {
        String f = "f" + field;
        BinaryDocValues bdv = r.getBinaryDocValues(f);
        assertNotNull(bdv);
        int maxDoc = r.maxDoc();
        for (int doc = 0; doc < maxDoc; doc++) {
          if (liveDocs == null || liveDocs.get(doc)) {
            assertEquals(doc, bdv.advance(doc));
            assertEquals("invalid value for doc=" + doc + ", field=" + f + ", reader=" + r, fieldValues[field], getValue(bdv));
          }
        }
      }
    }
    // System.out.println();
  }
  writer.close();
  IOUtils.close(reader, dir);
}
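The reopen step relies on DirectoryReader.openIfChanged, which returns null when the index has not changed; the test may assertNotNull only because every round adds documents and applies an update. Outside a test, the usual guard looks like this (same API; sketched as the general idiom rather than this test's code):

  DirectoryReader newReader = DirectoryReader.openIfChanged(reader); // null if nothing changed
  if (newReader != null) {
    reader.close(); // close the old reader only after the replacement is open
    reader = newReader;
  }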
Use of org.apache.lucene.document.BinaryDocValuesField in project lucene-solr by apache.
From the class TestBinaryDocValuesUpdates, the method testStressMultiThreading:
public void testStressMultiThreading() throws Exception {
  final Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  final IndexWriter writer = new IndexWriter(dir, conf);
  // create index
  final int numFields = TestUtil.nextInt(random(), 1, 4);
  final int numDocs = atLeast(2000);
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    doc.add(new StringField("id", "doc" + i, Store.NO));
    double group = random().nextDouble();
    String g;
    if (group < 0.1) g = "g0";
    else if (group < 0.5) g = "g1";
    else if (group < 0.8) g = "g2";
    else g = "g3";
    doc.add(new StringField("updKey", g, Store.NO));
    for (int j = 0; j < numFields; j++) {
      long value = random().nextInt();
      doc.add(new BinaryDocValuesField("f" + j, toBytes(value)));
      doc.add(new BinaryDocValuesField("cf" + j, toBytes(value * 2))); // control, always updated to f * 2
    }
    writer.addDocument(doc);
  }
  final int numThreads = TestUtil.nextInt(random(), 3, 6);
  final CountDownLatch done = new CountDownLatch(numThreads);
  final AtomicInteger numUpdates = new AtomicInteger(atLeast(100));
  // same thread updates a field as well as reopens
  Thread[] threads = new Thread[numThreads];
  for (int i = 0; i < threads.length; i++) {
    threads[i] = new Thread("UpdateThread-" + i) {
      @Override
      public void run() {
        DirectoryReader reader = null;
        boolean success = false;
        try {
          Random random = random();
          while (numUpdates.getAndDecrement() > 0) {
            double group = random.nextDouble();
            Term t;
            if (group < 0.1) t = new Term("updKey", "g0");
            else if (group < 0.5) t = new Term("updKey", "g1");
            else if (group < 0.8) t = new Term("updKey", "g2");
            else t = new Term("updKey", "g3");
            final int field = random.nextInt(numFields);
            final String f = "f" + field;
            final String cf = "cf" + field;
            // System.out.println("[" + Thread.currentThread().getName() + "] numUpdates=" + numUpdates + " updateTerm=" + t + " field=" + field);
            long updValue = random.nextInt();
            writer.updateDocValues(t, new BinaryDocValuesField(f, toBytes(updValue)), new BinaryDocValuesField(cf, toBytes(updValue * 2)));
            if (random.nextDouble() < 0.2) {
              // delete a random document
              int doc = random.nextInt(numDocs);
              // System.out.println("[" + Thread.currentThread().getName() + "] deleteDoc=doc" + doc);
              writer.deleteDocuments(new Term("id", "doc" + doc));
            }
            if (random.nextDouble() < 0.05) {
              // commit every 20 updates on average
              // System.out.println("[" + Thread.currentThread().getName() + "] commit");
              writer.commit();
            }
            if (random.nextDouble() < 0.1) {
              // reopen NRT reader (apply updates), on average once every 10 updates
              if (reader == null) {
                // System.out.println("[" + Thread.currentThread().getName() + "] open NRT");
                reader = DirectoryReader.open(writer);
              } else {
                // System.out.println("[" + Thread.currentThread().getName() + "] reopen NRT");
                DirectoryReader r2 = DirectoryReader.openIfChanged(reader, writer);
                if (r2 != null) {
                  reader.close();
                  reader = r2;
                }
              }
            }
          }
          // System.out.println("[" + Thread.currentThread().getName() + "] DONE");
          success = true;
        } catch (IOException e) {
          throw new RuntimeException(e);
        } finally {
          if (reader != null) {
            try {
              reader.close();
            } catch (IOException e) {
              if (success) {
                // suppress this exception only if there was another exception
                throw new RuntimeException(e);
              }
            }
          }
          done.countDown();
        }
      }
    };
  }
  for (Thread t : threads) t.start();
  done.await();
  writer.close();
  DirectoryReader reader = DirectoryReader.open(dir);
  for (LeafReaderContext context : reader.leaves()) {
    LeafReader r = context.reader();
    for (int i = 0; i < numFields; i++) {
      BinaryDocValues bdv = r.getBinaryDocValues("f" + i);
      BinaryDocValues control = r.getBinaryDocValues("cf" + i);
      Bits liveDocs = r.getLiveDocs();
      for (int j = 0; j < r.maxDoc(); j++) {
        if (liveDocs == null || liveDocs.get(j)) {
          assertEquals(j, bdv.advance(j));
          assertEquals(j, control.advance(j));
          assertEquals(getValue(control), getValue(bdv) * 2);
        }
      }
    }
  }
  reader.close();
  dir.close();
}
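Unlike the first two tests, the verification loops here and in testManyReopensAndFields position the doc-values iterators with advance(doc) rather than nextDoc(): once documents have been deleted, the loop skips dead docs, so the iterator must be moved explicitly instead of stepping through every document in order. Side by side, under the assumption that every document was indexed with a value for the field (variable names as in the loops above):

  // no deletions: every doc has a value, so nextDoc() visits them in order
  for (int doc = 0; doc < r.maxDoc(); doc++) {
    assertEquals(doc, bdv.nextDoc());
  }
  // with deletions: only live docs are checked, so jump straight to each one
  for (int doc = 0; doc < r.maxDoc(); doc++) {
    if (liveDocs == null || liveDocs.get(doc)) {
      assertEquals(doc, bdv.advance(doc));
    }
  }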