Use of org.apache.lucene.analysis.MockAnalyzer in project lucene-solr by apache.
From the class TestConcurrentMergeScheduler, method testDynamicDefaults:
public void testDynamicDefaults() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
  ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
  assertEquals(ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS, cms.getMaxMergeCount());
  assertEquals(ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS, cms.getMaxThreadCount());
  iwc.setMergeScheduler(cms);
  iwc.setMaxBufferedDocs(2);
  LogMergePolicy lmp = newLogMergePolicy();
  lmp.setMergeFactor(2);
  iwc.setMergePolicy(lmp);

  IndexWriter w = new IndexWriter(dir, iwc);
  w.addDocument(new Document());
  w.addDocument(new Document());
  // flush

  w.addDocument(new Document());
  w.addDocument(new Document());
  // flush + merge

  // CMS should have now set true values:
  assertTrue(cms.getMaxMergeCount() != ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS);
  assertTrue(cms.getMaxThreadCount() != ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS);

  w.close();
  dir.close();
}
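This test exercises ConcurrentMergeScheduler's dynamic defaults: both limits start out as the AUTO_DETECT_MERGES_AND_THREADS sentinel and are only replaced with concrete values once the scheduler actually runs a merge. If you prefer fixed limits over auto-detection, a minimal sketch (the 6/2 values are arbitrary illustrations, not tuning advice; iwc is an IndexWriterConfig as above):

  ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
  // Pin explicit limits; maxMergeCount must be >= maxThreadCount.
  cms.setMaxMergesAndThreads(6, 2); // up to 6 queued merges, 2 concurrent merge threads
  iwc.setMergeScheduler(cms);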
Use of org.apache.lucene.analysis.MockAnalyzer in project lucene-solr by apache.
From the class TestConcurrentMergeScheduler, method testLiveMaxMergeCount:
public void testLiveMaxMergeCount() throws Exception {
  Directory d = newDirectory();
  IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
  TieredMergePolicy tmp = new TieredMergePolicy();
  tmp.setSegmentsPerTier(1000);
  tmp.setMaxMergeAtOnce(1000);
  tmp.setMaxMergeAtOnceExplicit(10);
  iwc.setMergePolicy(tmp);
  iwc.setMaxBufferedDocs(2);
  iwc.setRAMBufferSizeMB(-1);

  final AtomicInteger maxRunningMergeCount = new AtomicInteger();

  ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler() {

    final AtomicInteger runningMergeCount = new AtomicInteger();

    @Override
    public void doMerge(IndexWriter writer, MergePolicy.OneMerge merge) throws IOException {
      int count = runningMergeCount.incrementAndGet();
      // evil?
      synchronized (this) {
        if (count > maxRunningMergeCount.get()) {
          maxRunningMergeCount.set(count);
        }
      }
      try {
        super.doMerge(writer, merge);
      } finally {
        runningMergeCount.decrementAndGet();
      }
    }
  };

  assertEquals(ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS, cms.getMaxMergeCount());
  assertEquals(ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS, cms.getMaxThreadCount());
  cms.setMaxMergesAndThreads(5, 3);

  iwc.setMergeScheduler(cms);

  IndexWriter w = new IndexWriter(d, iwc);
  // Makes 100 segments
  for (int i = 0; i < 200; i++) {
    w.addDocument(new Document());
  }

  // No merges should have run so far, because TMP has high segmentsPerTier:
  assertEquals(0, maxRunningMergeCount.get());

  w.forceMerge(1);

  // At most 5 merge threads should have launched at once:
  assertTrue("maxRunningMergeCount=" + maxRunningMergeCount, maxRunningMergeCount.get() <= 5);
  maxRunningMergeCount.set(0);

  // Makes another 100 segments
  for (int i = 0; i < 200; i++) {
    w.addDocument(new Document());
  }

  ((ConcurrentMergeScheduler) w.getConfig().getMergeScheduler()).setMaxMergesAndThreads(1, 1);
  w.forceMerge(1);

  // At most 1 merge thread should have launched at once:
  assertEquals(1, maxRunningMergeCount.get());

  w.close();
  d.close();
}
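The key point of testLiveMaxMergeCount is that the limits can be retuned on a live IndexWriter by pulling the scheduler back out of its config, as the cast on w.getConfig().getMergeScheduler() shows. A hedged sketch of the same pattern, assuming the Lucene 6.x-era ConcurrentMergeScheduler API used in lucene-solr (w is an open IndexWriter; the 4/2 limits are illustrative):

  ConcurrentMergeScheduler live = (ConcurrentMergeScheduler) w.getConfig().getMergeScheduler();
  live.setMaxMergesAndThreads(4, 2); // applies to merges scheduled from now on
  live.disableAutoIOThrottle();      // optional: stop CMS from rate-limiting merge I/O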
Use of org.apache.lucene.analysis.MockAnalyzer in project lucene-solr by apache.
From the class TestBinaryDocValuesUpdates, method testUpdateAllDeletedSegment:
public void testUpdateAllDeletedSegment() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  IndexWriter writer = new IndexWriter(dir, conf);

  Document doc = new Document();
  doc.add(new StringField("id", "doc", Store.NO));
  doc.add(new BinaryDocValuesField("f1", toBytes(1L)));
  writer.addDocument(doc);
  writer.addDocument(doc);
  writer.commit();

  // delete all docs in the first segment
  writer.deleteDocuments(new Term("id", "doc"));
  writer.addDocument(doc);
  writer.updateBinaryDocValue(new Term("id", "doc"), "f1", toBytes(2L));
  writer.close();

  DirectoryReader reader = DirectoryReader.open(dir);
  assertEquals(1, reader.leaves().size());
  BinaryDocValues bdv = reader.leaves().get(0).reader().getBinaryDocValues("f1");
  assertEquals(0, bdv.nextDoc());
  assertEquals(2L, getValue(bdv));
  reader.close();

  dir.close();
}
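The binary doc-values tests above lean on two helpers from TestBinaryDocValuesUpdates, toBytes(long) and getValue(BinaryDocValues), which round-trip a long through a BytesRef. Their exact bodies are not shown on this page and the real encoding may differ; a hypothetical, self-consistent stand-in for illustration only:

  // Hypothetical simplified helpers (big-endian encoding); the actual test class
  // may encode differently, but encode and decode here agree with each other.
  static BytesRef toBytes(long value) {
    BytesRef bytes = new BytesRef(Long.BYTES);
    bytes.length = Long.BYTES;
    for (int i = Long.BYTES - 1; i >= 0; i--) {
      bytes.bytes[i] = (byte) (value & 0xFF);
      value >>>= 8;
    }
    return bytes;
  }

  static long getValue(BinaryDocValues bdv) throws IOException {
    BytesRef term = bdv.binaryValue(); // value of the doc the iterator is positioned on
    long value = 0;
    for (int i = term.offset; i < term.offset + term.length; i++) {
      value = (value << 8) | (term.bytes[i] & 0xFF);
    }
    return value;
  }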
Use of org.apache.lucene.analysis.MockAnalyzer in project lucene-solr by apache.
From the class TestBinaryDocValuesUpdates, method testManyReopensAndFields:
public void testManyReopensAndFields() throws Exception {
  Directory dir = newDirectory();
  final Random random = random();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random));
  LogMergePolicy lmp = newLogMergePolicy();
  lmp.setMergeFactor(3); // merge often
  conf.setMergePolicy(lmp);
  IndexWriter writer = new IndexWriter(dir, conf);

  final boolean isNRT = random.nextBoolean();
  DirectoryReader reader;
  if (isNRT) {
    reader = DirectoryReader.open(writer);
  } else {
    writer.commit();
    reader = DirectoryReader.open(dir);
  }
  //System.out.println("TEST: isNRT=" + isNRT);

  final int numFields = random.nextInt(4) + 3; // 3-6 fields
  final long[] fieldValues = new long[numFields];
  for (int i = 0; i < fieldValues.length; i++) {
    fieldValues[i] = 1;
  }

  int numRounds = atLeast(15);
  int docID = 0;
  for (int i = 0; i < numRounds; i++) {
    int numDocs = atLeast(5);
    //System.out.println("[" + Thread.currentThread().getName() + "]: round=" + i + ", numDocs=" + numDocs);
    for (int j = 0; j < numDocs; j++) {
      Document doc = new Document();
      doc.add(new StringField("id", "doc-" + docID, Store.NO));
      doc.add(new StringField("key", "all", Store.NO)); // update key
      // add all fields with their current value
      for (int f = 0; f < fieldValues.length; f++) {
        doc.add(new BinaryDocValuesField("f" + f, toBytes(fieldValues[f])));
      }
      writer.addDocument(doc);
      ++docID;
    }

    int fieldIdx = random.nextInt(fieldValues.length);
    String updateField = "f" + fieldIdx;
    //System.out.println("[" + Thread.currentThread().getName() + "]: updated field '" + updateField + "' to value " + fieldValues[fieldIdx]);
    writer.updateBinaryDocValue(new Term("key", "all"), updateField, toBytes(++fieldValues[fieldIdx]));

    if (random.nextDouble() < 0.2) {
      // might also delete an already deleted document, ok!
      int deleteDoc = random.nextInt(docID);
      writer.deleteDocuments(new Term("id", "doc-" + deleteDoc));
      //System.out.println("[" + Thread.currentThread().getName() + "]: deleted document: doc-" + deleteDoc);
    }

    // verify reader
    if (!isNRT) {
      writer.commit();
    }

    //System.out.println("[" + Thread.currentThread().getName() + "]: reopen reader: " + reader);
    DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
    assertNotNull(newReader);
    reader.close();
    reader = newReader;
    //System.out.println("[" + Thread.currentThread().getName() + "]: reopened reader: " + reader);

    // we delete at most one document per round
    assertTrue(reader.numDocs() > 0);
    for (LeafReaderContext context : reader.leaves()) {
      LeafReader r = context.reader();
      //System.out.println(((SegmentReader) r).getSegmentName());
      Bits liveDocs = r.getLiveDocs();
      for (int field = 0; field < fieldValues.length; field++) {
        String f = "f" + field;
        BinaryDocValues bdv = r.getBinaryDocValues(f);
        assertNotNull(bdv);
        int maxDoc = r.maxDoc();
        for (int doc = 0; doc < maxDoc; doc++) {
          if (liveDocs == null || liveDocs.get(doc)) {
            assertEquals(doc, bdv.advance(doc));
            assertEquals("invalid value for doc=" + doc + ", field=" + f + ", reader=" + r, fieldValues[field], getValue(bdv));
          }
        }
      }
    }
    //System.out.println();
  }

  writer.close();
  IOUtils.close(reader, dir);
}
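testManyReopensAndFields verifies the updated values across repeated reader reopens; the reopen itself is the standard DirectoryReader.openIfChanged pattern. The test can assertNotNull because it always adds documents before reopening; general-purpose code has to handle the null return, roughly like this sketch (reader is an already-open DirectoryReader):

  DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
  if (newReader != null) { // null means nothing changed since `reader` was opened
    reader.close();
    reader = newReader;
  }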
Use of org.apache.lucene.analysis.MockAnalyzer in project lucene-solr by apache.
From the class TestBinaryDocValuesUpdates, method testUpdateFewSegments:
public void testUpdateFewSegments() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  conf.setMaxBufferedDocs(2); // generate few segments
  conf.setMergePolicy(NoMergePolicy.INSTANCE); // prevent merges for this test
  IndexWriter writer = new IndexWriter(dir, conf);

  int numDocs = 10;
  long[] expectedValues = new long[numDocs];
  for (int i = 0; i < numDocs; i++) {
    writer.addDocument(doc(i));
    expectedValues[i] = i + 1;
  }
  writer.commit();

  // update few docs
  for (int i = 0; i < numDocs; i++) {
    if (random().nextDouble() < 0.4) {
      long value = (i + 1) * 2;
      writer.updateBinaryDocValue(new Term("id", "doc-" + i), "val", toBytes(value));
      expectedValues[i] = value;
    }
  }

  final DirectoryReader reader;
  if (random().nextBoolean()) {
    // not NRT
    writer.close();
    reader = DirectoryReader.open(dir);
  } else {
    // NRT
    reader = DirectoryReader.open(writer);
    writer.close();
  }

  for (LeafReaderContext context : reader.leaves()) {
    LeafReader r = context.reader();
    BinaryDocValues bdv = r.getBinaryDocValues("val");
    assertNotNull(bdv);
    for (int i = 0; i < r.maxDoc(); i++) {
      assertEquals(i, bdv.nextDoc());
      long expected = expectedValues[i + context.docBase];
      long actual = getValue(bdv);
      assertEquals(expected, actual);
    }
  }

  reader.close();
  dir.close();
}
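testUpdateFewSegments calls a doc(int) helper defined elsewhere in TestBinaryDocValuesUpdates; its body is not shown on this page. A plausible sketch that matches the "doc-<n>" ids, the "val" field, and the expectedValues[i] = i + 1 initialization used above (an assumption for illustration, not the verbatim helper):

  private Document doc(int id) {
    Document d = new Document();
    d.add(new StringField("id", "doc-" + id, Store.NO));
    d.add(new BinaryDocValuesField("val", toBytes(id + 1))); // initial value is id + 1
    return d;
  }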