Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by apache.
From the class TestDemoParallelLeafReader, method getReindexerSameDVField.
/** Schema change by changing how the same "number" DV field is indexed. */
private ReindexingReader getReindexerSameDVField(Path root, final AtomicLong currentSchemaGen, final AtomicLong mergingSchemaGen) throws IOException {
  return new ReindexingReader(root) {
    @Override
    protected IndexWriterConfig getIndexWriterConfig() throws IOException {
      IndexWriterConfig iwc = newIndexWriterConfig();
      TieredMergePolicy tmp = new TieredMergePolicy();
      // We write tiny docs, so we need a tiny floor to avoid O(N^2) merging:
      tmp.setFloorSegmentMB(.01);
      iwc.setMergePolicy(tmp);
      if (TEST_NIGHTLY) {
        // During nightly tests, we might use too many files if we aren't careful
        iwc.setUseCompoundFile(true);
      }
      return iwc;
    }

    @Override
    protected Directory openDirectory(Path path) throws IOException {
      MockDirectoryWrapper dir = newMockFSDirectory(path);
      dir.setUseSlowOpenClosers(false);
      dir.setThrottling(Throttling.NEVER);
      return dir;
    }
    @Override
    protected void reindex(long oldSchemaGen, long newSchemaGen, LeafReader reader, Directory parallelDir) throws IOException {
      IndexWriterConfig iwc = newIndexWriterConfig();
      // The order of our docIDs must precisely match the incoming reader's:
      iwc.setMergePolicy(new LogByteSizeMergePolicy());
      IndexWriter w = new IndexWriter(parallelDir, iwc);
      int maxDoc = reader.maxDoc();
      if (oldSchemaGen <= 0) {
        // Must slowly parse the stored field into a new doc values field:
        for (int i = 0; i < maxDoc; i++) {
          // TODO: is this still O(blockSize^2)?
          Document oldDoc = reader.document(i);
          Document newDoc = new Document();
          long value = Long.parseLong(oldDoc.get("text").split(" ")[1]);
          newDoc.add(new NumericDocValuesField("number", newSchemaGen * value));
          newDoc.add(new LongPoint("number", value));
          w.addDocument(newDoc);
        }
      } else {
        // Just carry over doc values from the previous generation's field:
        NumericDocValues oldValues = reader.getNumericDocValues("number");
        assertNotNull("oldSchemaGen=" + oldSchemaGen, oldValues);
        for (int i = 0; i < maxDoc; i++) {
          // TODO: is this still O(blockSize^2)?
          Document oldDoc = reader.document(i);
          Document newDoc = new Document();
          assertEquals(i, oldValues.nextDoc());
          newDoc.add(new NumericDocValuesField("number", newSchemaGen * (oldValues.longValue() / oldSchemaGen)));
          w.addDocument(newDoc);
        }
      }
      w.forceMerge(1);
      w.close();
    }
    @Override
    protected long getCurrentSchemaGen() {
      return currentSchemaGen.get();
    }

    @Override
    protected long getMergingSchemaGen() {
      return mergingSchemaGen.get();
    }
    @Override
    protected void checkParallelReader(LeafReader r, LeafReader parR, long schemaGen) throws IOException {
      if (DEBUG) {
        System.out.println(Thread.currentThread().getName() + ": TEST: now check parallel number DVs r=" + r + " parR=" + parR);
      }
      NumericDocValues numbers = parR.getNumericDocValues("number");
      if (numbers == null) {
        return;
      }
      int maxDoc = r.maxDoc();
      boolean failed = false;
      for (int i = 0; i < maxDoc; i++) {
        Document oldDoc = r.document(i);
        long value = Long.parseLong(oldDoc.get("text").split(" ")[1]);
        value *= schemaGen;
        assertEquals(i, numbers.nextDoc());
        if (value != numbers.longValue()) {
          System.out.println("FAIL: docID=" + i + " " + oldDoc + " value=" + value + " number=" + numbers.longValue() + " numbers=" + numbers);
          failed = true;
        } else if (failed) {
          System.out.println("OK: docID=" + i + " " + oldDoc + " value=" + value + " number=" + numbers.longValue());
        }
      }
      assertFalse("FAILED r=" + r, failed);
    }
  };
}
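The value transform in reindex() above is easy to miss: schema generation g stores g * value in the "number" DV field, so carrying a value forward from an old generation to a new one means dividing out the old generation before multiplying by the new one. A minimal standalone sketch of that arithmetic (plain Java; the class name and the concrete numbers are illustrative, not taken from the test):

public class SchemaGenTransformDemo {
  public static void main(String[] args) {
    long rawValue = 17;      // value parsed from the stored "text" field
    long oldSchemaGen = 3;   // generation the existing DV field was written with
    long newSchemaGen = 5;   // generation we are reindexing to

    long storedOld = oldSchemaGen * rawValue;                  // what generation 3 wrote: 51
    long carried = newSchemaGen * (storedOld / oldSchemaGen);  // divide out 3, re-encode with 5: 85

    // Carrying the value forward is equivalent to reindexing the raw value from scratch:
    System.out.println(carried == newSchemaGen * rawValue);    // prints true
  }
}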
Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by apache.
From the class TestConcurrentMergeScheduler, method testNoStallMergeThreads.
// LUCENE-6197
public void testNoStallMergeThreads() throws Exception {
  MockDirectoryWrapper dir = newMockDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
  iwc.setMergePolicy(NoMergePolicy.INSTANCE);
  iwc.setMaxBufferedDocs(2);
  IndexWriter w = new IndexWriter(dir, iwc);
  for (int i = 0; i < 1000; i++) {
    Document doc = new Document();
    doc.add(newStringField("field", "" + i, Field.Store.YES));
    w.addDocument(doc);
  }
  w.close();

  iwc = newIndexWriterConfig(new MockAnalyzer(random()));
  AtomicBoolean failed = new AtomicBoolean();
  ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler() {
    @Override
    protected void doStall() {
      if (Thread.currentThread().getName().startsWith("Lucene Merge Thread")) {
        failed.set(true);
      }
      super.doStall();
    }
  };
  cms.setMaxMergesAndThreads(2, 1);
  iwc.setMergeScheduler(cms);
  iwc.setMaxBufferedDocs(2);
  w = new IndexWriter(dir, iwc);
  w.forceMerge(1);
  w.close();
  dir.close();
  assertFalse(failed.get());
}
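newMockDirectory() and newMockFSDirectory() used throughout these examples are helpers from LuceneTestCase in the Lucene test framework. Outside that framework, a similar wrapper can be built by constructing MockDirectoryWrapper directly over a delegate Directory. A rough sketch, assuming a Lucene 7.x classpath; the helper name and the RAMDirectory delegate are illustrative choices, not what the framework does internally:

import java.util.Random;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;

public class MockDirectorySketch {
  // Wrap an in-memory delegate so MockDirectoryWrapper's extra checks
  // (unclosed files, unsynced writes, simulated crashes) apply to the test.
  static MockDirectoryWrapper openMockDirectory(Random random) {
    MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory());
    dir.setThrottling(MockDirectoryWrapper.Throttling.NEVER); // don't simulate slow I/O
    return dir;
  }
}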
Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by apache.
From the class TestConcurrentMergeScheduler, method testTotalBytesSize.
public void testTotalBytesSize() throws Exception {
  Directory d = newDirectory();
  if (d instanceof MockDirectoryWrapper) {
    ((MockDirectoryWrapper) d).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
  }
  IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
  iwc.setMaxBufferedDocs(5);
  CountDownLatch atLeastOneMerge = new CountDownLatch(1);
  iwc.setMergeScheduler(new TrackingCMS(atLeastOneMerge));
  if (TestUtil.getPostingsFormat("id").equals("SimpleText")) {
    // SimpleText would be far too slow for this many documents; force the default postings format instead:
    iwc.setCodec(TestUtil.alwaysPostingsFormat(TestUtil.getDefaultPostingsFormat()));
  }
  IndexWriter w = new IndexWriter(d, iwc);
  for (int i = 0; i < 1000; i++) {
    Document doc = new Document();
    doc.add(new StringField("id", "" + i, Field.Store.NO));
    w.addDocument(doc);
    if (random().nextBoolean()) {
      w.deleteDocuments(new Term("id", "" + random().nextInt(i + 1)));
    }
  }
  atLeastOneMerge.await();
  assertTrue(((TrackingCMS) w.getConfig().getMergeScheduler()).totMergedBytes != 0);
  w.close();
  d.close();
}
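TrackingCMS above is a small helper defined elsewhere in TestConcurrentMergeScheduler and is not shown on this page. A plausible minimal reconstruction, assuming the Lucene 7.x doMerge(IndexWriter, MergePolicy.OneMerge) signature (newer releases pass a MergeSource instead), accumulates merged bytes and releases the latch once the first merge runs:

import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import org.apache.lucene.index.ConcurrentMergeScheduler;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.MergePolicy;

// Reconstruction for illustration; the class and field names mirror the usage in the test above.
class TrackingCMS extends ConcurrentMergeScheduler {
  long totMergedBytes;
  private final CountDownLatch atLeastOneMerge;

  TrackingCMS(CountDownLatch atLeastOneMerge) {
    this.atLeastOneMerge = atLeastOneMerge;
  }

  @Override
  protected void doMerge(IndexWriter writer, MergePolicy.OneMerge merge) throws IOException {
    totMergedBytes += merge.totalBytesSize(); // record how much segment data this merge covers
    atLeastOneMerge.countDown();              // signal that at least one merge has started
    super.doMerge(writer, merge);             // let ConcurrentMergeScheduler run the merge
  }
}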
Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by apache.
From the class TestDemoParallelLeafReader, method getReindexer.
private ReindexingReader getReindexer(Path root) throws IOException {
  return new ReindexingReader(root) {
    @Override
    protected IndexWriterConfig getIndexWriterConfig() throws IOException {
      IndexWriterConfig iwc = newIndexWriterConfig();
      TieredMergePolicy tmp = new TieredMergePolicy();
      // We write tiny docs, so we need a tiny floor to avoid O(N^2) merging:
      tmp.setFloorSegmentMB(.01);
      iwc.setMergePolicy(tmp);
      return iwc;
    }

    @Override
    protected Directory openDirectory(Path path) throws IOException {
      MockDirectoryWrapper dir = newMockFSDirectory(path);
      dir.setUseSlowOpenClosers(false);
      dir.setThrottling(Throttling.NEVER);
      return dir;
    }

    @Override
    protected void reindex(long oldSchemaGen, long newSchemaGen, LeafReader reader, Directory parallelDir) throws IOException {
      IndexWriterConfig iwc = newIndexWriterConfig();
      // The order of our docIDs must precisely match the incoming reader's:
      iwc.setMergePolicy(new LogByteSizeMergePolicy());
      IndexWriter w = new IndexWriter(parallelDir, iwc);
      int maxDoc = reader.maxDoc();
      // Slowly parse the stored field into a new doc values field:
      for (int i = 0; i < maxDoc; i++) {
        // TODO: is this still O(blockSize^2)?
        Document oldDoc = reader.document(i);
        Document newDoc = new Document();
        long value = Long.parseLong(oldDoc.get("text").split(" ")[1]);
        newDoc.add(new NumericDocValuesField("number", value));
        newDoc.add(new LongPoint("number", value));
        w.addDocument(newDoc);
      }
      w.forceMerge(1);
      w.close();
    }

    @Override
    protected long getCurrentSchemaGen() {
      return 0;
    }
  };
}
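reindex() above adds the same value under "number" twice, once as a NumericDocValuesField and once as a LongPoint. A short sketch of why both are useful once the parallel reader is in place; the searcher and the range bounds are illustrative, and the searcher is assumed to wrap the ParallelLeafReader that ReindexingReader builds:

import java.io.IOException;
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;

public class NumberFieldSearchSketch {
  // The LongPoint enables range and exact-match queries; the NumericDocValuesField enables sorting.
  static TopDocs searchByNumber(IndexSearcher searcher) throws IOException {
    Query inRange = LongPoint.newRangeQuery("number", 10L, 100L);
    Sort byNumber = new Sort(new SortField("number", SortField.Type.LONG));
    return searcher.search(inRange, 10, byNumber);
  }
}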
Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by apache.
From the class TestCrash, method testCrashAfterClose.
public void testCrashAfterClose() throws IOException {
  IndexWriter writer = initIndex(random(), false);
  MockDirectoryWrapper dir = (MockDirectoryWrapper) writer.getDirectory();

  writer.close();
  dir.crash();

  /*
  String[] l = dir.list();
  Arrays.sort(l);
  for(int i=0;i<l.length;i++)
    System.out.println("file " + i + " = " + l[i] + " " + dir.fileLength(l[i]) + " bytes");
  */

  IndexReader reader = DirectoryReader.open(dir);
  assertEquals(157, reader.numDocs());
  reader.close();
  dir.close();
}