Use of org.apache.lucene.store.RAMDirectory in project lucene-solr by apache.
From the class TestMergeSchedulerExternal, the method testCustomMergeScheduler:
public void testCustomMergeScheduler() throws Exception {
  // We don't really need to execute anything, just to make sure the custom MS
  // compiles. But ensure that it can be used as well, e.g., no other hidden
  // dependencies or something. Therefore, don't use any random API!
  Directory dir = new RAMDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(null);
  conf.setMergeScheduler(new ReportingMergeScheduler());
  IndexWriter writer = new IndexWriter(dir, conf);
  writer.addDocument(new Document());
  // trigger flush
  writer.commit();
  writer.addDocument(new Document());
  // trigger flush
  writer.commit();
  writer.forceMerge(1);
  writer.close();
  dir.close();
}
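The ReportingMergeScheduler referenced above is defined elsewhere in the test class and is not shown in this excerpt. As a rough, hypothetical sketch of what such a custom scheduler can look like against the pre-9.0 MergeScheduler API used by lucene-solr (the class name LoggingMergeScheduler and the log message are illustrative, not the test's actual code):

import java.io.IOException;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.index.MergeScheduler;
import org.apache.lucene.index.MergeTrigger;

// Hypothetical minimal scheduler: runs every pending merge on the calling
// thread and logs it; not the actual ReportingMergeScheduler from the test.
class LoggingMergeScheduler extends MergeScheduler {

  @Override
  public void merge(IndexWriter writer, MergeTrigger trigger, boolean newMergesFound) throws IOException {
    MergePolicy.OneMerge merge;
    // Drain the writer's pending-merge queue on the current thread.
    while ((merge = writer.getNextMerge()) != null) {
      System.out.println("executing merge of " + merge.segments.size() + " segments");
      writer.merge(merge);
    }
  }

  @Override
  public void close() throws IOException {
    // no threads or resources to release in this single-threaded sketch
  }
}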
Use of org.apache.lucene.store.RAMDirectory in project lucene-solr by apache.
From the class TestDirectoryReaderReopen, the method testNPEAfterInvalidReindex1:
public void testNPEAfterInvalidReindex1() throws Exception {
  Directory dir = new RAMDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
  Document doc = new Document();
  doc.add(newStringField("id", "id", Field.Store.NO));
  w.addDocument(doc);
  doc = new Document();
  doc.add(newStringField("id", "id2", Field.Store.NO));
  w.addDocument(doc);
  w.deleteDocuments(new Term("id", "id"));
  w.commit();
  w.close();
  // Open reader w/ one segment w/ 2 docs, 1 deleted:
  DirectoryReader r = DirectoryReader.open(dir);
  // Blow away the index:
  for (String fileName : dir.listAll()) {
    dir.deleteFile(fileName);
  }
  w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
  doc = new Document();
  doc.add(newStringField("id", "id", Field.Store.NO));
  doc.add(new NumericDocValuesField("ndv", 13));
  w.addDocument(doc);
  doc = new Document();
  doc.add(newStringField("id", "id2", Field.Store.NO));
  w.addDocument(doc);
  w.commit();
  doc = new Document();
  doc.add(newStringField("id", "id2", Field.Store.NO));
  w.addDocument(doc);
  w.updateNumericDocValue(new Term("id", "id"), "ndv", 17L);
  w.commit();
  w.close();
  expectThrows(IllegalStateException.class, () -> {
    DirectoryReader.openIfChanged(r);
  });
  r.close();
  dir.close();
}
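For contrast with the failure asserted above: the normal openIfChanged contract is to return a refreshed reader if the index has changed, or null if the current reader is still up to date; the test forces an IllegalStateException by recreating the index underneath the open reader. The usual refresh idiom (a generic sketch, not part of this test) is:

DirectoryReader reader = DirectoryReader.open(dir);
// ... later, after the index may have changed ...
DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
if (newReader != null) {
  // a changed index produced a fresh reader; release the stale one
  reader.close();
  reader = newReader;
}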
Use of org.apache.lucene.store.RAMDirectory in project lucene-solr by apache.
From the class TestDirectoryReaderReopen, the method testNPEAfterInvalidReindex2:
public void testNPEAfterInvalidReindex2() throws Exception {
  Directory dir = new RAMDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
  Document doc = new Document();
  doc.add(newStringField("id", "id", Field.Store.NO));
  w.addDocument(doc);
  doc = new Document();
  doc.add(newStringField("id", "id2", Field.Store.NO));
  w.addDocument(doc);
  w.deleteDocuments(new Term("id", "id"));
  w.commit();
  w.close();
  // Open reader w/ one segment w/ 2 docs, 1 deleted:
  DirectoryReader r = DirectoryReader.open(dir);
  // Blow away the index:
  for (String name : dir.listAll()) {
    dir.deleteFile(name);
  }
  w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
  doc = new Document();
  doc.add(newStringField("id", "id", Field.Store.NO));
  doc.add(new NumericDocValuesField("ndv", 13));
  w.addDocument(doc);
  w.commit();
  doc = new Document();
  doc.add(newStringField("id", "id2", Field.Store.NO));
  w.addDocument(doc);
  w.commit();
  w.close();
  expectThrows(IllegalStateException.class, () -> {
    DirectoryReader.openIfChanged(r);
  });
  r.close();
  dir.close();
}
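Note that RAMDirectory was deprecated in Lucene 8.x and removed in 9.0. If you are adapting these examples to a newer Lucene, the drop-in replacement for an in-memory index is ByteBuffersDirectory; the rest of each example is unchanged:

import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

// Lucene 8.0+ heap-backed Directory; replaces new RAMDirectory()
Directory dir = new ByteBuffersDirectory();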
Use of org.apache.lucene.store.RAMDirectory in project lucene-solr by apache.
From the class TestLazyProxSkipping, the method createIndex:
private void createIndex(int numHits) throws IOException {
  int numDocs = 500;
  final Analyzer analyzer = new Analyzer() {
    @Override
    public TokenStreamComponents createComponents(String fieldName) {
      return new TokenStreamComponents(new MockTokenizer(MockTokenizer.WHITESPACE, true));
    }
  };
  Directory directory = new SeekCountingDirectory(new RAMDirectory());
  // note: test explicitly disables payloads
  IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(analyzer).setMaxBufferedDocs(10).setMergePolicy(newLogMergePolicy(false)));
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    String content;
    if (i % (numDocs / numHits) == 0) {
      // add a document that matches the query "term1 term2"
      content = this.term1 + " " + this.term2;
    } else if (i % 15 == 0) {
      // add a document that only contains term1
      content = this.term1 + " " + this.term1;
    } else {
      // add a document that contains term2 but not term1
      content = this.term3 + " " + this.term2;
    }
    doc.add(newTextField(this.field, content, Field.Store.YES));
    writer.addDocument(doc);
  }
  // make sure the index has only a single segment
  writer.forceMerge(1);
  writer.close();
  LeafReader reader = getOnlyLeafReader(DirectoryReader.open(directory));
  this.searcher = newSearcher(reader);
}
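SeekCountingDirectory is a test-local wrapper, not shown in this excerpt, that counts seeks on the files it opens. One hypothetical way to build such a wrapper on top of org.apache.lucene.store.FilterDirectory (all names here are illustrative and this is not the test's actual implementation):

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;

// Hypothetical seek-counting wrapper; the test's real SeekCountingDirectory
// is defined elsewhere and may differ.
class CountingDirectory extends FilterDirectory {
  final AtomicLong seekCount = new AtomicLong();

  CountingDirectory(Directory in) {
    super(in);
  }

  @Override
  public IndexInput openInput(String name, IOContext context) throws IOException {
    return wrap(super.openInput(name, context));
  }

  private IndexInput wrap(IndexInput in) {
    return new IndexInput("counting(" + in + ")") {
      @Override
      public void seek(long pos) throws IOException {
        seekCount.incrementAndGet(); // the one piece of added behavior
        in.seek(pos);
      }
      @Override public long getFilePointer() { return in.getFilePointer(); }
      @Override public long length() { return in.length(); }
      @Override public void close() throws IOException { in.close(); }
      @Override public byte readByte() throws IOException { return in.readByte(); }
      @Override public void readBytes(byte[] b, int offset, int len) throws IOException {
        in.readBytes(b, offset, len);
      }
      @Override public IndexInput slice(String desc, long offset, long length) throws IOException {
        return wrap(in.slice(desc, offset, length));
      }
      @Override public IndexInput clone() { return wrap(in.clone()); }
    };
  }
}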
Use of org.apache.lucene.store.RAMDirectory in project lucene-solr by apache.
From the class TestIndexWriterReader, the method testTooManySegments:
/**
 * Make sure that if all we do is open an NRT reader against the
 * writer, we don't see merge starvation.
 */
public void testTooManySegments() throws Exception {
  Directory dir = getAssertNoDeletesDirectory(new RAMDirectory());
  // Don't use newIndexWriterConfig, because we need a
  // "sane" mergePolicy:
  IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
  IndexWriter w = new IndexWriter(dir, iwc);
  // Create 500 segments:
  for (int i = 0; i < 500; i++) {
    Document doc = new Document();
    doc.add(newStringField("id", "" + i, Field.Store.NO));
    w.addDocument(doc);
    IndexReader r = DirectoryReader.open(w);
    // Make sure segment count never exceeds 100:
    assertTrue(r.leaves().size() < 100);
    r.close();
  }
  w.close();
  dir.close();
}
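Each DirectoryReader.open(writer) call in the loop flushes the buffered document into a new segment, so without concurrent merging the reader would quickly accumulate hundreds of segments; the assertion verifies that merging keeps up. Outside of tests, the same NRT mechanism is typically combined with the writer-aware openIfChanged overload. A generic sketch, assuming a writer and doc as in the examples above:

// Open an NRT reader that sees uncommitted changes from the writer.
DirectoryReader nrtReader = DirectoryReader.open(writer);
writer.addDocument(doc);
// Refresh against the writer: returns a new reader, or null if nothing changed.
DirectoryReader refreshed = DirectoryReader.openIfChanged(nrtReader, writer);
if (refreshed != null) {
  nrtReader.close();
  nrtReader = refreshed;
}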