Use of org.apache.lucene.index.IndexReader in project lucene-solr by apache.
The class TestFileSwitchDirectory, method testBasic.
/**
* Test if writing doc stores to disk and everything else to ram works.
*/
public void testBasic() throws IOException {
  Set<String> fileExtensions = new HashSet<>();
  fileExtensions.add(CompressingStoredFieldsWriter.FIELDS_EXTENSION);
  fileExtensions.add(CompressingStoredFieldsWriter.FIELDS_INDEX_EXTENSION);
  MockDirectoryWrapper primaryDir = new MockDirectoryWrapper(random(), new RAMDirectory());
  // only part of an index
  primaryDir.setCheckIndexOnClose(false);
  MockDirectoryWrapper secondaryDir = new MockDirectoryWrapper(random(), new RAMDirectory());
  // only part of an index
  secondaryDir.setCheckIndexOnClose(false);
  FileSwitchDirectory fsd = new FileSwitchDirectory(fileExtensions, primaryDir, secondaryDir, true);
  // for now we wire the default codec because we rely upon its specific impl
  IndexWriter writer = new IndexWriter(fsd, new IndexWriterConfig(new MockAnalyzer(random()))
      .setMergePolicy(newLogMergePolicy(false))
      .setCodec(TestUtil.getDefaultCodec())
      .setUseCompoundFile(false));
  TestIndexWriterReader.createIndexNoClose(true, "ram", writer);
  IndexReader reader = DirectoryReader.open(writer);
  assertEquals(100, reader.maxDoc());
  writer.commit();
  // we should see only fdx,fdt files here
  String[] files = primaryDir.listAll();
  assertTrue(files.length > 0);
  for (int x = 0; x < files.length; x++) {
    String ext = FileSwitchDirectory.getExtension(files[x]);
    assertTrue(fileExtensions.contains(ext));
  }
  files = secondaryDir.listAll();
  assertTrue(files.length > 0);
  // we should not see fdx,fdt files here
  for (int x = 0; x < files.length; x++) {
    String ext = FileSwitchDirectory.getExtension(files[x]);
    assertFalse(fileExtensions.contains(ext));
  }
  reader.close();
  writer.close();
  files = fsd.listAll();
  for (int i = 0; i < files.length; i++) {
    assertNotNull(files[i]);
  }
  fsd.close();
}
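Outside the test harness, the same routing idea is the typical use of FileSwitchDirectory: send a few file types one way and everything else the other. A minimal sketch under stated assumptions (the path is hypothetical, and "fdt"/"fdx" are the stored-fields extensions the codec constants above resolve to):

import java.nio.file.Paths;
import java.util.HashSet;
import java.util.Set;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.FileSwitchDirectory;
import org.apache.lucene.store.RAMDirectory;

public class FileSwitchSketch {

  // Route the stored-fields files to disk and keep everything else in heap.
  public static Directory openSplit() throws Exception {
    Set<String> primaryExtensions = new HashSet<>();
    primaryExtensions.add("fdt");
    primaryExtensions.add("fdx");
    Directory onDisk = FSDirectory.open(Paths.get("/tmp/stored-fields")); // hypothetical path
    Directory inHeap = new RAMDirectory();
    // the trailing 'true' closes both wrapped directories when the switch directory is closed
    return new FileSwitchDirectory(primaryExtensions, onDisk, inHeap, true);
  }
}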
Use of org.apache.lucene.index.IndexReader in project lucene-solr by apache.
The class TestMultiMMap, method assertChunking.
private void assertChunking(Random random, int chunkSize) throws Exception {
  Path path = createTempDir("mmap" + chunkSize);
  MMapDirectory mmapDir = new MMapDirectory(path, chunkSize);
  // we will map a lot, try to turn on the unmap hack
  if (MMapDirectory.UNMAP_SUPPORTED) {
    mmapDir.setUseUnmap(true);
  }
  MockDirectoryWrapper dir = new MockDirectoryWrapper(random, mmapDir);
  RandomIndexWriter writer = new RandomIndexWriter(random, dir,
      newIndexWriterConfig(new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
  Document doc = new Document();
  Field docid = newStringField("docid", "0", Field.Store.YES);
  Field junk = newStringField("junk", "", Field.Store.YES);
  doc.add(docid);
  doc.add(junk);
  int numDocs = 100;
  for (int i = 0; i < numDocs; i++) {
    docid.setStringValue("" + i);
    junk.setStringValue(TestUtil.randomUnicodeString(random));
    writer.addDocument(doc);
  }
  IndexReader reader = writer.getReader();
  writer.close();
  int numAsserts = atLeast(100);
  for (int i = 0; i < numAsserts; i++) {
    int docID = random.nextInt(numDocs);
    assertEquals("" + docID, reader.document(docID).get("docid"));
  }
  reader.close();
  dir.close();
}
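The chunkSize argument caps the size of each memory-mapped region, so files larger than the chunk span several mapped buffers; reading random docIDs back is what forces reads across those buffer boundaries. For context, a standalone sketch (hypothetical path) of the same MMapDirectory setup:

import java.nio.file.Paths;

import org.apache.lucene.store.MMapDirectory;

public class MMapChunkSketch {

  // Open an MMapDirectory that maps files in 1 MiB pieces; a file larger
  // than the chunk size is served through several mapped buffers.
  public static MMapDirectory openWithSmallChunks() throws Exception {
    MMapDirectory dir = new MMapDirectory(Paths.get("/tmp/idx"), 1 << 20); // hypothetical path
    if (MMapDirectory.UNMAP_SUPPORTED) {
      dir.setUseUnmap(true); // eagerly unmap buffers on close instead of waiting for GC
    }
    return dir;
  }
}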
Use of org.apache.lucene.index.IndexReader in project lucene-solr by apache.
The class TestRAMDirectory, method testRAMDirectory.
public void testRAMDirectory() throws IOException {
  Path indexDir = buildIndex();
  FSDirectory dir = new SimpleFSDirectory(indexDir);
  MockDirectoryWrapper ramDir = new MockDirectoryWrapper(random(), new RAMDirectory(dir, newIOContext(random())));
  // close the underlying directory
  dir.close();
  // check size
  assertEquals(ramDir.sizeInBytes(), ramDir.getRecomputedSizeInBytes());
  // open a reader to test the document count
  IndexReader reader = DirectoryReader.open(ramDir);
  assertEquals(DOCS_TO_ADD, reader.numDocs());
  // open a searcher to check that all docs are there
  IndexSearcher searcher = newSearcher(reader);
  // fetch every document
  for (int i = 0; i < DOCS_TO_ADD; i++) {
    Document doc = searcher.doc(i);
    assertTrue(doc.getField("content") != null);
  }
  // cleanup
  reader.close();
}
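The RAMDirectory(FSDirectory, IOContext) copy constructor used above snapshots every file of a committed index into heap, which is why the source directory can be closed right after. A minimal sketch of that pattern (path hypothetical):

import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.RAMDirectory;

public class RamCopySketch {

  // Copy a committed on-disk index into heap; the RAMDirectory owns its own
  // copy of every file, so the source directory can be closed immediately.
  public static DirectoryReader openInMemory() throws Exception {
    try (FSDirectory disk = FSDirectory.open(Paths.get("/tmp/idx"))) { // hypothetical path
      RAMDirectory ram = new RAMDirectory(disk, IOContext.READONCE);
      return DirectoryReader.open(ram);
    }
  }
}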
Use of org.apache.lucene.index.IndexReader in project lucene-solr by apache.
The class TestSimilarity2, method testEmptyField.
/** similar to the above, but ORs the query with a real field */
public void testEmptyField() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(newTextField("foo", "bar", Field.Store.NO));
  iw.addDocument(doc);
  IndexReader ir = iw.getReader();
  iw.close();
  IndexSearcher is = newSearcher(ir);
  for (Similarity sim : sims) {
    is.setSimilarity(sim);
    BooleanQuery.Builder query = new BooleanQuery.Builder();
    query.add(new TermQuery(new Term("foo", "bar")), BooleanClause.Occur.SHOULD);
    query.add(new TermQuery(new Term("bar", "baz")), BooleanClause.Occur.SHOULD);
    assertEquals(1, is.search(query.build(), 10).totalHits);
  }
  ir.close();
  dir.close();
}
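Here sims is the list of Similarity implementations built in the test class's setup. For context, a hypothetical standalone sketch of the same idea, swapping two stock similarities on a searcher for a SHOULD/SHOULD disjunction where one clause targets a field that does not exist:

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.similarities.BM25Similarity;
import org.apache.lucene.search.similarities.ClassicSimilarity;
import org.apache.lucene.search.similarities.Similarity;

public class SimilaritySwapSketch {

  // Run one SHOULD/SHOULD disjunction under two stock similarities; the
  // clause on the nonexistent field matches nothing and must not break scoring.
  static void searchUnderEachSim(DirectoryReader reader) throws Exception {
    BooleanQuery.Builder b = new BooleanQuery.Builder();
    b.add(new TermQuery(new Term("foo", "bar")), BooleanClause.Occur.SHOULD);
    b.add(new TermQuery(new Term("no_such_field", "baz")), BooleanClause.Occur.SHOULD);
    Query query = b.build();
    IndexSearcher searcher = new IndexSearcher(reader);
    for (Similarity sim : new Similarity[] {new ClassicSimilarity(), new BM25Similarity()}) {
      searcher.setSimilarity(sim);
      System.out.println(sim + ": " + searcher.search(query, 10).totalHits);
    }
  }
}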
Use of org.apache.lucene.index.IndexReader in project lucene-solr by apache.
The class TestSimilarity2, method testEmptyIndex.
/** because of stupid things like querynorm, it's possible we computeStats on a field that doesn't exist at all.
 * Test this against a totally empty index, to make sure sims handle it.
 */
public void testEmptyIndex() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
  IndexReader ir = iw.getReader();
  iw.close();
  IndexSearcher is = newSearcher(ir);
  for (Similarity sim : sims) {
    is.setSimilarity(sim);
    assertEquals(0, is.search(new TermQuery(new Term("foo", "bar")), 10).totalHits);
  }
  ir.close();
  dir.close();
}
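The same behavior holds outside RandomIndexWriter: any reader over a freshly committed, empty index can be searched and simply returns zero hits. A minimal self-contained sketch:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.RAMDirectory;

public class EmptyIndexSketch {

  public static void main(String[] args) throws Exception {
    RAMDirectory dir = new RAMDirectory();
    // opening and closing a writer commits an empty segments file
    new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer())).close();
    try (DirectoryReader reader = DirectoryReader.open(dir)) {
      IndexSearcher searcher = new IndexSearcher(reader);
      long hits = searcher.search(new TermQuery(new Term("foo", "bar")), 10).totalHits;
      System.out.println(hits); // 0 hits, no exception
    }
  }
}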