Use of org.apache.lucene.store.RAMDirectory in project lucene-solr by apache.
The class TestSizeBoundedForceMerge, method testAllSegmentsSmall:
public void testAllSegmentsSmall() throws Exception {
  Directory dir = new RAMDirectory();
  IndexWriterConfig conf = newWriterConfig();
  IndexWriter writer = new IndexWriter(dir, conf);
  // Create four segments of 3 documents each.
  addDocs(writer, 3);
  addDocs(writer, 3);
  addDocs(writer, 3);
  addDocs(writer, 3);
  writer.close();
  conf = newWriterConfig();
  LogMergePolicy lmp = new LogDocMergePolicy();
  lmp.setMaxMergeDocs(3);
  conf.setMergePolicy(lmp);
  writer = new IndexWriter(dir, conf);
  writer.forceMerge(1);
  writer.close();
  // Every segment is within the 3-doc limit, so forceMerge collapses them all into one.
  SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
  assertEquals(1, sis.size());
}
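The helpers newWriterConfig() and addDocs() are not shown in this excerpt. Below is a minimal sketch of what they plausibly look like, inferred only from how the test calls them; the bodies are an assumption, not the project's actual code. The key idea is that merging must be disabled during setup so that each addDocs call leaves behind exactly one segment.

// Sketch only: helper bodies are assumptions inferred from the calls above.
private static IndexWriterConfig newWriterConfig() {
  IndexWriterConfig conf = new IndexWriterConfig(null);
  // Disable merging so the segment layout stays under the test's control.
  conf.setMergePolicy(NoMergePolicy.INSTANCE);
  return conf;
}

private static void addDocs(IndexWriter writer, int numDocs) throws IOException {
  for (int i = 0; i < numDocs; i++) {
    writer.addDocument(new Document());
  }
  // Commit so the documents just added form their own segment.
  writer.commit();
}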
Use of org.apache.lucene.store.RAMDirectory in project lucene-solr by apache.
The class TestPointValues, method doTestMergedStats:
private void doTestMergedStats() throws IOException {
  final int numDims = TestUtil.nextInt(random(), 1, 8);
  final int numBytesPerDim = TestUtil.nextInt(random(), 1, 16);
  Directory dir = new RAMDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
  final int numDocs = TestUtil.nextInt(random(), 10, 20);
  for (int i = 0; i < numDocs; ++i) {
    Document doc = new Document();
    final int numPoints = random().nextInt(3);
    for (int j = 0; j < numPoints; ++j) {
      doc.add(new BinaryPoint("field", randomBinaryValue(numDims, numBytesPerDim)));
    }
    w.addDocument(doc);
    if (random().nextBoolean()) {
      // Occasionally open (and discard) an NRT reader to force a segment flush.
      DirectoryReader.open(w).close();
    }
  }
  // reader1 sees the multi-segment index; reader2 sees the index after forceMerge(1).
  final IndexReader reader1 = DirectoryReader.open(w);
  w.forceMerge(1);
  final IndexReader reader2 = DirectoryReader.open(w);
  // The merged segment's point stats must agree with the stats aggregated
  // across the original segments.
  final PointValues expected = getOnlyLeafReader(reader2).getPointValues("field");
  if (expected == null) {
    assertNull(PointValues.getMinPackedValue(reader1, "field"));
    assertNull(PointValues.getMaxPackedValue(reader1, "field"));
    assertEquals(0, PointValues.getDocCount(reader1, "field"));
    assertEquals(0, PointValues.size(reader1, "field"));
  } else {
    assertArrayEquals(expected.getMinPackedValue(), PointValues.getMinPackedValue(reader1, "field"));
    assertArrayEquals(expected.getMaxPackedValue(), PointValues.getMaxPackedValue(reader1, "field"));
    assertEquals(expected.getDocCount(), PointValues.getDocCount(reader1, "field"));
    assertEquals(expected.size(), PointValues.size(reader1, "field"));
  }
  IOUtils.close(w, reader1, reader2, dir);
}
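The randomBinaryValue helper is also not shown. A minimal sketch consistent with BinaryPoint's byte[][] varargs constructor follows; the body is an assumption:

// Sketch only: one random byte[] per dimension, matching BinaryPoint(String, byte[]...).
private byte[][] randomBinaryValue(int numDims, int numBytesPerDim) {
  byte[][] packed = new byte[numDims][];
  for (int dim = 0; dim < numDims; ++dim) {
    packed[dim] = new byte[numBytesPerDim];
    random().nextBytes(packed[dim]);
  }
  return packed;
}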
Use of org.apache.lucene.store.RAMDirectory in project lucene-solr by apache.
The class TestPointValues, method testCheckIndexIncludesPoints:
public void testCheckIndexIncludesPoints() throws Exception {
  Directory dir = new RAMDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
  Document doc = new Document();
  doc.add(new IntPoint("int1", 17));
  w.addDocument(doc);
  doc = new Document();
  doc.add(new IntPoint("int1", 44));
  doc.add(new IntPoint("int2", -17));
  w.addDocument(doc);
  w.close();
  ByteArrayOutputStream output = new ByteArrayOutputStream();
  CheckIndex.Status status = TestUtil.checkIndex(dir, false, true, output);
  assertEquals(1, status.segmentInfos.size());
  CheckIndex.Status.SegmentInfoStatus segStatus = status.segmentInfos.get(0);
  // In total, 3 point values were indexed:
  assertEquals(3, segStatus.pointsStatus.totalValuePoints);
  // ... across 2 fields:
  assertEquals(2, segStatus.pointsStatus.totalValueFields);
  // Make sure CheckIndex in fact declares that it is testing points!
  assertTrue(output.toString(IOUtils.UTF_8).contains("test: points..."));
  dir.close();
}
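TestUtil.checkIndex is a thin test wrapper around the CheckIndex tool; outside of a test, CheckIndex can be driven directly. A minimal sketch, assuming an index at a hypothetical path and a surrounding method that declares throws Exception:

// Sketch only: the path is a placeholder.
try (Directory indexDir = FSDirectory.open(Paths.get("/path/to/index"));
     CheckIndex checker = new CheckIndex(indexDir)) {
  // Print per-test progress ("test: points...", etc.) as the check runs.
  checker.setInfoStream(System.out);
  CheckIndex.Status status = checker.checkIndex();
  if (!status.clean) {
    // status.segmentInfos describes per-segment problems, including pointsStatus.
  }
}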
Use of org.apache.lucene.store.RAMDirectory in project lucene-solr by apache.
The class TestSizeBoundedForceMerge, method testNumDocsLimit:
public void testNumDocsLimit() throws Exception {
  // Tests that the max merge docs constraint is applied during forceMerge.
  Directory dir = new RAMDirectory();
  // Prepare an index w/ several small segments and a large one.
  IndexWriterConfig conf = newWriterConfig();
  IndexWriter writer = new IndexWriter(dir, conf);
  addDocs(writer, 3);
  addDocs(writer, 3);
  addDocs(writer, 5);
  addDocs(writer, 3);
  addDocs(writer, 3);
  addDocs(writer, 3);
  addDocs(writer, 3);
  writer.close();
  conf = newWriterConfig();
  LogMergePolicy lmp = new LogDocMergePolicy();
  lmp.setMaxMergeDocs(3);
  conf.setMergePolicy(lmp);
  writer = new IndexWriter(dir, conf);
  writer.forceMerge(1);
  writer.close();
  // Only 3 segments should remain, because the 5-doc segment exceeds the
  // doc-count limit and cannot participate in any merge.
  SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
  assertEquals(3, sis.size());
}
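LogDocMergePolicy bounds forced merges by document count; LogByteSizeMergePolicy is the byte-size analogue. A minimal sketch of the equivalent configuration (the 1 MB threshold is an arbitrary example value, not from the test):

// Sketch only: size-bounded forceMerge by segment bytes instead of doc count.
LogByteSizeMergePolicy bsmp = new LogByteSizeMergePolicy();
// Segments larger than ~1 MB are left untouched during forceMerge.
bsmp.setMaxMergeMBForForcedMerge(1.0);
IndexWriterConfig sizeConf = new IndexWriterConfig(null).setMergePolicy(bsmp);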
Use of org.apache.lucene.store.RAMDirectory in project lucene-solr by apache.
The class TestMultiPhraseQuery, method testZeroPosIncr:
public void testZeroPosIncr() throws IOException {
  Directory dir = new RAMDirectory();
  // Three tokens: "a" at position 0, with "b" and "c" stacked on the same
  // position via zero position increments.
  final Token[] tokens = new Token[3];
  tokens[0] = new Token();
  tokens[0].append("a");
  tokens[0].setPositionIncrement(1);
  tokens[1] = new Token();
  tokens[1].append("b");
  tokens[1].setPositionIncrement(0);
  tokens[2] = new Token();
  tokens[2].append("c");
  tokens[2].setPositionIncrement(0);
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(new TextField("field", new CannedTokenStream(tokens)));
  writer.addDocument(doc);
  doc = new Document();
  doc.add(new TextField("field", new CannedTokenStream(tokens)));
  writer.addDocument(doc);
  IndexReader r = writer.getReader();
  writer.close();
  IndexSearcher s = newSearcher(r);
  MultiPhraseQuery.Builder mpqb = new MultiPhraseQuery.Builder();
  // Both branches add the same term arrays at position 0, just in opposite
  // order; only the first branch is exercised here.
  if (true) {
    mpqb.add(new Term[] { new Term("field", "b"), new Term("field", "c") }, 0);
    mpqb.add(new Term[] { new Term("field", "a") }, 0);
  } else {
    mpqb.add(new Term[] { new Term("field", "a") }, 0);
    mpqb.add(new Term[] { new Term("field", "b"), new Term("field", "c") }, 0);
  }
  TopDocs hits = s.search(mpqb.build(), 2);
  assertEquals(2, hits.totalHits);
  // Both documents hold identical content, so their scores must match.
  assertEquals(hits.scoreDocs[0].score, hits.scoreDocs[1].score, 1e-5);
  /*
  for (int hit = 0; hit < hits.totalHits; hit++) {
    ScoreDoc sd = hits.scoreDocs[hit];
    System.out.println("  hit doc=" + sd.doc + " score=" + sd.score);
  }
  */
  r.close();
  dir.close();
}
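The zero position increments place all three tokens at the same position, which is why both "phrase positions" of the MultiPhraseQuery are 0. A minimal standalone sketch that prints the resulting positions, assuming the same tokens array as above:

// Sketch only: walk the canned stream and accumulate position increments.
TokenStream ts = new CannedTokenStream(tokens);
CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
PositionIncrementAttribute posIncr = ts.addAttribute(PositionIncrementAttribute.class);
ts.reset();
int pos = -1;
while (ts.incrementToken()) {
  pos += posIncr.getPositionIncrement();
  System.out.println(term + " @ position " + pos); // a @ 0, b @ 0, c @ 0
}
ts.end();
ts.close();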