Use of org.apache.lucene.util.LineFileDocs in project lucene-solr by apache.
The class TestAllFilesHaveChecksumFooter, method test:
public void test() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  conf.setCodec(TestUtil.getDefaultCodec());
  RandomIndexWriter riw = new RandomIndexWriter(random(), dir, conf);
  // Use LineFileDocs so we (hopefully) get most Lucene features
  // tested, e.g. IntPoint was recently added to it:
  LineFileDocs docs = new LineFileDocs(random());
  for (int i = 0; i < 100; i++) {
    riw.addDocument(docs.nextDoc());
    if (random().nextInt(7) == 0) {
      riw.commit();
    }
    if (random().nextInt(20) == 0) {
      riw.deleteDocuments(new Term("docid", Integer.toString(i)));
    }
    if (random().nextInt(15) == 0) {
      riw.updateNumericDocValue(new Term("docid", Integer.toString(i)), "docid_intDV", Long.valueOf(i));
    }
  }
  riw.close();
  checkFooters(dir);
  dir.close();
}
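The checkFooters helper isn't shown on this page. Here is a minimal sketch of what such a check can look like, assuming it walks every file in the directory and verifies the mandatory trailing checksum with CodecUtil.checksumEntireFile; the iteration and the lock-file skip are assumptions, not the project's exact code:

private static void checkFooters(Directory dir) throws IOException {
  for (String file : dir.listAll()) {
    if (file.equals(IndexWriter.WRITE_LOCK_NAME)) {
      continue; // assumption: skip the write lock, which has no codec footer
    }
    try (IndexInput in = dir.openInput(file, IOContext.READONCE)) {
      // Reads the whole file and verifies the trailing CRC32 checksum footer
      CodecUtil.checksumEntireFile(in);
    }
  }
}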
Use of org.apache.lucene.util.LineFileDocs in project lucene-solr by apache.
The class TestAllFilesHaveCodecHeader, method test:
public void test() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  conf.setCodec(TestUtil.getDefaultCodec());
  RandomIndexWriter riw = new RandomIndexWriter(random(), dir, conf);
  // Use LineFileDocs so we (hopefully) get most Lucene features
  // tested, e.g. IntPoint was recently added to it:
  LineFileDocs docs = new LineFileDocs(random());
  for (int i = 0; i < 100; i++) {
    riw.addDocument(docs.nextDoc());
    if (random().nextInt(7) == 0) {
      riw.commit();
    }
    if (random().nextInt(20) == 0) {
      riw.deleteDocuments(new Term("docid", Integer.toString(i)));
    }
    if (random().nextInt(15) == 0) {
      riw.updateNumericDocValue(new Term("docid", Integer.toString(i)), "docid_intDV", Long.valueOf(i));
    }
  }
  riw.close();
  checkHeaders(dir, new HashMap<String, String>());
  dir.close();
}
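checkHeaders is likewise defined elsewhere in the test class. A minimal sketch, assuming it opens each file and asserts the 4-byte CodecUtil.CODEC_MAGIC that Lucene writes at the start of every index file; the map parameter is kept only to match the call site above and is unused in this sketch:

private static void checkHeaders(Directory dir, Map<String, String> namesToExtensions) throws IOException {
  for (String file : dir.listAll()) {
    if (file.equals(IndexWriter.WRITE_LOCK_NAME)) {
      continue; // assumption: skip the write lock, which has no codec header
    }
    try (IndexInput in = dir.openInput(file, IOContext.READONCE)) {
      // Every Lucene index file begins with the codec magic number
      assertEquals("file " + file + " has no codec header", CodecUtil.CODEC_MAGIC, in.readInt());
    }
  }
}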
Use of org.apache.lucene.util.LineFileDocs in project lucene-solr by apache.
The class TestDuelingCodecs, method createRandomIndex:
/**
 * Populates a writer with random documents; this must be fully reproducible with the seed!
 */
public static void createRandomIndex(int numdocs, RandomIndexWriter writer, long seed) throws IOException {
  Random random = new Random(seed);
  // The primary source for our data is LineFileDocs: it's realistic.
  LineFileDocs lineFileDocs = new LineFileDocs(random);
  // Add some extra fields below, because LineFileDocs doesn't cover all the possibilities.
  for (int i = 0; i < numdocs; i++) {
    Document document = lineFileDocs.nextDoc();
    // grab the title and add some SortedSet instances for fun
    String title = document.get("titleTokenized");
    String[] split = title.split("\\s+");
    document.removeFields("sortedset");
    for (String trash : split) {
      document.add(new SortedSetDocValuesField("sortedset", new BytesRef(trash)));
    }
    // add a numeric dv field sometimes
    document.removeFields("sparsenumeric");
    if (random.nextInt(4) == 2) {
      document.add(new NumericDocValuesField("sparsenumeric", random.nextInt()));
    }
    // add sortednumeric sometimes
    document.removeFields("sparsesortednum");
    if (random.nextInt(5) == 1) {
      document.add(new SortedNumericDocValuesField("sparsesortednum", random.nextLong()));
      if (random.nextBoolean()) {
        document.add(new SortedNumericDocValuesField("sparsesortednum", random.nextLong()));
      }
    }
    writer.addDocument(document);
  }
  lineFileDocs.close();
}
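Because every random choice above is derived from the seed argument, two writers fed the same seed and the same numdocs receive identical documents. A hypothetical usage; leftWriter and rightWriter are illustrative names for two RandomIndexWriters, e.g. configured with different codecs:

int numDocs = atLeast(100);
long seed = random().nextLong();
createRandomIndex(numDocs, leftWriter, seed);   // e.g. the default codec
createRandomIndex(numDocs, rightWriter, seed);  // e.g. the codec under test
// Both indexes now hold the same logical content, so their readers
// can be compared field by field and term by term.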
Use of org.apache.lucene.util.LineFileDocs in project lucene-solr by apache.
The class TestSwappedIndexFiles, method test:
public void test() throws Exception {
  Directory dir1 = newDirectory();
  Directory dir2 = newDirectory();
  // Disable CFS 80% of the time so we can swap individual files, but the other 20% of the time we test swapping of .cfs/.cfe too:
  boolean useCFS = random().nextInt(5) == 1;
  // Use LineFileDocs so we (hopefully) get most Lucene features
  // tested, e.g. IntPoint was recently added to it:
  LineFileDocs docs = new LineFileDocs(random());
  Document doc = docs.nextDoc();
  long seed = random().nextLong();
  indexOneDoc(seed, dir1, doc, useCFS);
  indexOneDoc(seed, dir2, doc, useCFS);
  swapFiles(dir1, dir2);
  dir1.close();
  dir2.close();
}
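indexOneDoc and swapFiles are helpers of the same test class and aren't shown here. A plausible sketch of indexOneDoc, assuming it derives all randomness from the passed seed (so both directories end up with equivalent content) and pins the compound-file decision to the useCFS flag; the exact configuration calls are assumptions:

private void indexOneDoc(long seed, Directory dir, Document doc, boolean useCFS) throws IOException {
  Random random = new Random(seed);
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random));
  conf.setCodec(TestUtil.getDefaultCodec());
  // Force the compound-file decision so both directories agree:
  conf.setUseCompoundFile(useCFS);
  conf.getMergePolicy().setNoCFSRatio(useCFS ? 1.0 : 0.0);
  RandomIndexWriter w = new RandomIndexWriter(random, dir, conf);
  w.addDocument(doc);
  w.close();
}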
Use of org.apache.lucene.util.LineFileDocs in project lucene-solr by apache.
The class TestForceMergeForever, method test:
public void test() throws Exception {
  final Directory d = newDirectory();
  MockAnalyzer analyzer = new MockAnalyzer(random());
  analyzer.setMaxTokenLength(TestUtil.nextInt(random(), 1, IndexWriter.MAX_TERM_LENGTH));
  final MyIndexWriter w = new MyIndexWriter(d, newIndexWriterConfig(analyzer));
  // Try to make an index that requires merging:
  w.getConfig().setMaxBufferedDocs(TestUtil.nextInt(random(), 2, 11));
  final int numStartDocs = atLeast(20);
  final LineFileDocs docs = new LineFileDocs(random());
  for (int docIDX = 0; docIDX < numStartDocs; docIDX++) {
    w.addDocument(docs.nextDoc());
  }
  MergePolicy mp = w.getConfig().getMergePolicy();
  final int mergeAtOnce = 1 + w.segmentInfos.size();
  if (mp instanceof TieredMergePolicy) {
    ((TieredMergePolicy) mp).setMaxMergeAtOnce(mergeAtOnce);
  } else if (mp instanceof LogMergePolicy) {
    ((LogMergePolicy) mp).setMergeFactor(mergeAtOnce);
  } else {
    // skip test
    w.close();
    d.close();
    return;
  }
  final AtomicBoolean doStop = new AtomicBoolean();
  w.getConfig().setMaxBufferedDocs(2);
  Thread t = new Thread() {
    @Override
    public void run() {
      try {
        while (!doStop.get()) {
          w.updateDocument(new Term("docid", "" + random().nextInt(numStartDocs)), docs.nextDoc());
          // Force deletes to apply
          w.getReader().close();
        }
      } catch (Throwable t) {
        throw new RuntimeException(t);
      }
    }
  };
  t.start();
  w.forceMerge(1);
  doStop.set(true);
  t.join();
  assertTrue("merge count is " + w.mergeCount.get(), w.mergeCount.get() <= 1);
  w.close();
  d.close();
  docs.close();
}
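MyIndexWriter is a private subclass defined elsewhere in the test. A minimal sketch, assuming it counts merges that stem from the forceMerge call (merges whose maxNumSegments is set) so the assertion above can verify that the concurrent updates do not cause forceMerge(1) to keep merging forever; the real class may filter more precisely which merges it counts:

private static class MyIndexWriter extends IndexWriter {
  final AtomicInteger mergeCount = new AtomicInteger();

  public MyIndexWriter(Directory dir, IndexWriterConfig conf) throws Exception {
    super(dir, conf);
  }

  @Override
  public void merge(MergePolicy.OneMerge merge) throws IOException {
    if (merge.maxNumSegments != -1) {
      // A forced merge: forceMerge sets maxNumSegments, normal merges leave it at -1
      mergeCount.incrementAndGet();
    }
    super.merge(merge);
  }
}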