Use of org.apache.lucene.codecs.compressing.CompressingCodec in the project lucene-solr by apache.
Example from the class Test2BPostingsBytes, method test():
public void test() throws Exception {
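  // If the default codec in effect is a CompressingCodec, skip the test when its
  // maxDocsPerChunk * blockSize is tiny, since that configuration can run out of heap.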
  IndexWriterConfig defaultConfig = new IndexWriterConfig(null);
  Codec defaultCodec = defaultConfig.getCodec();
  if ((new IndexWriterConfig(null)).getCodec() instanceof CompressingCodec) {
    Pattern regex = Pattern.compile("maxDocsPerChunk=(\\d+), blockSize=(\\d+)");
    Matcher matcher = regex.matcher(defaultCodec.toString());
    assertTrue("Unexpected CompressingCodec toString() output: " + defaultCodec.toString(), matcher.find());
    int maxDocsPerChunk = Integer.parseInt(matcher.group(1));
    int blockSize = Integer.parseInt(matcher.group(2));
    int product = maxDocsPerChunk * blockSize;
    assumeTrue(defaultCodec.getName() + " maxDocsPerChunk (" + maxDocsPerChunk
        + ") * blockSize (" + blockSize + ") < 16 - this can trigger OOM with -Dtests.heapsize=30g",
        product >= 16);
  }
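  // First pass: index 1000 documents, each with ~65k tokens of the same term, into a
  // directory with MockDirectoryWrapper throttling disabled so the bulk write runs at full speed.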
  BaseDirectoryWrapper dir = newFSDirectory(createTempDir("2BPostingsBytes1"));
  if (dir instanceof MockDirectoryWrapper) {
    ((MockDirectoryWrapper) dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
  }
  IndexWriter w = new IndexWriter(dir,
      new IndexWriterConfig(new MockAnalyzer(random()))
          .setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
          .setRAMBufferSizeMB(256.0)
          .setMergeScheduler(new ConcurrentMergeScheduler())
          .setMergePolicy(newLogMergePolicy(false, 10))
          .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
          .setCodec(TestUtil.getDefaultCodec()));
  MergePolicy mp = w.getConfig().getMergePolicy();
  if (mp instanceof LogByteSizeMergePolicy) {
    // 1 petabyte:
    ((LogByteSizeMergePolicy) mp).setMaxMergeMB(1024 * 1024 * 1024);
  }
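  // One reusable Document with a single synthetic token-stream field; the field indexes
  // docs and term frequencies (DOCS_AND_FREQS) but omits norms.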
  Document doc = new Document();
  FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
  ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
  ft.setOmitNorms(true);
  MyTokenStream tokenStream = new MyTokenStream();
  Field field = new Field("field", tokenStream, ft);
  doc.add(field);
  final int numDocs = 1000;
  for (int i = 0; i < numDocs; i++) {
    if (i % 2 == 1) {
      // trick blockPF's little optimization
      tokenStream.n = 65536;
    } else {
      tokenStream.n = 65537;
    }
    w.addDocument(doc);
  }
  w.forceMerge(1);
  w.close();
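  // Second pass: open the 1000-doc index and add it to a fresh index 1000 times via
  // TestUtil.addIndexesSlowly, producing roughly one million documents.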
  DirectoryReader oneThousand = DirectoryReader.open(dir);
  DirectoryReader[] subReaders = new DirectoryReader[1000];
  Arrays.fill(subReaders, oneThousand);
  BaseDirectoryWrapper dir2 = newFSDirectory(createTempDir("2BPostingsBytes2"));
  if (dir2 instanceof MockDirectoryWrapper) {
    ((MockDirectoryWrapper) dir2).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
  }
  IndexWriter w2 = new IndexWriter(dir2, new IndexWriterConfig(null));
  TestUtil.addIndexesSlowly(w2, subReaders);
  w2.forceMerge(1);
  w2.close();
  oneThousand.close();
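  // Third pass: add the million-doc index 2000 times, for roughly two billion documents in
  // total, which pushes the accumulated postings data past the 2B-byte mark the test name refers to.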
  DirectoryReader oneMillion = DirectoryReader.open(dir2);
  subReaders = new DirectoryReader[2000];
  Arrays.fill(subReaders, oneMillion);
  BaseDirectoryWrapper dir3 = newFSDirectory(createTempDir("2BPostingsBytes3"));
  if (dir3 instanceof MockDirectoryWrapper) {
    ((MockDirectoryWrapper) dir3).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
  }
  IndexWriter w3 = new IndexWriter(dir3, new IndexWriterConfig(null));
  TestUtil.addIndexesSlowly(w3, subReaders);
  w3.forceMerge(1);
  w3.close();
  oneMillion.close();
  dir.close();
  dir2.close();
  dir3.close();
}
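The method refers to MyTokenStream, a helper class nested in Test2BPostingsBytes that is not shown on this page. A minimal sketch of what such a helper could look like, assuming it only needs to emit the same one-character term n times per document (the term "a" and the exact class shape are assumptions, not taken from this page):

// Imports needed at the top of the test class (assumed):
import java.io.IOException;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

// Hypothetical reconstruction: emits the one-character term "a" exactly n times per document.
static final class MyTokenStream extends TokenStream {
  private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
  int n;              // how many tokens to emit for the current document
  private int index;  // how many tokens have been emitted so far

  @Override
  public boolean incrementToken() {
    if (index < n) {
      clearAttributes();
      termAtt.setEmpty().append('a'); // every token is the same single-character term
      index++;
      return true;
    }
    return false;
  }

  @Override
  public void reset() throws IOException {
    super.reset();
    index = 0; // the indexing chain calls reset() before consuming the stream for each document
  }
}

The test above drives such a stream by mutating n before each addDocument call; reusing one Document, Field, and stream instance keeps per-document allocation to a minimum at this scale.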