Use of org.apache.lucene.index.LogMergePolicy in the project lucene-solr by apache: class TestUtil, method reduceOpenFiles.
/** just tries to configure things to keep the open file
 * count lowish */
public static void reduceOpenFiles(IndexWriter w) {
  // Cap merge widths so fewer segments need to be open at once.
  MergePolicy policy = w.getConfig().getMergePolicy();
  policy.setNoCFSRatio(1.0);
  if (policy instanceof TieredMergePolicy) {
    TieredMergePolicy tiered = (TieredMergePolicy) policy;
    tiered.setMaxMergeAtOnce(Math.min(5, tiered.getMaxMergeAtOnce()));
    tiered.setSegmentsPerTier(Math.min(5, tiered.getSegmentsPerTier()));
  } else if (policy instanceof LogMergePolicy) {
    LogMergePolicy logPolicy = (LogMergePolicy) policy;
    logPolicy.setMergeFactor(Math.min(5, logPolicy.getMergeFactor()));
  }
  MergeScheduler scheduler = w.getConfig().getMergeScheduler();
  if (scheduler instanceof ConcurrentMergeScheduler) {
    // Also bound how many merges may run concurrently (3 merges, 2 threads).
    ((ConcurrentMergeScheduler) scheduler).setMaxMergesAndThreads(3, 2);
  }
}
Use of org.apache.lucene.index.LogMergePolicy in the project lucene-solr by apache: class SimplePrimaryNode, method initWriter.
/**
 * Opens the primary node's IndexWriter over a fresh directory, tuning the
 * merge policy to merge often (so merge warming is stressed) and then
 * capping open files via TestUtil.reduceOpenFiles.
 */
private static IndexWriter initWriter(int id, Random random, Path indexPath, boolean doCheckIndexOnClose) throws IOException {
  Directory dir = SimpleReplicaNode.getDirectory(random, id, indexPath, doCheckIndexOnClose);
  MockAnalyzer analyzer = new MockAnalyzer(random);
  analyzer.setMaxTokenLength(TestUtil.nextInt(random, 1, IndexWriter.MAX_TERM_LENGTH));
  IndexWriterConfig config = LuceneTestCase.newIndexWriterConfig(random, analyzer);
  MergePolicy policy = config.getMergePolicy();
  // Force more frequent merging so we stress merge warming:
  if (policy instanceof LogMergePolicy) {
    ((LogMergePolicy) policy).setMergeFactor(3);
  } else if (policy instanceof TieredMergePolicy) {
    TieredMergePolicy tiered = (TieredMergePolicy) policy;
    tiered.setSegmentsPerTier(3);
    tiered.setMaxMergeAtOnce(3);
  }
  IndexWriter writer = new IndexWriter(dir, config);
  TestUtil.reduceOpenFiles(writer);
  return writer;
}
Use of org.apache.lucene.index.LogMergePolicy in the project lucene-solr by apache: class TestDirectoryTaxonomyReader, method testOpenIfChangedManySegments.
@Test
public void testOpenIfChangedManySegments() throws Exception {
// test openIfChanged() when the taxonomy contains many segments
Directory dir = newDirectory();
// Use a tiny merge factor (2) so the taxonomy's underlying index accumulates
// many small segments as categories are added.
DirectoryTaxonomyWriter writer = new DirectoryTaxonomyWriter(dir) {
@Override
protected IndexWriterConfig createIndexWriterConfig(OpenMode openMode) {
IndexWriterConfig conf = super.createIndexWriterConfig(openMode);
// NOTE(review): assumes the superclass config's merge policy is a
// LogMergePolicy — the cast would throw otherwise; confirm against
// DirectoryTaxonomyWriter.createIndexWriterConfig.
LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
lmp.setMergeFactor(2);
return conf;
}
};
TaxonomyReader reader = new DirectoryTaxonomyReader(writer);
int numRounds = random().nextInt(10) + 10;
// one for root
int numCategories = 1;
for (int i = 0; i < numRounds; i++) {
int numCats = random().nextInt(4) + 1;
for (int j = 0; j < numCats; j++) {
writer.addCategory(new FacetLabel(Integer.toString(i), Integer.toString(j)));
}
// each round adds numCats two-component labels plus one implicitly created
// single-component parent label (the round itself), hence the extra +1
numCategories += numCats + 1;
// reopen the reader and swap it for the old one after each round
TaxonomyReader newtr = TaxonomyReader.openIfChanged(reader);
assertNotNull(newtr);
reader.close();
reader = newtr;
// assert categories
assertEquals(numCategories, reader.getSize());
int roundOrdinal = reader.getOrdinal(new FacetLabel(Integer.toString(i)));
int[] parents = reader.getParallelTaxonomyArrays().parents();
// round's parent is root
assertEquals(0, parents[roundOrdinal]);
for (int j = 0; j < numCats; j++) {
int ord = reader.getOrdinal(new FacetLabel(Integer.toString(i), Integer.toString(j)));
// each category's parent is its round ordinal
assertEquals(roundOrdinal, parents[ord]);
}
}
reader.close();
writer.close();
dir.close();
}
Use of org.apache.lucene.index.LogMergePolicy in the project lucene-solr by apache: class CreateIndexTask, method createWriterConfig.
/**
 * Builds an IndexWriterConfig for a benchmark run from {@code config}
 * properties: open mode, deletion policy, optional commit point, merge
 * scheduler (with ConcurrentMergeScheduler thread/merge limits), codec or
 * per-field postings format, merge policy with compound-file setting, and
 * flush-by-RAM vs flush-by-doc-count limits.
 *
 * @param config  benchmark properties source
 * @param runData per-run state supplying the analyzer
 * @param mode    the writer open mode
 * @param commit  commit point to open from, or null for the latest
 * @return the fully configured IndexWriterConfig
 * @throws RuntimeException if a configured scheduler, codec, postings
 *         format, or merge policy class cannot be instantiated
 */
public static IndexWriterConfig createWriterConfig(Config config, PerfRunData runData, OpenMode mode, IndexCommit commit) {
  @SuppressWarnings("deprecation") IndexWriterConfig iwConf = new IndexWriterConfig(runData.getAnalyzer());
  iwConf.setOpenMode(mode);
  IndexDeletionPolicy indexDeletionPolicy = getIndexDeletionPolicy(config);
  iwConf.setIndexDeletionPolicy(indexDeletionPolicy);
  if (commit != null) {
    iwConf.setIndexCommit(commit);
  }
  final String mergeScheduler = config.get("merge.scheduler", "org.apache.lucene.index.ConcurrentMergeScheduler");
  if (mergeScheduler.equals(NoMergeScheduler.class.getName())) {
    iwConf.setMergeScheduler(NoMergeScheduler.INSTANCE);
  } else {
    try {
      // getDeclaredConstructor().newInstance() instead of the deprecated
      // Class.newInstance(), which propagates undeclared checked exceptions.
      iwConf.setMergeScheduler(Class.forName(mergeScheduler).asSubclass(MergeScheduler.class).getDeclaredConstructor().newInstance());
    } catch (Exception e) {
      throw new RuntimeException("unable to instantiate class '" + mergeScheduler + "' as merge scheduler", e);
    }
    if (mergeScheduler.equals("org.apache.lucene.index.ConcurrentMergeScheduler")) {
      ConcurrentMergeScheduler cms = (ConcurrentMergeScheduler) iwConf.getMergeScheduler();
      int maxThreadCount = config.get("concurrent.merge.scheduler.max.thread.count", ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS);
      int maxMergeCount = config.get("concurrent.merge.scheduler.max.merge.count", ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS);
      cms.setMaxMergesAndThreads(maxMergeCount, maxThreadCount);
    }
  }
  final String defaultCodec = config.get("default.codec", null);
  if (defaultCodec != null) {
    try {
      Class<? extends Codec> clazz = Class.forName(defaultCodec).asSubclass(Codec.class);
      iwConf.setCodec(clazz.getDeclaredConstructor().newInstance());
    } catch (Exception e) {
      throw new RuntimeException("Couldn't instantiate Codec: " + defaultCodec, e);
    }
  }
  // An explicit default codec takes precedence over a per-field postings format.
  final String postingsFormat = config.get("codec.postingsFormat", null);
  if (defaultCodec == null && postingsFormat != null) {
    try {
      final PostingsFormat postingsFormatChosen = PostingsFormat.forName(postingsFormat);
      iwConf.setCodec(new Lucene70Codec() {
        @Override
        public PostingsFormat getPostingsFormatForField(String field) {
          return postingsFormatChosen;
        }
      });
    } catch (Exception e) {
      throw new RuntimeException("Couldn't instantiate Postings Format: " + postingsFormat, e);
    }
  }
  final String mergePolicy = config.get("merge.policy", "org.apache.lucene.index.LogByteSizeMergePolicy");
  boolean isCompound = config.get("compound", true);
  iwConf.setUseCompoundFile(isCompound);
  if (mergePolicy.equals(NoMergePolicy.class.getName())) {
    iwConf.setMergePolicy(NoMergePolicy.INSTANCE);
  } else {
    try {
      iwConf.setMergePolicy(Class.forName(mergePolicy).asSubclass(MergePolicy.class).getDeclaredConstructor().newInstance());
    } catch (Exception e) {
      throw new RuntimeException("unable to instantiate class '" + mergePolicy + "' as merge policy", e);
    }
    iwConf.getMergePolicy().setNoCFSRatio(isCompound ? 1.0 : 0.0);
    if (iwConf.getMergePolicy() instanceof LogMergePolicy) {
      LogMergePolicy logMergePolicy = (LogMergePolicy) iwConf.getMergePolicy();
      logMergePolicy.setMergeFactor(config.get("merge.factor", OpenIndexTask.DEFAULT_MERGE_PFACTOR));
    }
  }
  final double ramBuffer = config.get("ram.flush.mb", OpenIndexTask.DEFAULT_RAM_FLUSH_MB);
  final int maxBuffered = config.get("max.buffered", OpenIndexTask.DEFAULT_MAX_BUFFERED);
  // Both branches set the same two values; only the ORDER differs, so that the
  // flush trigger being disabled is set after the other one has a valid value —
  // presumably IndexWriterConfig rejects having both triggers disabled at once
  // (NOTE(review): confirm against IndexWriterConfig.setRAMBufferSizeMB docs).
  if (maxBuffered == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
    iwConf.setRAMBufferSizeMB(ramBuffer);
    iwConf.setMaxBufferedDocs(maxBuffered);
  } else {
    iwConf.setMaxBufferedDocs(maxBuffered);
    iwConf.setRAMBufferSizeMB(ramBuffer);
  }
  return iwConf;
}
Use of org.apache.lucene.index.LogMergePolicy in the project OpenOLAT by OpenOLAT: class OlatFullIndexer, method newLogMergePolicy.
/**
 * Creates the merge policy for the full indexer: a LogDocMergePolicy whose
 * segment sizes are calibrated by deletes and whose merge factor is the
 * class constant INDEX_MERGE_FACTOR.
 */
public LogMergePolicy newLogMergePolicy() {
  LogDocMergePolicy policy = new LogDocMergePolicy();
  policy.setMergeFactor(INDEX_MERGE_FACTOR);
  policy.setCalibrateSizeByDeletes(true);
  return policy;
}
Aggregations