Use of org.apache.lucene.index.LogByteSizeMergePolicy in project lucene-solr by apache.
From class TestDirectoryTaxonomyReader, method testOpenIfChangedNoChangesButSegmentMerges:
@Test
public void testOpenIfChangedNoChangesButSegmentMerges() throws Exception {
  // test openIfChanged() when the taxonomy hasn't really changed, but segments
  // were merged. The NRT reader will be reopened, and ParentArray used to assert
  // that the new reader contains more ordinals than were given from the old
  // TaxReader version
  Directory dir = newDirectory();

  // hold onto IW to forceMerge
  // note how we don't close it, since DTW will close it.
  final IndexWriter iw = new IndexWriter(dir,
      new IndexWriterConfig(new MockAnalyzer(random()))
          .setMergePolicy(new LogByteSizeMergePolicy()));
  DirectoryTaxonomyWriter writer = new DirectoryTaxonomyWriter(dir) {
    @Override
    protected IndexWriter openIndexWriter(Directory directory, IndexWriterConfig config) throws IOException {
      return iw;
    }
  };

  // add a category so that the following DTR open will cause a flush and
  // a new segment will be created
  writer.addCategory(new FacetLabel("a"));

  TaxonomyReader reader = new DirectoryTaxonomyReader(writer);
  assertEquals(2, reader.getSize());
  assertEquals(2, reader.getParallelTaxonomyArrays().parents().length);

  // merge all the segments so that NRT reader thinks there's a change
  iw.forceMerge(1);

  // now calling openIfChanged should trip on the wrong assert in ParentArray's ctor
  TaxonomyReader newtr = TaxonomyReader.openIfChanged(reader);
  assertNotNull(newtr);
  reader.close();
  reader = newtr;
  assertEquals(2, reader.getSize());
  assertEquals(2, reader.getParallelTaxonomyArrays().parents().length);

  reader.close();
  writer.close();
  dir.close();
}
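For context, LogByteSizeMergePolicy buckets segments into levels by their byte size and merges mergeFactor same-level segments at a time; the test above relies on forceMerge(1) collapsing those segments so the NRT reader sees a change without any taxonomy change. Below is a minimal standalone sketch of how the policy is typically tuned before being handed to an IndexWriterConfig; the index path and the threshold values are illustrative assumptions, not taken from either snippet on this page.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LogByteSizeMergePolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import java.nio.file.Paths;

public class LogByteSizeMergePolicyExample {
  public static void main(String[] args) throws Exception {
    LogByteSizeMergePolicy mp = new LogByteSizeMergePolicy();
    mp.setMinMergeMB(1.0);    // segments below this size are rounded up when assigning levels
    mp.setMaxMergeMB(512.0);  // segments above this size are left alone during normal merging
    mp.setMergeFactor(10);    // merge this many same-level segments at once

    // "/tmp/example-index" is a placeholder path
    try (Directory dir = FSDirectory.open(Paths.get("/tmp/example-index"));
         IndexWriter writer = new IndexWriter(dir,
             new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(mp))) {
      // ... add documents here ...
      writer.commit();
    }
  }
}

Note that setMaxMergeMB caps only normal background merges; a forceMerge, as used in both snippets on this page, is governed separately by setMaxMergeMBForForcedMerge.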
Use of org.apache.lucene.index.LogByteSizeMergePolicy in project lucene-solr by apache.
From class FileBasedSpellChecker, method loadExternalFileDictionary:
private void loadExternalFileDictionary(SolrCore core, SolrIndexSearcher searcher) {
  try {
    IndexSchema schema = null == searcher ? core.getLatestSchema() : searcher.getSchema();

    // Get the field's analyzer
    if (fieldTypeName != null && schema.getFieldTypeNoEx(fieldTypeName) != null) {
      FieldType fieldType = schema.getFieldTypes().get(fieldTypeName);

      // Do index-time analysis using the given fieldType's analyzer
      RAMDirectory ramDir = new RAMDirectory();

      LogMergePolicy mp = new LogByteSizeMergePolicy();
      mp.setMergeFactor(300);

      IndexWriter writer = new IndexWriter(ramDir,
          new IndexWriterConfig(fieldType.getIndexAnalyzer())
              .setMaxBufferedDocs(150)
              .setMergePolicy(mp)
              .setOpenMode(IndexWriterConfig.OpenMode.CREATE));

      List<String> lines = core.getResourceLoader().getLines(sourceLocation, characterEncoding);

      for (String s : lines) {
        Document d = new Document();
        d.add(new TextField(WORD_FIELD_NAME, s, Field.Store.NO));
        writer.addDocument(d);
      }
      writer.forceMerge(1);
      writer.close();

      dictionary = new HighFrequencyDictionary(DirectoryReader.open(ramDir), WORD_FIELD_NAME, 0.0f);
    } else {
      // check if character encoding is defined
      if (characterEncoding == null) {
        dictionary = new PlainTextDictionary(core.getResourceLoader().openResource(sourceLocation));
      } else {
        dictionary = new PlainTextDictionary(
            new InputStreamReader(core.getResourceLoader().openResource(sourceLocation), characterEncoding));
      }
    }
  } catch (IOException e) {
    log.error("Unable to load spellings", e);
  }
}
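The Dictionary assigned here is not queried directly by this method; Solr's spell-check machinery (FileBasedSpellChecker extends AbstractLuceneSpellChecker) later feeds it into Lucene's SpellChecker. A rough sketch of that consumption path follows, assuming the standard org.apache.lucene.search.spell API; the helper name, analyzer choice, and suggestion count are illustrative, not taken from the Solr source.

import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.spell.Dictionary;
import org.apache.lucene.search.spell.SpellChecker;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class DictionaryConsumptionSketch {
  // Index any Dictionary (e.g. the HighFrequencyDictionary or
  // PlainTextDictionary built above) into a spell-check index and
  // return up to five suggestions for a possibly misspelled word.
  static String[] suggest(Dictionary dictionary, String word) throws Exception {
    Directory spellIndex = new RAMDirectory(); // in-memory, mirroring the snippet above
    try (SpellChecker checker = new SpellChecker(spellIndex)) {
      checker.indexDictionary(dictionary, new IndexWriterConfig(new WhitespaceAnalyzer()), true);
      return checker.suggestSimilar(word, 5);
    }
  }
}

RAMDirectory, used in both the snippet and this sketch, is deprecated in later Lucene versions in favor of ByteBuffersDirectory, but it is still current in the lucene-solr tree these usages come from.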