Example usage of org.apache.lucene.index.LogDocMergePolicy from the lucene-solr project (Apache):
the testAdvanceSingleParentNoChild method of the TestBlockJoin class.
/**
 * Verifies that advancing a {@link ToParentBlockJoinQuery} scorer behaves
 * correctly when the first parent document in the segment has no child
 * documents: advancing from doc 0 must land on the parent of the child
 * block (doc id 2), skipping the childless parent (doc id 0).
 */
public void testAdvanceSingleParentNoChild() throws Exception {
    Directory directory = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
        newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(new LogDocMergePolicy()));

    // First block: a parent with no children at all.
    Document parent = new Document();
    parent.add(newStringField("parent", "1", Field.Store.NO));
    parent.add(newStringField("isparent", "yes", Field.Store.NO));
    writer.addDocuments(Arrays.asList(parent));

    // Second block: one child followed by its parent, so the scorer is non-null.
    parent = new Document();
    parent.add(newStringField("parent", "2", Field.Store.NO));
    parent.add(newStringField("isparent", "yes", Field.Store.NO));
    Document child = new Document();
    child.add(newStringField("child", "2", Field.Store.NO));
    writer.addDocuments(Arrays.asList(child, parent));

    // The assertion below relies on absolute doc ids, so collapse to one segment.
    writer.forceMerge(1);
    IndexReader reader = writer.getReader();
    writer.close();

    IndexSearcher searcher = newSearcher(reader);
    Query childQuery = new TermQuery(new Term("child", "2"));
    BitSetProducer parentFilter = new QueryBitSetProducer(new TermQuery(new Term("isparent", "yes")));
    CheckJoinIndex.check(searcher.getIndexReader(), parentFilter);
    ToParentBlockJoinQuery joinQuery = new ToParentBlockJoinQuery(childQuery, parentFilter, ScoreMode.Avg);
    Weight weight = searcher.createNormalizedWeight(joinQuery, true);
    Scorer scorer = weight.scorer(searcher.getIndexReader().leaves().get(0));
    // Doc ids: 0 = childless parent, 1 = child, 2 = its parent.
    assertEquals(2, scorer.iterator().advance(0));
    reader.close();
    directory.close();
}
Example usage of org.apache.lucene.index.LogDocMergePolicy from the lucene-solr project (Apache):
the beforeClass method of the TestFieldCache class.
/**
 * Builds the shared test index: NUM_DOCS documents carrying descending
 * numeric point fields, an occasionally-omitted unicode string field, and
 * an occasionally-omitted multi-valued string field. The index is force
 * merged into a single segment and its sole leaf reader is exposed via the
 * {@code reader} class field, since the tests rely on doc-id order.
 */
@BeforeClass
public static void beforeClass() throws Exception {
    NUM_DOCS = atLeast(500);
    NUM_ORDS = atLeast(2);
    directory = newDirectory();
    IndexWriter writer = new IndexWriter(directory,
        new IndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(new LogDocMergePolicy()));

    // Counters start at the type maxima and decrease by one per document.
    long longValue = Long.MAX_VALUE;
    double doubleValue = Double.MAX_VALUE;
    int intValue = Integer.MAX_VALUE;
    float floatValue = Float.MAX_VALUE;
    unicodeStrings = new String[NUM_DOCS];
    multiValued = new BytesRef[NUM_DOCS][NUM_ORDS];
    if (VERBOSE) {
        System.out.println("TEST: setUp");
    }
    for (int docId = 0; docId < NUM_DOCS; docId++) {
        Document doc = new Document();
        doc.add(new LongPoint("theLong", longValue--));
        doc.add(new DoublePoint("theDouble", doubleValue--));
        doc.add(new IntPoint("theInt", intValue--));
        doc.add(new FloatPoint("theFloat", floatValue--));
        // Even-numbered docs additionally carry the sparse fields.
        if (docId % 2 == 0) {
            doc.add(new IntPoint("sparse", docId));
            doc.add(new IntPoint("numInt", docId));
        }
        // Randomly skip the single-valued unicode field (~1 in 40 docs).
        if (random().nextInt(40) != 17) {
            unicodeStrings[docId] = generateString(docId);
            doc.add(newStringField("theRandomUnicodeString", unicodeStrings[docId], Field.Store.YES));
        }
        // Randomly skip the multi-valued field (~1 in 10 docs).
        if (random().nextInt(10) != 8) {
            for (int ord = 0; ord < NUM_ORDS; ord++) {
                String value = generateString(docId);
                multiValued[docId][ord] = new BytesRef(value);
                doc.add(newStringField("theRandomUnicodeMultiValuedField", value, Field.Store.YES));
            }
            // Tests compare against sorted ords.
            Arrays.sort(multiValued[docId]);
        }
        writer.addDocument(doc);
    }
    // This test relies on one segment and docid order.
    writer.forceMerge(1);
    IndexReader topReader = DirectoryReader.open(writer);
    assertEquals(1, topReader.leaves().size());
    reader = topReader.leaves().get(0).reader();
    TestUtil.checkReader(reader);
    writer.close();
}
Example usage of org.apache.lucene.index.LogDocMergePolicy from the jackrabbit-oak project (Apache):
the createMergePolicy method of the IndexDefinition class.
/**
 * Resolves the Lucene merge policy for this index definition.
 * <p>
 * The {@code oak.lucene.cmmp} system property, when set, overrides
 * everything and forces the commit-mitigating policy. Otherwise the
 * (case-insensitive) policy name from the index definition is mapped to a
 * concrete policy; an unknown or absent name falls back to Lucene's
 * default {@link TieredMergePolicy}.
 */
private MergePolicy createMergePolicy() {
    // System-property escape hatch: force the commit-mitigating policy.
    if (System.getProperty("oak.lucene.cmmp") != null) {
        return new CommitMitigatingTieredMergePolicy();
    }
    String name = getOptionalValue(definition, LuceneIndexConstants.MERGE_POLICY_NAME, null);
    if (name != null) {
        if (name.equalsIgnoreCase("no")) {
            return NoMergePolicy.COMPOUND_FILES;
        }
        if (name.equalsIgnoreCase("mitigated")) {
            return new CommitMitigatingTieredMergePolicy();
        }
        if (name.equalsIgnoreCase("tiered") || name.equalsIgnoreCase("default")) {
            return new TieredMergePolicy();
        }
        if (name.equalsIgnoreCase("logbyte")) {
            return new LogByteSizeMergePolicy();
        }
        if (name.equalsIgnoreCase("logdoc")) {
            return new LogDocMergePolicy();
        }
    }
    // Unknown or unset policy name: use Lucene's default.
    return new TieredMergePolicy();
}
Example usage of org.apache.lucene.index.LogDocMergePolicy from the OpenOLAT project:
the newLogMergePolicy method of the OlatFullIndexer class.
/**
 * Creates the merge policy for the full indexer: merges are sized by
 * document count, deleted documents are factored into segment-size
 * calibration, and the merge factor is taken from INDEX_MERGE_FACTOR.
 */
public LogMergePolicy newLogMergePolicy() {
    LogDocMergePolicy policy = new LogDocMergePolicy();
    policy.setCalibrateSizeByDeletes(true);
    policy.setMergeFactor(INDEX_MERGE_FACTOR);
    return policy;
}
Example usage of org.apache.lucene.index.LogDocMergePolicy from the OpenOLAT project:
the newLogMergePolicy method of the JmsIndexer class.
/**
 * Builds a document-count-based log merge policy for the JMS indexer.
 * Segment sizes are calibrated with deletes taken into account, and the
 * merge factor comes from the INDEX_MERGE_FACTOR constant.
 */
public LogMergePolicy newLogMergePolicy() {
    LogDocMergePolicy mergePolicy = new LogDocMergePolicy();
    mergePolicy.setCalibrateSizeByDeletes(true);
    mergePolicy.setMergeFactor(INDEX_MERGE_FACTOR);
    return mergePolicy;
}
Aggregations