Example usage of org.apache.lucene.document.FieldType in the Apache lucene-solr project:
class TestBlockPostingsFormat2, method newDocument.
/**
 * Builds a document containing one empty-valued text field per indexable
 * {@link IndexOptions} value (NONE is skipped), each field named after its
 * option. Term vectors with offsets, positions, and payloads are enabled on
 * every field so CheckIndex can cross-check the postings.
 */
private Document newDocument() {
  Document document = new Document();
  for (IndexOptions indexOption : IndexOptions.values()) {
    if (indexOption == IndexOptions.NONE) {
      continue;
    }
    FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED);
    // turn on tvs for a cross-check, since we rely upon checkindex in this test (for now)
    fieldType.setStoreTermVectors(true);
    fieldType.setStoreTermVectorOffsets(true);
    fieldType.setStoreTermVectorPositions(true);
    fieldType.setStoreTermVectorPayloads(true);
    fieldType.setIndexOptions(indexOption);
    document.add(new Field(indexOption.toString(), "", fieldType));
  }
  return document;
}
Example usage of org.apache.lucene.document.FieldType in the Apache lucene-solr project:
class TestPerFieldPostingsFormat2, method doTestMixedPostings.
/**
 * Indexes 100 small documents whose "id" and "date" fields share a single
 * term-vector-enabled FieldType under the supplied codec, then closes the
 * writer and directory (the CheckIndex cross-check runs on close).
 *
 * @param codec the codec under test
 * @throws Exception if index creation or document addition fails
 */
private void doTestMixedPostings(Codec codec) throws Exception {
  // try-with-resources guarantees the writer and directory are released even
  // when indexing throws; the original leaked both on any failure before the
  // explicit close() calls. Close order (writer first, then directory) is
  // preserved by the nesting.
  try (Directory dir = newDirectory()) {
    IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
    iwc.setCodec(codec);
    try (RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc)) {
      Document doc = new Document();
      FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
      // turn on vectors for the checkindex cross-check
      ft.setStoreTermVectors(true);
      ft.setStoreTermVectorOffsets(true);
      ft.setStoreTermVectorPositions(true);
      Field idField = new Field("id", "", ft);
      Field dateField = new Field("date", "", ft);
      doc.add(idField);
      doc.add(dateField);
      for (int i = 0; i < 100; i++) {
        idField.setStringValue(Integer.toString(random().nextInt(50)));
        dateField.setStringValue(Integer.toString(random().nextInt(100)));
        iw.addDocument(doc);
      }
    }
    // checkindex happens when the directory wrapper is closed
  }
}
Example usage of org.apache.lucene.document.FieldType in the Apache lucene-solr project:
class TestPerFieldPostingsFormat2, method testStressPerFieldCodec.
/*
 * Test per field codec support - adding fields with random codecs
 */
@Test
public void testStressPerFieldCodec() throws IOException {
  final int docsPerRound = 97;
  Directory directory = newDirectory(random());
  int rounds = atLeast(1);
  for (int round = 0; round < rounds; round++) {
    // each round appends to the same index with a fresh writer/config
    int fieldCount = TestUtil.nextInt(random(), 30, 60);
    IndexWriterConfig config = newIndexWriterConfig(random(), new MockAnalyzer(random()));
    config.setOpenMode(OpenMode.CREATE_OR_APPEND);
    IndexWriter writer = newWriter(directory, config);
    for (int docIndex = 0; docIndex < docsPerRound; docIndex++) {
      Document document = new Document();
      for (int fieldIndex = 0; fieldIndex < fieldCount; fieldIndex++) {
        FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED);
        fieldType.setTokenized(random().nextBoolean());
        fieldType.setOmitNorms(random().nextBoolean());
        document.add(newField(Integer.toString(fieldIndex), TestUtil.randomRealisticUnicodeString(random(), 128), fieldType));
      }
      writer.addDocument(document);
    }
    if (random().nextBoolean()) {
      writer.forceMerge(1);
    }
    writer.commit();
    // every completed round must have added exactly docsPerRound documents
    assertEquals((round + 1) * docsPerRound, writer.maxDoc());
    writer.close();
  }
  directory.close();
}
Example usage of org.apache.lucene.document.FieldType in the Apache lucene-solr project:
class TestIndexWriter, method testMaxThreadPriority.
// LUCENE-1036
public void testMaxThreadPriority() throws IOException {
int pri = Thread.currentThread().getPriority();
try {
Directory dir = newDirectory();
IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random())).setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy());
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
IndexWriter iw = new IndexWriter(dir, conf);
Document document = new Document();
FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
customType.setStoreTermVectors(true);
document.add(newField("tvtest", "a b c", customType));
Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
for (int i = 0; i < 4; i++) iw.addDocument(document);
iw.close();
dir.close();
} finally {
Thread.currentThread().setPriority(pri);
}
}
Example usage of org.apache.lucene.document.FieldType in the Apache lucene-solr project:
class TestIndexWriter, method testFlushWithNoMerging.
/**
 * Adds 19 documents with maxBufferedDocs=2, flushes with merging disallowed,
 * and asserts that the resulting index holds exactly 10 segments (nine full
 * 2-doc segments plus the final 1-doc flush).
 */
public void testFlushWithNoMerging() throws IOException {
  Directory directory = newDirectory();
  IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(new MockAnalyzer(random())).setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy(10)));
  FieldType vectorType = new FieldType(TextField.TYPE_STORED);
  vectorType.setStoreTermVectors(true);
  vectorType.setStoreTermVectorPositions(true);
  vectorType.setStoreTermVectorOffsets(true);
  Document doc = new Document();
  doc.add(newField("field", "aaa", vectorType));
  for (int i = 0; i < 19; i++) {
    writer.addDocument(doc);
  }
  writer.flush(false, true);
  writer.close();
  SegmentInfos segmentInfos = SegmentInfos.readLatestCommit(directory);
  // Since we flushed w/o allowing merging we should now
  // have 10 segments
  assertEquals(10, segmentInfos.size());
  directory.close();
}
Aggregations