Example usage of org.apache.lucene.document.Document from the elastic/elasticsearch project.
From class MultiPhrasePrefixQueryTests, method testSimple:
/**
 * Verifies prefix expansion in {@link MultiPhrasePrefixQuery}: the last term of the
 * phrase is treated as a prefix and expanded against the indexed terms
 * ("aaa bbb ccc ddd"), honoring the configured slop.
 */
public void testSimple() throws Exception {
    IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    Document doc = new Document();
    doc.add(new Field("field", "aaa bbb ccc ddd", TextField.TYPE_NOT_STORED));
    writer.addDocument(doc);
    IndexReader reader = DirectoryReader.open(writer);
    IndexSearcher searcher = new IndexSearcher(reader);

    // Single prefix term: "aa" expands to the indexed term "aaa".
    MultiPhrasePrefixQuery query = new MultiPhrasePrefixQuery();
    query.add(new Term("field", "aa"));
    assertThat(searcher.count(query), equalTo(1));

    // Exact first term followed by a prefix: "aaa" then "bb" -> "bbb" (adjacent positions).
    query = new MultiPhrasePrefixQuery();
    query.add(new Term("field", "aaa"));
    query.add(new Term("field", "bb"));
    assertThat(searcher.count(query), equalTo(1));

    // "aaa" and "cc" -> "ccc" are one position apart; slop of 1 allows the gap over "bbb".
    query = new MultiPhrasePrefixQuery();
    query.setSlop(1);
    query.add(new Term("field", "aaa"));
    query.add(new Term("field", "cc"));
    assertThat(searcher.count(query), equalTo(1));

    // No indexed term starts with "xxx", so the query matches nothing.
    query = new MultiPhrasePrefixQuery();
    query.setSlop(1);
    query.add(new Term("field", "xxx"));
    assertThat(searcher.count(query), equalTo(0));

    // Release index resources (the original leaked both reader and writer).
    reader.close();
    writer.close();
}
Example usage of org.apache.lucene.document.Document from the elastic/elasticsearch project.
From class MoreLikeThisQueryTests, method testSimple:
/**
 * Indexes two documents sharing the term "lucene" and checks that a
 * {@link MoreLikeThisQuery} for "lucene" (with minimal frequency thresholds
 * so the single-term like-text is not filtered out) matches both.
 */
public void testSimple() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    indexWriter.commit();

    Document document = new Document();
    document.add(new TextField("_id", "1", Field.Store.YES));
    document.add(new TextField("text", "lucene", Field.Store.YES));
    indexWriter.addDocument(document);

    document = new Document();
    document.add(new TextField("_id", "2", Field.Store.YES));
    document.add(new TextField("text", "lucene release", Field.Store.YES));
    indexWriter.addDocument(document);

    // Near-real-time reader: sees the uncommitted documents added above.
    IndexReader reader = DirectoryReader.open(indexWriter);
    IndexSearcher searcher = new IndexSearcher(reader);

    // Like-text "lucene" is supplied via the constructor; thresholds of 1 ensure
    // the lone term survives the default term-selection cutoffs.
    MoreLikeThisQuery mltQuery = new MoreLikeThisQuery("lucene", new String[] { "text" }, Lucene.STANDARD_ANALYZER);
    mltQuery.setMinTermFrequency(1);
    mltQuery.setMinDocFreq(1);
    long count = searcher.count(mltQuery);
    assertThat(count, equalTo(2L));

    reader.close();
    indexWriter.close();
    dir.close(); // was leaked in the original
}
Example usage of org.apache.lucene.document.Document from the elastic/elasticsearch project.
From class XMoreLikeThisTests, method addDoc:
/**
 * Indexes one document that carries a stored "text" field for each entry in {@code texts}.
 */
private void addDoc(RandomIndexWriter writer, String[] texts) throws IOException {
    Document indexedDoc = new Document();
    for (int i = 0; i < texts.length; i++) {
        indexedDoc.add(newTextField("text", texts[i], Field.Store.YES));
    }
    writer.addDocument(indexedDoc);
}
Example usage of org.apache.lucene.document.Document from the elastic/elasticsearch project.
From class LuceneTests, method testCleanIndex:
// Verifies that Lucene.cleanLuceneIndex wipes all index files from a directory,
// even when the index holds multiple commit points and a corrupted/deleted segment file.
public void testCleanIndex() throws IOException {
MockDirectoryWrapper dir = newMockDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
// Keep every commit point alive and never merge, so multiple segments and
// commit generations accumulate on disk for cleanLuceneIndex to remove.
iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
iwc.setMergePolicy(NoMergePolicy.INSTANCE);
// Flush a new segment every 2 buffered docs, forcing several segment files.
iwc.setMaxBufferedDocs(2);
IndexWriter writer = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new TextField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
writer.addDocument(doc);
writer.commit();
doc = new Document();
doc.add(new TextField("id", "2", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
writer.addDocument(doc);
doc = new Document();
doc.add(new TextField("id", "3", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
writer.addDocument(doc);
writer.commit();
doc = new Document();
doc.add(new TextField("id", "4", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
writer.addDocument(doc);
// Mark doc "2" deleted; with NoMergePolicy the deletion is recorded but the
// segment is not rewritten, so maxDoc stays at 4.
writer.deleteDocuments(new Term("id", "2"));
writer.commit();
// NRT reader over the writer: 4 docs indexed, 1 of them deleted.
try (DirectoryReader open = DirectoryReader.open(writer)) {
assertEquals(3, open.numDocs());
assertEquals(1, open.numDeletedDocs());
assertEquals(4, open.maxDoc());
}
writer.close();
// Randomly delete one "_1*" segment file to simulate a corrupted index;
// cleanLuceneIndex must cope with a partially missing index as well.
if (random().nextBoolean()) {
for (String file : dir.listAll()) {
if (file.startsWith("_1")) {
// delete a random file
dir.deleteFile(file);
break;
}
}
}
Lucene.cleanLuceneIndex(dir);
// After cleaning, only the lock file (and MockDirectoryWrapper "extra" files)
// may remain — every real index file must be gone.
if (dir.listAll().length > 0) {
for (String file : dir.listAll()) {
if (file.startsWith("extra") == false) {
assertEquals(file, "write.lock");
}
}
}
dir.close();
}
Example usage of org.apache.lucene.document.Document from the elastic/elasticsearch project.
From class LuceneTests, method testNumDocs:
// Verifies that Lucene.getNumDocs (computed from the on-disk SegmentInfos) reflects
// only committed state: uncommitted adds/deletes are invisible until commit().
public void testNumDocs() throws IOException {
MockDirectoryWrapper dir = newMockDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
IndexWriter writer = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new TextField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
writer.addDocument(doc);
writer.commit();
// One committed doc.
SegmentInfos segmentCommitInfos = Lucene.readSegmentInfos(dir);
assertEquals(1, Lucene.getNumDocs(segmentCommitInfos));
doc = new Document();
doc.add(new TextField("id", "2", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
writer.addDocument(doc);
doc = new Document();
doc.add(new TextField("id", "3", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
writer.addDocument(doc);
// Docs 2 and 3 are buffered but not committed, so the on-disk count is still 1.
segmentCommitInfos = Lucene.readSegmentInfos(dir);
assertEquals(1, Lucene.getNumDocs(segmentCommitInfos));
writer.commit();
// After commit all 3 docs are visible in the segment infos.
segmentCommitInfos = Lucene.readSegmentInfos(dir);
assertEquals(3, Lucene.getNumDocs(segmentCommitInfos));
// A committed deletion must also be reflected: 3 - 1 = 2 live docs.
writer.deleteDocuments(new Term("id", "2"));
writer.commit();
segmentCommitInfos = Lucene.readSegmentInfos(dir);
assertEquals(2, Lucene.getNumDocs(segmentCommitInfos));
// Randomized bulk phase: index N extra docs, then delete a random subset of them.
int numDocsToIndex = randomIntBetween(10, 50);
List<Term> deleteTerms = new ArrayList<>();
for (int i = 0; i < numDocsToIndex; i++) {
doc = new Document();
doc.add(new TextField("id", "extra_" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
deleteTerms.add(new Term("id", "extra_" + i));
writer.addDocument(doc);
}
int numDocsToDelete = randomIntBetween(0, numDocsToIndex);
Collections.shuffle(deleteTerms, random());
for (int i = 0; i < numDocsToDelete; i++) {
// Each removed term is popped from deleteTerms, so deleteTerms.size()
// ends up equal to the number of extra docs left alive.
Term remove = deleteTerms.remove(0);
writer.deleteDocuments(remove);
}
writer.commit();
// 2 surviving original docs plus the undeleted extras.
segmentCommitInfos = Lucene.readSegmentInfos(dir);
assertEquals(2 + deleteTerms.size(), Lucene.getNumDocs(segmentCommitInfos));
writer.close();
dir.close();
}
Aggregations