Use of org.apache.lucene.index.IndexReader in project elasticsearch by elastic.
From class VectorHighlighterTests, method testVectorHighlighterNoTermVector:
public void testVectorHighlighterNoTermVector() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    Document document = new Document();
    document.add(new TextField("_id", "1", Field.Store.YES));
    // A plain TextField is stored and indexed but carries no term vectors.
    document.add(new TextField("content", "the big bad dog", Field.Store.YES));
    indexWriter.addDocument(document);
    IndexReader reader = DirectoryReader.open(indexWriter);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
    assertThat(topDocs.totalHits, equalTo(1));
    FastVectorHighlighter highlighter = new FastVectorHighlighter();
    String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))),
            reader, topDocs.scoreDocs[0].doc, "content", 30);
    // With no term vectors to read, the fast vector highlighter returns null.
    assertThat(fragment, nullValue());
}
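The null fragment is expected here: FastVectorHighlighter reads its highlighting data from term vectors, and the content field above is a plain TextField, which stores none (the next example indexes the same field with term vectors and gets a fragment back). A minimal sketch of how a caller might guard against this case, reusing the reader and topDocs from the test:

Fields vectors = reader.getTermVectors(topDocs.scoreDocs[0].doc);
// getTermVectors returns null when the document stored no term vectors at all;
// terms("content") returns null when this particular field stored none.
if (vectors == null || vectors.terms("content") == null) {
    // fall back to a plain (re-analyzing) highlighter, or skip highlighting
}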
Use of org.apache.lucene.index.IndexReader in project elasticsearch by elastic.
From class VectorHighlighterTests, method testVectorHighlighter:
public void testVectorHighlighter() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    Document document = new Document();
    document.add(new TextField("_id", "1", Field.Store.YES));
    FieldType vectorsType = new FieldType(TextField.TYPE_STORED);
    vectorsType.setStoreTermVectors(true);
    vectorsType.setStoreTermVectorPositions(true);
    vectorsType.setStoreTermVectorOffsets(true);
    document.add(new Field("content", "the big bad dog", vectorsType));
    indexWriter.addDocument(document);
    IndexReader reader = DirectoryReader.open(indexWriter);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
    assertThat(topDocs.totalHits, equalTo(1));
    FastVectorHighlighter highlighter = new FastVectorHighlighter();
    String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))),
            reader, topDocs.scoreDocs[0].doc, "content", 30);
    assertThat(fragment, notNullValue());
    assertThat(fragment, equalTo("the big <b>bad</b> dog"));
}
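As a usage note, the same highlighter can return several fragments in one call via getBestFragments; a minimal sketch, with the fragment size and count chosen arbitrarily:

FieldQuery fieldQuery = highlighter.getFieldQuery(new TermQuery(new Term("content", "bad")));
// Requests up to 3 fragments of roughly 30 characters each.
String[] fragments = highlighter.getBestFragments(fieldQuery, reader, topDocs.scoreDocs[0].doc, "content", 30, 3);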
Use of org.apache.lucene.index.IndexReader in project elasticsearch by elastic.
From class IndexFieldDataServiceTests, method testFieldDataCacheListener:
public void testFieldDataCacheListener() throws Exception {
    final IndexService indexService = createIndex("test");
    final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
    // Build a fresh IndexFieldDataService: the listener can be set only once per service.
    final IndexFieldDataService ifdService = new IndexFieldDataService(indexService.getIndexSettings(),
            indicesService.getIndicesFieldDataCache(), indicesService.getCircuitBreakerService(),
            indexService.mapperService());
    final BuilderContext ctx = new BuilderContext(indexService.getIndexSettings().getSettings(), new ContentPath(1));
    final MappedFieldType mapper1 = new TextFieldMapper.Builder("s").fielddata(true).build(ctx).fieldType();
    final IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new KeywordAnalyzer()));
    Document doc = new Document();
    doc.add(new StringField("s", "thisisastring", Store.NO));
    writer.addDocument(doc);
    DirectoryReader open = DirectoryReader.open(writer);
    final boolean wrap = randomBoolean();
    final IndexReader reader = wrap ? ElasticsearchDirectoryReader.wrap(open, new ShardId("test", "_na_", 1)) : open;
    final AtomicInteger onCacheCalled = new AtomicInteger();
    final AtomicInteger onRemovalCalled = new AtomicInteger();
    ifdService.setListener(new IndexFieldDataCache.Listener() {

        @Override
        public void onCache(ShardId shardId, String fieldName, Accountable ramUsage) {
            // Only a wrapped reader carries a ShardId down to the cache listener.
            if (wrap) {
                assertEquals(new ShardId("test", "_na_", 1), shardId);
            } else {
                assertNull(shardId);
            }
            onCacheCalled.incrementAndGet();
        }

        @Override
        public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) {
            if (wrap) {
                assertEquals(new ShardId("test", "_na_", 1), shardId);
            } else {
                assertNull(shardId);
            }
            onRemovalCalled.incrementAndGet();
        }
    });
    IndexFieldData<?> ifd = ifdService.getForField(mapper1);
    LeafReaderContext leafReaderContext = reader.getContext().leaves().get(0);
    AtomicFieldData load = ifd.load(leafReaderContext);
    assertEquals(1, onCacheCalled.get());
    assertEquals(0, onRemovalCalled.get());
    reader.close();
    load.close();
    writer.close();
    // Closing the reader evicts the cached field data, triggering onRemoval.
    assertEquals(1, onCacheCalled.get());
    assertEquals(1, onRemovalCalled.get());
    ifdService.clear();
}
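The wrap branch is what gives the listener a non-null ShardId: ElasticsearchDirectoryReader attaches the shard to the reader chain, and the field data cache reads it back out. A sketch of recovering the shard from a wrapped reader, assuming the same open reader and shard id as in the test:

DirectoryReader wrapped = ElasticsearchDirectoryReader.wrap(open, new ShardId("test", "_na_", 1));
// getElasticsearchDirectoryReader walks the filter-reader chain and
// returns null when no ElasticsearchDirectoryReader is present in it.
ElasticsearchDirectoryReader esReader = ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(wrapped);
ShardId shardId = esReader == null ? null : esReader.shardId();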
Use of org.apache.lucene.index.IndexReader in project elasticsearch by elastic.
From class DateFieldTypeTests, method testIsFieldWithinQueryEmptyReader:
public void testIsFieldWithinQueryEmptyReader() throws IOException {
    QueryRewriteContext context = new QueryRewriteContext(null, null, null, xContentRegistry(), null, null, () -> nowInMillis);
    IndexReader reader = new MultiReader();
    DateFieldType ft = new DateFieldType();
    ft.setName("my_date");
    assertEquals(Relation.DISJOINT,
            ft.isFieldWithinQuery(reader, "2015-10-12", "2016-04-03", randomBoolean(), randomBoolean(), null, null, context));
}
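A no-argument MultiReader is a convenient empty reader: it has no leaves and no documents, so the only possible answer is DISJOINT. More generally, isFieldWithinQuery reports how a reader's indexed values relate to a queried range; a sketch of how a caller might act on the three relations (the rewrite decisions in the comments are illustrative, not from the source):

Relation rel = ft.isFieldWithinQuery(reader, "2015-10-12", "2016-04-03", true, true, null, null, context);
switch (rel) {
    case WITHIN:
        // every document matches: the range check can be dropped entirely
        break;
    case DISJOINT:
        // no document can match: the query can be rewritten to match nothing
        break;
    case INTERSECTS:
        // undecided: the range query has to be executed as written
        break;
}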
Use of org.apache.lucene.index.IndexReader in project elasticsearch by elastic.
From class DoubleIndexingDocTests, method testDoubleIndexingSameDoc:
public void testDoubleIndexingSameDoc() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(random(), Lucene.STANDARD_ANALYZER));
    String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
            .startObject("properties").endObject()
            .endObject().endObject().string();
    IndexService index = createIndex("test");
    client().admin().indices().preparePutMapping("test").setType("type").setSource(mapping, XContentType.JSON).get();
    DocumentMapper mapper = index.mapperService().documentMapper("type");
    QueryShardContext context = index.newQueryShardContext(0, null, () -> 0L);
    ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject()
            .field("field1", "value1")
            .field("field2", 1)
            .field("field3", 1.1)
            .field("field4", "2010-01-01")
            .startArray("field5").value(1).value(2).value(3).endArray()
            .endObject().bytes());
    // Every field is new, so parsing yields a dynamic mapping update that must be
    // applied before the refreshed DocumentMapper knows about them.
    assertNotNull(doc.dynamicMappingsUpdate());
    client().admin().indices().preparePutMapping("test").setType("type")
            .setSource(doc.dynamicMappingsUpdate().toString(), XContentType.JSON).get();
    mapper = index.mapperService().documentMapper("type");
    // Index the same parsed document twice.
    writer.addDocument(doc.rootDoc());
    writer.addDocument(doc.rootDoc());
    IndexReader reader = DirectoryReader.open(writer);
    IndexSearcher searcher = new IndexSearcher(reader);
    // Each field, including every array element of field5, should match both copies.
    TopDocs topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field1").fieldType().termQuery("value1", context), 10);
    assertThat(topDocs.totalHits, equalTo(2));
    topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field2").fieldType().termQuery("1", context), 10);
    assertThat(topDocs.totalHits, equalTo(2));
    topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field3").fieldType().termQuery("1.1", context), 10);
    assertThat(topDocs.totalHits, equalTo(2));
    topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field4").fieldType().termQuery("2010-01-01", context), 10);
    assertThat(topDocs.totalHits, equalTo(2));
    topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field5").fieldType().termQuery("1", context), 10);
    assertThat(topDocs.totalHits, equalTo(2));
    topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field5").fieldType().termQuery("2", context), 10);
    assertThat(topDocs.totalHits, equalTo(2));
    topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field5").fieldType().termQuery("3", context), 10);
    assertThat(topDocs.totalHits, equalTo(2));
    writer.close();
    reader.close();
    dir.close();
}
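A closing note on resource handling: the three manual close calls at the end are skipped entirely if an assertion above them throws. A sketch of the same lifecycle with try-with-resources, using the same test-framework helpers; the assertions are elided:

try (Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(random(), Lucene.STANDARD_ANALYZER))) {
    // ... apply mappings, parse the document, add it twice ...
    try (IndexReader reader = DirectoryReader.open(writer)) {
        IndexSearcher searcher = new IndexSearcher(reader);
        // ... run the term queries and hit-count assertions ...
    }
    // reader closes at the end of the inner block; writer and dir close here,
    // in reverse order of acquisition, even when an assertion fails
}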