Example 61 with StandardAnalyzer

Use of org.apache.lucene.analysis.standard.StandardAnalyzer in project crate (by crate).

The class LuceneBatchIteratorBenchmark, method createLuceneBatchIterator.

@Setup
public void createLuceneBatchIterator() throws Exception {
    IndexWriter iw = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new StandardAnalyzer()));
    String columnName = "x";
    for (int i = 0; i < 10_000_000; i++) {
        Document doc = new Document();
        doc.add(new NumericDocValuesField(columnName, i));
        iw.addDocument(doc);
    }
    iw.commit();
    iw.forceMerge(1, true);
    indexSearcher = new IndexSearcher(DirectoryReader.open(iw, true));
    IntegerColumnReference columnReference = new IntegerColumnReference(columnName);
    columnRefs = Collections.singletonList(columnReference);
    collectorContext = new CollectorContext(mock(IndexFieldDataService.class), new CollectorFieldsVisitor(0));
}
Also used: IndexSearcher (org.apache.lucene.search.IndexSearcher), NumericDocValuesField (org.apache.lucene.document.NumericDocValuesField), IndexWriter (org.apache.lucene.index.IndexWriter), StandardAnalyzer (org.apache.lucene.analysis.standard.StandardAnalyzer), IntegerColumnReference (io.crate.operation.reference.doc.lucene.IntegerColumnReference), CollectorContext (io.crate.operation.reference.doc.lucene.CollectorContext), Document (org.apache.lucene.document.Document), RAMDirectory (org.apache.lucene.store.RAMDirectory), IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig)
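
The benchmark above fills an in-memory index with ten million documents that carry nothing but a NumericDocValuesField and then opens a near-real-time searcher over the writer. To see what the batch iterator ultimately reads, here is a minimal stand-alone sketch (not part of the crate sources) that indexes a handful of documents the same way and pulls the column back out through a sorted MatchAllDocsQuery. The MatchAllDocsQuery, Sort and FieldDoc usage are additions of this write-up, and the two-argument DirectoryReader.open(iw, true) assumes the same Lucene 5/6-era API the benchmark itself uses.

IndexWriter iw = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new StandardAnalyzer()));
for (int i = 0; i < 5; i++) {
    Document doc = new Document();
    // same shape as the benchmark: a single numeric doc-values column named "x"
    doc.add(new NumericDocValuesField("x", i));
    iw.addDocument(doc);
}
iw.commit();
IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(iw, true));
// read the column back by sorting a match-all query on the doc-values field
Sort sort = new Sort(new SortField("x", SortField.Type.LONG));
TopFieldDocs hits = searcher.search(new MatchAllDocsQuery(), 5, sort);
for (ScoreDoc sd : hits.scoreDocs) {
    Long value = (Long) ((FieldDoc) sd).fields[0];
    System.out.println(value); // 0, 1, 2, 3, 4
}

Beyond the classes listed above, the sketch uses org.apache.lucene.search.MatchAllDocsQuery, Sort, SortField, TopFieldDocs, ScoreDoc and FieldDoc.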

Example 62 with StandardAnalyzer

Use of org.apache.lucene.analysis.standard.StandardAnalyzer in project crate (by crate).

The class LuceneBatchIteratorTest, method prepareSearcher.

@Before
public void prepareSearcher() throws Exception {
    IndexWriter iw = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new StandardAnalyzer()));
    String columnName = "x";
    expectedResult = new ArrayList<>(20);
    for (long i = 0; i < 20; i++) {
        Document doc = new Document();
        doc.add(new NumericDocValuesField(columnName, i));
        iw.addDocument(doc);
        expectedResult.add(new Object[] { i });
    }
    iw.commit();
    indexSearcher = new IndexSearcher(DirectoryReader.open(iw, true));
    LongColumnReference columnReference = new LongColumnReference(columnName);
    columnRefs = Collections.singletonList(columnReference);
}
Also used: IndexSearcher (org.apache.lucene.search.IndexSearcher), NumericDocValuesField (org.apache.lucene.document.NumericDocValuesField), IndexWriter (org.apache.lucene.index.IndexWriter), StandardAnalyzer (org.apache.lucene.analysis.standard.StandardAnalyzer), LongColumnReference (io.crate.operation.reference.doc.lucene.LongColumnReference), Document (org.apache.lucene.document.Document), RAMDirectory (org.apache.lucene.store.RAMDirectory), IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig), Before (org.junit.Before)
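
The test keeps an expectedResult list in insertion order, so it may help to see how the same values can be read straight off the index without crate's LuceneBatchIterator. The sketch below is illustrative only: it walks the leaves of the searcher's reader with the random-access NumericDocValues API (the Lucene 5/6-era signature implied by DirectoryReader.open(iw, true) above), and the variable names are its own; it also uses org.apache.lucene.index.LeafReaderContext and NumericDocValues.

List<Object[]> collected = new ArrayList<>();
for (LeafReaderContext leaf : indexSearcher.getIndexReader().leaves()) {
    // one NumericDocValues instance per segment for the "x" column
    NumericDocValues values = leaf.reader().getNumericDocValues("x");
    for (int docId = 0; docId < leaf.reader().maxDoc(); docId++) {
        collected.add(new Object[] { values.get(docId) });
    }
}
// with a single freshly committed segment, collected matches expectedResult row for row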

Example 63 with StandardAnalyzer

Use of org.apache.lucene.analysis.standard.StandardAnalyzer in project crate (by crate).

The class OrderedLuceneBatchIteratorFactoryTest, method prepareSearchers.

@Before
public void prepareSearchers() throws Exception {
    IndexWriter iw1 = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new StandardAnalyzer()));
    IndexWriter iw2 = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new StandardAnalyzer()));
    expectedResult = LongStream.range(0, 20).mapToObj(i -> new Object[] { i }).collect(Collectors.toList());
    // expect descending order to differentiate between insert order
    expectedResult.sort(Comparator.comparingLong((Object[] o) -> ((long) o[0])).reversed());
    for (int i = 0; i < 20; i++) {
        Document doc = new Document();
        doc.add(new NumericDocValuesField(columnName, i));
        if (i % 2 == 0) {
            iw1.addDocument(doc);
        } else {
            iw2.addDocument(doc);
        }
    }
    iw1.commit();
    iw2.commit();
    searcher1 = new IndexSearcher(DirectoryReader.open(iw1, true));
    searcher2 = new IndexSearcher(DirectoryReader.open(iw2, true));
    fieldTypeLookup = columnName -> {
        LongFieldMapper.LongFieldType longFieldType = new LongFieldMapper.LongFieldType();
        longFieldType.setNames(new MappedFieldType.Names(columnName));
        return longFieldType;
    };
    orderBy = new OrderBy(Collections.singletonList(reference), reverseFlags, nullsFirst);
}
Also used: OrderBy (io.crate.analyze.OrderBy), LongFieldMapper (org.elasticsearch.index.mapper.core.LongFieldMapper), Document (org.apache.lucene.document.Document), RAMDirectory (org.apache.lucene.store.RAMDirectory), NumericDocValuesField (org.apache.lucene.document.NumericDocValuesField), IndexWriter (org.apache.lucene.index.IndexWriter), StandardAnalyzer (org.apache.lucene.analysis.standard.StandardAnalyzer), MappedFieldType (org.elasticsearch.index.mapper.MappedFieldType), IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig), Before (org.junit.Before)
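
OrderedLuceneBatchIteratorFactory has to return the rows from both searchers as one descending stream even though the even and odd values live in different indexes. A rough plain-Lucene analogue of that merge, shown purely for orientation and not taken from the crate code, is to run the same descending sort against each searcher and combine the per-searcher results with TopDocs.merge; the MatchAllDocsQuery and the limit of 20 are choices made for this sketch.

// descending sort on the same doc-values column used above (reverse = true)
Sort sort = new Sort(new SortField(columnName, SortField.Type.LONG, true));
TopFieldDocs hits1 = searcher1.search(new MatchAllDocsQuery(), 20, sort);
TopFieldDocs hits2 = searcher2.search(new MatchAllDocsQuery(), 20, sort);
TopDocs merged = TopDocs.merge(sort, 20, new TopFieldDocs[] { hits1, hits2 });
for (ScoreDoc sd : merged.scoreDocs) {
    // prints 19, 18, ..., 0 -- the order expectedResult was sorted into
    System.out.println(((FieldDoc) sd).fields[0]);
}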

Example 64 with StandardAnalyzer

Use of org.apache.lucene.analysis.standard.StandardAnalyzer in project elasticsearch (by elastic).

The class BitSetFilterCacheTests, method testListener.

public void testListener() throws IOException {
    IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(new LogByteSizeMergePolicy()));
    Document document = new Document();
    document.add(new StringField("field", "value", Field.Store.NO));
    writer.addDocument(document);
    writer.commit();
    final DirectoryReader writerReader = DirectoryReader.open(writer);
    final IndexReader reader = ElasticsearchDirectoryReader.wrap(writerReader, new ShardId("test", "_na_", 0));
    final AtomicLong stats = new AtomicLong();
    final AtomicInteger onCacheCalls = new AtomicInteger();
    final AtomicInteger onRemoveCalls = new AtomicInteger();
    final BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, new BitsetFilterCache.Listener() {

        @Override
        public void onCache(ShardId shardId, Accountable accountable) {
            onCacheCalls.incrementAndGet();
            stats.addAndGet(accountable.ramBytesUsed());
            if (writerReader != reader) {
                assertNotNull(shardId);
                assertEquals("test", shardId.getIndexName());
                assertEquals(0, shardId.id());
            } else {
                assertNull(shardId);
            }
        }

        @Override
        public void onRemoval(ShardId shardId, Accountable accountable) {
            onRemoveCalls.incrementAndGet();
            stats.addAndGet(-accountable.ramBytesUsed());
            if (writerReader != reader) {
                assertNotNull(shardId);
                assertEquals("test", shardId.getIndexName());
                assertEquals(0, shardId.id());
            } else {
                assertNull(shardId);
            }
        }
    });
    BitSetProducer filter = cache.getBitSetProducer(new TermQuery(new Term("field", "value")));
    assertThat(matchCount(filter, reader), equalTo(1));
    assertTrue(stats.get() > 0);
    assertEquals(1, onCacheCalls.get());
    assertEquals(0, onRemoveCalls.get());
    IOUtils.close(reader, writer);
    assertEquals(1, onRemoveCalls.get());
    assertEquals(0, stats.get());
}
Also used: TermQuery (org.apache.lucene.search.TermQuery), ElasticsearchDirectoryReader (org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader), DirectoryReader (org.apache.lucene.index.DirectoryReader), Accountable (org.apache.lucene.util.Accountable), Term (org.apache.lucene.index.Term), Document (org.apache.lucene.document.Document), RAMDirectory (org.apache.lucene.store.RAMDirectory), ShardId (org.elasticsearch.index.shard.ShardId), AtomicLong (java.util.concurrent.atomic.AtomicLong), LogByteSizeMergePolicy (org.apache.lucene.index.LogByteSizeMergePolicy), IndexWriter (org.apache.lucene.index.IndexWriter), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), BitSetProducer (org.apache.lucene.search.join.BitSetProducer), StandardAnalyzer (org.apache.lucene.analysis.standard.StandardAnalyzer), StringField (org.apache.lucene.document.StringField), IndexReader (org.apache.lucene.index.IndexReader), IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig)
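
The assertions rely on a matchCount(filter, reader) helper that is defined elsewhere in BitSetFilterCacheTests and not shown here. A plausible shape for such a helper, offered only as an assumption about what it does rather than the actual elasticsearch code, is to ask the BitSetProducer for the cached BitSet of every leaf and sum the cardinalities; it additionally needs org.apache.lucene.util.BitSet and org.apache.lucene.index.LeafReaderContext.

private static int matchCount(BitSetProducer producer, IndexReader reader) throws IOException {
    int count = 0;
    for (LeafReaderContext leaf : reader.leaves()) {
        // getBitSet returns the (possibly cached) bits for this segment, or null if nothing matches
        BitSet bitSet = producer.getBitSet(leaf);
        if (bitSet != null) {
            count += bitSet.cardinality();
        }
    }
    return count;
}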

Example 65 with StandardAnalyzer

Use of org.apache.lucene.analysis.standard.StandardAnalyzer in project elasticsearch (by elastic).

The class BitSetFilterCacheTests, method testInvalidateEntries.

public void testInvalidateEntries() throws Exception {
    IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(new LogByteSizeMergePolicy()));
    Document document = new Document();
    document.add(new StringField("field", "value", Field.Store.NO));
    writer.addDocument(document);
    writer.commit();
    document = new Document();
    document.add(new StringField("field", "value", Field.Store.NO));
    writer.addDocument(document);
    writer.commit();
    document = new Document();
    document.add(new StringField("field", "value", Field.Store.NO));
    writer.addDocument(document);
    writer.commit();
    DirectoryReader reader = DirectoryReader.open(writer);
    reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId("test", "_na_", 0));
    IndexSearcher searcher = new IndexSearcher(reader);
    BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, new BitsetFilterCache.Listener() {

        @Override
        public void onCache(ShardId shardId, Accountable accountable) {
        }

        @Override
        public void onRemoval(ShardId shardId, Accountable accountable) {
        }
    });
    BitSetProducer filter = cache.getBitSetProducer(new TermQuery(new Term("field", "value")));
    assertThat(matchCount(filter, reader), equalTo(3));
    // now cached
    assertThat(matchCount(filter, reader), equalTo(3));
    // There are 3 segments
    assertThat(cache.getLoadedFilters().weight(), equalTo(3L));
    writer.forceMerge(1);
    reader.close();
    reader = DirectoryReader.open(writer);
    reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId("test", "_na_", 0));
    searcher = new IndexSearcher(reader);
    assertThat(matchCount(filter, reader), equalTo(3));
    // now cached
    assertThat(matchCount(filter, reader), equalTo(3));
    // Only one segment now, so the size must be 1
    assertThat(cache.getLoadedFilters().weight(), equalTo(1L));
    reader.close();
    writer.close();
    // There is no reference from readers and writer to any segment in the test index, so the size in the fbs cache must be 0
    assertThat(cache.getLoadedFilters().weight(), equalTo(0L));
}
Also used: IndexSearcher (org.apache.lucene.search.IndexSearcher), TermQuery (org.apache.lucene.search.TermQuery), ElasticsearchDirectoryReader (org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader), DirectoryReader (org.apache.lucene.index.DirectoryReader), Accountable (org.apache.lucene.util.Accountable), Term (org.apache.lucene.index.Term), Document (org.apache.lucene.document.Document), RAMDirectory (org.apache.lucene.store.RAMDirectory), ShardId (org.elasticsearch.index.shard.ShardId), LogByteSizeMergePolicy (org.apache.lucene.index.LogByteSizeMergePolicy), IndexWriter (org.apache.lucene.index.IndexWriter), BitSetProducer (org.apache.lucene.search.join.BitSetProducer), StandardAnalyzer (org.apache.lucene.analysis.standard.StandardAnalyzer), StringField (org.apache.lucene.document.StringField), IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig)
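
The weight assertions (3L, then 1L, then 0L) track one cache entry per live segment core. To watch the segment count itself, a small hedged sketch like the one below, assuming a writer that has just gone through the three commits above, makes the progression visible; it is not part of the elasticsearch test.

DirectoryReader beforeMerge = DirectoryReader.open(writer);
System.out.println(beforeMerge.leaves().size()); // 3 segments, one per commit
beforeMerge.close();
writer.forceMerge(1);
DirectoryReader afterMerge = DirectoryReader.open(writer);
System.out.println(afterMerge.leaves().size());  // 1 segment after the forced merge
afterMerge.close();
// once every reader and the writer are closed, nothing references the segments
// and the cache weight drops to 0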

Aggregations

StandardAnalyzer (org.apache.lucene.analysis.standard.StandardAnalyzer): 112
Analyzer (org.apache.lucene.analysis.Analyzer): 37
IndexWriter (org.apache.lucene.index.IndexWriter): 36
Document (org.apache.lucene.document.Document): 29
IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig): 29
IndexSearcher (org.apache.lucene.search.IndexSearcher): 24
Term (org.apache.lucene.index.Term): 22
RAMDirectory (org.apache.lucene.store.RAMDirectory): 21
Test (org.junit.Test): 21
Query (org.apache.lucene.search.Query): 20
BooleanQuery (org.apache.lucene.search.BooleanQuery): 19
TermQuery (org.apache.lucene.search.TermQuery): 19
IOException (java.io.IOException): 16
Before (org.junit.Before): 15
IndexReader (org.apache.lucene.index.IndexReader): 14
HashMap (java.util.HashMap): 13
Field (org.apache.lucene.document.Field): 13
ArrayList (java.util.ArrayList): 12
QueryParser (org.apache.lucene.queryparser.classic.QueryParser): 12
Directory (org.apache.lucene.store.Directory): 12