
Example 16 with WhitespaceAnalyzer

Use of org.apache.lucene.analysis.core.WhitespaceAnalyzer in project fess by codelibs.

Class QueryHelperTest, method setUp.

@Override
public void setUp() throws Exception {
    super.setUp();
    queryHelper = new QueryHelper() {

        protected QueryParser getQueryParser() {
            ExtendableQueryParser queryParser = new ExtendableQueryParser(Constants.DEFAULT_FIELD, new WhitespaceAnalyzer());
            queryParser.setLowercaseExpandedTerms(false);
            queryParser.setAllowLeadingWildcard(true);
            queryParser.setDefaultOperator(QueryParser.Operator.AND);
            return queryParser;
        }
    };
    final FessConfig fessConfig = ComponentUtil.getFessConfig();
    registerMockInstance(fessConfig);
    registerMockInstance(new SystemHelper());
    inject(queryHelper);
    queryHelper.init();
}
Also used: WhitespaceAnalyzer (org.apache.lucene.analysis.core.WhitespaceAnalyzer), QueryParser (org.apache.lucene.queryparser.classic.QueryParser), ExtendableQueryParser (org.apache.lucene.queryparser.ext.ExtendableQueryParser), FessConfig (org.codelibs.fess.mylasta.direction.FessConfig)
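
The setUp above only builds the parser; as a hedged illustration of what that configuration means in practice, the following standalone sketch is not taken from the fess sources: the field name "content" and the query string are made up, and the fess-specific ExtendableQueryParser and Constants.DEFAULT_FIELD are replaced with the plain classic QueryParser. It shows whitespace-only tokenization, AND as the default operator, and leading wildcards being accepted.

import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.Query;

public class WhitespaceQueryParserSketch {
    public static void main(String[] args) throws Exception {
        // Same settings as the anonymous QueryHelper above, minus the fess-specific parts.
        QueryParser parser = new QueryParser("content", new WhitespaceAnalyzer());
        parser.setAllowLeadingWildcard(true);
        parser.setDefaultOperator(QueryParser.Operator.AND);
        // WhitespaceAnalyzer splits only on whitespace, so "quick-brown" stays a single term
        // (a StandardAnalyzer would have split it into "quick" and "brown").
        Query query = parser.parse("*fox quick-brown");
        // Should print something like: +content:*fox +content:quick-brown
        System.out.println(query);
    }
}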

Example 17 with WhitespaceAnalyzer

Use of org.apache.lucene.analysis.core.WhitespaceAnalyzer in project elasticsearch by elastic.

Class NoisyChannelSpellCheckerTests, method testNgram.

public void testNgram() throws IOException {
    RAMDirectory dir = new RAMDirectory();
    Map<String, Analyzer> mapping = new HashMap<>();
    mapping.put("body_ngram", new Analyzer() {

        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer t = new StandardTokenizer();
            ShingleFilter tf = new ShingleFilter(t, 2, 3);
            tf.setOutputUnigrams(false);
            return new TokenStreamComponents(t, new LowerCaseFilter(tf));
        }
    });
    mapping.put("body", new Analyzer() {

        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer t = new StandardTokenizer();
            return new TokenStreamComponents(t, new LowerCaseFilter(t));
        }
    });
    PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(), mapping);
    IndexWriterConfig conf = new IndexWriterConfig(wrapper);
    IndexWriter writer = new IndexWriter(dir, conf);
    String[] strings = new String[] { "Xorr the God-Jewel", "Grog the God-Crusher", "Xorn", "Walter Newell", "Wanda Maximoff", "Captain America", "American Ace", "USA Hero", "Wundarr the Aquarian", "Will o' the Wisp", "Xemnu the Titan", "Fantastic Four", "Quasar", "Quasar II" };
    for (String line : strings) {
        Document doc = new Document();
        doc.add(new Field("body", line, TextField.TYPE_NOT_STORED));
        doc.add(new Field("body_ngram", line, TextField.TYPE_NOT_STORED));
        writer.addDocument(doc);
    }
    DirectoryReader ir = DirectoryReader.open(writer);
    WordScorer wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, new BytesRef(" "), 0.5f);
    NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker();
    DirectSpellChecker spellchecker = new DirectSpellChecker();
    spellchecker.setMinQueryLength(1);
    DirectCandidateGenerator generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.95, 5);
    Result result = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1, ir, "body", wordScorer, 1, 2);
    Correction[] corrections = result.corrections;
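    // space, preTag and postTag are BytesRef fields of the enclosing test class
    // (a single space, "<em>" and "</em>"), used to join and highlight the corrections.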
    assertThat(corrections.length, equalTo(1));
    assertThat(corrections[0].join(space).utf8ToString(), equalTo("american ace"));
    assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("american <em>ace</em>"));
    assertThat(result.cutoffScore, greaterThan(0d));
    result = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1, ir, "body", wordScorer, 0, 1);
    corrections = result.corrections;
    assertThat(corrections.length, equalTo(1));
    assertThat(corrections[0].join(space).utf8ToString(), equalTo("american ame"));
    assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("american ame"));
    assertThat(result.cutoffScore, equalTo(Double.MIN_VALUE));
    suggester = new NoisyChannelSpellChecker(0.85);
    wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5f);
    corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4, ir, "body", wordScorer, 0, 2).corrections;
    assertThat(corrections.length, equalTo(4));
    assertThat(corrections[0].join(space).utf8ToString(), equalTo("xorr the god jewel"));
    assertThat(corrections[1].join(space).utf8ToString(), equalTo("xor the god jewel"));
    assertThat(corrections[2].join(space).utf8ToString(), equalTo("xorn the god jewel"));
    assertThat(corrections[3].join(space).utf8ToString(), equalTo("xorr the got jewel"));
    assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("<em>xorr</em> the <em>god</em> jewel"));
    assertThat(corrections[1].join(space, preTag, postTag).utf8ToString(), equalTo("xor the <em>god</em> jewel"));
    assertThat(corrections[2].join(space, preTag, postTag).utf8ToString(), equalTo("<em>xorn</em> the <em>god</em> jewel"));
    assertThat(corrections[3].join(space, preTag, postTag).utf8ToString(), equalTo("<em>xorr</em> the got jewel"));
    corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4, ir, "body", wordScorer, 1, 2).corrections;
    assertThat(corrections.length, equalTo(4));
    assertThat(corrections[0].join(space).utf8ToString(), equalTo("xorr the god jewel"));
    assertThat(corrections[1].join(space).utf8ToString(), equalTo("xor the god jewel"));
    assertThat(corrections[2].join(space).utf8ToString(), equalTo("xorn the god jewel"));
    assertThat(corrections[3].join(space).utf8ToString(), equalTo("xorr the got jewel"));
    // Test some of the highlighting corner cases
    suggester = new NoisyChannelSpellChecker(0.85);
    wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5f);
    corrections = suggester.getCorrections(wrapper, new BytesRef("Xor teh Got-Jewel"), generator, 4f, 4, ir, "body", wordScorer, 1, 2).corrections;
    assertThat(corrections.length, equalTo(4));
    assertThat(corrections[0].join(space).utf8ToString(), equalTo("xorr the god jewel"));
    assertThat(corrections[1].join(space).utf8ToString(), equalTo("xor the god jewel"));
    assertThat(corrections[2].join(space).utf8ToString(), equalTo("xorn the god jewel"));
    assertThat(corrections[3].join(space).utf8ToString(), equalTo("xor teh god jewel"));
    assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("<em>xorr the god</em> jewel"));
    assertThat(corrections[1].join(space, preTag, postTag).utf8ToString(), equalTo("xor <em>the god</em> jewel"));
    assertThat(corrections[2].join(space, preTag, postTag).utf8ToString(), equalTo("<em>xorn the god</em> jewel"));
    assertThat(corrections[3].join(space, preTag, postTag).utf8ToString(), equalTo("xor teh <em>god</em> jewel"));
    // test synonyms
    Analyzer analyzer = new Analyzer() {

        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer t = new StandardTokenizer();
            TokenFilter filter = new LowerCaseFilter(t);
            try {
                SolrSynonymParser parser = new SolrSynonymParser(true, false, new WhitespaceAnalyzer());
                parser.parse(new StringReader("usa => usa, america, american"));
                filter = new SynonymFilter(filter, parser.build(), true);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
            return new TokenStreamComponents(t, filter);
        }
    };
    spellchecker.setAccuracy(0.0f);
    spellchecker.setMinPrefix(1);
    spellchecker.setMinQueryLength(1);
    suggester = new NoisyChannelSpellChecker(0.85);
    wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5f);
    corrections = suggester.getCorrections(analyzer, new BytesRef("captian usa"), generator, 2, 4, ir, "body", wordScorer, 1, 2).corrections;
    assertThat(corrections[0].join(space).utf8ToString(), equalTo("captain america"));
    assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("<em>captain america</em>"));
    generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.85, 10, null, analyzer, MultiFields.getTerms(ir, "body"));
    corrections = suggester.getCorrections(analyzer, new BytesRef("captian usw"), generator, 2, 4, ir, "body", wordScorer, 1, 2).corrections;
    assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america"));
    assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("<em>captain america</em>"));
    // Make sure that user supplied text is not marked as highlighted in the presence of a synonym filter
    generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.85, 10, null, analyzer, MultiFields.getTerms(ir, "body"));
    corrections = suggester.getCorrections(analyzer, new BytesRef("captain usw"), generator, 2, 4, ir, "body", wordScorer, 1, 2).corrections;
    assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america"));
    assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("captain <em>america</em>"));
}
Also used: HashMap (java.util.HashMap), WhitespaceAnalyzer (org.apache.lucene.analysis.core.WhitespaceAnalyzer), Analyzer (org.apache.lucene.analysis.Analyzer), Document (org.apache.lucene.document.Document), Result (org.elasticsearch.search.suggest.phrase.NoisyChannelSpellChecker.Result), Field (org.apache.lucene.document.Field), TextField (org.apache.lucene.document.TextField), ShingleFilter (org.apache.lucene.analysis.shingle.ShingleFilter), StringReader (java.io.StringReader), Tokenizer (org.apache.lucene.analysis.Tokenizer), StandardTokenizer (org.apache.lucene.analysis.standard.StandardTokenizer), DirectSpellChecker (org.apache.lucene.search.spell.DirectSpellChecker), BytesRef (org.apache.lucene.util.BytesRef), TokenFilter (org.apache.lucene.analysis.TokenFilter), DirectoryReader (org.apache.lucene.index.DirectoryReader), SynonymFilter (org.apache.lucene.analysis.synonym.SynonymFilter), RAMDirectory (org.apache.lucene.store.RAMDirectory), IOException (java.io.IOException), PerFieldAnalyzerWrapper (org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper), IndexWriter (org.apache.lucene.index.IndexWriter), SolrSynonymParser (org.apache.lucene.analysis.synonym.SolrSynonymParser), LowerCaseFilter (org.apache.lucene.analysis.LowerCaseFilter), IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig)
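
The per-field routing set up at the top of this test can be observed directly. The sketch below is a minimal helper that is not part of the elasticsearch test (class and method names are invented for illustration); it prints the tokens an Analyzer emits for a given field, so the "body_ngram" field shows the 2- and 3-word shingles, "body" yields lower-cased unigrams, and any other field falls through to the WhitespaceAnalyzer default.

import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class AnalyzerTokenDump {
    // Prints every token the given analyzer produces for the given field and text.
    static void dump(Analyzer analyzer, String field, String text) throws IOException {
        try (TokenStream stream = analyzer.tokenStream(field, text)) {
            CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
            stream.reset();
            while (stream.incrementToken()) {
                System.out.println(field + " -> " + term.toString());
            }
            stream.end();
        }
    }
}

For example, dump(wrapper, "body_ngram", "Captain America") should print the single shingle "captain america", while dump(wrapper, "body", "Captain America") prints "captain" and "america".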

Example 18 with WhitespaceAnalyzer

Use of org.apache.lucene.analysis.core.WhitespaceAnalyzer in project elasticsearch by elastic.

Class NoisyChannelSpellCheckerTests, method testTrigram.

public void testTrigram() throws IOException {
    RAMDirectory dir = new RAMDirectory();
    Map<String, Analyzer> mapping = new HashMap<>();
    mapping.put("body_ngram", new Analyzer() {

        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer t = new StandardTokenizer();
            ShingleFilter tf = new ShingleFilter(t, 2, 3);
            tf.setOutputUnigrams(false);
            return new TokenStreamComponents(t, new LowerCaseFilter(tf));
        }
    });
    mapping.put("body", new Analyzer() {

        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer t = new StandardTokenizer();
            return new TokenStreamComponents(t, new LowerCaseFilter(t));
        }
    });
    PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(), mapping);
    IndexWriterConfig conf = new IndexWriterConfig(wrapper);
    IndexWriter writer = new IndexWriter(dir, conf);
    String[] strings = new String[] { "Xorr the God-Jewel", "Grog the God-Crusher", "Xorn", "Walter Newell", "Wanda Maximoff", "Captain America", "American Ace", "USA Hero", "Wundarr the Aquarian", "Will o' the Wisp", "Xemnu the Titan", "Fantastic Four", "Quasar", "Quasar II" };
    for (String line : strings) {
        Document doc = new Document();
        doc.add(new Field("body", line, TextField.TYPE_NOT_STORED));
        doc.add(new Field("body_ngram", line, TextField.TYPE_NOT_STORED));
        writer.addDocument(doc);
    }
    DirectoryReader ir = DirectoryReader.open(writer);
    WordScorer wordScorer = new LinearInterpolatingScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5, 0.4, 0.1);
    NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker();
    DirectSpellChecker spellchecker = new DirectSpellChecker();
    spellchecker.setMinQueryLength(1);
    DirectCandidateGenerator generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.95, 5);
    Correction[] corrections = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1, ir, "body", wordScorer, 1, 3).corrections;
    assertThat(corrections.length, equalTo(1));
    assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ace"));
    corrections = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1, ir, "body", wordScorer, 1, 1).corrections;
    assertThat(corrections.length, equalTo(0));
    //        assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ape"));
    wordScorer = new LinearInterpolatingScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5, 0.4, 0.1);
    corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4, ir, "body", wordScorer, 0, 3).corrections;
    assertThat(corrections.length, equalTo(4));
    assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
    assertThat(corrections[1].join(new BytesRef(" ")).utf8ToString(), equalTo("xor the god jewel"));
    assertThat(corrections[2].join(new BytesRef(" ")).utf8ToString(), equalTo("xorn the god jewel"));
    assertThat(corrections[3].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the got jewel"));
    corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4, ir, "body", wordScorer, 1, 3).corrections;
    assertThat(corrections.length, equalTo(4));
    assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
    assertThat(corrections[1].join(new BytesRef(" ")).utf8ToString(), equalTo("xor the god jewel"));
    assertThat(corrections[2].join(new BytesRef(" ")).utf8ToString(), equalTo("xorn the god jewel"));
    assertThat(corrections[3].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the got jewel"));
    corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 1, ir, "body", wordScorer, 100, 3).corrections;
    assertThat(corrections.length, equalTo(1));
    assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
    // test synonyms
    Analyzer analyzer = new Analyzer() {

        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer t = new StandardTokenizer();
            TokenFilter filter = new LowerCaseFilter(t);
            try {
                SolrSynonymParser parser = new SolrSynonymParser(true, false, new WhitespaceAnalyzer());
                parser.parse(new StringReader("usa => usa, america, american"));
                filter = new SynonymFilter(filter, parser.build(), true);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
            return new TokenStreamComponents(t, filter);
        }
    };
    spellchecker.setAccuracy(0.0f);
    spellchecker.setMinPrefix(1);
    spellchecker.setMinQueryLength(1);
    suggester = new NoisyChannelSpellChecker(0.95);
    wordScorer = new LinearInterpolatingScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, new BytesRef(" "), 0.5, 0.4, 0.1);
    corrections = suggester.getCorrections(analyzer, new BytesRef("captian usa"), generator, 2, 4, ir, "body", wordScorer, 1, 3).corrections;
    assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america"));
    generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.95, 10, null, analyzer, MultiFields.getTerms(ir, "body"));
    corrections = suggester.getCorrections(analyzer, new BytesRef("captian usw"), generator, 2, 4, ir, "body", wordScorer, 1, 3).corrections;
    assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america"));
    wordScorer = new StupidBackoffScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.4);
    corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 2, ir, "body", wordScorer, 0, 3).corrections;
    assertThat(corrections.length, equalTo(2));
    assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
    assertThat(corrections[1].join(new BytesRef(" ")).utf8ToString(), equalTo("xor the god jewel"));
}
Also used: HashMap (java.util.HashMap), WhitespaceAnalyzer (org.apache.lucene.analysis.core.WhitespaceAnalyzer), Analyzer (org.apache.lucene.analysis.Analyzer), Document (org.apache.lucene.document.Document), Field (org.apache.lucene.document.Field), TextField (org.apache.lucene.document.TextField), ShingleFilter (org.apache.lucene.analysis.shingle.ShingleFilter), StringReader (java.io.StringReader), Tokenizer (org.apache.lucene.analysis.Tokenizer), StandardTokenizer (org.apache.lucene.analysis.standard.StandardTokenizer), DirectSpellChecker (org.apache.lucene.search.spell.DirectSpellChecker), BytesRef (org.apache.lucene.util.BytesRef), TokenFilter (org.apache.lucene.analysis.TokenFilter), DirectoryReader (org.apache.lucene.index.DirectoryReader), SynonymFilter (org.apache.lucene.analysis.synonym.SynonymFilter), RAMDirectory (org.apache.lucene.store.RAMDirectory), IOException (java.io.IOException), PerFieldAnalyzerWrapper (org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper), IndexWriter (org.apache.lucene.index.IndexWriter), SolrSynonymParser (org.apache.lucene.analysis.synonym.SolrSynonymParser), LowerCaseFilter (org.apache.lucene.analysis.LowerCaseFilter), IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig)
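
The n-gram word scorers in both tests rank candidates that ultimately come from the DirectSpellChecker. As a rough, standalone illustration that is not part of the test (the reader argument and the misspelled term "ame" are assumed, and Lucene's own SuggestMode enum is used), the checker alone can be queried like this:

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.spell.DirectSpellChecker;
import org.apache.lucene.search.spell.SuggestMode;
import org.apache.lucene.search.spell.SuggestWord;

public class DirectSpellCheckerSketch {
    // Prints up to five term-level corrections for a misspelled term in the "body" field.
    static void suggest(DirectoryReader reader) throws Exception {
        DirectSpellChecker checker = new DirectSpellChecker();
        checker.setMinQueryLength(1); // accept very short inputs, as in the tests above
        SuggestWord[] suggestions = checker.suggestSimilar(
                new Term("body", "ame"), 5, reader, SuggestMode.SUGGEST_MORE_POPULAR);
        for (SuggestWord suggestion : suggestions) {
            System.out.println(suggestion.string + " (freq=" + suggestion.freq + ", score=" + suggestion.score + ")");
        }
    }
}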

Example 19 with WhitespaceAnalyzer

Use of org.apache.lucene.analysis.core.WhitespaceAnalyzer in project elasticsearch by elastic.

Class CandidateQueryTests, method init.

@Before
public void init() throws Exception {
    directory = newDirectory();
    IndexWriterConfig config = new IndexWriterConfig(new WhitespaceAnalyzer());
    config.setMergePolicy(NoMergePolicy.INSTANCE);
    indexWriter = new IndexWriter(directory, config);
    String indexName = "test";
    IndexService indexService = createIndex(indexName, Settings.EMPTY);
    mapperService = indexService.mapperService();
    String mapper = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
            .startObject("int_field").field("type", "integer").endObject()
            .startObject("long_field").field("type", "long").endObject()
            .startObject("half_float_field").field("type", "half_float").endObject()
            .startObject("float_field").field("type", "float").endObject()
            .startObject("double_field").field("type", "double").endObject()
            .startObject("ip_field").field("type", "ip").endObject()
            .startObject("field").field("type", "keyword").endObject()
            .endObject().endObject().endObject().string();
    documentMapper = mapperService.merge("type", new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE, true);
    String queryField = "query_field";
    String mappingType = "query";
    String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject(mappingType).startObject("properties")
            .startObject(queryField).field("type", "percolator").endObject()
            .endObject().endObject().endObject().string();
    mapperService.merge(mappingType, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true);
    fieldMapper = (PercolatorFieldMapper) mapperService.documentMapper(mappingType).mappers().getMapper(queryField);
    fieldType = (PercolatorFieldMapper.FieldType) fieldMapper.fieldType();
    queries = new ArrayList<>();
    queryStore = ctx -> docId -> this.queries.get(docId);
}
Also used: WhitespaceAnalyzer (org.apache.lucene.analysis.core.WhitespaceAnalyzer), IndexWriter (org.apache.lucene.index.IndexWriter), IndexService (org.elasticsearch.index.IndexService), CompressedXContent (org.elasticsearch.common.compress.CompressedXContent), IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig), Before (org.junit.Before)

Example 20 with WhitespaceAnalyzer

Use of org.apache.lucene.analysis.core.WhitespaceAnalyzer in project elasticsearch by elastic.

Class CandidateQueryTests, method testDuelSpecificQueries.

public void testDuelSpecificQueries() throws Exception {
    List<ParseContext.Document> documents = new ArrayList<>();
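    // addQuery(...) is a test helper (not shown in this excerpt) that records the query in the
    // queries list backing queryStore and adds a corresponding percolator document to `documents`.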
    CommonTermsQuery commonTermsQuery = new CommonTermsQuery(BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD, 128);
    commonTermsQuery.add(new Term("field", "quick"));
    commonTermsQuery.add(new Term("field", "brown"));
    commonTermsQuery.add(new Term("field", "fox"));
    addQuery(commonTermsQuery, documents);
    BlendedTermQuery blendedTermQuery = BlendedTermQuery.booleanBlendedQuery(new Term[] { new Term("field", "quick"), new Term("field", "brown"), new Term("field", "fox") }, false);
    addQuery(blendedTermQuery, documents);
    SpanNearQuery spanNearQuery = new SpanNearQuery.Builder("field", true).addClause(new SpanTermQuery(new Term("field", "quick"))).addClause(new SpanTermQuery(new Term("field", "brown"))).addClause(new SpanTermQuery(new Term("field", "fox"))).build();
    addQuery(spanNearQuery, documents);
    SpanNearQuery spanNearQuery2 = new SpanNearQuery.Builder("field", true).addClause(new SpanTermQuery(new Term("field", "the"))).addClause(new SpanTermQuery(new Term("field", "lazy"))).addClause(new SpanTermQuery(new Term("field", "doc"))).build();
    SpanOrQuery spanOrQuery = new SpanOrQuery(spanNearQuery, spanNearQuery2);
    addQuery(spanOrQuery, documents);
    SpanNotQuery spanNotQuery = new SpanNotQuery(spanNearQuery, spanNearQuery);
    addQuery(spanNotQuery, documents);
    long lowerLong = randomIntBetween(0, 256);
    long upperLong = lowerLong + randomIntBetween(0, 32);
    addQuery(LongPoint.newRangeQuery("long_field", lowerLong, upperLong), documents);
    indexWriter.addDocuments(documents);
    indexWriter.close();
    directoryReader = DirectoryReader.open(directory);
    IndexSearcher shardSearcher = newSearcher(directoryReader);
    // Disable query cache, because ControlQuery cannot be cached...
    shardSearcher.setQueryCache(null);
    Document document = new Document();
    document.add(new TextField("field", "the quick brown fox jumps over the lazy dog", Field.Store.NO));
    long randomLong = randomIntBetween((int) lowerLong, (int) upperLong);
    document.add(new LongPoint("long_field", randomLong));
    MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer());
    duelRun(queryStore, memoryIndex, shardSearcher);
}
Also used: IndexSearcher (org.apache.lucene.search.IndexSearcher), WhitespaceAnalyzer (org.apache.lucene.analysis.core.WhitespaceAnalyzer), ArrayList (java.util.ArrayList), BlendedTermQuery (org.apache.lucene.queries.BlendedTermQuery), Term (org.apache.lucene.index.Term), LongPoint (org.apache.lucene.document.LongPoint), Document (org.apache.lucene.document.Document), SpanOrQuery (org.apache.lucene.search.spans.SpanOrQuery), CommonTermsQuery (org.apache.lucene.queries.CommonTermsQuery), SpanNotQuery (org.apache.lucene.search.spans.SpanNotQuery), MemoryIndex (org.apache.lucene.index.memory.MemoryIndex), SpanTermQuery (org.apache.lucene.search.spans.SpanTermQuery), TextField (org.apache.lucene.document.TextField), SpanNearQuery (org.apache.lucene.search.spans.SpanNearQuery)
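
MemoryIndex.fromDocument is the percolator-side trick in this test: the candidate document becomes a tiny single-document in-memory index that the stored queries are run against. Below is a minimal hedged sketch of the same idea with a hand-built MemoryIndex; the field name, text, and query are assumed for illustration and are not taken from the test.

import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.TermQuery;

public class MemoryIndexSketch {
    public static void main(String[] args) {
        MemoryIndex memoryIndex = new MemoryIndex();
        // Index a single field of a single document entirely in memory.
        memoryIndex.addField("field", "the quick brown fox jumps over the lazy dog", new WhitespaceAnalyzer());
        // search() returns a score greater than zero if the query matches the in-memory document.
        float score = memoryIndex.search(new TermQuery(new Term("field", "fox")));
        System.out.println(score > 0.0f ? "matches" : "no match");
    }
}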

Aggregations

WhitespaceAnalyzer (org.apache.lucene.analysis.core.WhitespaceAnalyzer): 37 usages
IndexWriter (org.apache.lucene.index.IndexWriter): 17
IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig): 17
Document (org.apache.lucene.document.Document): 16
Analyzer (org.apache.lucene.analysis.Analyzer): 9
Test (org.junit.Test): 9
NamedList (org.apache.solr.common.util.NamedList): 8
ArrayList (java.util.ArrayList): 7
Token (org.apache.lucene.analysis.Token): 7
TextField (org.apache.lucene.document.TextField): 7
IndexSearcher (org.apache.lucene.search.IndexSearcher): 6
IOException (java.io.IOException): 5
HashMap (java.util.HashMap): 5
Field (org.apache.lucene.document.Field): 5
DirectoryTaxonomyWriter (org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter): 5
DirectoryReader (org.apache.lucene.index.DirectoryReader): 5
TokenStream (org.apache.lucene.analysis.TokenStream): 4
PerFieldAnalyzerWrapper (org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper): 4
LongPoint (org.apache.lucene.document.LongPoint): 4
BooleanQuery (org.apache.lucene.search.BooleanQuery): 4