Example 6 with StandardAnalyzer

Use of org.apache.lucene.analysis.standard.StandardAnalyzer in project elasticsearch by elastic.

Class ParentFieldMapperTests, method testNoParentNullFieldCreatedIfNoParentSpecified.

public void testNoParentNullFieldCreatedIfNoParentSpecified() throws Exception {
    Index index = new Index("_index", "testUUID");
    IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(index, Settings.EMPTY);
    NamedAnalyzer namedAnalyzer = new NamedAnalyzer("default", AnalyzerScope.INDEX, new StandardAnalyzer());
    IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, namedAnalyzer, namedAnalyzer, namedAnalyzer, Collections.emptyMap(), Collections.emptyMap());
    SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
    MapperService mapperService = new MapperService(indexSettings, indexAnalyzers, xContentRegistry(), similarityService, new IndicesModule(emptyList()).getMapperRegistry(), () -> null);
    XContentBuilder mappingSource = jsonBuilder().startObject().startObject("some_type").startObject("properties").endObject().endObject().endObject();
    mapperService.merge("some_type", new CompressedXContent(mappingSource.string()), MergeReason.MAPPING_UPDATE, false);
    Set<String> allFields = new HashSet<>(mapperService.simpleMatchToIndexNames("*"));
    // the _parent meta field is always registered, but no _parent#null join field should be
    // created when the mapping does not declare a parent type
    assertTrue(allFields.contains("_parent"));
    assertFalse(allFields.contains("_parent#null"));
}
Also used: IndicesModule (org.elasticsearch.indices.IndicesModule), NamedAnalyzer (org.elasticsearch.index.analysis.NamedAnalyzer), IndexSettings (org.elasticsearch.index.IndexSettings), Index (org.elasticsearch.index.Index), StandardAnalyzer (org.apache.lucene.analysis.standard.StandardAnalyzer), SimilarityService (org.elasticsearch.index.similarity.SimilarityService), CompressedXContent (org.elasticsearch.common.compress.CompressedXContent), IndexAnalyzers (org.elasticsearch.index.analysis.IndexAnalyzers), XContentBuilder (org.elasticsearch.common.xcontent.XContentBuilder), HashSet (java.util.HashSet)
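
For reference, here is a minimal standalone sketch (not taken from the Elasticsearch sources; the field name and input text are illustrative) of what the StandardAnalyzer registered above as the "default" analyzer produces for a piece of text. Depending on the Lucene version, the no-arg constructor may also remove default English stop words such as "the".

import java.io.IOException;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class StandardAnalyzerSketch {
    public static void main(String[] args) throws IOException {
        try (StandardAnalyzer analyzer = new StandardAnalyzer()) {
            // tokenStream() lower-cases the input and splits it on punctuation and whitespace
            // using the Unicode segmentation rules implemented by StandardTokenizer
            TokenStream stream = analyzer.tokenStream("body", "The Quick, Brown Fox!");
            CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
            stream.reset();
            while (stream.incrementToken()) {
                System.out.println(term.toString()); // quick, brown, fox (and possibly "the")
            }
            stream.end();
            stream.close();
        }
    }
}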

Example 7 with StandardAnalyzer

Use of org.apache.lucene.analysis.standard.StandardAnalyzer in project elasticsearch by elastic.

Class SimpleQueryParserTests, method testAnalyzeWildcard.

public void testAnalyzeWildcard() {
    SimpleQueryParser.Settings settings = new SimpleQueryParser.Settings();
    settings.analyzeWildcard(true);
    Map<String, Float> weights = new HashMap<>();
    weights.put("field1", 1.0f);
    SimpleQueryParser parser = new MockSimpleQueryParser(new StandardAnalyzer(), weights, -1, settings);
    for (Operator op : Operator.values()) {
        BooleanClause.Occur defaultOp = op.toBooleanClauseOccur();
        parser.setDefaultOperator(defaultOp);
        Query query = parser.parse("first foo-bar-foobar* last");
        // the analyzed form of "foo-bar-foobar*" becomes a nested clause: terms foo and bar plus a prefix query for foobar
        Query expectedQuery = new BooleanQuery.Builder()
                .add(new BooleanClause(new TermQuery(new Term("field1", "first")), defaultOp))
                .add(new BooleanQuery.Builder()
                        .add(new BooleanClause(new TermQuery(new Term("field1", "foo")), defaultOp))
                        .add(new BooleanClause(new TermQuery(new Term("field1", "bar")), defaultOp))
                        .add(new BooleanClause(new PrefixQuery(new Term("field1", "foobar")), defaultOp))
                        .build(), defaultOp)
                .add(new BooleanClause(new TermQuery(new Term("field1", "last")), defaultOp))
                .build();
        assertThat(query, equalTo(expectedQuery));
    }
}
Also used: SpanTermQuery (org.apache.lucene.search.spans.SpanTermQuery), TermQuery (org.apache.lucene.search.TermQuery), BooleanQuery (org.apache.lucene.search.BooleanQuery), Query (org.apache.lucene.search.Query), SpanQuery (org.apache.lucene.search.spans.SpanQuery), SpanNearQuery (org.apache.lucene.search.spans.SpanNearQuery), PrefixQuery (org.apache.lucene.search.PrefixQuery), SynonymQuery (org.apache.lucene.search.SynonymQuery), SpanOrQuery (org.apache.lucene.search.spans.SpanOrQuery), HashMap (java.util.HashMap), Term (org.apache.lucene.index.Term), BooleanClause (org.apache.lucene.search.BooleanClause), StandardAnalyzer (org.apache.lucene.analysis.standard.StandardAnalyzer), Settings (org.elasticsearch.common.settings.Settings), IndexSettings (org.elasticsearch.index.IndexSettings)
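
The SimpleQueryParser exercised here is Elasticsearch's subclass of Lucene's parser. As a point of comparison, a minimal sketch using Lucene's own org.apache.lucene.queryparser.simple.SimpleQueryParser with a StandardAnalyzer might look as follows; the field name, weight, and query text are illustrative.

import java.util.Collections;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.simple.SimpleQueryParser;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.Query;

public class SimpleQueryParserSketch {
    public static void main(String[] args) {
        SimpleQueryParser parser = new SimpleQueryParser(
                new StandardAnalyzer(), Collections.singletonMap("field1", 1.0f));
        parser.setDefaultOperator(BooleanClause.Occur.MUST);
        // a trailing "*" marks a prefix query in the simple query syntax
        Query query = parser.parse("first foobar* last");
        System.out.println(query);
    }
}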

Example 8 with StandardAnalyzer

Use of org.apache.lucene.analysis.standard.StandardAnalyzer in project elasticsearch by elastic.

Class SimpleQueryParserTests, method testQuoteFieldSuffix.

public void testQuoteFieldSuffix() {
    SimpleQueryParser.Settings sqpSettings = new SimpleQueryParser.Settings();
    sqpSettings.quoteFieldSuffix(".quote");
    Settings indexSettings = Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
            .put(IndexMetaData.SETTING_INDEX_UUID, "some_uuid")
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .build();
    IndexMetaData indexState = IndexMetaData.builder("index").settings(indexSettings).build();
    IndexSettings settings = new IndexSettings(indexState, Settings.EMPTY);
    QueryShardContext mockShardContext = new QueryShardContext(0, settings, null, null, null, null, null, xContentRegistry(), null, null, System::currentTimeMillis) {

        @Override
        public MappedFieldType fieldMapper(String name) {
            return new MockFieldMapper.FakeFieldType();
        }
    };
    SimpleQueryParser parser = new SimpleQueryParser(new StandardAnalyzer(), Collections.singletonMap("foo", 1f), -1, sqpSettings, mockShardContext);
    // foo.quote is mapped here, so the quoted form is parsed against the .quote sub-field
    assertEquals(new TermQuery(new Term("foo", "bar")), parser.parse("bar"));
    assertEquals(new TermQuery(new Term("foo.quote", "bar")), parser.parse("\"bar\""));
    // Now check what happens if foo.quote does not exist
    mockShardContext = new QueryShardContext(0, settings, null, null, null, null, null, xContentRegistry(), null, null, System::currentTimeMillis) {

        @Override
        public MappedFieldType fieldMapper(String name) {
            if (name.equals("foo.quote")) {
                return null;
            }
            return new MockFieldMapper.FakeFieldType();
        }
    };
    parser = new SimpleQueryParser(new StandardAnalyzer(), Collections.singletonMap("foo", 1f), -1, sqpSettings, mockShardContext);
    assertEquals(new TermQuery(new Term("foo", "bar")), parser.parse("bar"));
    // foo.quote is unmapped this time, so the quoted form falls back to the plain foo field
    assertEquals(new TermQuery(new Term("foo", "bar")), parser.parse("\"bar\""));
}
Also used: SpanTermQuery (org.apache.lucene.search.spans.SpanTermQuery), TermQuery (org.apache.lucene.search.TermQuery), IndexSettings (org.elasticsearch.index.IndexSettings), MockFieldMapper (org.elasticsearch.index.mapper.MockFieldMapper), Term (org.apache.lucene.index.Term), IndexMetaData (org.elasticsearch.cluster.metadata.IndexMetaData), StandardAnalyzer (org.apache.lucene.analysis.standard.StandardAnalyzer), MappedFieldType (org.elasticsearch.index.mapper.MappedFieldType), Settings (org.elasticsearch.common.settings.Settings)
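
The behaviour under test can be summarised as: quoted text is routed to the field plus the configured quote suffix when that sub-field is mapped, and falls back to the plain field otherwise. A hypothetical helper (not Elasticsearch code; all names are illustrative) sketching that resolution:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class QuoteFieldSuffixSketch {
    // resolve the field a quoted term should target: prefer field + suffix when mapped, else fall back
    static String resolveField(String field, String suffix, boolean quoted, Set<String> mappedFields) {
        if (quoted && suffix != null && mappedFields.contains(field + suffix)) {
            return field + suffix;
        }
        return field;
    }

    public static void main(String[] args) {
        Set<String> mapped = new HashSet<>(Arrays.asList("foo", "foo.quote"));
        System.out.println(resolveField("foo", ".quote", true, mapped));   // foo.quote
        System.out.println(resolveField("foo", ".quote", false, mapped));  // foo
        System.out.println(resolveField("foo", ".quote", true, new HashSet<>(Arrays.asList("foo")))); // foo (fallback)
    }
}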

Example 9 with StandardAnalyzer

Use of org.apache.lucene.analysis.standard.StandardAnalyzer in project elasticsearch by elastic.

Class AbstractTermVectorsTestCase, method indexDocsWithLucene.

protected DirectoryReader indexDocsWithLucene(TestDoc[] testDocs) throws IOException {
    Map<String, Analyzer> mapping = new HashMap<>();
    for (TestFieldSetting field : testDocs[0].fieldSettings) {
        if (field.storedPayloads) {
            // index each token's type as its payload so payloads are available in the term vectors
            mapping.put(field.name, new Analyzer() {

                @Override
                protected TokenStreamComponents createComponents(String fieldName) {
                    Tokenizer tokenizer = new StandardTokenizer();
                    TokenFilter filter = new LowerCaseFilter(tokenizer);
                    filter = new TypeAsPayloadTokenFilter(filter);
                    return new TokenStreamComponents(tokenizer, filter);
                }
            });
        }
    }
    PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new StandardAnalyzer(CharArraySet.EMPTY_SET), mapping);
    Directory dir = new RAMDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(wrapper);
    conf.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    IndexWriter writer = new IndexWriter(dir, conf);
    for (TestDoc doc : testDocs) {
        Document d = new Document();
        d.add(new Field("id", doc.id, StringField.TYPE_STORED));
        for (int i = 0; i < doc.fieldContent.length; i++) {
            FieldType type = new FieldType(TextField.TYPE_STORED);
            TestFieldSetting fieldSetting = doc.fieldSettings[i];
            type.setStoreTermVectorOffsets(fieldSetting.storedOffset);
            type.setStoreTermVectorPayloads(fieldSetting.storedPayloads);
            type.setStoreTermVectorPositions(fieldSetting.storedPositions || fieldSetting.storedPayloads || fieldSetting.storedOffset);
            type.setStoreTermVectors(true);
            type.freeze();
            d.add(new Field(fieldSetting.name, doc.fieldContent[i], type));
        }
        writer.updateDocument(new Term("id", doc.id), d);
        writer.commit();
    }
    writer.close();
    return DirectoryReader.open(dir);
}
Also used: HashMap (java.util.HashMap), TypeAsPayloadTokenFilter (org.apache.lucene.analysis.payloads.TypeAsPayloadTokenFilter), Term (org.apache.lucene.index.Term), Analyzer (org.apache.lucene.analysis.Analyzer), StandardAnalyzer (org.apache.lucene.analysis.standard.StandardAnalyzer), Document (org.apache.lucene.document.Document), RAMDirectory (org.apache.lucene.store.RAMDirectory), PerFieldAnalyzerWrapper (org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper), FieldType (org.apache.lucene.document.FieldType), StringField (org.apache.lucene.document.StringField), Field (org.apache.lucene.document.Field), TextField (org.apache.lucene.document.TextField), IndexWriter (org.apache.lucene.index.IndexWriter), StandardTokenizer (org.apache.lucene.analysis.standard.StandardTokenizer), Tokenizer (org.apache.lucene.analysis.Tokenizer), LowerCaseFilter (org.apache.lucene.analysis.LowerCaseFilter), TokenFilter (org.apache.lucene.analysis.TokenFilter), Directory (org.apache.lucene.store.Directory), IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig)
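
A minimal sketch (field names illustrative, not taken from the test) of the PerFieldAnalyzerWrapper pattern used above: fields present in the map get their own analyzer, and every other field falls back to the StandardAnalyzer supplied as the default.

import java.util.Collections;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriterConfig;

public class PerFieldAnalyzerSketch {
    public static void main(String[] args) {
        // "raw_field" keeps whitespace tokenization; everything else uses the StandardAnalyzer
        PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(
                new StandardAnalyzer(),
                Collections.<String, Analyzer>singletonMap("raw_field", new WhitespaceAnalyzer()));
        // the wrapper is passed to IndexWriterConfig like any other Analyzer
        IndexWriterConfig conf = new IndexWriterConfig(wrapper);
        System.out.println(conf.getAnalyzer());
    }
}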

Example 10 with StandardAnalyzer

Use of org.apache.lucene.analysis.standard.StandardAnalyzer in project elasticsearch by elastic.

Class TermVectorsUnitTests, method writeEmptyTermVector.

private void writeEmptyTermVector(TermVectorsResponse outResponse) throws IOException {
    Directory dir = newDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());
    conf.setOpenMode(OpenMode.CREATE);
    IndexWriter writer = new IndexWriter(dir, conf);
    FieldType type = new FieldType(TextField.TYPE_STORED);
    type.setStoreTermVectorOffsets(true);
    type.setStoreTermVectorPayloads(false);
    type.setStoreTermVectorPositions(true);
    type.setStoreTermVectors(true);
    type.freeze();
    Document d = new Document();
    // note: the term-vector FieldType built above is never attached to a field, so the indexed
    // document deliberately carries no term vectors (hence "empty" in the method name)
    d.add(new Field("id", "abc", StringField.TYPE_STORED));
    writer.updateDocument(new Term("id", "abc"), d);
    writer.commit();
    writer.close();
    DirectoryReader dr = DirectoryReader.open(dir);
    IndexSearcher s = new IndexSearcher(dr);
    TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
    ScoreDoc[] scoreDocs = search.scoreDocs;
    int doc = scoreDocs[0].doc;
    Fields fields = dr.getTermVectors(doc);
    EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
    outResponse.setFields(fields, null, flags, fields);
    outResponse.setExists(true);
    dr.close();
    dir.close();
}
Also used: IndexSearcher (org.apache.lucene.search.IndexSearcher), TermQuery (org.apache.lucene.search.TermQuery), DirectoryReader (org.apache.lucene.index.DirectoryReader), Term (org.apache.lucene.index.Term), Document (org.apache.lucene.document.Document), Flag (org.elasticsearch.action.termvectors.TermVectorsRequest.Flag), FieldType (org.apache.lucene.document.FieldType), ScoreDoc (org.apache.lucene.search.ScoreDoc), TopDocs (org.apache.lucene.search.TopDocs), StringField (org.apache.lucene.document.StringField), Field (org.apache.lucene.document.Field), TextField (org.apache.lucene.document.TextField), Fields (org.apache.lucene.index.Fields), IndexWriter (org.apache.lucene.index.IndexWriter), StandardAnalyzer (org.apache.lucene.analysis.standard.StandardAnalyzer), Directory (org.apache.lucene.store.Directory), IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig)
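
To make the Fields object concrete, here is a minimal sketch (not from the test; the reader and doc id are assumed to come from an index opened elsewhere) of how per-document term vectors can be iterated. For the document written above, getTermVectors would return null, since no field stores term vectors.

import java.io.IOException;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;

public class TermVectorDumpSketch {
    static void dumpTermVectors(IndexReader reader, int docId) throws IOException {
        Fields fields = reader.getTermVectors(docId);
        if (fields == null) {
            return; // the document has no fields with stored term vectors
        }
        for (String field : fields) {
            Terms terms = fields.terms(field);
            TermsEnum termsEnum = terms.iterator();
            BytesRef term;
            while ((term = termsEnum.next()) != null) {
                System.out.println(field + " -> " + term.utf8ToString() + " freq=" + termsEnum.totalTermFreq());
            }
        }
    }
}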

Aggregations

StandardAnalyzer (org.apache.lucene.analysis.standard.StandardAnalyzer): 112 usages
Analyzer (org.apache.lucene.analysis.Analyzer): 37 usages
IndexWriter (org.apache.lucene.index.IndexWriter): 36 usages
Document (org.apache.lucene.document.Document): 29 usages
IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig): 29 usages
IndexSearcher (org.apache.lucene.search.IndexSearcher): 24 usages
Term (org.apache.lucene.index.Term): 22 usages
RAMDirectory (org.apache.lucene.store.RAMDirectory): 21 usages
Test (org.junit.Test): 21 usages
Query (org.apache.lucene.search.Query): 20 usages
BooleanQuery (org.apache.lucene.search.BooleanQuery): 19 usages
TermQuery (org.apache.lucene.search.TermQuery): 19 usages
IOException (java.io.IOException): 16 usages
Before (org.junit.Before): 15 usages
IndexReader (org.apache.lucene.index.IndexReader): 14 usages
HashMap (java.util.HashMap): 13 usages
Field (org.apache.lucene.document.Field): 13 usages
ArrayList (java.util.ArrayList): 12 usages
QueryParser (org.apache.lucene.queryparser.classic.QueryParser): 12 usages
Directory (org.apache.lucene.store.Directory): 12 usages