Example 11 with Token

Use of org.apache.lucene.analysis.Token in project lucene-solr by apache.

From the class TestMultiPhraseQuery, method testZeroPosIncr:

public void testZeroPosIncr() throws IOException {
    Directory dir = new RAMDirectory();
    // "a" advances the position; "b" and "c" use a 0 increment, so all three stack on the same position
    final Token[] tokens = new Token[3];
    tokens[0] = new Token();
    tokens[0].append("a");
    tokens[0].setPositionIncrement(1);
    tokens[1] = new Token();
    tokens[1].append("b");
    tokens[1].setPositionIncrement(0);
    tokens[2] = new Token();
    tokens[2].append("c");
    tokens[2].setPositionIncrement(0);
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new TextField("field", new CannedTokenStream(tokens)));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new TextField("field", new CannedTokenStream(tokens)));
    writer.addDocument(doc);
    IndexReader r = writer.getReader();
    writer.close();
    IndexSearcher s = newSearcher(r);
    MultiPhraseQuery.Builder mpqb = new MultiPhraseQuery.Builder();
    // Both branches add the same term arrays, just in a different order;
    // with the hard-coded condition only the first branch runs.
    if (true) {
        mpqb.add(new Term[] { new Term("field", "b"), new Term("field", "c") }, 0);
        mpqb.add(new Term[] { new Term("field", "a") }, 0);
    } else {
        mpqb.add(new Term[] { new Term("field", "a") }, 0);
        mpqb.add(new Term[] { new Term("field", "b"), new Term("field", "c") }, 0);
    }
    TopDocs hits = s.search(mpqb.build(), 2);
    assertEquals(2, hits.totalHits);
    assertEquals(hits.scoreDocs[0].score, hits.scoreDocs[1].score, 1e-5);
    /*
    for(int hit=0;hit<hits.totalHits;hit++) {
      ScoreDoc sd = hits.scoreDocs[hit];
      System.out.println("  hit doc=" + sd.doc + " score=" + sd.score);
    }
    */
    r.close();
    dir.close();
}
Also used: Token (org.apache.lucene.analysis.Token), Term (org.apache.lucene.index.Term), Document (org.apache.lucene.document.Document), RAMDirectory (org.apache.lucene.store.RAMDirectory), IndexReader (org.apache.lucene.index.IndexReader), TextField (org.apache.lucene.document.TextField), CannedTokenStream (org.apache.lucene.analysis.CannedTokenStream), RandomIndexWriter (org.apache.lucene.index.RandomIndexWriter), Directory (org.apache.lucene.store.Directory)
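
tokens[1] and tokens[2] above carry a position increment of 0, so "a", "b" and "c" are all indexed at the same position. A minimal sketch of a hypothetical helper (dumpPositions is not part of the test) that walks such a canned stream and prints each token's absolute position, assuming the same Lucene test-framework classes used above:

import java.io.IOException;
import org.apache.lucene.analysis.CannedTokenStream;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;

// Hypothetical helper, not from the test: makes the position "stacking" visible.
static void dumpPositions(Token... tokens) throws IOException {
    TokenStream ts = new CannedTokenStream(tokens);
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    PositionIncrementAttribute incr = ts.addAttribute(PositionIncrementAttribute.class);
    ts.reset();
    int pos = -1;
    while (ts.incrementToken()) {
        // an increment of 0 keeps the previous position, so "a", "b" and "c" all print position 0
        pos += incr.getPositionIncrement();
        System.out.println(term + " -> position " + pos);
    }
    ts.end();
    ts.close();
}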

Example 12 with Token

Use of org.apache.lucene.analysis.Token in project lucene-solr by apache.

From the class TestMultiPhraseQuery, method testZeroPosIncrSloppyPqAnd:

/**
   * PQ AND Mode - Manually creating a phrase query
   */
public void testZeroPosIncrSloppyPqAnd() throws IOException {
    PhraseQuery.Builder builder = new PhraseQuery.Builder();
    // accumulate position increments so that stacked tokens (increment 0) share a phrase position
    int pos = -1;
    for (Token tap : INCR_0_QUERY_TOKENS_AND) {
        pos += tap.getPositionIncrement();
        builder.add(new Term("field", tap.toString()), pos);
    }
    builder.setSlop(0);
    doTestZeroPosIncrSloppy(builder.build(), 0);
    builder.setSlop(1);
    doTestZeroPosIncrSloppy(builder.build(), 0);
    builder.setSlop(2);
    doTestZeroPosIncrSloppy(builder.build(), 1);
}
Also used: Token (org.apache.lucene.analysis.Token), Term (org.apache.lucene.index.Term)
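
INCR_0_QUERY_TOKENS_AND is a token array defined elsewhere in TestMultiPhraseQuery and not shown here. A minimal sketch with hypothetical tokens (reusing the Token, Term and PhraseQuery classes from the example above) of what the loop builds when the second token has a position increment of 0, i.e. both terms end up at the same phrase position:

// Hypothetical tokens, not the actual INCR_0_QUERY_TOKENS_AND contents.
Token first = new Token();
first.append("one");
first.setPositionIncrement(1);
Token stacked = new Token();
stacked.append("two");
stacked.setPositionIncrement(0); // stacked on the same position as "one"

PhraseQuery.Builder builder = new PhraseQuery.Builder();
int pos = -1;
for (Token tap : new Token[] { first, stacked }) {
    pos += tap.getPositionIncrement();
    builder.add(new Term("field", tap.toString()), pos); // both terms land at position 0
}
// With slop 0 this only matches documents where "one" and "two" occur at the same position.
PhraseQuery query = builder.build();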

Example 13 with Token

Use of org.apache.lucene.analysis.Token in project lucene-solr by apache.

From the class TestTrimFilter, method testTrim:

public void testTrim() throws Exception {
    char[] a = " a ".toCharArray();
    char[] b = "b   ".toCharArray();
    char[] ccc = "cCc".toCharArray();
    char[] whitespace = "   ".toCharArray();
    char[] empty = "".toCharArray();
    // Token(text, startOffset, endOffset): tokens with padded, all-whitespace, and empty term text
    TokenStream ts = new CannedTokenStream(
        new Token(new String(a, 0, a.length), 1, 5),
        new Token(new String(b, 0, b.length), 6, 10),
        new Token(new String(ccc, 0, ccc.length), 11, 15),
        new Token(new String(whitespace, 0, whitespace.length), 16, 20),
        new Token(new String(empty, 0, empty.length), 21, 21));
    ts = new TrimFilter(ts);
    assertTokenStreamContents(ts, new String[] { "a", "b", "cCc", "", "" });
}
Also used: CannedTokenStream (org.apache.lucene.analysis.CannedTokenStream), TokenStream (org.apache.lucene.analysis.TokenStream), Token (org.apache.lucene.analysis.Token)
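
TrimFilter only strips leading and trailing whitespace from each token's term text, which is why the whitespace-only and empty tokens above come out as empty strings. As a rough usage sketch (class and method names here are illustrative, assuming lucene-analyzers-common on the classpath), the filter is typically placed behind a tokenizer that can actually emit padded tokens, such as KeywordTokenizer:

import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.miscellaneous.TrimFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

// Hypothetical demo method, not from the test.
static void demoTrim() throws IOException {
    // KeywordTokenizer keeps the whole input as a single token, so the padding survives
    // long enough for TrimFilter to remove it.
    Analyzer trimming = new Analyzer() {
        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer source = new KeywordTokenizer();
            return new TokenStreamComponents(source, new TrimFilter(source));
        }
    };
    try (TokenStream ts = trimming.tokenStream("field", "  padded value  ")) {
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
            System.out.println("[" + term + "]"); // prints [padded value]
        }
        ts.end();
    }
}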

Example 14 with Token

Use of org.apache.lucene.analysis.Token in project lucene-solr by apache.

From the class TestRemoveDuplicatesTokenFilter, method tok:

public static Token tok(int pos, String t, int start, int end) {
    // build a token with term text t, offsets [start, end) and position increment pos
    Token tok = new Token(t, start, end);
    tok.setPositionIncrement(pos);
    return tok;
}
Also used: Token (org.apache.lucene.analysis.Token)
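
The helper packs a term, its character offsets and a position increment into a Token. A hedged sketch of how such tokens are typically fed through RemoveDuplicatesTokenFilter in this test class (the token values are illustrative, and assertTokenStreamContents comes from Lucene's BaseTokenStreamTestCase): a repeated term at the same position is dropped, while the same term at a new position is kept.

// Illustrative only; requires org.apache.lucene.analysis.miscellaneous.RemoveDuplicatesTokenFilter
// and org.apache.lucene.analysis.CannedTokenStream.
TokenStream ts = new RemoveDuplicatesTokenFilter(new CannedTokenStream(
    tok(1, "A", 0, 1),
    tok(0, "A", 0, 1),    // same term, same position -> removed
    tok(1, "A", 2, 3)));  // same term, next position  -> kept
assertTokenStreamContents(ts, new String[] { "A", "A" });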

Example 15 with Token

Use of org.apache.lucene.analysis.Token in project lucene-solr by apache.

From the class TestRemoveDuplicatesTokenFilterFactory, method tok:

public static Token tok(int pos, String t, int start, int end) {
    Token tok = new Token(t, start, end);
    tok.setPositionIncrement(pos);
    return tok;
}
Also used: Token (org.apache.lucene.analysis.Token)
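
The factory-based test exercises the same filter through RemoveDuplicatesTokenFilterFactory. A minimal sketch (token values and the empty argument map are illustrative) of what that looks like:

// Illustrative only; requires org.apache.lucene.analysis.miscellaneous.RemoveDuplicatesTokenFilterFactory
// and java.util.HashMap. The factory takes its configuration as a String-to-String map.
RemoveDuplicatesTokenFilterFactory factory =
    new RemoveDuplicatesTokenFilterFactory(new HashMap<String, String>());
TokenStream ts = factory.create(new CannedTokenStream(
    tok(1, "A", 0, 1),
    tok(0, "A", 0, 1)));  // duplicate at the same position is removed
assertTokenStreamContents(ts, new String[] { "A" });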

Aggregations

Token (org.apache.lucene.analysis.Token): 100
CannedTokenStream (org.apache.lucene.analysis.CannedTokenStream): 39
TokenStream (org.apache.lucene.analysis.TokenStream): 31
Directory (org.apache.lucene.store.Directory): 24
Test (org.junit.Test): 23
Document (org.apache.lucene.document.Document): 19
TextField (org.apache.lucene.document.TextField): 19
BytesRef (org.apache.lucene.util.BytesRef): 16
NamedList (org.apache.solr.common.util.NamedList): 16
StringReader (java.io.StringReader): 15
CharTermAttribute (org.apache.lucene.analysis.tokenattributes.CharTermAttribute): 15
Analyzer (org.apache.lucene.analysis.Analyzer): 14
ArrayList (java.util.ArrayList): 13
Map (java.util.Map): 13
Field (org.apache.lucene.document.Field): 13
FieldType (org.apache.lucene.document.FieldType): 11
IndexReader (org.apache.lucene.index.IndexReader): 11
MockTokenizer (org.apache.lucene.analysis.MockTokenizer): 10
Tokenizer (org.apache.lucene.analysis.Tokenizer): 9
Date (java.util.Date): 8