Search in sources:

Example 36 with CharTermAttribute

use of org.apache.lucene.analysis.tokenattributes.CharTermAttribute in project lucene-solr by apache.

From the class DelimitedPayloadTokenFilterTest, method testIntEncoding.

/**
 * Verifies that {@code DelimitedPayloadTokenFilter} with an {@code IntegerEncoder}
 * attaches the integer after the '|' delimiter as the token's payload, and leaves
 * undelimited tokens with a null payload.
 */
public void testIntEncoding() throws Exception {
    String input = "The quick|1 red|2 fox|3 jumped over the lazy|5 brown|99 dogs|83";
    DelimitedPayloadTokenFilter stream =
        new DelimitedPayloadTokenFilter(whitespaceMockTokenizer(input), '|', new IntegerEncoder());
    CharTermAttribute term = stream.getAttribute(CharTermAttribute.class);
    PayloadAttribute payload = stream.getAttribute(PayloadAttribute.class);
    // Expected terms paired with their decoded integer payloads (null = no payload).
    String[] expectedTerms = { "The", "quick", "red", "fox", "jumped", "over", "the", "lazy", "brown", "dogs" };
    Integer[] expectedPayloads = { null, 1, 2, 3, null, null, null, 5, 99, 83 };
    stream.reset();
    for (int i = 0; i < expectedTerms.length; i++) {
        byte[] encoded = (expectedPayloads[i] == null) ? null : PayloadHelper.encodeInt(expectedPayloads[i]);
        assertTermEquals(expectedTerms[i], stream, term, payload, encoded);
    }
    assertFalse(stream.incrementToken());
    stream.end();
    stream.close();
}
Also used : PayloadAttribute(org.apache.lucene.analysis.tokenattributes.PayloadAttribute) CharTermAttribute(org.apache.lucene.analysis.tokenattributes.CharTermAttribute)

Example 37 with CharTermAttribute

use of org.apache.lucene.analysis.tokenattributes.CharTermAttribute in project lucene-solr by apache.

From the class NumericPayloadTokenFilterTest, method test.

/**
 * Checks that {@code NumericPayloadTokenFilter} attaches the float payload 3.0
 * to every token whose type is "D" (as assigned by {@code WordTokenFilter} for
 * "dogs"), and leaves all other tokens with the default "word" type.
 *
 * <p>Fix: the original never called {@code end()}/{@code close()} on the stream
 * after consuming it, violating the TokenStream workflow contract; redundant
 * {@code == true} comparisons replaced with standard assertions.
 */
public void test() throws IOException {
    String test = "The quick red fox jumped over the lazy brown dogs";
    final MockTokenizer input = new MockTokenizer(MockTokenizer.WHITESPACE, false);
    input.setReader(new StringReader(test));
    NumericPayloadTokenFilter nptf = new NumericPayloadTokenFilter(new WordTokenFilter(input), 3, "D");
    boolean seenDogs = false;
    CharTermAttribute termAtt = nptf.getAttribute(CharTermAttribute.class);
    TypeAttribute typeAtt = nptf.getAttribute(TypeAttribute.class);
    PayloadAttribute payloadAtt = nptf.getAttribute(PayloadAttribute.class);
    nptf.reset();
    while (nptf.incrementToken()) {
        if (termAtt.toString().equals("dogs")) {
            seenDogs = true;
            assertEquals("type should be D", "D", typeAtt.type());
            assertNotNull("payloadAtt.getPayload() is null and it shouldn't be", payloadAtt.getPayload());
            // Safe here to just use the bytes: the payload must occupy the
            // whole backing array (offset 0, full length), asserted below.
            byte[] bytes = payloadAtt.getPayload().bytes;
            assertEquals("payload length mismatch", payloadAtt.getPayload().length, bytes.length);
            assertEquals("payload offset should be 0", 0, payloadAtt.getPayload().offset);
            float pay = PayloadHelper.decodeFloat(bytes);
            assertEquals("decoded payload mismatch", 3, pay, 0.0f);
        } else {
            assertEquals("type should be the default", "word", typeAtt.type());
        }
    }
    assertTrue("the token 'dogs' was never seen", seenDogs);
    // Complete the TokenStream workflow; the original leaked the stream.
    nptf.end();
    nptf.close();
}
Also used : MockTokenizer(org.apache.lucene.analysis.MockTokenizer) PayloadAttribute(org.apache.lucene.analysis.tokenattributes.PayloadAttribute) CharTermAttribute(org.apache.lucene.analysis.tokenattributes.CharTermAttribute) TypeAttribute(org.apache.lucene.analysis.tokenattributes.TypeAttribute) StringReader(java.io.StringReader)

Example 38 with CharTermAttribute

use of org.apache.lucene.analysis.tokenattributes.CharTermAttribute in project lucene-solr by apache.

From the class TestSimplePatternTokenizer, method testEmptyStringPatternOneMatch.

/**
 * Verifies that the pattern "a*" (which can match the empty string) emits
 * exactly one token, "a", for the input "bbab".
 *
 * <p>Fix: the original never called {@code end()}/{@code close()} on the
 * tokenizer, unlike the other tests in this file; added to honor the
 * TokenStream workflow contract.
 */
public void testEmptyStringPatternOneMatch() throws Exception {
    Tokenizer t = new SimplePatternTokenizer("a*");
    CharTermAttribute termAtt = t.getAttribute(CharTermAttribute.class);
    t.setReader(new StringReader("bbab"));
    t.reset();
    assertTrue(t.incrementToken());
    assertEquals("a", termAtt.toString());
    assertFalse(t.incrementToken());
    t.end();
    t.close();
}
Also used : CharTermAttribute(org.apache.lucene.analysis.tokenattributes.CharTermAttribute) StringReader(java.io.StringReader) Tokenizer(org.apache.lucene.analysis.Tokenizer)

Example 39 with CharTermAttribute

use of org.apache.lucene.analysis.tokenattributes.CharTermAttribute in project lucene-solr by apache.

From the class ShingleAnalyzerWrapperTest, method testShingleAnalyzerWrapperPhraseQuery.

/**
 * Shows how to construct a phrase query containing shingles: each analyzed
 * token (including shingle tokens) is added to the phrase at the position
 * derived from its position increment.
 */
public void testShingleAnalyzerWrapperPhraseQuery() throws Exception {
    PhraseQuery.Builder phrase = new PhraseQuery.Builder();
    try (TokenStream stream = analyzer.tokenStream("content", "this sentence")) {
        PositionIncrementAttribute posIncr = stream.addAttribute(PositionIncrementAttribute.class);
        CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
        // Start at -1 so the first increment lands the first token at position 0.
        int position = -1;
        stream.reset();
        while (stream.incrementToken()) {
            position += posIncr.getPositionIncrement();
            phrase.add(new Term("content", term.toString()), position);
        }
        stream.end();
    }
    ScoreDoc[] hits = searcher.search(phrase.build(), 1000).scoreDocs;
    compareRanks(hits, new int[] { 0 });
}
Also used : TokenStream(org.apache.lucene.analysis.TokenStream) CharTermAttribute(org.apache.lucene.analysis.tokenattributes.CharTermAttribute) PhraseQuery(org.apache.lucene.search.PhraseQuery) Term(org.apache.lucene.index.Term) PositionIncrementAttribute(org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute) ScoreDoc(org.apache.lucene.search.ScoreDoc)

Example 40 with CharTermAttribute

use of org.apache.lucene.analysis.tokenattributes.CharTermAttribute in project lucene-solr by apache.

From the class TestMorfologikAnalyzer, method testLeftoverStems.

/**
 * Test reuse of MorfologikFilter with leftover stems: a stem buffered while
 * analyzing the first input must not leak into the analysis of the second.
 *
 * <p>Fix: the boolean result of {@code incrementToken()} was silently ignored;
 * if the filter produced no token, the subsequent assertEquals would report a
 * misleading failure (or compare a stale term). Now asserted explicitly.
 */
public final void testLeftoverStems() throws IOException {
    Analyzer a = getTestAnalyzer();
    try (TokenStream ts_1 = a.tokenStream("dummy", "liście")) {
        CharTermAttribute termAtt_1 = ts_1.getAttribute(CharTermAttribute.class);
        ts_1.reset();
        assertTrue("first stream should emit a token", ts_1.incrementToken());
        assertEquals("first stream", "liście", termAtt_1.toString());
        ts_1.end();
    }
    try (TokenStream ts_2 = a.tokenStream("dummy", "danych")) {
        CharTermAttribute termAtt_2 = ts_2.getAttribute(CharTermAttribute.class);
        ts_2.reset();
        assertTrue("second stream should emit a token", ts_2.incrementToken());
        assertEquals("second stream", "dany", termAtt_2.toString());
        ts_2.end();
    }
    a.close();
}
Also used : TokenStream(org.apache.lucene.analysis.TokenStream) CharTermAttribute(org.apache.lucene.analysis.tokenattributes.CharTermAttribute) Analyzer(org.apache.lucene.analysis.Analyzer)

Aggregations

CharTermAttribute (org.apache.lucene.analysis.tokenattributes.CharTermAttribute)151 TokenStream (org.apache.lucene.analysis.TokenStream)95 StringReader (java.io.StringReader)46 OffsetAttribute (org.apache.lucene.analysis.tokenattributes.OffsetAttribute)35 PositionIncrementAttribute (org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute)34 IOException (java.io.IOException)27 ArrayList (java.util.ArrayList)27 Tokenizer (org.apache.lucene.analysis.Tokenizer)25 Analyzer (org.apache.lucene.analysis.Analyzer)20 PayloadAttribute (org.apache.lucene.analysis.tokenattributes.PayloadAttribute)16 BytesRef (org.apache.lucene.util.BytesRef)15 TypeAttribute (org.apache.lucene.analysis.tokenattributes.TypeAttribute)13 LinkedList (java.util.LinkedList)11 FlagsAttribute (org.apache.lucene.analysis.tokenattributes.FlagsAttribute)10 Term (org.apache.lucene.index.Term)10 HashMap (java.util.HashMap)9 Token (org.apache.lucene.analysis.Token)8 Document (org.apache.lucene.document.Document)8 List (java.util.List)7 HashSet (java.util.HashSet)6