Usage example of org.apache.lucene.analysis.tokenattributes.CharTermAttribute from the Apache lucene-solr project: class DelimitedPayloadTokenFilterTest, method testIntEncoding.
public void testIntEncoding() throws Exception {
  // Each whitespace-separated token optionally carries a "|<int>" suffix; the
  // filter strips the delimiter and stores the integer (encoded by
  // IntegerEncoder) as that token's payload.
  String test = "The quick|1 red|2 fox|3 jumped over the lazy|5 brown|99 dogs|83";
  DelimitedPayloadTokenFilter filter =
      new DelimitedPayloadTokenFilter(whitespaceMockTokenizer(test), '|', new IntegerEncoder());
  CharTermAttribute termAtt = filter.getAttribute(CharTermAttribute.class);
  PayloadAttribute payAtt = filter.getAttribute(PayloadAttribute.class);

  String[] expectedTerms = {"The", "quick", "red", "fox", "jumped", "over", "the", "lazy", "brown", "dogs"};
  // null marks a token with no "|<int>" suffix, hence no payload expected.
  Integer[] expectedPayloads = {null, 1, 2, 3, null, null, null, 5, 99, 83};

  filter.reset();
  for (int i = 0; i < expectedTerms.length; i++) {
    byte[] payload = expectedPayloads[i] == null ? null : PayloadHelper.encodeInt(expectedPayloads[i]);
    assertTermEquals(expectedTerms[i], filter, termAtt, payAtt, payload);
  }
  assertFalse(filter.incrementToken());
  filter.end();
  filter.close();
}
Usage example of org.apache.lucene.analysis.tokenattributes.CharTermAttribute from the Apache lucene-solr project: class NumericPayloadTokenFilterTest, method test.
public void test() throws IOException {
  String test = "The quick red fox jumped over the lazy brown dogs";
  final MockTokenizer input = new MockTokenizer(MockTokenizer.WHITESPACE, false);
  input.setReader(new StringReader(test));
  // NumericPayloadTokenFilter attaches the float payload 3 to every token
  // whose type is "D" (here only "dogs", as typed by WordTokenFilter).
  NumericPayloadTokenFilter nptf = new NumericPayloadTokenFilter(new WordTokenFilter(input), 3, "D");
  boolean seenDogs = false;
  CharTermAttribute termAtt = nptf.getAttribute(CharTermAttribute.class);
  TypeAttribute typeAtt = nptf.getAttribute(TypeAttribute.class);
  PayloadAttribute payloadAtt = nptf.getAttribute(PayloadAttribute.class);
  nptf.reset();
  while (nptf.incrementToken()) {
    if (termAtt.toString().equals("dogs")) {
      seenDogs = true;
      assertEquals("type should have been set to D", "D", typeAtt.type());
      assertNotNull("payloadAtt.getPayload() is null and it shouldn't be", payloadAtt.getPayload());
      //safe here to just use the bytes, otherwise we should use offset, length
      byte[] bytes = payloadAtt.getPayload().bytes;
      assertEquals("payload backing array should be exactly the payload", payloadAtt.getPayload().length, bytes.length);
      assertEquals("payload offset should be 0", 0, payloadAtt.getPayload().offset);
      float pay = PayloadHelper.decodeFloat(bytes);
      assertEquals("payload should decode to 3", 3f, pay, 0.0f);
    } else {
      assertEquals("other tokens keep the default type", "word", typeAtt.type());
    }
  }
  // BUG FIX: the original never finished the TokenStream workflow — end() and
  // close() were missing, leaking the underlying reader/tokenizer state.
  nptf.end();
  nptf.close();
  assertTrue("should have seen the token 'dogs'", seenDogs);
}
Usage example of org.apache.lucene.analysis.tokenattributes.CharTermAttribute from the Apache lucene-solr project: class TestSimplePatternTokenizer, method testEmptyStringPatternOneMatch.
public void testEmptyStringPatternOneMatch() throws Exception {
  // "a*" can match the empty string; the tokenizer must skip zero-length
  // matches and emit only the single non-empty token "a" from "bbab".
  Tokenizer t = new SimplePatternTokenizer("a*");
  CharTermAttribute termAtt = t.getAttribute(CharTermAttribute.class);
  t.setReader(new StringReader("bbab"));
  t.reset();
  assertTrue(t.incrementToken());
  assertEquals("a", termAtt.toString());
  assertFalse(t.incrementToken());
  // BUG FIX: the original never called end()/close(), violating the
  // TokenStream workflow contract and leaking the reader.
  t.end();
  t.close();
}
Usage example of org.apache.lucene.analysis.tokenattributes.CharTermAttribute from the Apache lucene-solr project: class ShingleAnalyzerWrapperTest, method testShingleAnalyzerWrapperPhraseQuery.
/*
 * This shows how to construct a phrase query containing shingles.
 */
public void testShingleAnalyzerWrapperPhraseQuery() throws Exception {
  PhraseQuery.Builder builder = new PhraseQuery.Builder();
  try (TokenStream ts = analyzer.tokenStream("content", "this sentence")) {
    PositionIncrementAttribute posIncrAtt = ts.addAttribute(PositionIncrementAttribute.class);
    CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
    ts.reset();
    // Accumulate each token's absolute position so that shingles sharing a
    // position (increment 0) land on the same slot in the phrase query.
    int position = -1;
    while (ts.incrementToken()) {
      position += posIncrAtt.getPositionIncrement();
      builder.add(new Term("content", termAtt.toString()), position);
    }
    ts.end();
  }
  ScoreDoc[] hits = searcher.search(builder.build(), 1000).scoreDocs;
  compareRanks(hits, new int[] { 0 });
}
Usage example of org.apache.lucene.analysis.tokenattributes.CharTermAttribute from the Apache lucene-solr project: class TestMorfologikAnalyzer, method testLeftoverStems.
/** Test reuse of MorfologikFilter with leftover stems. */
public final void testLeftoverStems() throws IOException {
  Analyzer a = getTestAnalyzer();
  try {
    try (TokenStream ts_1 = a.tokenStream("dummy", "liście")) {
      CharTermAttribute termAtt_1 = ts_1.getAttribute(CharTermAttribute.class);
      ts_1.reset();
      // BUG FIX: the original ignored incrementToken()'s return value; on an
      // empty stream termAtt would hold stale data and the assert below would
      // report a misleading failure (or spuriously pass).
      assertTrue("first stream should produce a token", ts_1.incrementToken());
      assertEquals("first stream", "liście", termAtt_1.toString());
      ts_1.end();
    }
    try (TokenStream ts_2 = a.tokenStream("dummy", "danych")) {
      CharTermAttribute termAtt_2 = ts_2.getAttribute(CharTermAttribute.class);
      ts_2.reset();
      assertTrue("second stream should produce a token", ts_2.incrementToken());
      assertEquals("second stream", "dany", termAtt_2.toString());
      ts_2.end();
    }
  } finally {
    // Close the analyzer even when an assertion fails, so later reuse tests
    // don't inherit a leaked resource.
    a.close();
  }
}
Aggregations