Example 46 with MockTokenizer

Use of org.apache.lucene.analysis.MockTokenizer in the apache/lucene-solr project.

From class TestSnowball, method testEnglish:

public void testEnglish() throws Exception {
    Analyzer a = new Analyzer() {

        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            // no-arg MockTokenizer defaults to whitespace tokenization with lowercasing
            Tokenizer tokenizer = new MockTokenizer();
            return new TokenStreamComponents(tokenizer, new SnowballFilter(tokenizer, "English"));
        }
    };
    assertAnalyzesTo(a, "he abhorred accents", new String[] { "he", "abhor", "accent" });
    a.close();
}
Also used: MockTokenizer(org.apache.lucene.analysis.MockTokenizer) Analyzer(org.apache.lucene.analysis.Analyzer) Tokenizer(org.apache.lucene.analysis.Tokenizer) KeywordTokenizer(org.apache.lucene.analysis.core.KeywordTokenizer)
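
assertAnalyzesTo comes from Lucene's BaseTokenStreamTestCase and, at its core, runs a standard consume loop over the analyzer's TokenStream. A minimal sketch of that loop (it would run before a.close(); CharTermAttribute is org.apache.lucene.analysis.tokenattributes.CharTermAttribute):

// Sketch of the consume loop behind assertAnalyzesTo: reset the stream,
// iterate, read the term attribute, then end it (closed via try-with-resources).
try (TokenStream ts = a.tokenStream("field", "he abhorred accents")) {
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
        System.out.println(term.toString()); // prints: he, abhor, accent
    }
    ts.end();
}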

Example 47 with MockTokenizer

Use of org.apache.lucene.analysis.MockTokenizer in the apache/lucene-solr project.

From class TestSerbianNormalizationFilterFactory, method testStemming:

public void testStemming() throws Exception {
    Reader reader = new StringReader("đura");
    TokenStream stream = new MockTokenizer(MockTokenizer.WHITESPACE, false);
    ((Tokenizer) stream).setReader(reader);
    stream = tokenFilterFactory("SerbianNormalization").create(stream);
    assertTokenStreamContents(stream, new String[] { "djura" });
}
Also used: MockTokenizer(org.apache.lucene.analysis.MockTokenizer) TokenStream(org.apache.lucene.analysis.TokenStream) StringReader(java.io.StringReader) Reader(java.io.Reader) Tokenizer(org.apache.lucene.analysis.Tokenizer)
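
The cast in this test is needed only because the variable is typed as TokenStream; MockTokenizer extends Tokenizer, so the same wiring works cast-free. A sketch of the equivalent setup, reusing the test's tokenFilterFactory helper:

// Keep a Tokenizer-typed variable so setReader can be called directly,
// then wrap it with the SerbianNormalization filter as before.
Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
tokenizer.setReader(new StringReader("đura"));
TokenStream stream = tokenFilterFactory("SerbianNormalization").create(tokenizer);
assertTokenStreamContents(stream, new String[] { "djura" });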

Example 48 with MockTokenizer

Use of org.apache.lucene.analysis.MockTokenizer in the apache/lucene-solr project.

From class TestSerbianNormalizationRegularFilter, method setUp:

@Override
public void setUp() throws Exception {
    super.setUp();
    analyzer = new Analyzer() {

        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            final Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
            final TokenStream stream = new SerbianNormalizationRegularFilter(tokenizer);
            return new TokenStreamComponents(tokenizer, stream);
        }
    };
}
Also used: MockTokenizer(org.apache.lucene.analysis.MockTokenizer) TokenStream(org.apache.lucene.analysis.TokenStream) Analyzer(org.apache.lucene.analysis.Analyzer) Tokenizer(org.apache.lucene.analysis.Tokenizer) KeywordTokenizer(org.apache.lucene.analysis.core.KeywordTokenizer)
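
An analyzer opened in setUp() should be released in tearDown(). The real class presumably does the same; as a sketch:

// Close the analyzer opened in setUp() to release its resources.
@Override
public void tearDown() throws Exception {
    analyzer.close();
    super.tearDown();
}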

Example 49 with MockTokenizer

Use of org.apache.lucene.analysis.MockTokenizer in the apache/lucene-solr project.

From class TestSolrSynonymParser, method testSimple:

/** Tests some simple examples from the solr wiki */
public void testSimple() throws Exception {
    String testFile = "i-pod, ipod, ipoooood\n" + "foo => foo bar\n" + "foo => baz\n" + "this test, that testing";
    Analyzer analyzer = new MockAnalyzer(random());
    // SolrSynonymParser(dedup, expand, analyzer): dedup=true drops duplicate rules,
    // expand=true maps each synonym to every other term in its group
    SolrSynonymParser parser = new SolrSynonymParser(true, true, analyzer);
    parser.parse(new StringReader(testFile));
    final SynonymMap map = parser.build();
    analyzer.close();
    analyzer = new Analyzer() {

        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, true);
            return new TokenStreamComponents(tokenizer, new SynonymFilter(tokenizer, map, true));
        }
    };
    assertAnalyzesTo(analyzer, "ball", new String[] { "ball" }, new int[] { 1 });
    assertAnalyzesTo(analyzer, "i-pod", new String[] { "i-pod", "ipod", "ipoooood" }, new int[] { 1, 0, 0 });
    assertAnalyzesTo(analyzer, "foo", new String[] { "foo", "baz", "bar" }, new int[] { 1, 0, 1 });
    assertAnalyzesTo(analyzer, "this test", new String[] { "this", "that", "test", "testing" }, new int[] { 1, 0, 1, 0 });
    analyzer.close();
}
Also used: MockTokenizer(org.apache.lucene.analysis.MockTokenizer) MockAnalyzer(org.apache.lucene.analysis.MockAnalyzer) StringReader(java.io.StringReader) EnglishAnalyzer(org.apache.lucene.analysis.en.EnglishAnalyzer) Analyzer(org.apache.lucene.analysis.Analyzer) Tokenizer(org.apache.lucene.analysis.Tokenizer)
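
In later Lucene releases SynonymFilter is deprecated in favor of SynonymGraphFilter, which emits a correct token graph for multi-word synonyms. A sketch of the same map wired through the graph filter, assuming its (input, map, ignoreCase) constructor:

// Same SynonymMap, but through SynonymGraphFilter so multi-word synonyms
// produce a proper token graph.
Analyzer graphAnalyzer = new Analyzer() {

    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
        Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, true);
        return new TokenStreamComponents(tokenizer, new SynonymGraphFilter(tokenizer, map, true));
    }
};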

Example 50 with MockTokenizer

Use of org.apache.lucene.analysis.MockTokenizer in the apache/lucene-solr project.

From class TestIndexWriterOnVMError, method doTest:

// just one thread, serial merge policy, hopefully debuggable
private void doTest(MockDirectoryWrapper.Failure failOn) throws Exception {
    // log all exceptions we hit, in case we fail (for debugging)
    ByteArrayOutputStream exceptionLog = new ByteArrayOutputStream();
    PrintStream exceptionStream = new PrintStream(exceptionLog, true, "UTF-8");
    //PrintStream exceptionStream = System.out;
    final long analyzerSeed = random().nextLong();
    final Analyzer analyzer = new Analyzer() {

        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            MockTokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
            // disable MockTokenizer's workflow checks: the injected failures abort
            // analysis mid-stream, which would otherwise trip those checks
            tokenizer.setEnableChecks(false);
            TokenStream stream = tokenizer;
            // emit some payloads
            if (fieldName.contains("payloads")) {
                stream = new MockVariableLengthPayloadFilter(new Random(analyzerSeed), stream);
            }
            return new TokenStreamComponents(tokenizer, stream);
        }
    };
    MockDirectoryWrapper dir = null;
    final int numIterations = TEST_NIGHTLY ? atLeast(100) : atLeast(5);
    STARTOVER: for (int iter = 0; iter < numIterations; iter++) {
        try {
            // close from last run
            if (dir != null) {
                dir.close();
            }
            // disable slow things: we don't rely upon sleeps here.
            dir = newMockDirectory();
            dir.setThrottling(MockDirectoryWrapper.Throttling.NEVER);
            dir.setUseSlowOpenClosers(false);
            IndexWriterConfig conf = newIndexWriterConfig(analyzer);
            // just for now, try to keep this test reproducible
            conf.setMergeScheduler(new SerialMergeScheduler());
            // test never makes it this far...
            int numDocs = atLeast(2000);
            IndexWriter iw = new IndexWriter(dir, conf);
            // ensure there is always a commit
            iw.commit();
            dir.failOn(failOn);
            for (int i = 0; i < numDocs; i++) {
                Document doc = new Document();
                doc.add(newStringField("id", Integer.toString(i), Field.Store.NO));
                doc.add(new NumericDocValuesField("dv", i));
                doc.add(new BinaryDocValuesField("dv2", new BytesRef(Integer.toString(i))));
                doc.add(new SortedDocValuesField("dv3", new BytesRef(Integer.toString(i))));
                doc.add(new SortedSetDocValuesField("dv4", new BytesRef(Integer.toString(i))));
                doc.add(new SortedSetDocValuesField("dv4", new BytesRef(Integer.toString(i - 1))));
                doc.add(new SortedNumericDocValuesField("dv5", i));
                doc.add(new SortedNumericDocValuesField("dv5", i - 1));
                doc.add(newTextField("text1", TestUtil.randomAnalysisString(random(), 20, true), Field.Store.NO));
                // ensure we store something
                doc.add(new StoredField("stored1", "foo"));
                doc.add(new StoredField("stored1", "bar"));
                // ensure we get some payloads
                doc.add(newTextField("text_payloads", TestUtil.randomAnalysisString(random(), 6, true), Field.Store.NO));
                // ensure we get some vectors
                FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
                ft.setStoreTermVectors(true);
                doc.add(newField("text_vectors", TestUtil.randomAnalysisString(random(), 6, true), ft));
                doc.add(new IntPoint("point", random().nextInt()));
                doc.add(new IntPoint("point2d", random().nextInt(), random().nextInt()));
                if (random().nextInt(10) > 0) {
                    // single doc
                    try {
                        iw.addDocument(doc);
                        // we made it, sometimes delete our doc, or update a dv
                        int thingToDo = random().nextInt(4);
                        if (thingToDo == 0) {
                            iw.deleteDocuments(new Term("id", Integer.toString(i)));
                        } else if (thingToDo == 1) {
                            iw.updateNumericDocValue(new Term("id", Integer.toString(i)), "dv", i + 1L);
                        } else if (thingToDo == 2) {
                            iw.updateBinaryDocValue(new Term("id", Integer.toString(i)), "dv2", new BytesRef(Integer.toString(i + 1)));
                        }
                    } catch (VirtualMachineError | AlreadyClosedException disaster) {
                        getTragedy(disaster, iw, exceptionStream);
                        continue STARTOVER;
                    }
                } else {
                    // block docs
                    Document doc2 = new Document();
                    doc2.add(newStringField("id", Integer.toString(-i), Field.Store.NO));
                    doc2.add(newTextField("text1", TestUtil.randomAnalysisString(random(), 20, true), Field.Store.NO));
                    doc2.add(new StoredField("stored1", "foo"));
                    doc2.add(new StoredField("stored1", "bar"));
                    doc2.add(newField("text_vectors", TestUtil.randomAnalysisString(random(), 6, true), ft));
                    try {
                        iw.addDocuments(Arrays.asList(doc, doc2));
                        // we made it, sometimes delete our docs
                        if (random().nextBoolean()) {
                            iw.deleteDocuments(new Term("id", Integer.toString(i)), new Term("id", Integer.toString(-i)));
                        }
                    } catch (VirtualMachineError | AlreadyClosedException disaster) {
                        getTragedy(disaster, iw, exceptionStream);
                        continue STARTOVER;
                    }
                }
                if (random().nextInt(10) == 0) {
                    // trigger flush:
                    try {
                        if (random().nextBoolean()) {
                            DirectoryReader ir = null;
                            try {
                                ir = DirectoryReader.open(iw, random().nextBoolean(), false);
                                TestUtil.checkReader(ir);
                            } finally {
                                IOUtils.closeWhileHandlingException(ir);
                            }
                        } else {
                            iw.commit();
                        }
                        if (DirectoryReader.indexExists(dir)) {
                            TestUtil.checkIndex(dir);
                        }
                    } catch (VirtualMachineError | AlreadyClosedException disaster) {
                        getTragedy(disaster, iw, exceptionStream);
                        continue STARTOVER;
                    }
                }
            }
            try {
                iw.close();
            } catch (VirtualMachineError | AlreadyClosedException disaster) {
                getTragedy(disaster, iw, exceptionStream);
                continue STARTOVER;
            }
        } catch (Throwable t) {
            System.out.println("Unexpected exception: dumping fake-exception-log:...");
            exceptionStream.flush();
            System.out.println(exceptionLog.toString("UTF-8"));
            System.out.flush();
            Rethrow.rethrow(t);
        }
    }
    dir.close();
    if (VERBOSE) {
        System.out.println("TEST PASSED: dumping fake-exception-log:...");
        System.out.println(exceptionLog.toString("UTF-8"));
    }
}
Also used: TokenStream(org.apache.lucene.analysis.TokenStream) Analyzer(org.apache.lucene.analysis.Analyzer) Document(org.apache.lucene.document.Document) StoredField(org.apache.lucene.document.StoredField) Random(java.util.Random) SortedNumericDocValuesField(org.apache.lucene.document.SortedNumericDocValuesField) NumericDocValuesField(org.apache.lucene.document.NumericDocValuesField) SortedDocValuesField(org.apache.lucene.document.SortedDocValuesField) BytesRef(org.apache.lucene.util.BytesRef) MockDirectoryWrapper(org.apache.lucene.store.MockDirectoryWrapper) PrintStream(java.io.PrintStream) ByteArrayOutputStream(java.io.ByteArrayOutputStream) BinaryDocValuesField(org.apache.lucene.document.BinaryDocValuesField) IntPoint(org.apache.lucene.document.IntPoint) FieldType(org.apache.lucene.document.FieldType) MockTokenizer(org.apache.lucene.analysis.MockTokenizer) SortedSetDocValuesField(org.apache.lucene.document.SortedSetDocValuesField) MockVariableLengthPayloadFilter(org.apache.lucene.analysis.MockVariableLengthPayloadFilter)
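
doTest is driven by a MockDirectoryWrapper.Failure whose eval(MockDirectoryWrapper) hook is invoked on directory operations. An illustrative caller follows (hypothetical method name; the class's real failure injectors are more careful, e.g. only throwing once IndexWriter is on the stack):

// Randomly throw a fake OutOfMemoryError from directory operations so
// doTest can exercise IndexWriter's tragedy handling.
public void testFakeOOM() throws Exception {
    final Random r = new Random(random().nextLong());
    doTest(new MockDirectoryWrapper.Failure() {

        @Override
        public void eval(MockDirectoryWrapper dir) throws IOException {
            if (r.nextInt(3000) == 0) {
                throw new OutOfMemoryError("Fake OutOfMemoryError");
            }
        }
    });
}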

Aggregations

MockTokenizer (org.apache.lucene.analysis.MockTokenizer): 280 usages
Tokenizer (org.apache.lucene.analysis.Tokenizer): 204 usages
Analyzer (org.apache.lucene.analysis.Analyzer): 161 usages
StringReader (java.io.StringReader): 118 usages
TokenStream (org.apache.lucene.analysis.TokenStream): 116 usages
KeywordTokenizer (org.apache.lucene.analysis.core.KeywordTokenizer): 106 usages
Reader (java.io.Reader): 59 usages
MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer): 54 usages
CharArraySet (org.apache.lucene.analysis.CharArraySet): 44 usages
Directory (org.apache.lucene.store.Directory): 36 usages
Document (org.apache.lucene.document.Document): 31 usages
BytesRef (org.apache.lucene.util.BytesRef): 25 usages
SetKeywordMarkerFilter (org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter): 21 usages
TextField (org.apache.lucene.document.TextField): 20 usages
CannedTokenStream (org.apache.lucene.analysis.CannedTokenStream): 18 usages
Field (org.apache.lucene.document.Field): 17 usages
FieldType (org.apache.lucene.document.FieldType): 14 usages
StringField (org.apache.lucene.document.StringField): 11 usages
Input (org.apache.lucene.search.suggest.Input): 11 usages
InputArrayIterator (org.apache.lucene.search.suggest.InputArrayIterator): 11 usages