Use of org.apache.lucene.analysis.MockAnalyzer in project lucene-solr by apache.
From class AnalyzingSuggesterTest, method testIllegalLookupArgument:
public void testIllegalLookupArgument() throws Exception {
  Analyzer a = new MockAnalyzer(random());
  Directory tempDir = getDirectory();
  AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", a, a, 0, 256, -1, true);
  suggester.build(new InputArrayIterator(new Input[] { new Input("а где Люси?", 7) }));
  // Lookup keys may not contain U+001E or U+001F: the suggester reserves
  // these control characters internally as separators, so both lookups throw.
  expectThrows(IllegalArgumentException.class, () -> {
    suggester.lookup("а\u001E", false, 3);
  });
  expectThrows(IllegalArgumentException.class, () -> {
    suggester.lookup("а\u001F", false, 3);
  });
  IOUtils.close(a, tempDir);
}
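For contrast, a minimal sketch of a legal lookup against the suggester built in this test; the key "а где" and the printing loop are illustrative, not part of the test:

// Hedged sketch (not from the test): a well-formed lookup. Plain text is
// legal; only keys containing the reserved U+001E/U+001F separators throw.
List<Lookup.LookupResult> results = suggester.lookup("а где", false, 3);
for (Lookup.LookupResult result : results) {
  System.out.println(result.key + " -> " + result.value); // e.g. "а где Люси?" -> 7
}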
Use of org.apache.lucene.analysis.MockAnalyzer in project lucene-solr by apache.
From class AnalyzingSuggesterTest, method testTooLongSuggestion:
// TODO: we need BaseSuggesterTestCase?
public void testTooLongSuggestion() throws Exception {
  Analyzer a = new MockAnalyzer(random());
  Directory tempDir = getDirectory();
  AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", a);
  String bigString = TestUtil.randomSimpleString(random(), 30000, 30000);
  try {
    suggester.build(new InputArrayIterator(new Input[] { new Input(bigString, 7) }));
    fail("did not hit expected exception");
  } catch (StackOverflowError soe) {
    // OK: building the automaton for a 30,000-char input may overflow the stack
  } catch (IllegalArgumentException iae) {
    // expected: the input may instead be rejected outright as too large
  }
  IOUtils.close(a, tempDir);
}
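Calling code can avoid this failure by screening inputs before build. A minimal sketch under an assumed application-level cap; MAX_SUGGESTION_LENGTH and filterTooLong are hypothetical helpers, not Lucene APIs:

// Hypothetical guard (not from the test or Lucene): drop inputs whose
// UTF-8 form is too long before handing them to suggester.build().
static final int MAX_SUGGESTION_LENGTH = 1000; // assumed application-level cap

static Input[] filterTooLong(Input[] inputs) {
  return java.util.Arrays.stream(inputs)
      .filter(in -> in.term.length <= MAX_SUGGESTION_LENGTH) // Input.term is a UTF-8 BytesRef
      .toArray(Input[]::new);
}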
Use of org.apache.lucene.analysis.MockAnalyzer in project lucene-solr by apache.
From class BaseGeoPointTestCase, method searchSmallSet:
/** Returns TopDocs over a small set of points in field "point". */
private TopDocs searchSmallSet(Query query, int size) throws Exception {
  // this is a simple systematic test, indexing these points
  // TODO: fragile: does not understand quantization in any way yet uses extremely high precision!
  double[][] pts = new double[][] {
      { 32.763420, -96.774 },
      { 32.7559529921407, -96.7759895324707 },
      { 32.77866942010977, -96.77701950073242 },
      { 32.7756745755423, -96.7706036567688 },
      { 27.703618681345585, -139.73458170890808 },
      { 32.94823588839368, -96.4538113027811 },
      { 33.06047141970814, -96.65084838867188 },
      { 32.778650, -96.7772 },
      { -88.56029371730983, -177.23537676036358 },
      { 33.541429799076354, -26.779373834241003 },
      { 26.774024500421728, -77.35379276106497 },
      { -90.0, -14.796283808944777 },
      { 32.94823588839368, -178.8538113027811 },
      { 32.94823588839368, 178.8538113027811 },
      { 40.720611, -73.998776 },
      { -44.5, -179.5 }
  };
  Directory directory = newDirectory();
  // TODO: must these simple tests really rely on docid order?
  IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
  iwc.setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000));
  iwc.setMergePolicy(newLogMergePolicy());
  // Else seeds may not reproduce:
  iwc.setMergeScheduler(new SerialMergeScheduler());
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory, iwc);
  for (double[] p : pts) {
    Document doc = new Document();
    addPointToDoc("point", doc, p[0], p[1]);
    writer.addDocument(doc);
  }
  // add explicit multi-valued docs
  for (int i = 0; i < pts.length; i += 2) {
    Document doc = new Document();
    addPointToDoc("point", doc, pts[i][0], pts[i][1]);
    addPointToDoc("point", doc, pts[i + 1][0], pts[i + 1][1]);
    writer.addDocument(doc);
  }
  // index random string documents
  for (int i = 0; i < random().nextInt(10); ++i) {
    Document doc = new Document();
    doc.add(new StringField("string", Integer.toString(i), Field.Store.NO));
    writer.addDocument(doc);
  }
  IndexReader reader = writer.getReader();
  writer.close();
  IndexSearcher searcher = newSearcher(reader);
  TopDocs topDocs = searcher.search(query, size);
  reader.close();
  directory.close();
  return topDocs;
}
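A sketch of how a concrete test might drive this helper, assuming the newRectQuery(field, minLat, maxLat, minLon, maxLon) factory that subclasses of BaseGeoPointTestCase implement; the bounding box and the assertion are illustrative, not from the source:

// Illustrative caller (assumed, not from the source): a bounding box
// around the cluster of Dallas-area points indexed above.
public void testSmallSetRectSketch() throws Exception {
  TopDocs td = searchSmallSet(newRectQuery("point", 32.75, 32.79, -96.78, -96.77), 20);
  assertTrue(td.scoreDocs.length > 0); // several single- and multi-valued docs fall inside
}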
Use of org.apache.lucene.analysis.MockAnalyzer in project lucene-solr by apache.
From class BaseCompressingDocValuesFormatTestCase, method testUniqueValuesCompression:
public void testUniqueValuesCompression() throws IOException {
  final Directory dir = new RAMDirectory();
  final IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
  final IndexWriter iwriter = new IndexWriter(dir, iwc);
  final int uniqueValueCount = TestUtil.nextInt(random(), 1, 256);
  final List<Long> values = new ArrayList<>();
  final Document doc = new Document();
  final NumericDocValuesField dvf = new NumericDocValuesField("dv", 0);
  doc.add(dvf);
  for (int i = 0; i < 300; ++i) {
    final long value;
    if (values.size() < uniqueValueCount) {
      value = random().nextLong();
      values.add(value);
    } else {
      value = RandomPicks.randomFrom(random(), values);
    }
    dvf.setLongValue(value);
    iwriter.addDocument(doc);
  }
  iwriter.forceMerge(1);
  final long size1 = dirSize(dir);
  for (int i = 0; i < 20; ++i) {
    dvf.setLongValue(RandomPicks.randomFrom(random(), values));
    iwriter.addDocument(doc);
  }
  iwriter.forceMerge(1);
  final long size2 = dirSize(dir);
  // make sure the new longs did not cost 8 bytes each
  assertTrue(size2 < size1 + 8 * 20);
}
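dirSize is defined elsewhere in the test hierarchy; a plausible minimal sketch, assuming it simply sums the length of every file currently in the Directory:

// Hedged sketch of the dirSize helper used above (assumed implementation):
// total size of all files in the Directory. listAll() and fileLength()
// are standard Directory APIs.
protected static long dirSize(Directory d) throws IOException {
  long size = 0;
  for (String file : d.listAll()) {
    size += d.fileLength(file);
  }
  return size;
}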
Use of org.apache.lucene.analysis.MockAnalyzer in project lucene-solr by apache.
From class BaseDocValuesFormatTestCase, method testBigNumericRange2:
public void testBigNumericRange2() throws IOException {
  Analyzer analyzer = new MockAnalyzer(random());
  Directory directory = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(analyzer);
  conf.setMergePolicy(newLogMergePolicy());
  RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
  Document doc = new Document();
  doc.add(new NumericDocValuesField("dv", -8841491950446638677L));
  iwriter.addDocument(doc);
  doc = new Document();
  doc.add(new NumericDocValuesField("dv", 9062230939892376225L));
  iwriter.addDocument(doc);
  iwriter.forceMerge(1);
  iwriter.close();
  // Now search the index (DirectoryReader is read-only):
  IndexReader ireader = DirectoryReader.open(directory);
  assert ireader.leaves().size() == 1;
  NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv");
  assertEquals(0, dv.nextDoc());
  assertEquals(-8841491950446638677L, dv.longValue());
  assertEquals(1, dv.nextDoc());
  assertEquals(9062230939892376225L, dv.longValue());
  ireader.close();
  directory.close();
}
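The assertions rely on the iterator contract of NumericDocValues: nextDoc() walks documents in docID order and longValue() reads the value at the current position. For random access by docID there is advanceExact; a hedged sketch, with the target docID illustrative:

// Illustrative random access by docID (not from the test); this would
// run inside the test, before ireader.close().
NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv");
if (dv.advanceExact(1)) {            // hypothetical target: the second doc
  assertEquals(9062230939892376225L, dv.longValue());
} // advanceExact returns false if the doc has no value for "dv"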