Use of org.apache.lucene.document.IntPoint in project lucene-solr by apache.
In the class TestCompressingStoredFieldsFormat, method testDeletePartiallyWrittenFilesIfAbort.
public void testDeletePartiallyWrittenFilesIfAbort() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig iwConf = newIndexWriterConfig(new MockAnalyzer(random()));
  iwConf.setMaxBufferedDocs(RandomNumbers.randomIntBetween(random(), 2, 30));
  iwConf.setCodec(CompressingCodec.randomInstance(random()));
  // disable CFS because this test checks file names
  iwConf.setMergePolicy(newLogMergePolicy(false));
  iwConf.setUseCompoundFile(false);
  // Cannot use RIW because this test wants CFS to stay off:
  IndexWriter iw = new IndexWriter(dir, iwConf);
  final Document validDoc = new Document();
  validDoc.add(new IntPoint("id", 0));
  validDoc.add(new StoredField("id", 0));
  iw.addDocument(validDoc);
  iw.commit();
  // make sure that #writeField will fail to trigger an abort
  final Document invalidDoc = new Document();
  FieldType fieldType = new FieldType();
  fieldType.setStored(true);
  invalidDoc.add(new Field("invalid", fieldType) {
    @Override
    public String stringValue() {
      // TODO: really bad & scary that this causes IW to
      // abort the segment!! We should fix this.
      return null;
    }
  });
  try {
    iw.addDocument(invalidDoc);
    iw.commit();
  } catch (IllegalArgumentException iae) {
    // expected
    assertEquals(iae, iw.getTragicException());
  }
  // Writer should be closed by tragedy
  assertFalse(iw.isOpen());
  dir.close();
}
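The valid document above pairs the IntPoint with a StoredField under the same name because point fields are index-only: they support fast exact/range/set queries but cannot be read back from search results. A minimal standalone sketch of that pattern outside the test framework (class name is illustrative, and it assumes a Lucene 6/7-era classpath like these tests, where RAMDirectory is still available):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.IntPoint;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class IntPointStoredFieldSketch {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();
    try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
      Document doc = new Document();
      doc.add(new IntPoint("id", 0));    // indexed for point queries, not stored
      doc.add(new StoredField("id", 0)); // stored copy so the value can be retrieved
      writer.addDocument(doc);
    }
    try (DirectoryReader reader = DirectoryReader.open(dir)) {
      IndexSearcher searcher = new IndexSearcher(reader);
      ScoreDoc[] hits = searcher.search(IntPoint.newExactQuery("id", 0), 1).scoreDocs;
      // prints 0, read back from the StoredField
      System.out.println(searcher.doc(hits[0].doc).getField("id").numericValue());
    }
    dir.close();
  }
}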
Use of org.apache.lucene.document.IntPoint in project lucene-solr by apache.
In the class SynonymTokenizer, method setUp.
@Override
public void setUp() throws Exception {
  super.setUp();
  // Not many use this setup:
  a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
  dir = newDirectory();
  // Most tests use this setup:
  analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET);
  ramDir = newDirectory();
  fieldType = random().nextBoolean() ? FIELD_TYPE_TV : TextField.TYPE_STORED;
  IndexWriter writer = new IndexWriter(ramDir, newIndexWriterConfig(analyzer).setMergePolicy(newLogMergePolicy()));
  for (String text : texts) {
    writer.addDocument(doc(FIELD_NAME, text));
  }
  // a few tests need other docs...:
  Document doc = new Document();
  doc.add(new IntPoint(NUMERIC_FIELD_NAME, 1));
  doc.add(new StoredField(NUMERIC_FIELD_NAME, 1));
  writer.addDocument(doc);
  doc = new Document();
  doc.add(new IntPoint(NUMERIC_FIELD_NAME, 3));
  doc.add(new StoredField(NUMERIC_FIELD_NAME, 3));
  writer.addDocument(doc);
  doc = new Document();
  doc.add(new IntPoint(NUMERIC_FIELD_NAME, 5));
  doc.add(new StoredField(NUMERIC_FIELD_NAME, 5));
  writer.addDocument(doc);
  doc = new Document();
  doc.add(new IntPoint(NUMERIC_FIELD_NAME, 7));
  doc.add(new StoredField(NUMERIC_FIELD_NAME, 7));
  writer.addDocument(doc);
  Document childDoc = doc(FIELD_NAME, "child document");
  Document parentDoc = doc(FIELD_NAME, "parent document");
  writer.addDocuments(Arrays.asList(childDoc, parentDoc));
  writer.forceMerge(1);
  writer.close();
  reader = DirectoryReader.open(ramDir);
  // Misc:
  numHighlights = 0;
}
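The four numeric documents above (values 1, 3, 5 and 7) give the highlighter tests something to run numeric queries against. A hypothetical helper, not part of the original class, sketching how a test could reach them through the reader and NUMERIC_FIELD_NAME set up here:

// Hypothetical helper (not in the original test class): counts the numeric docs
// whose point value lies in [2, 6], i.e. the documents indexed with 3 and 5.
private int countNumericDocsInRange() throws IOException {
  IndexSearcher searcher = new IndexSearcher(reader);
  Query rangeQuery = IntPoint.newRangeQuery(NUMERIC_FIELD_NAME, 2, 6);
  return searcher.count(rangeQuery); // expected: 2
}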
Use of org.apache.lucene.document.IntPoint in project lucene-solr by apache.
In the class TestJoinUtil, method testEquals_numericJoin.
public void testEquals_numericJoin() throws Exception {
  final int numDocs = atLeast(random(), 50);
  try (final Directory dir = newDirectory()) {
    try (final RandomIndexWriter w = new RandomIndexWriter(random(), dir,
        newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()))) {
      boolean multiValued = random().nextBoolean();
      String joinField = multiValued ? "mvField" : "svField";
      for (int id = 0; id < numDocs; id++) {
        Document doc = new Document();
        doc.add(new TextField("id", "" + id, Field.Store.NO));
        doc.add(new TextField("name", "name" + (id % 7), Field.Store.NO));
        if (multiValued) {
          int numValues = 1 + random().nextInt(2);
          for (int i = 0; i < numValues; i++) {
            doc.add(new IntPoint(joinField, random().nextInt(13)));
            doc.add(new SortedNumericDocValuesField(joinField, random().nextInt(13)));
          }
        } else {
          doc.add(new IntPoint(joinField, random().nextInt(13)));
          doc.add(new NumericDocValuesField(joinField, random().nextInt(13)));
        }
        w.addDocument(doc);
      }

      Set<ScoreMode> scoreModes = EnumSet.allOf(ScoreMode.class);
      ScoreMode scoreMode1 = scoreModes.toArray(new ScoreMode[0])[random().nextInt(scoreModes.size())];
      scoreModes.remove(scoreMode1);
      ScoreMode scoreMode2 = scoreModes.toArray(new ScoreMode[0])[random().nextInt(scoreModes.size())];
      final Query x;
      try (IndexReader r = w.getReader()) {
        IndexSearcher indexSearcher = new IndexSearcher(r);
        x = JoinUtil.createJoinQuery(joinField, multiValued, joinField, Integer.class,
            new TermQuery(new Term("name", "name5")), indexSearcher, scoreMode1);
        assertEquals("identical calls to createJoinQuery", x,
            JoinUtil.createJoinQuery(joinField, multiValued, joinField, Integer.class,
                new TermQuery(new Term("name", "name5")), indexSearcher, scoreMode1));
        assertFalse("score mode (" + scoreMode1 + " != " + scoreMode2 + "), but queries are equal",
            x.equals(JoinUtil.createJoinQuery(joinField, multiValued, joinField, Integer.class,
                new TermQuery(new Term("name", "name5")), indexSearcher, scoreMode2)));
        assertFalse("from fields (joinField != \"other_field\") but queries equals",
            x.equals(JoinUtil.createJoinQuery(joinField, multiValued, "other_field", Integer.class,
                new TermQuery(new Term("name", "name5")), indexSearcher, scoreMode1)));
        assertFalse("from fields (\"other_field\" != joinField) but queries equals",
            x.equals(JoinUtil.createJoinQuery("other_field", multiValued, joinField, Integer.class,
                new TermQuery(new Term("name", "name5")), indexSearcher, scoreMode1)));
        assertFalse("fromQuery (name:name5 != name:name6) but queries equals",
            x.equals(JoinUtil.createJoinQuery("other_field", multiValued, joinField, Integer.class,
                new TermQuery(new Term("name", "name6")), indexSearcher, scoreMode1)));
      }

      for (int i = 14; i < 26; i++) {
        Document doc = new Document();
        doc.add(new TextField("id", "new_id", Field.Store.NO));
        doc.add(new TextField("name", "name5", Field.Store.NO));
        if (multiValued) {
          int numValues = 1 + random().nextInt(2);
          for (int j = 0; j < numValues; j++) {
            doc.add(new SortedNumericDocValuesField(joinField, i));
            doc.add(new IntPoint(joinField, i));
          }
        } else {
          doc.add(new NumericDocValuesField(joinField, i));
          doc.add(new IntPoint(joinField, i));
        }
        w.addDocument(doc);
      }
      try (IndexReader r = w.getReader()) {
        IndexSearcher indexSearcher = new IndexSearcher(r);
        assertFalse("Query shouldn't be equal, because new join values have been indexed",
            x.equals(JoinUtil.createJoinQuery(joinField, multiValued, joinField, Integer.class,
                new TermQuery(new Term("name", "name5")), indexSearcher, scoreMode1)));
      }
    }
  }
}
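For reference, the join query the test builds repeatedly has the following parameter shape; the comments annotate the exact arguments passed above (joinField, multiValued, indexSearcher and scoreMode1 are the test's own locals):

Query joinQuery = JoinUtil.createJoinQuery(
    joinField,                                // fromField: field holding the join values on the "from" side
    multiValued,                              // whether a "from" document may carry several join values
    joinField,                                // toField: matched against the same field name here
    Integer.class,                            // numeric type of the join values
    new TermQuery(new Term("name", "name5")), // fromQuery selecting the "from" documents
    indexSearcher,                            // searcher over the "from" index
    scoreMode1);                              // how "from" scores are aggregated into the join query

Note that the test indexes each join value both as an IntPoint and as a NumericDocValuesField or SortedNumericDocValuesField under the same field name, so both the point-based and the doc-values-based access paths used by the numeric join are available.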
Use of org.apache.lucene.document.IntPoint in project lucene-solr by apache.
In the class TestMemoryIndexAgainstRAMDir, method testPointValuesMemoryIndexVsNormalIndex.
public void testPointValuesMemoryIndexVsNormalIndex() throws Exception {
  int size = atLeast(12);
  List<Integer> randomValues = new ArrayList<>();
  Document doc = new Document();
  for (Integer randomInteger : random().ints(size).toArray()) {
    doc.add(new IntPoint("int", randomInteger));
    randomValues.add(randomInteger);
    doc.add(new LongPoint("long", randomInteger));
    doc.add(new FloatPoint("float", randomInteger));
    doc.add(new DoublePoint("double", randomInteger));
  }
  MockAnalyzer mockAnalyzer = new MockAnalyzer(random());
  MemoryIndex memoryIndex = MemoryIndex.fromDocument(doc, mockAnalyzer);
  IndexSearcher memoryIndexSearcher = memoryIndex.createSearcher();
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(random(), mockAnalyzer));
  writer.addDocument(doc);
  writer.close();
  IndexReader controlIndexReader = DirectoryReader.open(dir);
  IndexSearcher controlIndexSearcher = new IndexSearcher(controlIndexReader);
  Supplier<Integer> valueSupplier = () -> randomValues.get(random().nextInt(randomValues.size()));
  Query[] queries = new Query[] {
      IntPoint.newExactQuery("int", valueSupplier.get()),
      LongPoint.newExactQuery("long", valueSupplier.get()),
      FloatPoint.newExactQuery("float", valueSupplier.get()),
      DoublePoint.newExactQuery("double", valueSupplier.get()),
      IntPoint.newSetQuery("int", valueSupplier.get(), valueSupplier.get()),
      LongPoint.newSetQuery("long", valueSupplier.get(), valueSupplier.get()),
      FloatPoint.newSetQuery("float", valueSupplier.get(), valueSupplier.get()),
      DoublePoint.newSetQuery("double", valueSupplier.get(), valueSupplier.get()),
      IntPoint.newRangeQuery("int", valueSupplier.get(), valueSupplier.get()),
      LongPoint.newRangeQuery("long", valueSupplier.get(), valueSupplier.get()),
      FloatPoint.newRangeQuery("float", valueSupplier.get(), valueSupplier.get()),
      DoublePoint.newRangeQuery("double", valueSupplier.get(), valueSupplier.get())
  };
  for (Query query : queries) {
    // the MemoryIndex must agree with the on-disk control index for every query
    assertEquals(controlIndexSearcher.count(query), memoryIndexSearcher.count(query));
  }
  memoryIndexSearcher.getIndexReader().close();
  controlIndexReader.close();
  dir.close();
}
Use of org.apache.lucene.document.IntPoint in project lucene-solr by apache.
In the class TestMemoryIndex, method testPointValues.
public void testPointValues() throws Exception {
  List<Function<Long, IndexableField>> fieldFunctions = Arrays.asList(
      (t) -> new IntPoint("number", t.intValue()),
      (t) -> new LongPoint("number", t),
      (t) -> new FloatPoint("number", t.floatValue()),
      (t) -> new DoublePoint("number", t.doubleValue()));
  List<Function<Long, Query>> exactQueryFunctions = Arrays.asList(
      (t) -> IntPoint.newExactQuery("number", t.intValue()),
      (t) -> LongPoint.newExactQuery("number", t),
      (t) -> FloatPoint.newExactQuery("number", t.floatValue()),
      (t) -> DoublePoint.newExactQuery("number", t.doubleValue()));
  List<Function<long[], Query>> setQueryFunctions = Arrays.asList(
      (t) -> IntPoint.newSetQuery("number", LongStream.of(t).mapToInt(value -> (int) value).toArray()),
      (t) -> LongPoint.newSetQuery("number", t),
      (t) -> FloatPoint.newSetQuery("number", Arrays.asList(LongStream.of(t).mapToObj(value -> (float) value).toArray(Float[]::new))),
      (t) -> DoublePoint.newSetQuery("number", LongStream.of(t).mapToDouble(value -> (double) value).toArray()));
  List<BiFunction<Long, Long, Query>> rangeQueryFunctions = Arrays.asList(
      (t, u) -> IntPoint.newRangeQuery("number", t.intValue(), u.intValue()),
      (t, u) -> LongPoint.newRangeQuery("number", t, u),
      (t, u) -> FloatPoint.newRangeQuery("number", t.floatValue(), u.floatValue()),
      (t, u) -> DoublePoint.newRangeQuery("number", t.doubleValue(), u.doubleValue()));
  for (int i = 0; i < fieldFunctions.size(); i++) {
    Function<Long, IndexableField> fieldFunction = fieldFunctions.get(i);
    Function<Long, Query> exactQueryFunction = exactQueryFunctions.get(i);
    Function<long[], Query> setQueryFunction = setQueryFunctions.get(i);
    BiFunction<Long, Long, Query> rangeQueryFunction = rangeQueryFunctions.get(i);

    Document doc = new Document();
    for (int number = 1; number < 32; number += 2) {
      doc.add(fieldFunction.apply((long) number));
    }
    MemoryIndex mi = MemoryIndex.fromDocument(doc, analyzer);
    IndexSearcher indexSearcher = mi.createSearcher();

    Query query = exactQueryFunction.apply(5L);
    assertEquals(1, indexSearcher.count(query));
    query = exactQueryFunction.apply(4L);
    assertEquals(0, indexSearcher.count(query));

    query = setQueryFunction.apply(new long[] { 3L, 9L, 19L });
    assertEquals(1, indexSearcher.count(query));
    query = setQueryFunction.apply(new long[] { 2L, 8L, 13L });
    assertEquals(1, indexSearcher.count(query));
    query = setQueryFunction.apply(new long[] { 2L, 8L, 16L });
    assertEquals(0, indexSearcher.count(query));

    query = rangeQueryFunction.apply(2L, 16L);
    assertEquals(1, indexSearcher.count(query));
    query = rangeQueryFunction.apply(24L, 48L);
    assertEquals(1, indexSearcher.count(query));
    query = rangeQueryFunction.apply(48L, 68L);
    assertEquals(0, indexSearcher.count(query));
  }
}
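Stripped of the generic field and query functions, the IntPoint case above reduces to the following standalone sketch (class name is illustrative; it assumes the lucene-memory and lucene-analyzers-common modules are on the classpath, and any analyzer works since point fields are not tokenized):

import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.IntPoint;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.IndexSearcher;

public class MemoryIndexIntPointSketch {
  public static void main(String[] args) throws Exception {
    // one document carrying the odd values 1..31 as a multi-valued point field
    Document doc = new Document();
    for (int number = 1; number < 32; number += 2) {
      doc.add(new IntPoint("number", number));
    }
    MemoryIndex mi = MemoryIndex.fromDocument(doc, new KeywordAnalyzer());
    IndexSearcher searcher = mi.createSearcher();

    System.out.println(searcher.count(IntPoint.newExactQuery("number", 5)));      // 1: 5 is indexed
    System.out.println(searcher.count(IntPoint.newExactQuery("number", 4)));      // 0: only odd values present
    System.out.println(searcher.count(IntPoint.newSetQuery("number", 2, 8, 13))); // 1: 13 matches
    System.out.println(searcher.count(IntPoint.newRangeQuery("number", 48, 68))); // 0: nothing above 31
  }
}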