Use of org.apache.solr.legacy.LegacyIntField in project lucene-solr by apache.
In the class TestLegacyFieldCache, method beforeClass:
@BeforeClass
public static void beforeClass() throws Exception {
  NUM_DOCS = atLeast(500);
  directory = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
      newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
  long theLong = Long.MAX_VALUE;
  double theDouble = Double.MAX_VALUE;
  int theInt = Integer.MAX_VALUE;
  float theFloat = Float.MAX_VALUE;
  if (VERBOSE) {
    System.out.println("TEST: setUp");
  }
  for (int i = 0; i < NUM_DOCS; i++) {
    Document doc = new Document();
    doc.add(new LegacyLongField("theLong", theLong--, Field.Store.NO));
    doc.add(new LegacyDoubleField("theDouble", theDouble--, Field.Store.NO));
    doc.add(new LegacyIntField("theInt", theInt--, Field.Store.NO));
    doc.add(new LegacyFloatField("theFloat", theFloat--, Field.Store.NO));
    if (i % 2 == 0) {
      doc.add(new LegacyIntField("sparse", i, Field.Store.NO));
    }
    if (i % 2 == 0) {
      doc.add(new LegacyIntField("numInt", i, Field.Store.NO));
    }
    writer.addDocument(doc);
  }
  IndexReader r = writer.getReader();
  reader = SlowCompositeReaderWrapper.wrap(r);
  TestUtil.checkReader(reader);
  writer.close();
}
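Because every value counts down from its type's MAX_VALUE, each docID maps to a predictable value. As a hedged sketch of how a test could read the cached longs back (assuming the iterator-based getNumerics API and LEGACY_LONG_PARSER constant this FieldCache exposes; not part of the original snippet):

// Sketch: read back the cached "theLong" values indexed above.
NumericDocValues longs = FieldCache.DEFAULT.getNumerics(reader, "theLong", FieldCache.LEGACY_LONG_PARSER);
for (int i = 0; i < NUM_DOCS; i++) {
  assertEquals(i, longs.nextDoc());                     // every doc has the field
  assertEquals(Long.MAX_VALUE - i, longs.longValue());  // values count down from MAX_VALUE
}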
Use of org.apache.solr.legacy.LegacyIntField in project lucene-solr by apache.
In the class TestUninvertingReader, method testSortedSetIntegerManyValues:
/** Tests {@link Type#SORTED_SET_INTEGER} using Integer based fields, with and w/o precision steps */
public void testSortedSetIntegerManyValues() throws IOException {
  final Directory dir = newDirectory();
  final IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
  final LegacyFieldType NO_TRIE_TYPE = new LegacyFieldType(LegacyIntField.TYPE_NOT_STORED);
  NO_TRIE_TYPE.setNumericPrecisionStep(Integer.MAX_VALUE);
  final Map<String, Type> UNINVERT_MAP = new LinkedHashMap<String, Type>();
  UNINVERT_MAP.put("notrie_single", Type.SORTED_SET_INTEGER);
  UNINVERT_MAP.put("notrie_multi", Type.SORTED_SET_INTEGER);
  UNINVERT_MAP.put("trie_single", Type.SORTED_SET_INTEGER);
  UNINVERT_MAP.put("trie_multi", Type.SORTED_SET_INTEGER);
  final Set<String> MULTI_VALUES = new LinkedHashSet<String>();
  MULTI_VALUES.add("trie_multi");
  MULTI_VALUES.add("notrie_multi");
  final int NUM_DOCS = TestUtil.nextInt(random(), 200, 1500);
  final int MIN = TestUtil.nextInt(random(), 10, 100);
  final int MAX = MIN + TestUtil.nextInt(random(), 10, 100);
  final long EXPECTED_VALSET_SIZE = 1 + MAX - MIN;
  {
    // (at least) one doc should have every value, so that at least one segment has every value
    final Document doc = new Document();
    for (int i = MIN; i <= MAX; i++) {
      doc.add(new LegacyIntField("trie_multi", i, Field.Store.NO));
      doc.add(new LegacyIntField("notrie_multi", i, NO_TRIE_TYPE));
    }
    iw.addDocument(doc);
  }
  // now add some more random docs (note: starting at i=1 because of previously added doc)
  for (int i = 1; i < NUM_DOCS; i++) {
    final Document doc = new Document();
    if (0 != TestUtil.nextInt(random(), 0, 9)) {
      int val = TestUtil.nextInt(random(), MIN, MAX);
      doc.add(new LegacyIntField("trie_single", val, Field.Store.NO));
      doc.add(new LegacyIntField("notrie_single", val, NO_TRIE_TYPE));
    }
    if (0 != TestUtil.nextInt(random(), 0, 9)) {
      int numMulti = atLeast(1);
      while (0 < numMulti--) {
        int val = TestUtil.nextInt(random(), MIN, MAX);
        doc.add(new LegacyIntField("trie_multi", val, Field.Store.NO));
        doc.add(new LegacyIntField("notrie_multi", val, NO_TRIE_TYPE));
      }
    }
    iw.addDocument(doc);
  }
  iw.close();
  final DirectoryReader ir = UninvertingReader.wrap(DirectoryReader.open(dir), UNINVERT_MAP);
  TestUtil.checkReader(ir);
  final int NUM_LEAVES = ir.leaves().size();
  // check the leaves: no more than the total set size
  for (LeafReaderContext rc : ir.leaves()) {
    final LeafReader ar = rc.reader();
    for (String f : UNINVERT_MAP.keySet()) {
      final SortedSetDocValues v = DocValues.getSortedSet(ar, f);
      final long valSetSize = v.getValueCount();
      assertTrue(f + ": Expected no more than " + EXPECTED_VALSET_SIZE + " values per segment, got " + valSetSize + " from: " + ar.toString(),
          valSetSize <= EXPECTED_VALSET_SIZE);
      if (1 == NUM_LEAVES && MULTI_VALUES.contains(f)) {
        // tighter check on multi fields in a single-segment index since we know one doc has all of them
        assertEquals(f + ": Single segment LeafReader's value set should have had exactly expected size",
            EXPECTED_VALSET_SIZE, valSetSize);
      }
    }
  }
  // check the composite of all leaves: exact expectation of set size
  final LeafReader composite = SlowCompositeReaderWrapper.wrap(ir);
  TestUtil.checkReader(composite);
  for (String f : MULTI_VALUES) {
    final SortedSetDocValues v = composite.getSortedSetDocValues(f);
    final long valSetSize = v.getValueCount();
    assertEquals(f + ": Composite reader value set should have had exactly expected size",
        EXPECTED_VALSET_SIZE, valSetSize);
  }
  ir.close();
  dir.close();
}
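Both variants uninvert to the same value set: SORTED_SET_INTEGER keeps only full-precision terms, so the Integer.MAX_VALUE precision step merely stops the extra trie prefix terms from being indexed in the first place. A hedged sketch of decoding one document's integers from the uninverted doc values (reusing the "trie_multi" field and ar reader from above; assumes the iterator-based SortedSetDocValues API of this branch):

// Sketch: recover the values of doc 0, which was given every value MIN..MAX.
SortedSetDocValues dv = DocValues.getSortedSet(ar, "trie_multi");
List<Integer> values = new ArrayList<>();
if (dv.advanceExact(0)) {
  long ord;
  while ((ord = dv.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
    values.add(LegacyNumericUtils.prefixCodedToInt(dv.lookupOrd(ord)));
  }
}
// prefix-coded int terms sort numerically, so values comes back as MIN..MAX in order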
Use of org.apache.solr.legacy.LegacyIntField in project lucene-solr by apache.
In the class TestOrdValues, method addDoc:
private static void addDoc(RandomIndexWriter iw, int i) throws Exception {
  Document d = new Document();
  Field f;
  int scoreAndID = i + 1;
  FieldType customType = new FieldType(TextField.TYPE_STORED);
  customType.setTokenized(false);
  customType.setOmitNorms(true);
  // for debug purposes
  f = newField(ID_FIELD, id2String(scoreAndID), customType);
  d.add(f);
  d.add(new SortedDocValuesField(ID_FIELD, new BytesRef(id2String(scoreAndID))));
  FieldType customType2 = new FieldType(TextField.TYPE_NOT_STORED);
  customType2.setOmitNorms(true);
  // for regular search
  f = newField(TEXT_FIELD, "text of doc" + scoreAndID + textLine(i), customType2);
  d.add(f);
  // for function scoring
  f = new LegacyIntField(INT_FIELD, scoreAndID, Store.YES);
  d.add(f);
  d.add(new NumericDocValuesField(INT_FIELD, scoreAndID));
  // for function scoring
  f = new LegacyFloatField(FLOAT_FIELD, scoreAndID, Store.YES);
  d.add(f);
  d.add(new NumericDocValuesField(FLOAT_FIELD, Float.floatToRawIntBits(scoreAndID)));
  iw.addDocument(d);
  log("added: " + d);
}
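Note the float encoding: NumericDocValuesField stores a long, so the float is packed with Float.floatToRawIntBits and must be unpacked with the inverse conversion on the read side. A hedged sketch (leafReader and docID are hypothetical names; assumes the iterator-based NumericDocValues API of this branch):

// Sketch: decode the float score stored above.
NumericDocValues dv = DocValues.getNumeric(leafReader, FLOAT_FIELD);
if (dv.advanceExact(docID)) {
  float score = Float.intBitsToFloat((int) dv.longValue());
}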
Use of org.apache.solr.legacy.LegacyIntField in project lucene-solr by apache.
In the class TestDocTermOrds, method testNumericEncoded32:
public void testNumericEncoded32() throws IOException {
  Directory dir = newDirectory();
  IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
  Document doc = new Document();
  doc.add(new LegacyIntField("foo", 5, Field.Store.NO));
  iw.addDocument(doc);
  doc = new Document();
  doc.add(new LegacyIntField("foo", 5, Field.Store.NO));
  doc.add(new LegacyIntField("foo", -3, Field.Store.NO));
  iw.addDocument(doc);
  iw.forceMerge(1);
  iw.close();
  DirectoryReader ir = DirectoryReader.open(dir);
  LeafReader ar = getOnlyLeafReader(ir);
  SortedSetDocValues v = FieldCache.DEFAULT.getDocTermOrds(ar, "foo", FieldCache.INT32_TERM_PREFIX);
  assertEquals(2, v.getValueCount());
  assertEquals(0, v.nextDoc());
  assertEquals(1, v.nextOrd());
  assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
  assertEquals(1, v.nextDoc());
  assertEquals(0, v.nextOrd());
  assertEquals(1, v.nextOrd());
  assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
  BytesRef value = v.lookupOrd(0);
  assertEquals(-3, LegacyNumericUtils.prefixCodedToInt(value));
  value = v.lookupOrd(1);
  assertEquals(5, LegacyNumericUtils.prefixCodedToInt(value));
  ir.close();
  dir.close();
}
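The ord assignments above (-3 at ord 0, 5 at ord 1) fall out of the term encoding: prefix-coded ints compare in numeric order even across the sign boundary, so term-sorted ords are also value-sorted. A hedged sketch of checking that directly (assuming LegacyNumericUtils.intToPrefixCoded with shift 0 for the full-precision term):

// Sketch: the full-precision terms for -3 and 5 sort in numeric order.
BytesRefBuilder neg = new BytesRefBuilder();
BytesRefBuilder pos = new BytesRefBuilder();
LegacyNumericUtils.intToPrefixCoded(-3, 0, neg);
LegacyNumericUtils.intToPrefixCoded(5, 0, pos);
assertTrue(neg.get().compareTo(pos.get()) < 0);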
Use of org.apache.solr.legacy.LegacyIntField in project lucene-solr by apache.
In the class TestFieldCacheSort, method testMaxScore:
public void testMaxScore() throws Exception {
  Directory d = newDirectory();
  // Not RIW because we need exactly 2 segs:
  IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random())));
  int id = 0;
  for (int seg = 0; seg < 2; seg++) {
    for (int docIDX = 0; docIDX < 10; docIDX++) {
      Document doc = new Document();
      doc.add(new LegacyIntField("id", docIDX, Field.Store.YES));
      StringBuilder sb = new StringBuilder();
      for (int i = 0; i < id; i++) {
        sb.append(' ');
        sb.append("text");
      }
      doc.add(newTextField("body", sb.toString(), Field.Store.NO));
      w.addDocument(doc);
      id++;
    }
    w.commit();
  }
  IndexReader r = UninvertingReader.wrap(DirectoryReader.open(w), Collections.singletonMap("id", Type.LEGACY_INTEGER));
  w.close();
  Query q = new TermQuery(new Term("body", "text"));
  IndexSearcher s = newSearcher(r);
  float maxScore = s.search(q, 10).getMaxScore();
  assertEquals(maxScore, s.search(q, 3, Sort.INDEXORDER, random().nextBoolean(), true).getMaxScore(), 0.0);
  assertEquals(maxScore, s.search(q, 3, Sort.RELEVANCE, random().nextBoolean(), true).getMaxScore(), 0.0);
  assertEquals(maxScore, s.search(q, 3, new Sort(new SortField[] { new SortField("id", SortField.Type.INT, false) }), random().nextBoolean(), true).getMaxScore(), 0.0);
  assertEquals(maxScore, s.search(q, 3, new Sort(new SortField[] { new SortField("id", SortField.Type.INT, true) }), random().nextBoolean(), true).getMaxScore(), 0.0);
  TestUtil.checkReader(r);
  r.close();
  d.close();
}
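The plain IndexWriter plus one commit per outer loop is what produces the "exactly 2 segs" the comment asks for; a RandomIndexWriter could flush or merge unpredictably. A hedged sketch of asserting that precondition before the score checks (two freshly committed small segments should not trigger a merge under the default merge policy):

// Sketch: verify the two-segment precondition the test relies on.
assertEquals(2, r.leaves().size());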