Use of org.apache.solr.legacy.LegacyIntField in the lucene-solr project by Apache.
From the class TestFieldCacheSort, method testLegacyInt.
/**
 * Tests sorting on a field indexed as a legacy (trie-encoded) int.
 * Indexes three documents with int values, uninverts the field, and
 * verifies that an INT sort returns them in ascending numeric order.
 */
public void testLegacyInt() throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
  // Index one document per value, in deliberately non-sorted order.
  for (int value : new int[] { 300000, -1, 4 }) {
    Document doc = new Document();
    doc.add(new LegacyIntField("value", value, Field.Store.YES));
    iw.addDocument(doc);
  }
  // Wrap the reader so the indexed-only legacy field becomes sortable.
  IndexReader reader = UninvertingReader.wrap(iw.getReader(), Collections.singletonMap("value", Type.LEGACY_INTEGER));
  iw.close();
  IndexSearcher searcher = newSearcher(reader);
  Sort sort = new Sort(new SortField("value", SortField.Type.INT));
  TopDocs hits = searcher.search(new MatchAllDocsQuery(), 10, sort);
  assertEquals(3, hits.totalHits);
  // Ascending numeric order: -1 < 4 < 300000.
  String[] expectedOrder = { "-1", "4", "300000" };
  for (int i = 0; i < expectedOrder.length; i++) {
    assertEquals(expectedOrder[i], searcher.doc(hits.scoreDocs[i].doc).get("value"));
  }
  TestUtil.checkReader(reader);
  reader.close();
  dir.close();
}
Use of org.apache.solr.legacy.LegacyIntField in the lucene-solr project by Apache.
From the class TestFieldCacheSort, method testLegacyIntMissing.
/**
 * Tests sorting on a legacy int field when one document is missing the value.
 * The missing value sorts as if it were 0, so it lands between -1 and 4.
 */
public void testLegacyIntMissing() throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
  // null means "index a document without the field".
  for (Integer value : new Integer[] { null, -1, 4 }) {
    Document doc = new Document();
    if (value != null) {
      doc.add(new LegacyIntField("value", value, Field.Store.YES));
    }
    iw.addDocument(doc);
  }
  // Wrap the reader so the indexed-only legacy field becomes sortable.
  IndexReader reader = UninvertingReader.wrap(iw.getReader(), Collections.singletonMap("value", Type.LEGACY_INTEGER));
  iw.close();
  IndexSearcher searcher = newSearcher(reader);
  Sort sort = new Sort(new SortField("value", SortField.Type.INT));
  TopDocs hits = searcher.search(new MatchAllDocsQuery(), 10, sort);
  assertEquals(3, hits.totalHits);
  // The missing value is treated as 0, sorting between -1 and 4.
  assertEquals("-1", searcher.doc(hits.scoreDocs[0].doc).get("value"));
  assertNull(searcher.doc(hits.scoreDocs[1].doc).get("value"));
  assertEquals("4", searcher.doc(hits.scoreDocs[2].doc).get("value"));
  TestUtil.checkReader(reader);
  reader.close();
  dir.close();
}
Use of org.apache.solr.legacy.LegacyIntField in the lucene-solr project by Apache.
From the class TestLegacyTerms, method testIntFieldMinMax.
/**
 * Indexes random int values into a legacy trie-encoded field and verifies
 * that {@code LegacyNumericUtils.getMinInt}/{@code getMaxInt} recover the
 * exact minimum and maximum from the terms dictionary.
 */
public void testIntFieldMinMax() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  int numDocs = atLeast(100);
  // Track the true extremes as we index, to compare against the terms metadata.
  int minValue = Integer.MAX_VALUE;
  int maxValue = Integer.MIN_VALUE;
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    int num = random().nextInt();
    minValue = Math.min(num, minValue);
    maxValue = Math.max(num, maxValue);
    doc.add(new LegacyIntField("field", num, Field.Store.NO));
    w.addDocument(doc);
  }
  IndexReader r = w.getReader();
  Terms terms = MultiFields.getTerms(r, "field");
  // Use Integer.valueOf instead of the deprecated new Integer(int) constructor
  // (deprecated for removal since Java 9); boxing keeps the Object-overload
  // of assertEquals, matching the original comparison semantics.
  assertEquals(Integer.valueOf(minValue), LegacyNumericUtils.getMinInt(terms));
  assertEquals(Integer.valueOf(maxValue), LegacyNumericUtils.getMaxInt(terms));
  r.close();
  w.close();
  dir.close();
}
Use of org.apache.solr.legacy.LegacyIntField in the lucene-solr project by Apache.
From the class TestDocTermOrds, method testRandomWithPrefix.
/**
 * Randomized test for DocTermOrds with term prefixes: builds random terms
 * drawn from a small set of random prefixes, indexes a random subset per
 * document, then for each prefix verifies the uninverted ords (per-segment
 * and on a slow composite top-level reader) against the expected ords.
 */
public void testRandomWithPrefix() throws Exception {
  Directory dir = newDirectory();
  final Set<String> prefixes = new HashSet<>();
  final int numPrefix = TestUtil.nextInt(random(), 2, 7);
  if (VERBOSE) {
    System.out.println("TEST: use " + numPrefix + " prefixes");
  }
  while (prefixes.size() < numPrefix) {
    prefixes.add(TestUtil.randomRealisticUnicodeString(random()));
    //prefixes.add(_TestUtil.randomSimpleString(random));
  }
  final String[] prefixesArray = prefixes.toArray(new String[prefixes.size()]);
  final int NUM_TERMS = atLeast(20);
  final Set<BytesRef> terms = new HashSet<>();
  // Build NUM_TERMS unique non-empty terms, each starting with a random prefix.
  while (terms.size() < NUM_TERMS) {
    final String s = prefixesArray[random().nextInt(prefixesArray.length)] + TestUtil.randomRealisticUnicodeString(random());
    //final String s = prefixesArray[random.nextInt(prefixesArray.length)] + _TestUtil.randomSimpleString(random);
    if (s.length() > 0) {
      terms.add(new BytesRef(s));
    }
  }
  final BytesRef[] termsArray = terms.toArray(new BytesRef[terms.size()]);
  Arrays.sort(termsArray);
  final int NUM_DOCS = atLeast(100);
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  // Sometimes swap in codec that impls ord():
  if (random().nextInt(10) == 7) {
    Codec codec = TestUtil.alwaysPostingsFormat(TestUtil.getPostingsFormatWithOrds(random()));
    conf.setCodec(codec);
  }
  final RandomIndexWriter w = new RandomIndexWriter(random(), dir, conf);
  // idToOrds[id] = sorted ords (indexes into termsArray) indexed for that doc.
  final int[][] idToOrds = new int[NUM_DOCS][];
  final Set<Integer> ordsForDocSet = new HashSet<>();
  for (int id = 0; id < NUM_DOCS; id++) {
    Document doc = new Document();
    doc.add(new LegacyIntField("id", id, Field.Store.YES));
    final int termCount = TestUtil.nextInt(random(), 0, 20 * RANDOM_MULTIPLIER);
    while (ordsForDocSet.size() < termCount) {
      ordsForDocSet.add(random().nextInt(termsArray.length));
    }
    final int[] ordsForDoc = new int[termCount];
    int upto = 0;
    if (VERBOSE) {
      System.out.println("TEST: doc id=" + id);
    }
    for (int ord : ordsForDocSet) {
      ordsForDoc[upto++] = ord;
      Field field = newStringField("field", termsArray[ord].utf8ToString(), Field.Store.NO);
      if (VERBOSE) {
        System.out.println("  f=" + termsArray[ord].utf8ToString());
      }
      doc.add(field);
    }
    ordsForDocSet.clear();
    Arrays.sort(ordsForDoc);
    idToOrds[id] = ordsForDoc;
    w.addDocument(doc);
  }
  final DirectoryReader r = w.getReader();
  w.close();
  if (VERBOSE) {
    System.out.println("TEST: reader=" + r);
  }
  LeafReader slowR = SlowCompositeReaderWrapper.wrap(r);
  TestUtil.checkReader(slowR);
  for (String prefix : prefixesArray) {
    final BytesRef prefixRef = prefix == null ? null : new BytesRef(prefix);
    // Expected ords restricted to terms that start with this prefix.
    final int[][] idToOrdsPrefix = new int[NUM_DOCS][];
    for (int id = 0; id < NUM_DOCS; id++) {
      // NOTE(review): removed an unused local (int[] docOrds = idToOrds[id])
      // that shadowed the loop source below and was never read.
      final List<Integer> newOrds = new ArrayList<>();
      for (int ord : idToOrds[id]) {
        if (StringHelper.startsWith(termsArray[ord], prefixRef)) {
          newOrds.add(ord);
        }
      }
      final int[] newOrdsArray = new int[newOrds.size()];
      int upto = 0;
      for (int ord : newOrds) {
        newOrdsArray[upto++] = ord;
      }
      idToOrdsPrefix[id] = newOrdsArray;
    }
    for (LeafReaderContext ctx : r.leaves()) {
      if (VERBOSE) {
        System.out.println("\nTEST: sub=" + ctx.reader());
      }
      verify(ctx.reader(), idToOrdsPrefix, termsArray, prefixRef);
    }
    // Also verify the composite (top-level) reader; its terms enum does not
    // support ord, so this forces the OrdWrapper to run:
    if (VERBOSE) {
      System.out.println("TEST: top reader");
    }
    verify(slowR, idToOrdsPrefix, termsArray, prefixRef);
  }
  // Drop the uninverted entries so the cache sanity checker stays clean.
  FieldCache.DEFAULT.purgeByCacheKey(slowR.getCoreCacheHelper().getKey());
  r.close();
  dir.close();
}
Use of org.apache.solr.legacy.LegacyIntField in the lucene-solr project by Apache.
From the class TestDocTermOrds, method testRandom.
/**
 * Randomized test for DocTermOrds without a prefix filter: indexes a random
 * subset of random terms per document, then verifies the uninverted ords on
 * each segment and on a slow composite top-level reader.
 *
 * NOTE(review): the exact sequence of random() calls determines the index
 * contents, so statement order here is load-bearing.
 */
public void testRandom() throws Exception {
Directory dir = newDirectory();
final int NUM_TERMS = atLeast(20);
final Set<BytesRef> terms = new HashSet<>();
// Build NUM_TERMS unique, non-empty random unicode terms.
while (terms.size() < NUM_TERMS) {
final String s = TestUtil.randomRealisticUnicodeString(random());
//final String s = _TestUtil.randomSimpleString(random);
if (s.length() > 0) {
terms.add(new BytesRef(s));
}
}
final BytesRef[] termsArray = terms.toArray(new BytesRef[terms.size()]);
Arrays.sort(termsArray);
final int NUM_DOCS = atLeast(100);
IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
// Sometimes swap in codec that impls ord():
if (random().nextInt(10) == 7) {
// Make sure terms index has ords:
Codec codec = TestUtil.alwaysPostingsFormat(TestUtil.getPostingsFormatWithOrds(random()));
conf.setCodec(codec);
}
final RandomIndexWriter w = new RandomIndexWriter(random(), dir, conf);
// idToOrds[id] = sorted ords (indexes into termsArray) indexed for that doc.
final int[][] idToOrds = new int[NUM_DOCS][];
final Set<Integer> ordsForDocSet = new HashSet<>();
for (int id = 0; id < NUM_DOCS; id++) {
Document doc = new Document();
doc.add(new LegacyIntField("id", id, Field.Store.YES));
// Pick a random set of distinct term ords for this document.
final int termCount = TestUtil.nextInt(random(), 0, 20 * RANDOM_MULTIPLIER);
while (ordsForDocSet.size() < termCount) {
ordsForDocSet.add(random().nextInt(termsArray.length));
}
final int[] ordsForDoc = new int[termCount];
int upto = 0;
if (VERBOSE) {
System.out.println("TEST: doc id=" + id);
}
for (int ord : ordsForDocSet) {
ordsForDoc[upto++] = ord;
Field field = newStringField("field", termsArray[ord].utf8ToString(), Field.Store.NO);
if (VERBOSE) {
System.out.println("  f=" + termsArray[ord].utf8ToString());
}
doc.add(field);
}
// Reuse the scratch set across documents.
ordsForDocSet.clear();
Arrays.sort(ordsForDoc);
idToOrds[id] = ordsForDoc;
w.addDocument(doc);
}
final DirectoryReader r = w.getReader();
w.close();
if (VERBOSE) {
System.out.println("TEST: reader=" + r);
}
// Verify each segment individually.
for (LeafReaderContext ctx : r.leaves()) {
if (VERBOSE) {
System.out.println("\nTEST: sub=" + ctx.reader());
}
verify(ctx.reader(), idToOrds, termsArray, null);
}
// The composite reader's terms enum does not support
// ord, so this forces the OrdWrapper to run:
if (VERBOSE) {
System.out.println("TEST: top reader");
}
LeafReader slowR = SlowCompositeReaderWrapper.wrap(r);
TestUtil.checkReader(slowR);
verify(slowR, idToOrds, termsArray, null);
// Drop the uninverted entries so the cache sanity checker stays clean.
FieldCache.DEFAULT.purgeByCacheKey(slowR.getCoreCacheHelper().getKey());
r.close();
dir.close();
}
Aggregations