Use of org.apache.lucene.index.PrefixCodedTerms.TermIterator in project lucene-solr by apache.
The class DocValuesTermsQuery, method toString.
@Override
public String toString(String defaultField) {
  StringBuilder builder = new StringBuilder();
  boolean first = true;
  TermIterator iterator = termData.iterator();
  for (BytesRef term = iterator.next(); term != null; term = iterator.next()) {
    if (!first) {
      builder.append(' ');
    }
    first = false;
    builder.append(new Term(iterator.field(), term).toString());
  }
  return builder.toString();
}
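To show the same iteration pattern outside the query class, here is a minimal, self-contained sketch (assumed usage, not code from the project): it builds a small PrefixCodedTerms by hand and renders it the way toString above does. The field name "id" and the values are illustrative; PrefixCodedTerms.Builder expects terms to be added in sorted order (by field, then by term bytes).

import org.apache.lucene.index.PrefixCodedTerms;
import org.apache.lucene.index.PrefixCodedTerms.TermIterator;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.BytesRef;

public class TermIteratorToStringSketch {
  public static void main(String[] args) {
    // Terms must be added in sorted order (field, then term bytes).
    PrefixCodedTerms.Builder builder = new PrefixCodedTerms.Builder();
    builder.add(new Term("id", "1"));
    builder.add(new Term("id", "3"));
    builder.add(new Term("id", "7"));
    PrefixCodedTerms termData = builder.finish();

    // Same loop shape as toString() above: next() returns null when the
    // iteration ends, and field() reports the field of the current term.
    StringBuilder sb = new StringBuilder();
    boolean first = true;
    TermIterator iterator = termData.iterator();
    for (BytesRef term = iterator.next(); term != null; term = iterator.next()) {
      if (!first) {
        sb.append(' ');
      }
      first = false;
      sb.append(new Term(iterator.field(), term).toString());
    }
    System.out.println(sb); // prints: id:1 id:3 id:7
  }
}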
Use of org.apache.lucene.index.PrefixCodedTerms.TermIterator in project lucene-solr by apache.
The class PointInSetIncludingScoreQuery, method toString.
@Override
public final String toString(String field) {
  final StringBuilder sb = new StringBuilder();
  if (this.field.equals(field) == false) {
    sb.append(this.field);
    sb.append(':');
  }
  sb.append("{");
  TermIterator iterator = sortedPackedPoints.iterator();
  byte[] pointBytes = new byte[bytesPerDim];
  boolean first = true;
  for (BytesRef point = iterator.next(); point != null; point = iterator.next()) {
    if (first == false) {
      sb.append(" ");
    }
    first = false;
    System.arraycopy(point.bytes, point.offset, pointBytes, 0, pointBytes.length);
    sb.append(toString(pointBytes));
  }
  sb.append("}");
  return sb.toString();
}
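For context, here is a minimal sketch (an assumption, not code from the project) of how packed point bytes might end up in a PrefixCodedTerms and be read back through TermIterator, mirroring the copy-into-pointBytes step above. The field name "ints", the values, and the use of IntPoint's encode/decode helpers are illustrative choices, not what PointInSetIncludingScoreQuery itself does internally.

import org.apache.lucene.document.IntPoint;
import org.apache.lucene.index.PrefixCodedTerms;
import org.apache.lucene.index.PrefixCodedTerms.TermIterator;
import org.apache.lucene.util.BytesRef;

public class PackedPointsIteratorSketch {
  public static void main(String[] args) {
    int bytesPerDim = Integer.BYTES;
    // Encode each int as sortable bytes and add in ascending order, since
    // PrefixCodedTerms.Builder expects sorted input.
    PrefixCodedTerms.Builder builder = new PrefixCodedTerms.Builder();
    for (int value : new int[] {2, 5, 7}) {
      byte[] packed = new byte[bytesPerDim];
      IntPoint.encodeDimension(value, packed, 0);
      builder.add("ints", new BytesRef(packed));
    }
    PrefixCodedTerms sortedPackedPoints = builder.finish();

    // Same copy-then-use pattern as toString() above: the returned BytesRef is
    // a view, so the relevant bytes are copied into a scratch array first.
    TermIterator iterator = sortedPackedPoints.iterator();
    byte[] pointBytes = new byte[bytesPerDim];
    for (BytesRef point = iterator.next(); point != null; point = iterator.next()) {
      System.arraycopy(point.bytes, point.offset, pointBytes, 0, pointBytes.length);
      System.out.println(IntPoint.decodeDimension(pointBytes, 0)); // 2, 5, 7
    }
  }
}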
Use of org.apache.lucene.index.PrefixCodedTerms.TermIterator in project lucene-solr by apache.
The class TestDocumentsWriterDeleteQueue, method testUpdateDelteSlices.
public void testUpdateDelteSlices() {
  DocumentsWriterDeleteQueue queue = new DocumentsWriterDeleteQueue();
  final int size = 200 + random().nextInt(500) * RANDOM_MULTIPLIER;
  Integer[] ids = new Integer[size];
  for (int i = 0; i < ids.length; i++) {
    ids[i] = random().nextInt();
  }
  DeleteSlice slice1 = queue.newSlice();
  DeleteSlice slice2 = queue.newSlice();
  BufferedUpdates bd1 = new BufferedUpdates("bd1");
  BufferedUpdates bd2 = new BufferedUpdates("bd2");
  int last1 = 0;
  int last2 = 0;
  Set<Term> uniqueValues = new HashSet<>();
  for (int j = 0; j < ids.length; j++) {
    Integer i = ids[j];
    // create an array here since we compare identity below against tailItem
    Term[] term = new Term[] { new Term("id", i.toString()) };
    uniqueValues.add(term[0]);
    queue.addDelete(term);
    if (random().nextInt(20) == 0 || j == ids.length - 1) {
      queue.updateSlice(slice1);
      assertTrue(slice1.isTailItem(term));
      slice1.apply(bd1, j);
      assertAllBetween(last1, j, bd1, ids);
      last1 = j + 1;
    }
    if (random().nextInt(10) == 5 || j == ids.length - 1) {
      queue.updateSlice(slice2);
      assertTrue(slice2.isTailItem(term));
      slice2.apply(bd2, j);
      assertAllBetween(last2, j, bd2, ids);
      last2 = j + 1;
    }
    assertEquals(j + 1, queue.numGlobalTermDeletes());
  }
  assertEquals(uniqueValues, bd1.terms.keySet());
  assertEquals(uniqueValues, bd2.terms.keySet());
  HashSet<Term> frozenSet = new HashSet<>();
  BytesRefBuilder bytesRef = new BytesRefBuilder();
  TermIterator iter = queue.freezeGlobalBuffer(null).termIterator();
  while (iter.next() != null) {
    bytesRef.copyBytes(iter.bytes);
    frozenSet.add(new Term(iter.field(), bytesRef.toBytesRef()));
  }
  assertEquals(uniqueValues, frozenSet);
  assertEquals("num deletes must be 0 after freeze", 0, queue.numGlobalTermDeletes());
}
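The loop that builds frozenSet relies on a detail worth calling out: the BytesRef produced by TermIterator is reused on every call to next(), so the test copies the bytes into a BytesRefBuilder before keeping a Term. The test can read the package-private iter.bytes field because it lives in org.apache.lucene.index; a sketch of the same copy-before-retain pattern from outside that package (an assumption, not project code) would use the BytesRef returned by next() instead:

import java.util.HashSet;
import java.util.Set;
import org.apache.lucene.index.PrefixCodedTerms;
import org.apache.lucene.index.PrefixCodedTerms.TermIterator;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;

public class CollectTermsSketch {
  // Collects every term from the iterator into a Set, copying the reused
  // BytesRef first, in the same spirit as the frozenSet loop above.
  static Set<Term> collect(PrefixCodedTerms terms) {
    Set<Term> result = new HashSet<>();
    BytesRefBuilder scratch = new BytesRefBuilder();
    TermIterator iter = terms.iterator();
    for (BytesRef term = iter.next(); term != null; term = iter.next()) {
      scratch.copyBytes(term);                                // term is reused on every next()
      result.add(new Term(iter.field(), scratch.toBytesRef())); // toBytesRef() returns a fresh copy
    }
    return result;
  }
}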
Use of org.apache.lucene.index.PrefixCodedTerms.TermIterator in project lucene-solr by apache.
The class TestPrefixCodedTerms, method testEmpty.
public void testEmpty() {
  PrefixCodedTerms.Builder b = new PrefixCodedTerms.Builder();
  PrefixCodedTerms pb = b.finish();
  TermIterator iter = pb.iterator();
  assertNull(iter.next());
}
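As a counterpart, here is a minimal sketch of a non-empty case (an assumption written for illustration, not one of the project's tests, and assuming the same LuceneTestCase/JUnit context as testEmpty): terms come back in the order they were added, and next() returns null once the iterator is exhausted.

public void testTwoTerms() {
  PrefixCodedTerms.Builder b = new PrefixCodedTerms.Builder();
  b.add(new Term("field1", "foo"));    // Builder expects sorted input
  b.add(new Term("field1", "foobar"));
  PrefixCodedTerms pb = b.finish();

  TermIterator iter = pb.iterator();
  assertEquals("foo", iter.next().utf8ToString());
  assertEquals("field1", iter.field());
  assertEquals("foobar", iter.next().utf8ToString());
  assertNull(iter.next());
}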
Use of org.apache.lucene.index.PrefixCodedTerms.TermIterator in project lucene-solr by apache.
The class DocValuesTermsQuery, method createWeight.
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
  return new ConstantScoreWeight(this, boost) {

    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      final SortedSetDocValues values = DocValues.getSortedSet(context.reader(), field);
      final LongBitSet bits = new LongBitSet(values.getValueCount());
      boolean matchesAtLeastOneTerm = false;
      TermIterator iterator = termData.iterator();
      for (BytesRef term = iterator.next(); term != null; term = iterator.next()) {
        final long ord = values.lookupTerm(term);
        if (ord >= 0) {
          matchesAtLeastOneTerm = true;
          bits.set(ord);
        }
      }
      if (matchesAtLeastOneTerm == false) {
        return null;
      }
      return new ConstantScoreScorer(this, score(), new TwoPhaseIterator(values) {

        @Override
        public boolean matches() throws IOException {
          for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values.nextOrd()) {
            if (bits.get(ord)) {
              return true;
            }
          }
          return false;
        }

        @Override
        public float matchCost() {
          // lookup in a bitset
          return 3;
        }
      });
    }
  };
}
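To see what this Weight does end to end, here is a minimal, self-contained sketch (assumed usage, not project code) that indexes one document with a SortedSetDocValuesField and counts matches with a DocValuesTermsQuery; the query stores its terms in a PrefixCodedTerms, and the scorer above walks them with TermIterator to mark the matching ordinals. The field name "category", the values, and the use of RAMDirectory are illustrative; DocValuesTermsQuery ships in the lucene-sandbox module.

import org.apache.lucene.document.Document;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.DocValuesTermsQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.BytesRef;

public class DocValuesTermsQuerySketch {
  public static void main(String[] args) throws Exception {
    RAMDirectory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig());
    Document doc = new Document();
    doc.add(new SortedSetDocValuesField("category", new BytesRef("books")));
    writer.addDocument(doc);
    writer.close();

    // Matches documents whose "category" doc values contain any of the given terms.
    IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(dir));
    int hits = searcher.count(new DocValuesTermsQuery("category", "books", "music"));
    System.out.println(hits); // expected: 1
  }
}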