Usage example of org.apache.lucene.index.SortedDocValues in the Elasticsearch project (by Elastic):
class ParentFieldSubFetchPhase, method getParentId.
/**
 * Looks up the {@code _parent} id stored in sorted doc values for a single document.
 *
 * @param fieldMapper mapper whose field name identifies the parent doc-values field
 * @param reader      segment reader to pull doc values from
 * @param docId       segment-local document id
 * @return the parent id as a UTF-8 string, or {@code null} when the document has no
 *         {@code _parent} field (no doc values, or an empty stored value)
 */
public static String getParentId(ParentFieldMapper fieldMapper, LeafReader reader, int docId) {
    final SortedDocValues docValues;
    try {
        docValues = reader.getSortedDocValues(fieldMapper.name());
    } catch (IOException e) {
        // surface I/O failures as unchecked Elasticsearch exceptions
        throw ExceptionsHelper.convertToElastic(e);
    }
    if (docValues == null) {
        // hit has no _parent field
        return null;
    }
    final BytesRef parentId = docValues.get(docId);
    if (parentId.length == 0) {
        return null;
    }
    return parentId.utf8ToString();
}
Usage example of org.apache.lucene.index.SortedDocValues in the Elasticsearch project (by Elastic):
class GlobalOrdinalsStringTermsAggregator, method newCollector.
/**
 * Builds a per-segment collector over global ordinals, picking a fast path when the
 * values are single-valued.
 *
 * @param ords global ordinals for the current segment
 * @param sub  downstream collector to forward to
 * @return a collector that records one existing bucket per ordinal of each document
 */
protected LeafBucketCollector newCollector(final RandomAccessOrds ords, final LeafBucketCollector sub) {
    grow(ords.getValueCount());
    final SortedDocValues singleValues = DocValues.unwrapSingleton(ords);
    if (singleValues == null) {
        // multi-valued path: walk every ordinal the document carries
        return new LeafBucketCollectorBase(sub, ords) {
            @Override
            public void collect(int doc, long bucket) throws IOException {
                assert bucket == 0;
                ords.setDocument(doc);
                final int count = ords.cardinality();
                for (int slot = 0; slot < count; slot++) {
                    collectExistingBucket(sub, doc, ords.ordAt(slot));
                }
            }
        };
    }
    // single-valued fast path: at most one ordinal per document
    return new LeafBucketCollectorBase(sub, ords) {
        @Override
        public void collect(int doc, long bucket) throws IOException {
            assert bucket == 0;
            final int ord = singleValues.getOrd(doc);
            if (ord >= 0) {
                collectExistingBucket(sub, doc, ord);
            }
        }
    };
}
Usage example of org.apache.lucene.index.SortedDocValues in the Elasticsearch project (by Elastic):
class ParentToChildrenAggregator, method getLeafCollector.
/**
 * Creates a collector that maps each parent document's global ordinal to the bucket(s)
 * that collected it. The common single-bucket case is stored directly in
 * {@code parentOrdToBuckets}; additional buckets overflow into
 * {@code parentOrdToOtherBuckets}.
 */
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException {
    if (valuesSource == null) {
        return LeafBucketCollector.NO_OP_COLLECTOR;
    }
    final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx);
    assert globalOrdinals != null;
    final Scorer parentScorer = parentFilter.scorer(ctx);
    final Bits parentDocs = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), parentScorer);
    return new LeafBucketCollector() {
        @Override
        public void collect(int docId, long bucket) throws IOException {
            if (parentDocs.get(docId) == false) {
                return; // not a parent document
            }
            final long globalOrdinal = globalOrdinals.getOrd(docId);
            if (globalOrdinal == -1) {
                return; // document carries no parent ordinal
            }
            if (parentOrdToBuckets.get(globalOrdinal) == -1) {
                // first bucket seen for this parent ordinal: store inline
                parentOrdToBuckets.set(globalOrdinal, bucket);
                return;
            }
            // the ordinal already maps to a bucket; append this one to the overflow table
            final long[] existing = parentOrdToOtherBuckets.get(globalOrdinal);
            if (existing == null) {
                parentOrdToOtherBuckets.put(globalOrdinal, new long[] { bucket });
            } else {
                final long[] grown = Arrays.copyOf(existing, existing.length + 1);
                grown[grown.length - 1] = bucket;
                parentOrdToOtherBuckets.put(globalOrdinal, grown);
            }
            multipleBucketsPerParentOrd = true;
        }
    };
}
Usage example of org.apache.lucene.index.SortedDocValues in the Elasticsearch project (by Elastic):
class ParentChildFieldDataTests, method testThreads.
/**
 * Verifies that global parent/child ordinals can be read concurrently: several threads
 * repeatedly look up every parent id and compare against a snapshot taken up front.
 */
public void testThreads() throws Exception {
    final ParentChildIndexFieldData indexFieldData = getForField(childType);
    final DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId(new Index("test", ""), 0));
    final IndexParentChildFieldData global = indexFieldData.loadGlobal(reader);
    final AtomicReference<Exception> error = new AtomicReference<>();
    final int numThreads = scaledRandomIntBetween(3, 8);
    final Thread[] threads = new Thread[numThreads];
    final CountDownLatch latch = new CountDownLatch(1);

    // Snapshot the expected parent ids per segment, keyed by core cache key.
    final Map<Object, BytesRef[]> expected = new HashMap<>();
    for (LeafReaderContext context : reader.leaves()) {
        AtomicParentChildFieldData leafData = global.load(context);
        SortedDocValues parentIds = leafData.getOrdinalsValues(parentType);
        final BytesRef[] ids = new BytesRef[parentIds.getValueCount()];
        for (int ord = 0; ord < parentIds.getValueCount(); ++ord) {
            final BytesRef id = parentIds.lookupOrd(ord);
            if (id != null) {
                // deep copy: lookupOrd may reuse its returned BytesRef
                ids[ord] = BytesRef.deepCopyOf(id);
            }
        }
        expected.put(context.reader().getCoreCacheKey(), ids);
    }
    for (int t = 0; t < numThreads; ++t) {
        threads[t] = new Thread(() -> {
            try {
                latch.await(); // start all threads at once
                for (int iter = 0; iter < 100000; ++iter) {
                    for (LeafReaderContext context : reader.leaves()) {
                        AtomicParentChildFieldData leafData = global.load(context);
                        SortedDocValues parentIds = leafData.getOrdinalsValues(parentType);
                        final BytesRef[] expectedIds = expected.get(context.reader().getCoreCacheKey());
                        for (int ord = 0; ord < parentIds.getValueCount(); ++ord) {
                            assertEquals(expectedIds[ord], parentIds.lookupOrd(ord));
                        }
                    }
                }
            } catch (Exception e) {
                // keep only the first failure
                error.compareAndSet(null, e);
            }
        });
        threads[t].start();
    }
    latch.countDown();
    for (Thread thread : threads) {
        thread.join();
    }
    if (error.get() != null) {
        throw error.get();
    }
}
Usage example of org.apache.lucene.index.SortedDocValues in the Elasticsearch project (by Elastic):
class SingleOrdinalsTests, method testSvValues.
/**
 * Checks that an OrdinalsBuilder fed strictly single-valued docs produces
 * SinglePackedOrdinals, and that the unwrapped singleton doc values return the
 * same ordinal per document that was assigned during building.
 */
public void testSvValues() throws IOException {
    final int docCount = 1000000;
    final int docsPerOrdinal = docCount / 4;
    final Map<Integer, Long> expectedOrdinalByDoc = new HashMap<>();
    final OrdinalsBuilder builder = new OrdinalsBuilder(docCount);
    long currentOrd = builder.currentOrdinal();
    for (int doc = 0; doc < docCount; doc++) {
        // advance to a fresh ordinal every docsPerOrdinal documents
        if (doc % docsPerOrdinal == 0) {
            currentOrd = builder.nextOrdinal();
        }
        expectedOrdinalByDoc.put(doc, currentOrd);
        builder.addDoc(doc);
    }
    final Ordinals built = builder.build();
    assertThat(built, instanceOf(SinglePackedOrdinals.class));
    final RandomAccessOrds ordinals = built.ordinals();
    final SortedDocValues singleOrds = DocValues.unwrapSingleton(ordinals);
    assertNotNull(singleOrds);
    expectedOrdinalByDoc.forEach((doc, expectedOrd) ->
        assertThat(expectedOrd, equalTo((long) singleOrds.getOrd(doc))));
}
Aggregations