Use of org.apache.lucene.index.DirectoryReader in project elasticsearch by elastic.
From class BitSetFilterCacheTests, method testRejectOtherIndex:
public void testRejectOtherIndex() throws IOException {
    BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, new BitsetFilterCache.Listener() {
        @Override
        public void onCache(ShardId shardId, Accountable accountable) {
        }

        @Override
        public void onRemoval(ShardId shardId, Accountable accountable) {
        }
    });
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig());
    writer.addDocument(new Document());
    DirectoryReader reader = DirectoryReader.open(writer);
    writer.close();
    // Wrap the reader with a shard id from a different index ("test2") than the
    // one the cache was created for.
    reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId("test2", "_na_", 0));
    BitSetProducer producer = cache.getBitSetProducer(new MatchAllDocsQuery());
    try {
        producer.getBitSet(reader.leaves().get(0));
        fail("expected the cache to reject a reader from another index");
    } catch (IllegalStateException expected) {
        assertEquals("Trying to load bit set for index [test2] with cache of index [test]", expected.getMessage());
    } finally {
        IOUtils.close(reader, dir);
    }
}
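For contrast, a minimal sketch of the accepted path (not part of the original test): if the reader is wrapped with a ShardId whose index name matches the cache's own index, here "test", assuming the fixture's INDEX_SETTINGS points at an index named "test", the bit set loads without error. It reuses the cache built above; the cardinality assertion is illustrative only.

    // Hedged sketch: same-index wrapping succeeds where the cross-index case failed.
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig());
    writer.addDocument(new Document());
    DirectoryReader reader = ElasticsearchDirectoryReader.wrap(
            DirectoryReader.open(writer), new ShardId("test", "_na_", 0)); // index name matches the cache
    writer.close();
    BitSetProducer producer = cache.getBitSetProducer(new MatchAllDocsQuery());
    BitSet bitSet = producer.getBitSet(reader.leaves().get(0)); // no IllegalStateException
    assertEquals(1, bitSet.cardinality()); // the single document matches
    IOUtils.close(reader, dir);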
Use of org.apache.lucene.index.DirectoryReader in project elasticsearch by elastic.
From class AbstractStringFieldDataTestCase, method testNestedSorting:
public void testNestedSorting(MultiValueMode sortMode) throws IOException {
    final String[] values = new String[randomIntBetween(2, 20)];
    for (int i = 0; i < values.length; ++i) {
        values[i] = TestUtil.randomSimpleString(random());
    }
    final int numParents = scaledRandomIntBetween(10, 3072);
    List<Document> docs = new ArrayList<>();
    FixedBitSet parents = new FixedBitSet(64);
    for (int i = 0; i < numParents; ++i) {
        docs.clear();
        final int numChildren = randomInt(4);
        for (int j = 0; j < numChildren; ++j) {
            final Document child = new Document();
            final int numValues = randomInt(3);
            for (int k = 0; k < numValues; ++k) {
                final String value = RandomPicks.randomFrom(random(), values);
                addField(child, "text", value);
            }
            docs.add(child);
        }
        final Document parent = new Document();
        parent.add(new StringField("type", "parent", Store.YES));
        final String value = RandomPicks.randomFrom(random(), values);
        if (value != null) {
            addField(parent, "text", value);
        }
        docs.add(parent);
        // The parent is the last document of the block; record its doc ID.
        int bit = parents.prevSetBit(parents.length() - 1) + docs.size();
        parents = FixedBitSet.ensureCapacity(parents, bit);
        parents.set(bit);
        writer.addDocuments(docs);
        if (randomInt(10) == 0) {
            writer.commit();
        }
    }
    DirectoryReader directoryReader = DirectoryReader.open(writer);
    directoryReader = ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId(indexService.index(), 0));
    IndexSearcher searcher = new IndexSearcher(directoryReader);
    IndexFieldData<?> fieldData = getForField("text");
    final Object missingValue;
    switch (randomInt(4)) {
        case 0:
            missingValue = "_first";
            break;
        case 1:
            missingValue = "_last";
            break;
        case 2:
            missingValue = new BytesRef(RandomPicks.randomFrom(random(), values));
            break;
        default:
            missingValue = new BytesRef(TestUtil.randomSimpleString(random()));
            break;
    }
    Query parentFilter = new TermQuery(new Term("type", "parent"));
    Query childFilter = Queries.not(parentFilter);
    Nested nested = createNested(searcher, parentFilter, childFilter);
    BytesRefFieldComparatorSource nestedComparatorSource =
            new BytesRefFieldComparatorSource(fieldData, missingValue, sortMode, nested);
    ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(
            new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None);
    Sort sort = new Sort(new SortField("text", nestedComparatorSource));
    TopFieldDocs topDocs = searcher.search(query, randomIntBetween(1, numParents), sort);
    assertTrue(topDocs.scoreDocs.length > 0);
    BytesRef previous = null;
    for (int i = 0; i < topDocs.scoreDocs.length; ++i) {
        final int docID = topDocs.scoreDocs[i].doc;
        assertTrue("expected " + docID + " to be a parent", parents.get(docID));
        BytesRef cmpValue = null;
        // Recompute the expected sort value from the children of this parent.
        for (int child = parents.prevSetBit(docID - 1) + 1; child < docID; ++child) {
            String[] sVals = searcher.doc(child).getValues("text");
            final BytesRef[] vals;
            if (sVals.length == 0) {
                vals = new BytesRef[0];
            } else {
                vals = new BytesRef[sVals.length];
                for (int j = 0; j < vals.length; ++j) {
                    vals[j] = new BytesRef(sVals[j]);
                }
            }
            for (BytesRef value : vals) {
                if (cmpValue == null) {
                    cmpValue = value;
                } else if (sortMode == MultiValueMode.MIN && value.compareTo(cmpValue) < 0) {
                    cmpValue = value;
                } else if (sortMode == MultiValueMode.MAX && value.compareTo(cmpValue) > 0) {
                    cmpValue = value;
                }
            }
        }
        if (cmpValue == null) {
            if ("_first".equals(missingValue)) {
                cmpValue = new BytesRef();
            } else if ("_last".equals(missingValue) == false) {
                cmpValue = (BytesRef) missingValue;
            }
        }
        if (previous != null && cmpValue != null) {
            assertTrue(previous.utf8ToString() + " / " + cmpValue.utf8ToString(), previous.compareTo(cmpValue) <= 0);
        }
        previous = cmpValue;
    }
    searcher.getIndexReader().close();
}
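The verification logic leans on one invariant of Lucene's block-join indexing: a single addDocuments call writes the block contiguously with the parent last, so every set bit in the parents bit set marks the end of a block, and a parent's children are exactly the docs between the previous parent and it. A minimal sketch of that layout, with hypothetical field values:

    // Children first, parent last; one addDocuments() call keeps the block contiguous.
    List<Document> block = new ArrayList<>();
    Document child = new Document();
    child.add(new StringField("text", "a", Field.Store.NO));
    block.add(child);
    Document parent = new Document();
    parent.add(new StringField("type", "parent", Field.Store.YES));
    block.add(parent);
    writer.addDocuments(block); // doc IDs: child = N, parent = N + 1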
Use of org.apache.lucene.index.DirectoryReader in project elasticsearch by elastic.
From class FieldDataCacheTests, method testLoadGlobal_neverCacheIfFieldIsMissing:
public void testLoadGlobal_neverCacheIfFieldIsMissing() throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig iwc = new IndexWriterConfig(null);
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    IndexWriter iw = new IndexWriter(dir, iwc);
    long numDocs = scaledRandomIntBetween(32, 128);
    for (int i = 1; i <= numDocs; i++) {
        Document doc = new Document();
        doc.add(new SortedSetDocValuesField("field1", new BytesRef(String.valueOf(i))));
        doc.add(new StringField("field2", String.valueOf(i), Field.Store.NO));
        iw.addDocument(doc);
        if (i % 24 == 0) {
            iw.commit();
        }
    }
    iw.close();
    DirectoryReader ir = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(dir), new ShardId("_index", "_na_", 0));
    DummyAccountingFieldDataCache fieldDataCache = new DummyAccountingFieldDataCache();
    // Testing SortedSetDVOrdinalsIndexFieldData:
    SortedSetDVOrdinalsIndexFieldData sortedSetDVOrdinalsIndexFieldData = createSortedDV("field1", fieldDataCache);
    sortedSetDVOrdinalsIndexFieldData.loadGlobal(ir);
    assertThat(fieldDataCache.cachedGlobally, equalTo(1));
    sortedSetDVOrdinalsIndexFieldData.loadGlobal(new FieldMaskingReader("field1", ir));
    assertThat(fieldDataCache.cachedGlobally, equalTo(1));
    // Testing PagedBytesIndexFieldData:
    PagedBytesIndexFieldData pagedBytesIndexFieldData = createPagedBytes("field2", fieldDataCache);
    pagedBytesIndexFieldData.loadGlobal(ir);
    assertThat(fieldDataCache.cachedGlobally, equalTo(2));
    pagedBytesIndexFieldData.loadGlobal(new FieldMaskingReader("field2", ir));
    assertThat(fieldDataCache.cachedGlobally, equalTo(2));
    ir.close();
    dir.close();
}
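The FieldMaskingReader makes loadGlobal see an index in which the field does not exist, and the unchanged counter shows that nothing new was cached for it. A hedged sketch of the kind of field-existence check that makes such a guard possible, using plain Lucene APIs rather than the actual Elasticsearch implementation:

    // Only worth caching global ordinals if some segment actually has the field.
    boolean fieldExists = false;
    for (LeafReaderContext leaf : ir.leaves()) {
        if (leaf.reader().getFieldInfos().fieldInfo("field1") != null) {
            fieldExists = true;
            break;
        }
    }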
Use of org.apache.lucene.index.DirectoryReader in project elasticsearch by elastic.
From class ParentChildFieldDataTests, method testThreads:
public void testThreads() throws Exception {
    final ParentChildIndexFieldData indexFieldData = getForField(childType);
    final DirectoryReader reader = ElasticsearchDirectoryReader.wrap(
            DirectoryReader.open(writer), new ShardId(new Index("test", ""), 0));
    final IndexParentChildFieldData global = indexFieldData.loadGlobal(reader);
    final AtomicReference<Exception> error = new AtomicReference<>();
    final int numThreads = scaledRandomIntBetween(3, 8);
    final Thread[] threads = new Thread[numThreads];
    final CountDownLatch latch = new CountDownLatch(1);
    // Precompute the expected parent ids per segment on the main thread.
    final Map<Object, BytesRef[]> expected = new HashMap<>();
    for (LeafReaderContext context : reader.leaves()) {
        AtomicParentChildFieldData leafData = global.load(context);
        SortedDocValues parentIds = leafData.getOrdinalsValues(parentType);
        final BytesRef[] ids = new BytesRef[parentIds.getValueCount()];
        for (int j = 0; j < parentIds.getValueCount(); ++j) {
            final BytesRef id = parentIds.lookupOrd(j);
            if (id != null) {
                ids[j] = BytesRef.deepCopyOf(id);
            }
        }
        expected.put(context.reader().getCoreCacheKey(), ids);
    }
    for (int i = 0; i < numThreads; ++i) {
        threads[i] = new Thread() {
            @Override
            public void run() {
                try {
                    latch.await();
                    for (int i = 0; i < 100000; ++i) {
                        for (LeafReaderContext context : reader.leaves()) {
                            AtomicParentChildFieldData leafData = global.load(context);
                            SortedDocValues parentIds = leafData.getOrdinalsValues(parentType);
                            final BytesRef[] expectedIds = expected.get(context.reader().getCoreCacheKey());
                            for (int j = 0; j < parentIds.getValueCount(); ++j) {
                                final BytesRef id = parentIds.lookupOrd(j);
                                assertEquals(expectedIds[j], id);
                            }
                        }
                    }
                } catch (Exception e) {
                    error.compareAndSet(null, e);
                }
            }
        };
        threads[i].start();
    }
    latch.countDown();
    for (Thread thread : threads) {
        thread.join();
    }
    if (error.get() != null) {
        throw error.get();
    }
}
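The deepCopyOf in the setup is load-bearing: SortedDocValues.lookupOrd may return a BytesRef whose backing array is reused by subsequent calls, so storing the returned reference directly would let later lookups overwrite earlier expected values. In miniature, against the test's parentIds:

    BytesRef shared = parentIds.lookupOrd(0);    // may point into a reused buffer
    BytesRef safe = BytesRef.deepCopyOf(shared); // owns its own byte[] and stays stable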
Use of org.apache.lucene.index.DirectoryReader in project elasticsearch by elastic.
From class IndexFieldDataServiceTests, method testFieldDataCacheListener:
public void testFieldDataCacheListener() throws Exception {
    final IndexService indexService = createIndex("test");
    final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
    // Copy the ifdService since we can set the listener only once.
    final IndexFieldDataService ifdService = new IndexFieldDataService(indexService.getIndexSettings(),
            indicesService.getIndicesFieldDataCache(), indicesService.getCircuitBreakerService(),
            indexService.mapperService());
    final BuilderContext ctx = new BuilderContext(indexService.getIndexSettings().getSettings(), new ContentPath(1));
    final MappedFieldType mapper1 = new TextFieldMapper.Builder("s").fielddata(true).build(ctx).fieldType();
    final IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new KeywordAnalyzer()));
    Document doc = new Document();
    doc.add(new StringField("s", "thisisastring", Store.NO));
    writer.addDocument(doc);
    DirectoryReader open = DirectoryReader.open(writer);
    final boolean wrap = randomBoolean();
    final IndexReader reader = wrap ? ElasticsearchDirectoryReader.wrap(open, new ShardId("test", "_na_", 1)) : open;
    final AtomicInteger onCacheCalled = new AtomicInteger();
    final AtomicInteger onRemovalCalled = new AtomicInteger();
    ifdService.setListener(new IndexFieldDataCache.Listener() {
        @Override
        public void onCache(ShardId shardId, String fieldName, Accountable ramUsage) {
            if (wrap) {
                assertEquals(new ShardId("test", "_na_", 1), shardId);
            } else {
                assertNull(shardId);
            }
            onCacheCalled.incrementAndGet();
        }

        @Override
        public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) {
            if (wrap) {
                assertEquals(new ShardId("test", "_na_", 1), shardId);
            } else {
                assertNull(shardId);
            }
            onRemovalCalled.incrementAndGet();
        }
    });
    IndexFieldData<?> ifd = ifdService.getForField(mapper1);
    LeafReaderContext leafReaderContext = reader.getContext().leaves().get(0);
    AtomicFieldData load = ifd.load(leafReaderContext);
    assertEquals(1, onCacheCalled.get());
    assertEquals(0, onRemovalCalled.get());
    reader.close();
    load.close();
    writer.close();
    assertEquals(1, onCacheCalled.get());
    assertEquals(1, onRemovalCalled.get());
    ifdService.clear();
}
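The listener receives a real ShardId only when the reader was wrapped, because shard attribution works by locating an ElasticsearchDirectoryReader in the wrap chain. A hedged sketch of that lookup; the helper and accessor names below match the Elasticsearch 5.x sources as far as I can tell, so treat them as assumptions:

    // Returns null for a plain DirectoryReader, hence the assertNull branch above.
    ElasticsearchDirectoryReader esReader =
            ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(open);
    ShardId shardId = esReader != null ? esReader.shardId() : null;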