Use of org.apache.lucene.search.IndexSearcher in project neo4j by neo4j: the class SimpleUniquenessVerifierTest, method numericIndexVerificationSomeWithDuplicates.
@Test
public void numericIndexVerificationSomeWithDuplicates() throws Exception {
    // Long.MAX_VALUE appears twice, so verification must report a conflict
    List<Object> data = asList(Integer.MAX_VALUE, Long.MAX_VALUE, 42, Long.MAX_VALUE);
    PropertyAccessor propertyAccessor = newPropertyAccessor(data);
    insert(data);
    IndexSearcher indexSearcher = spy(searcherManager.acquire());
    try {
        runUniquenessVerification(propertyAccessor, indexSearcher);
        fail("Exception expected");
    } catch (Throwable t) {
        assertThat(t, instanceOf(IndexEntryConflictException.class));
    }
    verify(indexSearcher).search(any(Query.class), any(Collector.class));
}
Use of org.apache.lucene.search.IndexSearcher in project neo4j by neo4j: the class SimpleUniquenessVerifierTest, method numericIndexVerificationSomePossibleDuplicates.
@Test
public void numericIndexVerificationSomePossibleDuplicates() throws Exception {
    // all values are distinct, so verification must pass even though the index may flag them as possible duplicates
    List<Object> data = asList(42, Long.MAX_VALUE - 1, Long.MAX_VALUE);
    PropertyAccessor propertyAccessor = newPropertyAccessor(data);
    insert(data);
    IndexSearcher indexSearcher = spy(searcherManager.acquire());
    runUniquenessVerification(propertyAccessor, indexSearcher);
    verify(indexSearcher).search(any(Query.class), any(Collector.class));
}
Use of org.apache.lucene.search.IndexSearcher in project neo4j by neo4j: the class WritableIndexReferenceFactory, method refresh.
/**
 * If nothing has changed underneath (since the searcher was last created
 * or refreshed) the given {@code indexReference} is returned. But if something has changed, a
 * refreshed searcher is returned. It makes use of
 * {@link DirectoryReader#openIfChanged(DirectoryReader, IndexWriter, boolean)}, which is faster than opening an
 * index from scratch.
 *
 * @param indexReference the {@link IndexReference} to refresh.
 * @return a refreshed version of the {@link IndexReference} or, if nothing has changed,
 * the given {@code indexReference}.
 * @throws RuntimeException if there's a problem with the index.
 */
@Override
IndexReference refresh(IndexReference indexReference) {
    try {
        DirectoryReader reader = (DirectoryReader) indexReference.getSearcher().getIndexReader();
        IndexWriter writer = indexReference.getWriter();
        // openIfChanged returns null when the index has not changed since `reader` was opened
        IndexReader reopened = DirectoryReader.openIfChanged(reader, writer);
        if (reopened != null) {
            IndexSearcher newSearcher = newIndexSearcher(indexReference.getIdentifier(), reopened);
            indexReference.detachOrClose();
            return new WritableIndexReference(indexReference.getIdentifier(), newSearcher, writer);
        }
        return indexReference;
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
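For context, here is a minimal sketch of the same reopen pattern using only the standard Lucene API, without neo4j's IndexReference wrapper; the method name refreshed and the close handling are assumptions for illustration.

static DirectoryReader refreshed(DirectoryReader current, IndexWriter writer) throws IOException {
    // openIfChanged returns null when nothing has changed since `current` was opened,
    // otherwise an incrementally reopened reader, cheaper than DirectoryReader.open(...)
    DirectoryReader reopened = DirectoryReader.openIfChanged(current, writer);
    if (reopened == null) {
        return current;     // index unchanged, keep using the existing reader
    }
    current.close();        // simplification: real code closes it only once no search still uses it
    return reopened;
}

Applications that do not manage readers themselves usually let Lucene's SearcherManager handle this reopen-and-swap logic instead.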
Use of org.apache.lucene.search.IndexSearcher in project neo4j by neo4j: the class SimpleUniquenessVerifier, method verify.
@Override
public void verify(PropertyAccessor accessor, int[] propKeyIds) throws IndexEntryConflictException, IOException {
    try {
        DuplicateCheckingCollector collector = DuplicateCheckingCollector.forProperties(accessor, propKeyIds);
        IndexSearcher searcher = indexSearcher();
        for (LeafReaderContext leafReaderContext : searcher.getIndexReader().leaves()) {
            Fields fields = leafReaderContext.reader().fields();
            for (String field : fields) {
                if (LuceneDocumentStructure.NODE_ID_KEY.equals(field)) {
                    continue;
                }
                TermsEnum terms = LuceneDocumentStructure.originalTerms(fields.terms(field), field);
                BytesRef termsRef;
                while ((termsRef = terms.next()) != null) {
                    // a term indexed by more than one document is a potential uniqueness violation
                    if (terms.docFreq() > 1) {
                        collector.reset();
                        searcher.search(new TermQuery(new Term(field, termsRef)), collector);
                    }
                }
            }
        }
    } catch (IOException e) {
        // the collector can only throw IOException, so conflicts arrive wrapped; unwrap them here
        Throwable cause = e.getCause();
        if (cause instanceof IndexEntryConflictException) {
            throw (IndexEntryConflictException) cause;
        }
        throw e;
    }
}
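As a rough illustration of the same idea with plain Lucene, without DuplicateCheckingCollector or LuceneDocumentStructure, the sketch below scans the terms of a single field and reports any term indexed by more than one document; the field name and method name are hypothetical.

static void printDuplicates(IndexSearcher searcher, String field) throws IOException {
    for (LeafReaderContext ctx : searcher.getIndexReader().leaves()) {
        Terms terms = ctx.reader().terms(field);
        if (terms == null) {
            continue;   // this segment has no terms for the field
        }
        TermsEnum termsEnum = terms.iterator();
        BytesRef term;
        while ((term = termsEnum.next()) != null) {
            // docFreq is per segment: more than one document in this segment shares the term
            if (termsEnum.docFreq() > 1) {
                int total = searcher.count(new TermQuery(new Term(field, BytesRef.deepCopyOf(term))));
                System.out.println(term.utf8ToString() + " is indexed by " + total + " documents");
            }
        }
    }
}

Note that docFreq is a per-segment statistic, so this sketch only flags duplicates whose documents landed in the same segment.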
Use of org.apache.lucene.search.IndexSearcher in project elasticsearch by elastic: the class FiltersAggregatorTests, method testKeyedFilter.
public void testKeyedFilter() throws Exception {
    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    Document document = new Document();
    document.add(new Field("field", "foo", fieldType));
    indexWriter.addDocument(document);
    document.clear();
    document.add(new Field("field", "else", fieldType));
    indexWriter.addDocument(document);
    // make sure we have more than one segment to test the merge
    indexWriter.commit();
    document.add(new Field("field", "foo", fieldType));
    indexWriter.addDocument(document);
    document.clear();
    document.add(new Field("field", "bar", fieldType));
    indexWriter.addDocument(document);
    document.clear();
    document.add(new Field("field", "foobar", fieldType));
    indexWriter.addDocument(document);
    indexWriter.commit();
    document.clear();
    document.add(new Field("field", "something", fieldType));
    indexWriter.addDocument(document);
    indexWriter.commit();
    document.clear();
    document.add(new Field("field", "foobar", fieldType));
    indexWriter.addDocument(document);
    indexWriter.close();
    IndexReader indexReader = DirectoryReader.open(directory);
    IndexSearcher indexSearcher = newSearcher(indexReader, true, true);
    FiltersAggregator.KeyedFilter[] keys = new FiltersAggregator.KeyedFilter[6];
    keys[0] = new FiltersAggregator.KeyedFilter("foobar", QueryBuilders.termQuery("field", "foobar"));
    keys[1] = new FiltersAggregator.KeyedFilter("bar", QueryBuilders.termQuery("field", "bar"));
    keys[2] = new FiltersAggregator.KeyedFilter("foo", QueryBuilders.termQuery("field", "foo"));
    keys[3] = new FiltersAggregator.KeyedFilter("foo2", QueryBuilders.termQuery("field", "foo"));
    keys[4] = new FiltersAggregator.KeyedFilter("same", QueryBuilders.termQuery("field", "foo"));
    // the filter name is already present, so should it be merged with the previous one?
    keys[5] = new FiltersAggregator.KeyedFilter("same", QueryBuilders.termQuery("field", "bar"));
    FiltersAggregationBuilder builder = new FiltersAggregationBuilder("test", keys);
    builder.otherBucket(true);
    builder.otherBucketKey("other");
    for (boolean doReduce : new boolean[] { true, false }) {
        final InternalFilters filters;
        if (doReduce) {
            filters = searchAndReduce(indexSearcher, new MatchAllDocsQuery(), builder, fieldType);
        } else {
            filters = search(indexSearcher, new MatchAllDocsQuery(), builder, fieldType);
        }
        assertEquals(filters.getBuckets().size(), 7);
        assertEquals(filters.getBucketByKey("foobar").getDocCount(), 2);
        assertEquals(filters.getBucketByKey("foo").getDocCount(), 2);
        assertEquals(filters.getBucketByKey("foo2").getDocCount(), 2);
        assertEquals(filters.getBucketByKey("bar").getDocCount(), 1);
        assertEquals(filters.getBucketByKey("same").getDocCount(), 1);
        assertEquals(filters.getBucketByKey("other").getDocCount(), 2);
    }
    indexReader.close();
    directory.close();
}