Example usage of org.apache.lucene.search.SearcherFactory in the Apache lucene-solr project:
class TestIDVersionPostingsFormat, method testGlobalVersions.
/**
 * Simulates optimistic concurrency in a distributed indexing app and confirms the latest
 * version always wins. Multiple threads race to update random IDs with new versions; each
 * update is only applied if the new version is greater than the version currently indexed
 * (checked first against a real-time cache, then against the NRT reader). A shared
 * {@code truth} map records the expected winning version per ID, and the final index is
 * verified against it (before and after a forceMerge).
 */
public void testGlobalVersions() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
  // Force the version-aware postings format so per-ID version lookups work:
  iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
  final RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
  IDSource idsSource = getRandomIDs();
  int numIDs = atLeast(100);
  if (VERBOSE) {
    System.out.println("TEST: " + numIDs + " ids");
  }
  // Draw from the random ID source until we have numIDs distinct IDs:
  Set<String> idsSeen = new HashSet<String>();
  while (idsSeen.size() < numIDs) {
    idsSeen.add(idsSource.next());
  }
  final String[] ids = idsSeen.toArray(new String[numIDs]);
  // One lock object per ID: only one thread may update a given ID at a time,
  // mimicking a per-document optimistic-concurrency gate:
  final Object[] locks = new Object[ids.length];
  for (int i = 0; i < locks.length; i++) {
    locks[i] = new Object();
  }
  final AtomicLong nextVersion = new AtomicLong();
  final SearcherManager mgr = new SearcherManager(w.w, new SearcherFactory());
  final Long missingValue = -1L;
  final LiveFieldValues<IndexSearcher, Long> versionValues = new LiveFieldValues<IndexSearcher, Long>(mgr, missingValue) {
    @Override
    protected Long lookupFromSearcher(IndexSearcher s, String id) {
      // We always return missing: the caller then does a lookup against the current reader
      return missingValue;
    }
  };
  // Maps each id to the version it was last indexed with (the expected winner):
  final Map<String, Long> truth = new ConcurrentHashMap<>();
  final CountDownLatch startingGun = new CountDownLatch(1);
  Thread[] threads = new Thread[TestUtil.nextInt(random(), 2, 7)];
  // Randomly choose how versions are generated: 0=random, 1=monotonic counter, 2=nanoTime:
  final int versionType = random().nextInt(3);
  if (VERBOSE) {
    if (versionType == 0) {
      System.out.println("TEST: use random versions");
    } else if (versionType == 1) {
      System.out.println("TEST: use monotonic versions");
    } else {
      System.out.println("TEST: use nanotime versions");
    }
  }
  // Run for 3 sec in normal tests, else 60 seconds for nightly:
  final long stopTime = System.currentTimeMillis() + (TEST_NIGHTLY ? 60000 : 3000);
  for (int i = 0; i < threads.length; i++) {
    threads[i] = new Thread() {
      @Override
      public void run() {
        try {
          runForReal();
        } catch (Exception e) {
          throw new RuntimeException(e);
        }
      }

      // Main per-thread loop: pick a version, pick a random ID, and try to index it
      // under optimistic-concurrency rules until stopTime is reached.
      private void runForReal() throws IOException, InterruptedException {
        startingGun.await();
        PerThreadVersionPKLookup lookup = null;
        IndexReader lookupReader = null;
        while (System.currentTimeMillis() < stopTime) {
          // Intentionally pull version first, and then sleep/yield, to provoke version conflicts:
          long newVersion;
          if (versionType == 0) {
            // Random:
            newVersion = random().nextLong() & 0x3fffffffffffffffL;
          } else if (versionType == 1) {
            // Monotonic
            newVersion = nextVersion.getAndIncrement();
          } else {
            newVersion = System.nanoTime();
          }
          if (versionType != 0) {
            // For ordered version schemes, widen the window between version
            // assignment and indexing so stale-version conflicts actually happen:
            if (random().nextBoolean()) {
              Thread.yield();
            } else {
              Thread.sleep(TestUtil.nextInt(random(), 1, 4));
            }
          }
          int x = random().nextInt(ids.length);
          // Only one thread can update an ID at once:
          synchronized (locks[x]) {
            String id = ids[x];
            // We will attempt to index id with newVersion, but only do so if id wasn't yet indexed, or it was indexed with an older
            // version (< newVersion):
            // Must lookup the RT value before pulling from the index, in case a reopen happens just after we lookup:
            Long currentVersion = versionValues.get(id);
            IndexSearcher s = mgr.acquire();
            try {
              if (VERBOSE)
                System.out.println("\n" + Thread.currentThread().getName() + ": update id=" + id + " newVersion=" + newVersion);
              if (lookup == null || lookupReader != s.getIndexReader()) {
                // TODO: sort of messy; we could add reopen to PerThreadVersionPKLookup?
                // TODO: this is thin ice .... that we don't incRef/decRef this reader we are implicitly holding onto:
                lookupReader = s.getIndexReader();
                if (VERBOSE)
                  System.out.println(Thread.currentThread().getName() + ": open new PK lookup reader=" + lookupReader);
                lookup = new PerThreadVersionPKLookup(lookupReader, "id");
              }
              Long truthVersion = truth.get(id);
              if (VERBOSE)
                System.out.println(Thread.currentThread().getName() + ": truthVersion=" + truthVersion);
              boolean doIndex;
              if (currentVersion == missingValue) {
                // Not in the real-time cache; consult the index itself:
                if (VERBOSE)
                  System.out.println(Thread.currentThread().getName() + ": id not in RT cache");
                int otherDocID = lookup.lookup(new BytesRef(id), newVersion + 1);
                if (otherDocID == -1) {
                  if (VERBOSE)
                    System.out.println(Thread.currentThread().getName() + ": id not in index, or version is <= newVersion; will index");
                  doIndex = true;
                } else {
                  if (VERBOSE)
                    System.out.println(Thread.currentThread().getName() + ": id is in index with version=" + lookup.getVersion() + "; will not index");
                  doIndex = false;
                  // The indexed version must match the truth map, since we hold this ID's lock:
                  if (truthVersion.longValue() != lookup.getVersion()) {
                    System.out.println(Thread.currentThread() + ": now fail0!");
                  }
                  assertEquals(truthVersion.longValue(), lookup.getVersion());
                }
              } else {
                // RT cache hit: compare directly against the cached version:
                if (VERBOSE)
                  System.out.println(Thread.currentThread().getName() + ": id is in RT cache: currentVersion=" + currentVersion);
                doIndex = newVersion > currentVersion;
              }
              if (doIndex) {
                if (VERBOSE)
                  System.out.println(Thread.currentThread().getName() + ": now index");
                // Sanity: we should never overwrite a newer version with an older one:
                boolean passes = truthVersion == null || truthVersion.longValue() <= newVersion;
                if (passes == false) {
                  System.out.println(Thread.currentThread() + ": now fail!");
                }
                assertTrue(passes);
                Document doc = new Document();
                doc.add(makeIDField(id, newVersion));
                w.updateDocument(new Term("id", id), doc);
                truth.put(id, newVersion);
                versionValues.add(id, newVersion);
              } else {
                if (VERBOSE)
                  System.out.println(Thread.currentThread().getName() + ": skip index");
                // Skipped: the already-indexed version must be at least as new as ours:
                assertNotNull(truthVersion);
                assertTrue(truthVersion.longValue() >= newVersion);
              }
            } finally {
              mgr.release(s);
            }
          }
        }
      }
    };
    threads[i].start();
  }
  startingGun.countDown();
  // Keep reopening the NRT reader until all indexing threads are done:
  refreshLoop: while (true) {
    Thread.sleep(TestUtil.nextInt(random(), 1, 10));
    mgr.maybeRefresh();
    for (Thread thread : threads) {
      if (thread.isAlive()) {
        continue refreshLoop;
      }
    }
    break;
  }
  // Verify final index against truth (pass 0: as-is; pass 1: after forceMerge):
  for (int i = 0; i < 2; i++) {
    mgr.maybeRefresh();
    IndexSearcher s = mgr.acquire();
    try {
      IndexReader r = s.getIndexReader();
      // cannot assert this: maybe not all IDs were indexed
      /*
      assertEquals(numIDs, r.numDocs());
      if (i == 1) {
        // After forceMerge no deleted docs:
        assertEquals(numIDs, r.maxDoc());
      }
      */
      PerThreadVersionPKLookup lookup = new PerThreadVersionPKLookup(r, "id");
      for (Map.Entry<String, Long> ent : truth.entrySet()) {
        // Every ID in truth must be present, carrying exactly its winning version:
        assertTrue(lookup.lookup(new BytesRef(ent.getKey()), -1L) != -1);
        assertEquals(ent.getValue().longValue(), lookup.getVersion());
      }
    } finally {
      mgr.release(s);
    }
    if (i == 1) {
      break;
    }
    // forceMerge and verify again
    w.forceMerge(1);
  }
  mgr.close();
  w.close();
  dir.close();
}
Example usage of org.apache.lucene.search.SearcherFactory in the Apache lucene-solr project:
class TestTryDelete, method testDeleteDocuments.
/**
 * Verifies that {@link IndexWriter#deleteDocuments} by query removes a document without an
 * explicit commit, once the {@link SearcherManager} is refreshed: the document is visible
 * before the delete and gone after {@code maybeRefresh()}.
 *
 * <p>Fixes over the previous version: every acquired {@link IndexSearcher} is now released
 * (the {@link ReferenceManager} acquire/release contract — leaking an acquire pins the
 * underlying reader), and the manager, writer, and directory are closed in a finally block
 * so the test does not leak resources on failure.
 */
public void testDeleteDocuments() throws IOException {
  Directory directory = createIndex();
  IndexWriter writer = getWriter(directory);
  ReferenceManager<IndexSearcher> mgr = new SearcherManager(writer, new SearcherFactory());
  try {
    // Before the delete: exactly one document matches foo:0.
    IndexSearcher searcher = mgr.acquire();
    try {
      TopDocs topDocs = searcher.search(new TermQuery(new Term("foo", "0")), 100);
      assertEquals(1, topDocs.totalHits);
    } finally {
      mgr.release(searcher);
    }

    long result = writer.deleteDocuments(new TermQuery(new Term("foo", "0")));
    assertTrue(result != -1);
    // Intentionally no writer.commit(): the NRT refresh below must be enough
    // for the delete to become visible.
    assertTrue(writer.hasDeletions());

    mgr.maybeRefresh();

    // After the refresh: the document is no longer found.
    searcher = mgr.acquire();
    try {
      TopDocs topDocs = searcher.search(new TermQuery(new Term("foo", "0")), 100);
      assertEquals(0, topDocs.totalHits);
    } finally {
      mgr.release(searcher);
    }
  } finally {
    mgr.close();
    writer.close();
    directory.close();
  }
}
Aggregations