Use of org.apache.jackrabbit.oak.plugins.index.lucene.IndexNode in project jackrabbit-oak by apache.
The class DocumentQueue, method processDocs.
private void processDocs(String indexPath, Iterable<LuceneDoc> docs, boolean docsFromQueue) {
    // Drop the write call if stopped
    if (stopped) {
        return;
    }
    IndexNode indexNode = tracker.acquireIndexNode(indexPath);
    if (indexNode == null) {
        log.debug("No IndexNode found for index [{}].", indexPath);
        return;
    }
    try {
        LuceneIndexWriter writer = indexNode.getLocalWriter();
        boolean docAdded = false;
        for (LuceneDoc doc : docs) {
            if (writer == null) {
                // The IndexDefinition for this IndexNode might have changed and local
                // indexing is disabled. Ignore
                log.debug("No local IndexWriter found for index [{}]. Skipping index entry for [{}]",
                        indexPath, doc.docPath);
                return;
            }
            if (doc.isProcessed()) {
                // Skip already processed doc entry
                continue;
            } else {
                doc.markProcessed();
            }
            if (doc.delete) {
                writer.deleteDocuments(doc.docPath);
            } else {
                writer.updateDocument(doc.docPath, doc.doc);
            }
            docAdded = true;
            String prefix = docsFromQueue ? "Queued" : "Direct";
            log.trace("[{}] Updated index with doc {}", prefix, doc);
        }
        if (docAdded) {
            indexNode.refreshReadersOnWriteIfRequired();
        }
    } catch (Exception e) {
        // For now we just log it. Later, if errors turn out to be frequent, indexing
        // for this index may need to be temporarily disabled
        log.warn("Error occurred while indexing index [{}]", indexPath, e);
        delegate.uncaughtException(Thread.currentThread(), e);
    } finally {
        indexNode.release();
    }
}
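For context, a minimal sketch of the caller side of this method, assuming a tracker and executor are already available. It reuses only the calls that also appear in the test further below (the DocumentQueue constructor, LuceneDoc.forUpdate, newPathField, addAllSynchronously); the index path, document content, and ImmutableMap usage are illustrative.

// Sketch only: how documents might be handed to the queue so that
// processDocs(...) above eventually runs for them.
DocumentQueue queue = new DocumentQueue(100, tracker, executor);

Document d = new Document();
d.add(newPathField("/content/a"));                                // illustrative path field
LuceneDoc doc = LuceneDoc.forUpdate("/oak:index/fooIndex", "/content/a", d);

// Hand over the docs for one index synchronously; the queue routes them to processDocs
queue.addAllSynchronously(ImmutableMap.of("/oak:index/fooIndex", singletonList(doc)));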
Use of org.apache.jackrabbit.oak.plugins.index.lucene.IndexNode in project jackrabbit-oak by apache.
The class LuceneIndexPropertyQuery, method getIndexedPaths.
@Override
public Iterable<String> getIndexedPaths(String propertyRelativePath, String value) {
    List<String> indexPaths = new ArrayList<>(2);
    IndexNode indexNode = tracker.acquireIndexNode(indexPath);
    if (indexNode != null) {
        try {
            TermQuery query = new TermQuery(new Term(propertyRelativePath, value));
            // By design such a query should not result in more than 1 result,
            // so just use 10 as batch size
            TopDocs docs = indexNode.getSearcher().search(query, 10);
            IndexReader reader = indexNode.getSearcher().getIndexReader();
            for (ScoreDoc d : docs.scoreDocs) {
                PathStoredFieldVisitor visitor = new PathStoredFieldVisitor();
                reader.document(d.doc, visitor);
                indexPaths.add(visitor.getPath());
            }
        } catch (IOException e) {
            log.warn("Error occurred while checking index {} for unique value [{}] for [{}]",
                    indexPath, value, propertyRelativePath, e);
        } finally {
            indexNode.release();
        }
    }
    return indexPaths;
}
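A hypothetical caller, just to show the shape of the result: the property name and value below are made up, and propertyQuery stands for a LuceneIndexPropertyQuery configured for the index in question.

// Sketch only: any returned path means some node already carries this value
for (String path : propertyQuery.getIndexedPaths("jcr:content/uuid", "some-value")) {
    log.info("Value already indexed at {}", path);
}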
Use of org.apache.jackrabbit.oak.plugins.index.lucene.IndexNode in project jackrabbit-oak by apache.
The class ReaderRefCountIT, method runMultiReaderScenario.
private void runMultiReaderScenario(IndexDefinitionBuilder defnb,
                                    NRTIndexFactory nrtFactory, boolean updateIndex) throws Exception {
    NodeBuilder builder = root.builder();
    builder.child("oak:index").setChildNode("fooIndex", defnb.build());
    LuceneIndexEditorContext.configureUniqueId(builder.child("oak:index").child("fooIndex"));
    NodeState repoState = builder.getNodeState();
    String indexPath = "/oak:index/fooIndex";

    AtomicBoolean stop = new AtomicBoolean();
    List<Throwable> exceptionList = new CopyOnWriteArrayList<>();

    IndexTracker tracker = new IndexTracker(
            new DefaultIndexReaderFactory(defaultMountInfoProvider(), indexCopier), nrtFactory);
    tracker.update(repoState);

    CountDownLatch errorLatch = new CountDownLatch(1);
    UncaughtExceptionHandler uh = new UncaughtExceptionHandler() {
        @Override
        public void uncaughtException(Thread t, Throwable e) {
            e.printStackTrace();
            exceptionList.add(e);
            errorLatch.countDown();
        }
    };

    DocumentQueue queue = new DocumentQueue(100, tracker, sameThreadExecutor());
    queue.setExceptionHandler(uh);

    // Writer should try to refresh the same IndexNode within the same lock,
    // i.e. simulate a scenario where DocumentQueue pushes multiple
    // sync index docs in the same commit
    Runnable writer = new Runnable() {
        @Override
        public void run() {
            while (!stop.get()) {
                Document d1 = new Document();
                d1.add(newPathField("/a/b"));
                LuceneDoc lcDoc = LuceneDoc.forUpdate(indexPath, "/a", d1);
                queue.addAllSynchronously(of(indexPath, singletonList(lcDoc)));
            }
        }
    };

    // Readers repeatedly acquire the IndexNode and perform a query
    Runnable reader = new Runnable() {
        @Override
        public void run() {
            while (!stop.get()) {
                IndexNode indexNode = tracker.acquireIndexNode(indexPath);
                if (indexNode != null) {
                    try {
                        indexNode.getSearcher().search(new MatchAllDocsQuery(), 5);
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    } finally {
                        indexNode.release();
                    }
                }
            }
        }
    };

    Runnable indexUpdater = new Runnable() {
        @Override
        public void run() {
            int count = 0;
            while (!stop.get()) {
                NodeBuilder b = repoState.builder();
                b.getChildNode("oak:index").getChildNode("fooIndex").setProperty("count", count++);
                tracker.update(b.getNodeState());
            }
        }
    };

    Thread wt = new Thread(writer);
    List<Thread> threads = new ArrayList<>();
    threads.add(wt);
    for (int i = 0; i < noOfThread; i++) {
        Thread t = new Thread(reader);
        threads.add(t);
        t.setUncaughtExceptionHandler(uh);
    }
    if (updateIndex) {
        threads.add(new Thread(indexUpdater));
    }
    for (Thread t : threads) {
        t.start();
    }

    errorLatch.await(runTimeInSecs, TimeUnit.SECONDS);

    stop.set(true);
    for (Thread t : threads) {
        t.join();
    }
    nrtFactory.close();

    if (!exceptionList.isEmpty()) {
        StringWriter sw = new StringWriter();
        PrintWriter pw = new PrintWriter(sw);
        for (Throwable t : exceptionList) {
            t.printStackTrace(pw);
        }
        pw.flush();
        fail(sw.toString());
    }
}
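A sketch of how a test method might drive this scenario; the fluent index-definition calls and the factory helper below are assumptions, not the test's exact code.

@Test
public void concurrentReadersWithSyncIndexing() throws Exception {
    IndexDefinitionBuilder defnb = new IndexDefinitionBuilder();
    // Assumed fluent calls: configure a synchronous property index on "foo"
    defnb.indexRule("nt:base").property("foo").propertyIndex().sync();
    NRTIndexFactory nrtFactory = createNRTFactory();    // hypothetical helper
    runMultiReaderScenario(defnb, nrtFactory, true);     // also exercise the index updater
}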