Example 56 with AlreadyClosedException

Use of org.apache.lucene.store.AlreadyClosedException in project lucene-solr by apache.

From the class TestSearcherManager, the method testConcurrentIndexCloseSearchAndRefresh:

public void testConcurrentIndexCloseSearchAndRefresh() throws Exception {
    final Directory dir = newFSDirectory(createTempDir());
    AtomicReference<IndexWriter> writerRef = new AtomicReference<>();
    final MockAnalyzer analyzer = new MockAnalyzer(random());
    analyzer.setMaxTokenLength(IndexWriter.MAX_TERM_LENGTH);
    writerRef.set(new IndexWriter(dir, newIndexWriterConfig(analyzer)));
    AtomicReference<SearcherManager> mgrRef = new AtomicReference<>();
    mgrRef.set(new SearcherManager(writerRef.get(), null));
    final AtomicBoolean stop = new AtomicBoolean();
    Thread indexThread = new Thread() {

        @Override
        public void run() {
            try {
                LineFileDocs docs = new LineFileDocs(random());
                long runTimeSec = TEST_NIGHTLY ? atLeast(10) : atLeast(2);
                long endTime = System.nanoTime() + runTimeSec * 1000000000;
                while (System.nanoTime() < endTime) {
                    IndexWriter w = writerRef.get();
                    w.addDocument(docs.nextDoc());
                    if (random().nextInt(1000) == 17) {
                        if (random().nextBoolean()) {
                            w.close();
                        } else {
                            w.rollback();
                        }
                        writerRef.set(new IndexWriter(dir, newIndexWriterConfig(analyzer)));
                    }
                }
                docs.close();
                stop.set(true);
                if (VERBOSE) {
                    System.out.println("TEST: index count=" + writerRef.get().maxDoc());
                }
            } catch (IOException ioe) {
                throw new RuntimeException(ioe);
            }
        }
    };
    Thread searchThread = new Thread() {

        @Override
        public void run() {
            try {
                long totCount = 0;
                while (stop.get() == false) {
                    SearcherManager mgr = mgrRef.get();
                    if (mgr != null) {
                        IndexSearcher searcher;
                        try {
                            searcher = mgr.acquire();
                        } catch (AlreadyClosedException ace) {
                            // ok
                            continue;
                        }
                        totCount += searcher.getIndexReader().maxDoc();
                        mgr.release(searcher);
                    }
                }
                if (VERBOSE) {
                    System.out.println("TEST: search totCount=" + totCount);
                }
            } catch (IOException ioe) {
                throw new RuntimeException(ioe);
            }
        }
    };
    Thread refreshThread = new Thread() {

        @Override
        public void run() {
            try {
                int refreshCount = 0;
                int aceCount = 0;
                while (stop.get() == false) {
                    SearcherManager mgr = mgrRef.get();
                    if (mgr != null) {
                        refreshCount++;
                        try {
                            mgr.maybeRefreshBlocking();
                        } catch (AlreadyClosedException ace) {
                            // ok
                            aceCount++;
                            continue;
                        }
                    }
                }
                if (VERBOSE) {
                    System.out.println("TEST: refresh count=" + refreshCount + " aceCount=" + aceCount);
                }
            } catch (IOException ioe) {
                throw new RuntimeException(ioe);
            }
        }
    };
    Thread closeThread = new Thread() {

        @Override
        public void run() {
            try {
                int closeCount = 0;
                int aceCount = 0;
                while (stop.get() == false) {
                    SearcherManager mgr = mgrRef.get();
                    assert mgr != null;
                    mgr.close();
                    closeCount++;
                    while (stop.get() == false) {
                        try {
                            mgrRef.set(new SearcherManager(writerRef.get(), null));
                            break;
                        } catch (AlreadyClosedException ace) {
                            // ok
                            aceCount++;
                        }
                    }
                }
                if (VERBOSE) {
                    System.out.println("TEST: close count=" + closeCount + " aceCount=" + aceCount);
                }
            } catch (IOException ioe) {
                throw new RuntimeException(ioe);
            }
        }
    };
    indexThread.start();
    searchThread.start();
    refreshThread.start();
    closeThread.start();
    indexThread.join();
    searchThread.join();
    refreshThread.join();
    closeThread.join();
    mgrRef.get().close();
    writerRef.get().close();
    dir.close();
}
Also used: AtomicReference (java.util.concurrent.atomic.AtomicReference), IOException (java.io.IOException), AlreadyClosedException (org.apache.lucene.store.AlreadyClosedException), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer), IndexWriter (org.apache.lucene.index.IndexWriter), RandomIndexWriter (org.apache.lucene.index.RandomIndexWriter), Directory (org.apache.lucene.store.Directory), LineFileDocs (org.apache.lucene.util.LineFileDocs)
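
The search thread above treats an AlreadyClosedException from acquire() as a benign race with the close/refresh threads and simply moves on; release() is always called for a searcher that was successfully acquired. A minimal sketch of that acquire/release pattern, condensed from the test (the helper class and method names below are placeholders, not Lucene API):

import java.io.IOException;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.SearcherManager;
import org.apache.lucene.store.AlreadyClosedException;

class SearcherCounter {
    // Returns maxDoc of the current view, or -1 if the manager was closed concurrently.
    static int countDocs(SearcherManager mgr) throws IOException {
        IndexSearcher searcher;
        try {
            searcher = mgr.acquire();
        } catch (AlreadyClosedException ace) {
            return -1; // another thread closed the manager; the caller can retry with a fresh one
        }
        try {
            return searcher.getIndexReader().maxDoc();
        } finally {
            mgr.release(searcher); // always release what was acquired
        }
    }
}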

Example 57 with AlreadyClosedException

Use of org.apache.lucene.store.AlreadyClosedException in project lucene-solr by apache.

From the class TestSearcherManager, the method testIntermediateClose:

public void testIntermediateClose() throws IOException, InterruptedException {
    Directory dir = newDirectory();
    // Test can deadlock if we use SerialMergeScheduler (SMS), so use ConcurrentMergeScheduler:
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())).setMergeScheduler(new ConcurrentMergeScheduler()));
    writer.addDocument(new Document());
    writer.commit();
    final CountDownLatch awaitEnterWarm = new CountDownLatch(1);
    final CountDownLatch awaitClose = new CountDownLatch(1);
    final AtomicBoolean triedReopen = new AtomicBoolean(false);
    final ExecutorService es = random().nextBoolean() ? null : Executors.newCachedThreadPool(new NamedThreadFactory("testIntermediateClose"));
    final SearcherFactory factory = new SearcherFactory() {

        @Override
        public IndexSearcher newSearcher(IndexReader r, IndexReader previous) {
            try {
                if (triedReopen.get()) {
                    awaitEnterWarm.countDown();
                    awaitClose.await();
                }
            } catch (InterruptedException e) {
                // ignored: just proceed to build the searcher
            }
            return new IndexSearcher(r, es);
        }
    };
    final SearcherManager searcherManager = random().nextBoolean() ? new SearcherManager(dir, factory) : new SearcherManager(writer, random().nextBoolean(), false, factory);
    if (VERBOSE) {
        System.out.println("sm created");
    }
    IndexSearcher searcher = searcherManager.acquire();
    try {
        assertEquals(1, searcher.getIndexReader().numDocs());
    } finally {
        searcherManager.release(searcher);
    }
    writer.addDocument(new Document());
    writer.commit();
    final AtomicBoolean success = new AtomicBoolean(false);
    final Throwable[] exc = new Throwable[1];
    Thread thread = new Thread(new Runnable() {

        @Override
        public void run() {
            try {
                triedReopen.set(true);
                if (VERBOSE) {
                    System.out.println("NOW call maybeRefresh");
                }
                searcherManager.maybeRefresh();
                success.set(true);
            } catch (AlreadyClosedException e) {
            // expected
            } catch (Throwable e) {
                if (VERBOSE) {
                    System.out.println("FAIL: unexpected exc");
                    e.printStackTrace(System.out);
                }
                exc[0] = e;
                // use success as the barrier here to make sure we see the write
                success.set(false);
            }
        }
    });
    thread.start();
    if (VERBOSE) {
        System.out.println("THREAD started");
    }
    awaitEnterWarm.await();
    if (VERBOSE) {
        System.out.println("NOW call close");
    }
    searcherManager.close();
    awaitClose.countDown();
    thread.join();
    expectThrows(AlreadyClosedException.class, () -> {
        searcherManager.acquire();
    });
    assertFalse(success.get());
    assertTrue(triedReopen.get());
    assertNull("" + exc[0], exc[0]);
    writer.close();
    dir.close();
    if (es != null) {
        es.shutdown();
        es.awaitTermination(1, TimeUnit.SECONDS);
    }
}
Also used: NamedThreadFactory (org.apache.lucene.util.NamedThreadFactory), ConcurrentMergeScheduler (org.apache.lucene.index.ConcurrentMergeScheduler), AlreadyClosedException (org.apache.lucene.store.AlreadyClosedException), Document (org.apache.lucene.document.Document), CountDownLatch (java.util.concurrent.CountDownLatch), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer), IndexWriter (org.apache.lucene.index.IndexWriter), RandomIndexWriter (org.apache.lucene.index.RandomIndexWriter), ExecutorService (java.util.concurrent.ExecutorService), IndexReader (org.apache.lucene.index.IndexReader), Directory (org.apache.lucene.store.Directory)
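
The contract exercised here is that once close() has been called, the pending maybeRefresh() and every later acquire() surface AlreadyClosedException. A minimal sketch of just that assertion, assuming it lives in a LuceneTestCase subclass like the test above (newDirectory, newIndexWriterConfig, random and expectThrows come from the test framework):

public void testAcquireAfterClose() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
    SearcherManager mgr = new SearcherManager(writer, null);
    mgr.close();
    // Any acquire() after close() must fail fast with AlreadyClosedException.
    expectThrows(AlreadyClosedException.class, () -> mgr.acquire());
    writer.close();
    dir.close();
}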

Example 58 with AlreadyClosedException

Use of org.apache.lucene.store.AlreadyClosedException in project OpenGrok by OpenGrok.

From the class IndexDatabase, the method indexParallel:

/**
 * Executes the second, parallel stage of indexing.
 * @param args contains a list of files to index, found during the earlier
 * stage
 */
private void indexParallel(IndexDownArgs args) throws IOException {
    int worksCount = args.works.size();
    if (worksCount < 1)
        return;
    AtomicInteger successCounter = new AtomicInteger();
    AtomicInteger currentCounter = new AtomicInteger();
    AtomicInteger alreadyClosedCounter = new AtomicInteger();
    ObjectPool<Ctags> ctagsPool = parallelizer.getCtagsPool();
    Map<Boolean, List<IndexFileWork>> bySuccess = null;
    try {
        bySuccess = parallelizer.getForkJoinPool().submit(() -> args.works.parallelStream().collect(Collectors.groupingByConcurrent((x) -> {
            int tries = 0;
            Ctags pctags = null;
            boolean ret;
            while (true) {
                try {
                    if (alreadyClosedCounter.get() > 0) {
                        ret = false;
                    } else {
                        pctags = ctagsPool.get();
                        addFile(x.file, x.path, pctags);
                        successCounter.incrementAndGet();
                        ret = true;
                    }
                } catch (AlreadyClosedException e) {
                    alreadyClosedCounter.incrementAndGet();
                    String errmsg = String.format("ERROR addFile(): %s", x.file);
                    LOGGER.log(Level.SEVERE, errmsg, e);
                    x.exception = e;
                    ret = false;
                } catch (InterruptedException e) {
                    // Allow one retry if interrupted
                    if (++tries <= 1)
                        continue;
                    LOGGER.log(Level.WARNING, "No retry: {0}", x.file);
                    x.exception = e;
                    ret = false;
                } catch (RuntimeException | IOException e) {
                    String errmsg = String.format("ERROR addFile(): %s", x.file);
                    LOGGER.log(Level.WARNING, errmsg, e);
                    x.exception = e;
                    ret = false;
                } finally {
                    if (pctags != null) {
                        pctags.reset();
                        ctagsPool.release(pctags);
                    }
                }
                int ncount = currentCounter.incrementAndGet();
                printProgress(ncount, worksCount);
                return ret;
            }
        }))).get();
    } catch (InterruptedException | ExecutionException e) {
        int successCount = successCounter.intValue();
        double successPct = 100.0 * successCount / worksCount;
        String exmsg = String.format("%d successes (%.1f%%) after aborting parallel-indexing", successCount, successPct);
        LOGGER.log(Level.SEVERE, exmsg, e);
    }
    args.cur_count = currentCounter.intValue();
    // Start with failureCount=worksCount, and then subtract successes.
    int failureCount = worksCount;
    if (bySuccess != null) {
        List<IndexFileWork> successes = bySuccess.getOrDefault(Boolean.TRUE, null);
        if (successes != null)
            failureCount -= successes.size();
    }
    if (failureCount > 0) {
        double pctFailed = 100.0 * failureCount / worksCount;
        String exmsg = String.format("%d failures (%.1f%%) while parallel-indexing", failureCount, pctFailed);
        LOGGER.log(Level.WARNING, exmsg);
    }
    /*
     * Encountering an AlreadyClosedException is severe enough to abort the
     * run, since it will fail anyway later upon trying to commit().
     */
    int numAlreadyClosed = alreadyClosedCounter.get();
    if (numAlreadyClosed > 0) {
        throw new AlreadyClosedException(String.format("count=%d", numAlreadyClosed));
    }
}
Also used: AlreadyClosedException (org.apache.lucene.store.AlreadyClosedException), IOException (java.io.IOException), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Ctags (org.opensolaris.opengrok.analysis.Ctags), List (java.util.List), CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList), ArrayList (java.util.ArrayList), ExecutionException (java.util.concurrent.ExecutionException)
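
The pattern worth noting is that each worker only records the AlreadyClosedException in alreadyClosedCounter, lets the parallel stream finish, and the method then throws a fresh AlreadyClosedException so the whole run aborts instead of proceeding to a commit() that would fail anyway. A stripped-down sketch of that shape (the class name and the Runnable work list are placeholders, not OpenGrok API):

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.lucene.store.AlreadyClosedException;

class ParallelStageSketch {
    static void runStage(List<Runnable> works) {
        AtomicInteger alreadyClosed = new AtomicInteger();
        works.parallelStream().forEach(work -> {
            if (alreadyClosed.get() > 0) {
                return; // writer already known to be closed; skip remaining work cheaply
            }
            try {
                work.run();
            } catch (AlreadyClosedException e) {
                alreadyClosed.incrementAndGet(); // record, but let the stream finish
            }
        });
        // Abort the run after the parallel stage: a later commit() would fail anyway.
        int n = alreadyClosed.get();
        if (n > 0) {
            throw new AlreadyClosedException(String.format("count=%d", n));
        }
    }
}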

Example 59 with AlreadyClosedException

Use of org.apache.lucene.store.AlreadyClosedException in project ignite by apache.

From the class GridLuceneInputStream, the method clone:

/**
 * {@inheritDoc}
 */
@Override
public IndexInput clone() {
    GridLuceneInputStream clone = (GridLuceneInputStream) super.clone();
    if (closed)
        throw new AlreadyClosedException(toString());
    clone.isClone = true;
    return clone;
}
Also used: AlreadyClosedException (org.apache.lucene.store.AlreadyClosedException)
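
This is the usual ensureOpen-style guard: the object remembers that it has been closed and throws AlreadyClosedException so that late callers, including clone(), fail loudly instead of operating on released resources. A minimal sketch of the guard in a hypothetical resource class (not the Ignite implementation itself):

import org.apache.lucene.store.AlreadyClosedException;

class GuardedResource implements AutoCloseable {
    private volatile boolean closed;

    // Throws if the resource has been released; call at the top of every public method.
    private void ensureOpen() {
        if (closed) {
            throw new AlreadyClosedException("already closed: " + this);
        }
    }

    public int read() {
        ensureOpen();
        return 42; // placeholder payload
    }

    @Override
    public void close() {
        closed = true;
    }
}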

Example 60 with AlreadyClosedException

Use of org.apache.lucene.store.AlreadyClosedException in project OpenGrok by OpenGrok.

From the class IndexDatabase, the method indexParallel (a later variant of Example 58):

/**
 * Executes the second, parallel stage of indexing.
 * @param dir the parent directory (when appended to SOURCE_ROOT)
 * @param args contains a list of files to index, found during the earlier
 * stage
 */
private void indexParallel(String dir, IndexDownArgs args) {
    int worksCount = args.works.size();
    if (worksCount < 1) {
        return;
    }
    AtomicInteger successCounter = new AtomicInteger();
    AtomicInteger currentCounter = new AtomicInteger();
    AtomicInteger alreadyClosedCounter = new AtomicInteger();
    IndexerParallelizer parallelizer = RuntimeEnvironment.getInstance().getIndexerParallelizer();
    ObjectPool<Ctags> ctagsPool = parallelizer.getCtagsPool();
    Map<Boolean, List<IndexFileWork>> bySuccess = null;
    try (Progress progress = new Progress(LOGGER, dir, worksCount)) {
        bySuccess = parallelizer.getForkJoinPool().submit(() -> args.works.parallelStream().collect(Collectors.groupingByConcurrent((x) -> {
            int tries = 0;
            Ctags pctags = null;
            boolean ret;
            Statistics stats = new Statistics();
            while (true) {
                try {
                    if (alreadyClosedCounter.get() > 0) {
                        ret = false;
                    } else {
                        pctags = ctagsPool.get();
                        addFile(x.file, x.path, pctags);
                        successCounter.incrementAndGet();
                        ret = true;
                    }
                } catch (AlreadyClosedException e) {
                    alreadyClosedCounter.incrementAndGet();
                    String errmsg = String.format("ERROR addFile(): %s", x.file);
                    LOGGER.log(Level.SEVERE, errmsg, e);
                    x.exception = e;
                    ret = false;
                } catch (InterruptedException e) {
                    // Allow one retry if interrupted
                    if (++tries <= 1) {
                        continue;
                    }
                    LOGGER.log(Level.WARNING, "No retry: {0}", x.file);
                    x.exception = e;
                    ret = false;
                } catch (RuntimeException | IOException e) {
                    String errmsg = String.format("ERROR addFile(): %s", x.file);
                    LOGGER.log(Level.WARNING, errmsg, e);
                    x.exception = e;
                    ret = false;
                } finally {
                    if (pctags != null) {
                        pctags.reset();
                        ctagsPool.release(pctags);
                    }
                }
                progress.increment();
                stats.report(LOGGER, Level.FINEST, String.format("file ''%s'' %s", x.file, ret ? "indexed" : "failed indexing"));
                return ret;
            }
        }))).get();
    } catch (InterruptedException | ExecutionException e) {
        int successCount = successCounter.intValue();
        double successPct = 100.0 * successCount / worksCount;
        String exmsg = String.format("%d successes (%.1f%%) after aborting parallel-indexing", successCount, successPct);
        LOGGER.log(Level.SEVERE, exmsg, e);
    }
    args.cur_count = currentCounter.intValue();
    // Start with failureCount=worksCount, and then subtract successes.
    int failureCount = worksCount;
    if (bySuccess != null) {
        List<IndexFileWork> successes = bySuccess.getOrDefault(Boolean.TRUE, null);
        if (successes != null) {
            failureCount -= successes.size();
        }
    }
    if (failureCount > 0) {
        double pctFailed = 100.0 * failureCount / worksCount;
        String exmsg = String.format("%d failures (%.1f%%) while parallel-indexing", failureCount, pctFailed);
        LOGGER.log(Level.WARNING, exmsg);
    }
    /*
         * Encountering an AlreadyClosedException is severe enough to abort the
         * run, since it will fail anyway later upon trying to commit().
         */
    int numAlreadyClosed = alreadyClosedCounter.get();
    if (numAlreadyClosed > 0) {
        throw new AlreadyClosedException(String.format("count=%d", numAlreadyClosed));
    }
}
Also used: Progress (org.opengrok.indexer.util.Progress), AlreadyClosedException (org.apache.lucene.store.AlreadyClosedException), IOException (java.io.IOException), Statistics (org.opengrok.indexer.util.Statistics), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Ctags (org.opengrok.indexer.analysis.Ctags), List (java.util.List), CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList), ArrayList (java.util.ArrayList), ExecutionException (java.util.concurrent.ExecutionException)
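
Compared with Example 58, this variant adds a try-with-resources Progress and per-file Statistics reporting, but the exception handling is the same: an InterruptedException is retried exactly once, while everything else marks the file as failed. A small sketch of that retry-once loop in isolation (the class name and Callable work are placeholders):

import java.util.concurrent.Callable;

class RetryOnceSketch {
    // work stands in for the ctags fetch plus addFile() call in the examples above.
    static boolean runWithOneRetry(Callable<Void> work) {
        int tries = 0;
        while (true) {
            try {
                work.call();
                return true;
            } catch (InterruptedException e) {
                if (++tries <= 1) {
                    continue; // allow exactly one retry after an interrupt
                }
                return false; // give up on the second interrupt
            } catch (Exception e) {
                return false; // other failures are recorded by the caller, not retried
            }
        }
    }
}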

Aggregations

AlreadyClosedException (org.apache.lucene.store.AlreadyClosedException): 79
IOException (java.io.IOException): 53
LockObtainFailedException (org.apache.lucene.store.LockObtainFailedException): 16
CountDownLatch (java.util.concurrent.CountDownLatch): 15
MockDirectoryWrapper (org.apache.lucene.store.MockDirectoryWrapper): 14
TranslogCorruptedException (org.elasticsearch.index.translog.TranslogCorruptedException): 13
MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer): 12
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 11
Document (org.apache.lucene.document.Document): 11
ElasticsearchException (org.elasticsearch.ElasticsearchException): 11
ReleasableLock (org.elasticsearch.common.util.concurrent.ReleasableLock): 10
UncheckedIOException (java.io.UncheckedIOException): 9
ParsedDocument (org.elasticsearch.index.mapper.ParsedDocument): 9
EOFException (java.io.EOFException): 8
ArrayList (java.util.ArrayList): 7
FileNotFoundException (java.io.FileNotFoundException): 6
FileAlreadyExistsException (java.nio.file.FileAlreadyExistsException): 6
NoSuchFileException (java.nio.file.NoSuchFileException): 6
BrokenBarrierException (java.util.concurrent.BrokenBarrierException): 6
CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList): 6