Example use of org.apache.lucene.util.ThreadInterruptedException in the lucene-solr project (Apache): class DocumentsWriterPerThreadPool, method newThreadState.
/**
 * Returns a new {@link ThreadState} iff any new state is available otherwise
 * <code>null</code>.
 * <p>
 * NOTE: the returned {@link ThreadState} is already locked iff non-
 * <code>null</code>.
 *
 * @return a new {@link ThreadState} iff any new state is available otherwise
 * <code>null</code>
 */
private synchronized ThreadState newThreadState() {
  // Block while an abort is in flight; we are notified on this monitor once it completes.
  while (aborted) {
    try {
      wait();
    } catch (InterruptedException ie) {
      // Surface the interrupt as Lucene's unchecked wrapper.
      throw new ThreadInterruptedException(ie);
    }
  }
  final ThreadState state = new ThreadState(null);
  // Acquire the per-state lock BEFORE publishing it, so no other
  // thread can claim this ThreadState out from under the caller.
  state.lock();
  threadStates.add(state);
  return state;
}
Example use of org.apache.lucene.util.ThreadInterruptedException in the lucene-solr project (Apache): class TestIndexWriter, method testThreadInterruptDeadlock.
public void testThreadInterruptDeadlock() throws Exception {
  final IndexerThreadInterrupt t = new IndexerThreadInterrupt(1);
  t.setDaemon(true);
  t.start();
  // Force class loader to load ThreadInterruptedException
  // up front... else we can see a false failure if 2nd
  // interrupt arrives while class loader is trying to
  // init this class (in servicing a first interrupt):
  assertTrue(new ThreadInterruptedException(new InterruptedException()).getCause() instanceof InterruptedException);
  // Deliver at least 100 interrupts to the child thread, pausing between each.
  final int numInterrupts = atLeast(100);
  int delivered = 0;
  while (delivered < numInterrupts) {
    // TODO: would be nice to also sometimes interrupt the
    // CMS merge threads too ...
    Thread.sleep(10);
    // Only interrupt while the child has opted in, so we never hit it
    // inside a section that cannot tolerate interruption.
    if (t.allowInterrupt) {
      delivered++;
      t.interrupt();
    }
    if (t.isAlive() == false) {
      break;
    }
  }
  t.finish = true;
  t.join();
  if (t.failed) {
    fail(t.bytesLog.toString("UTF-8"));
  }
}
Example use of org.apache.lucene.util.ThreadInterruptedException in the lucene-solr project (Apache): class IndexSearcher, method search.
/**
 * Lower-level search API.
 * Search all leaves using the given {@link CollectorManager}. In contrast
 * to {@link #search(Query, Collector)}, this method will use the searcher's
 * {@link ExecutorService} in order to parallelize execution of the collection
 * on the configured {@link #leafSlices}.
 *
 * @param query the query to execute
 * @param collectorManager creates one collector per leaf slice and reduces them
 * @return the reduced result produced by the manager
 * @throws IOException if a low-level I/O error occurs during search
 * @see CollectorManager
 * @lucene.experimental
 */
public <C extends Collector, T> T search(Query query, CollectorManager<C, T> collectorManager) throws IOException {
  if (executor == null) {
    // No executor configured: collect sequentially with a single collector.
    final C collector = collectorManager.newCollector();
    search(query, collector);
    return collectorManager.reduce(Collections.singletonList(collector));
  } else {
    // One collector per leaf slice; scores are needed if ANY collector needs them,
    // since the same Weight is shared across all slices.
    final List<C> collectors = new ArrayList<>(leafSlices.length);
    boolean needsScores = false;
    for (int i = 0; i < leafSlices.length; ++i) {
      final C collector = collectorManager.newCollector();
      collectors.add(collector);
      needsScores |= collector.needsScores();
    }
    final Weight weight = createNormalizedWeight(query, needsScores);
    final List<Future<C>> topDocsFutures = new ArrayList<>(leafSlices.length);
    for (int i = 0; i < leafSlices.length; ++i) {
      final LeafReaderContext[] leaves = leafSlices[i].leaves;
      final C collector = collectors.get(i);
      topDocsFutures.add(executor.submit(new Callable<C>() {
        @Override
        public C call() throws Exception {
          search(Arrays.asList(leaves), weight, collector);
          return collector;
        }
      }));
    }
    // Wait for every slice to finish; each future yields the collector it filled.
    final List<C> collectedCollectors = new ArrayList<>(leafSlices.length);
    for (Future<C> future : topDocsFutures) {
      try {
        collectedCollectors.add(future.get());
      } catch (InterruptedException e) {
        throw new ThreadInterruptedException(e);
      } catch (ExecutionException e) {
        throw new RuntimeException(e);
      }
    }
    // FIX: reduce over the collectors actually returned by the futures.
    // Previously 'collectedCollectors' was built but never used and reduce()
    // read the pre-submit 'collectors' list, which only worked by coincidence
    // because each future returns the same collector instance.
    return collectorManager.reduce(collectedCollectors);
  }
}
Example use of org.apache.lucene.util.ThreadInterruptedException in the lucene-solr project (Apache): class MergeRateLimiter, method maybePause.
/**
 * Returns the number of nanoseconds spent in a paused state or <code>-1</code>
 * if no pause was applied. If the thread needs pausing, this method delegates
 * to the linked {@link OneMergeProgress}.
 *
 * @param bytes number of bytes written since the last pause decision; used to
 *        compute how long the merge must stall to honor the configured rate
 * @param curNS the caller-sampled current time, in nanoseconds
 * @return nanoseconds actually spent paused, or -1 if no pause was applied
 * @throws MergePolicy.MergeAbortedException if the merge has been aborted
 */
private long maybePause(long bytes, long curNS) throws MergePolicy.MergeAbortedException {
  // Now is a good time to abort the merge:
  if (mergeProgress.isAborted()) {
    throw new MergePolicy.MergeAbortedException("Merge aborted.");
  }
  // read from volatile rate once. (Reading it again below could observe a
  // concurrent rate change and make the pause decision inconsistent.)
  double rate = mbPerSec;
  // Seconds these bytes "should" take at the target MB/s rate.
  // NOTE(review): if rate is 0.0 this division yields Infinity; the resulting
  // pause is clamped to MAX_PAUSE_NS below and reported as STOPPED — confirm
  // that is the intended "fully stopped" behavior.
  double secondsToPause = (bytes / 1024. / 1024.) / rate;
  // Time we should sleep until; this is purely instantaneous
  // rate (just adds seconds onto the last time we had paused to);
  // maybe we should also offer decayed recent history one?
  long targetNS = lastNS + (long) (1000000000 * secondsToPause);
  long curPauseNS = targetNS - curNS;
  // We don't bother with thread pausing if the pause is smaller than 2 msec.
  if (curPauseNS <= MIN_PAUSE_NS) {
    // Set to curNS, not targetNS, to enforce the instant rate, not
    // the "averaged over all history" rate:
    lastNS = curNS;
    return -1;
  }
  // Cap each individual pause so we keep sleeping in bounded chunks; the rate
  // may be adjusted in between, and the keep-pausing condition below re-checks it.
  if (curPauseNS > MAX_PAUSE_NS) {
    curPauseNS = MAX_PAUSE_NS;
  }
  long start = System.nanoTime();
  try {
    // Delegate the actual sleep; the lambda tells pauseNanos to keep pausing
    // only while the rate we based this decision on is still current.
    mergeProgress.pauseNanos(curPauseNS, rate == 0.0 ? PauseReason.STOPPED : PauseReason.PAUSED, () -> rate == mbPerSec);
  } catch (InterruptedException ie) {
    throw new ThreadInterruptedException(ie);
  }
  return System.nanoTime() - start;
}
Example use of org.apache.lucene.util.ThreadInterruptedException in the lucene-solr project (Apache): class SleepingLockWrapper, method obtainLock.
/**
 * Repeatedly tries to obtain the named lock from the wrapped directory,
 * sleeping {@code pollInterval} msec between attempts, until either the lock
 * is acquired or {@code lockWaitTimeout} elapses.
 */
@Override
public Lock obtainLock(String lockName) throws IOException {
  // Remember the first failure so it can be reported as the root cause.
  LockObtainFailedException firstFailure = null;
  final long maxSleepCount = lockWaitTimeout / pollInterval;
  long sleepCount = 0;
  while (true) {
    try {
      return in.obtainLock(lockName);
    } catch (LockObtainFailedException failed) {
      if (firstFailure == null) {
        firstFailure = failed;
      }
    }
    try {
      Thread.sleep(pollInterval);
    } catch (InterruptedException ie) {
      throw new ThreadInterruptedException(ie);
    }
    // Stop retrying once the timeout budget is spent — unless we wait forever.
    if (sleepCount++ >= maxSleepCount && lockWaitTimeout != LOCK_OBTAIN_WAIT_FOREVER) {
      break;
    }
  }
  // we failed to obtain the lock in the required time
  String reason = "Lock obtain timed out: " + this.toString();
  if (firstFailure != null) {
    reason += ": " + firstFailure;
  }
  throw new LockObtainFailedException(reason, firstFailure);
}
Aggregations