Use of java.util.concurrent.RunnableFuture in project lucene-solr by apache:
in the class LTRScoringQuery, the method createWeightsParallel.
// end of call CreateWeightCallable
/**
 * Creates the {@link Feature.FeatureWeight}s for {@code features} in parallel on the LTR
 * thread manager's executor and appends them to {@code featureWeights} in the iteration
 * order of {@code features}.
 *
 * @param searcher the searcher each weight is created against
 * @param needsScores whether the created weights need scores
 * @param featureWeights destination list for the created weights
 * @param features the features whose weights are to be created
 * @throws RuntimeException if weight creation is interrupted or a task fails
 */
private void createWeightsParallel(IndexSearcher searcher, boolean needsScores, List<Feature.FeatureWeight> featureWeights, Collection<Feature> features) throws RuntimeException {
    final SolrQueryRequest req = getRequest();
    List<Future<Feature.FeatureWeight>> futures = new ArrayList<>(features.size());
    try {
        for (final Feature f : features) {
            CreateWeightCallable callable = new CreateWeightCallable(f, searcher, needsScores, req);
            RunnableFuture<Feature.FeatureWeight> runnableFuture = new FutureTask<>(callable);
            // Always acquire the per-query semaphore before the ltrSemaphore is acquired,
            // to guarantee that the current query stays within the limit for max. threads.
            querySemaphore.acquire();
            // may block and/or interrupt
            ltrThreadMgr.acquireLTRSemaphore();
            // releases semaphore when done
            ltrThreadMgr.execute(runnableFuture);
            futures.add(runnableFuture);
        }
        // Loop over futures to get the feature weight objects; get() blocks while a job runs.
        for (final Future<Feature.FeatureWeight> future : futures) {
            featureWeights.add(future.get());
        }
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers further up the stack can observe it.
        Thread.currentThread().interrupt();
        log.info("Error while creating weights in LTR: InterruptedException", e);
        throw new RuntimeException("Error while creating weights in LTR: " + e.getMessage(), e);
    } catch (ExecutionException e) {
        // A weight-creation task failed; report the actual failure type (the previous
        // catch-all logged every failure as an InterruptedException).
        log.info("Error while creating weights in LTR: ExecutionException", e);
        throw new RuntimeException("Error while creating weights in LTR: " + e.getMessage(), e);
    }
}
Use of java.util.concurrent.RunnableFuture in project uPortal by Jasig:
in the class DynamicThreadPoolExecutorTest, the method testExecutorsNewCachedThreadPool.
/**
 * Exercises a cached-thread-pool-style executor (mirrors Executors.newCachedThreadPool())
 * against the shared test harness.
 */
@Test
public void testExecutorsNewCachedThreadPool() throws Exception {
    final ThreadPoolExecutor executor = new ThreadPoolExecutor(0, 2, 60L, TimeUnit.SECONDS, new SynchronousQueue<Runnable>()) {
        @SuppressWarnings("unchecked")
        @Override
        protected <T> RunnableFuture<T> newTaskFor(Runnable runnable, T value) {
            // A submitted task that is already a future runs as-is instead of being wrapped.
            if (runnable instanceof RunnableFuture) {
                return (RunnableFuture<T>) runnable;
            }
            return super.newTaskFor(runnable, value);
        }

        @Override
        protected void afterExecute(Runnable r, Throwable t) {
            // Invoked after Future.get() returns, which reduces timing issues in the test.
            ((LatchFutureTask) r).done();
        }
    };
    testThreadPoolExecutor(executor, false);
}
Use of java.util.concurrent.RunnableFuture in project phoenix by apache:
in the class JobManager, the method createThreadPoolExec.
/**
 * Builds the thread pool executor used to run Phoenix jobs.
 *
 * @param keepAliveMs how long idle threads are kept alive, in milliseconds
 * @param size both the core and maximum pool size
 * @param queueSize bounded work-queue capacity; 0 selects a SynchronousQueue (direct handoff)
 * @param useInstrumentedThreadPool whether to wrap tasks for instrumentation
 * @return a configured, daemon-threaded executor whose core threads may time out
 */
public static ThreadPoolExecutor createThreadPoolExec(int keepAliveMs, int size, int queueSize, boolean useInstrumentedThreadPool) {
    // Zero length is specialized to a direct-handoff queue; otherwise use a bounded JobManager queue.
    final BlockingQueue<Runnable> queue = (queueSize == 0) ? new SynchronousQueue<Runnable>() : new JobManager<Runnable>(queueSize);
    final String name = "phoenix-" + PHOENIX_POOL_INDEX.getAndIncrement();
    final ThreadFactory threadFactory = new ThreadFactoryBuilder()
            .setNameFormat(name + "-thread-%s")
            .setDaemon(true)
            .setThreadFactory(new ContextClassLoaderThreadFactory(JobManager.class.getClassLoader()))
            .build();
    // Core threads == max threads: we never want to exceed core threads, but want to ramp
    // up to core threads *before* using the queue.
    final ThreadPoolExecutor exec;
    if (useInstrumentedThreadPool) {
        exec = new InstrumentedThreadPoolExecutor(name, size, size, keepAliveMs, TimeUnit.MILLISECONDS, queue, threadFactory) {
            @Override
            protected <T> RunnableFuture<T> newTaskFor(Callable<T> call) {
                return new InstrumentedJobFutureTask<T>(call);
            }

            @Override
            protected <T> RunnableFuture<T> newTaskFor(Runnable runnable, T value) {
                return new InstrumentedJobFutureTask<T>(runnable, value);
            }
        };
    } else {
        exec = new ThreadPoolExecutor(size, size, keepAliveMs, TimeUnit.MILLISECONDS, queue, threadFactory) {
            @Override
            protected <T> RunnableFuture<T> newTaskFor(Callable<T> call) {
                // JobFutureTask exposes the parentJobId, which the default FutureTask keeps private.
                return new JobFutureTask<T>(call);
            }

            @Override
            protected <T> RunnableFuture<T> newTaskFor(Runnable runnable, T value) {
                return new JobFutureTask<T>(runnable, value);
            }
        };
    }
    // Let core threads time out: keeps things clean when idle, and is nice for ftest modes,
    // etc., where we'd especially like these not to linger.
    exec.allowCoreThreadTimeOut(true);
    return exec;
}
Use of java.util.concurrent.RunnableFuture in project lucene-solr by apache:
in the class SimpleFacets, the method getFacetFieldCounts.
/**
 * Returns a list of value constraints and the associated facet counts
 * for each facet field specified in the params.
 *
 * <p>Fields may be faceted in parallel (up to {@link FacetParams#FACET_THREADS} threads);
 * results are collected in the order the fields were specified.
 *
 * @see FacetParams#FACET_FIELD
 * @see #getFieldMissingCount
 * @see #getFacetTermEnumCounts
 */
@SuppressWarnings("unchecked")
public NamedList<Object> getFacetFieldCounts() throws IOException, SyntaxError {
    NamedList<Object> res = new SimpleOrderedMap<>();
    String[] facetFs = global.getParams(FacetParams.FACET_FIELD);
    if (null == facetFs) {
        return res;
    }
    // Passing a negative number for FACET_THREADS implies an unlimited number of threads is acceptable.
    // Also, a subtlety of directExecutor is that no matter how many times you "submit" a job, it's really
    // just a method call in that it's run by the calling thread.
    int maxThreads = req.getParams().getInt(FacetParams.FACET_THREADS, 0);
    Executor executor = maxThreads == 0 ? directExecutor : facetExecutor;
    final Semaphore semaphore = new Semaphore((maxThreads <= 0) ? Integer.MAX_VALUE : maxThreads);
    List<Future<NamedList>> futures = new ArrayList<>(facetFs.length);
    if (fdebugParent != null) {
        fdebugParent.putInfoItem("maxThreads", maxThreads);
    }
    try {
        // Loop over fields; submit to executor, keeping the future
        for (String f : facetFs) {
            if (fdebugParent != null) {
                fdebug = new FacetDebugInfo();
                fdebugParent.addChild(fdebug);
            }
            final ParsedParams parsed = parseParams(FacetParams.FACET_FIELD, f);
            final SolrParams localParams = parsed.localParams;
            final String termList = localParams == null ? null : localParams.get(CommonParams.TERMS);
            final String key = parsed.key;
            final String facetValue = parsed.facetValue;
            Callable<NamedList> callable = () -> {
                try {
                    NamedList<Object> result = new SimpleOrderedMap<>();
                    if (termList != null) {
                        // Explicit term list requested: count only those terms.
                        List<String> terms = StrUtils.splitSmart(termList, ",", true);
                        result.add(key, getListedTermCounts(facetValue, parsed, terms));
                    } else {
                        result.add(key, getTermCounts(facetValue, parsed));
                    }
                    return result;
                } catch (SolrException se) {
                    throw se;
                } catch (Exception e) {
                    throw new SolrException(ErrorCode.SERVER_ERROR, "Exception during facet.field: " + facetValue, e);
                } finally {
                    semaphore.release();
                }
            };
            RunnableFuture<NamedList> runnableFuture = new FutureTask<>(callable);
            // may block and/or interrupt
            semaphore.acquire();
            // releases semaphore when done
            executor.execute(runnableFuture);
            futures.add(runnableFuture);
        }
        // Loop over futures to get the values. The order is the same as facetFs but shouldn't matter.
        for (Future<NamedList> future : futures) {
            res.addAll(future.get());
        }
        assert semaphore.availablePermits() >= maxThreads;
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers further up the stack can observe it.
        Thread.currentThread().interrupt();
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error while processing facet fields: InterruptedException", e);
    } catch (ExecutionException ee) {
        // unwrap the task's failure
        Throwable e = ee.getCause();
        if (e instanceof RuntimeException) {
            throw (RuntimeException) e;
        }
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error while processing facet fields: " + e.toString(), e);
    }
    return res;
}
Aggregations