Use of java.util.concurrent.ThreadPoolExecutor in project elasticsearch by elastic.
The class ThreadPool, method stats().
public ThreadPoolStats stats() {
    List<ThreadPoolStats.Stats> stats = new ArrayList<>();
    for (ExecutorHolder holder : executors.values()) {
        String name = holder.info.getName();
        // no need to have info on "same" thread pool
        if ("same".equals(name)) {
            continue;
        }
        int threads = -1;
        int queue = -1;
        int active = -1;
        long rejected = -1;
        int largest = -1;
        long completed = -1;
        if (holder.executor() instanceof ThreadPoolExecutor) {
            ThreadPoolExecutor threadPoolExecutor = (ThreadPoolExecutor) holder.executor();
            threads = threadPoolExecutor.getPoolSize();
            queue = threadPoolExecutor.getQueue().size();
            active = threadPoolExecutor.getActiveCount();
            largest = threadPoolExecutor.getLargestPoolSize();
            completed = threadPoolExecutor.getCompletedTaskCount();
            RejectedExecutionHandler rejectedExecutionHandler = threadPoolExecutor.getRejectedExecutionHandler();
            if (rejectedExecutionHandler instanceof XRejectedExecutionHandler) {
                rejected = ((XRejectedExecutionHandler) rejectedExecutionHandler).rejected();
            }
        }
        stats.add(new ThreadPoolStats.Stats(name, threads, queue, active, rejected, largest, completed));
    }
    return new ThreadPoolStats(stats);
}
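The stats are read from the standard ThreadPoolExecutor accessors; the rejected count stays at -1 unless Elasticsearch's own XRejectedExecutionHandler is installed, since the JDK handlers keep no rejection counter. A minimal standalone sketch of the same accessors (the PoolStatsDemo class and its dummy tasks are illustrative, not Elasticsearch code):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class PoolStatsDemo {
    public static void main(String[] args) throws InterruptedException {
        // Illustrative pool; the sizes are arbitrary.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(2, 2, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        for (int i = 0; i < 10; i++) {
            pool.submit(() -> { /* placeholder work */ });
        }
        // Same accessors used in ThreadPool.stats() above.
        System.out.println("threads=" + pool.getPoolSize()
                + " queue=" + pool.getQueue().size()
                + " active=" + pool.getActiveCount()
                + " largest=" + pool.getLargestPoolSize()
                + " completed=" + pool.getCompletedTaskCount());
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}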
Use of java.util.concurrent.ThreadPoolExecutor in project storm by apache.
The class SaslTransportPlugin, method getServer().
@Override
public TServer getServer(TProcessor processor) throws IOException, TTransportException {
    int port = type.getPort(storm_conf);
    Integer socketTimeout = type.getSocketTimeOut(storm_conf);
    TTransportFactory serverTransportFactory = getServerTransportFactory();
    TServerSocket serverTransport = null;
    if (socketTimeout != null) {
        serverTransport = new TServerSocket(port, socketTimeout);
    } else {
        serverTransport = new TServerSocket(port);
    }
    int numWorkerThreads = type.getNumThreads(storm_conf);
    Integer queueSize = type.getQueueSize(storm_conf);
    TThreadPoolServer.Args server_args = new TThreadPoolServer.Args(serverTransport)
            .processor(new TUGIWrapProcessor(processor))
            .minWorkerThreads(numWorkerThreads)
            .maxWorkerThreads(numWorkerThreads)
            .protocolFactory(new TBinaryProtocol.Factory(false, true));
    if (serverTransportFactory != null) {
        server_args.transportFactory(serverTransportFactory);
    }
    BlockingQueue workQueue = new SynchronousQueue();
    if (queueSize != null) {
        workQueue = new ArrayBlockingQueue(queueSize);
    }
    ThreadPoolExecutor executorService = new ExtendedThreadPoolExecutor(numWorkerThreads, numWorkerThreads, 60, TimeUnit.SECONDS, workQueue);
    server_args.executorService(executorService);
    return new TThreadPoolServer(server_args);
}
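The worker pool is fixed at numWorkerThreads (core == max), so the queue choice decides what happens when every worker is busy: the default SynchronousQueue only hands a task off to an idle worker and otherwise rejects it, while an ArrayBlockingQueue of queueSize buffers that many pending requests first. A minimal sketch of the two configurations (the WorkQueueChoice class and its sizes are illustrative, not Storm code):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class WorkQueueChoice {
    // Hand-off only: with core == max and a SynchronousQueue, a task is accepted
    // only when a worker thread is free; otherwise it is rejected immediately.
    static ThreadPoolExecutor handOff(int workers) {
        return new ThreadPoolExecutor(workers, workers, 60, TimeUnit.SECONDS, new SynchronousQueue<>());
    }

    // Bounded buffer: up to queueSize tasks wait in the queue before rejection kicks in.
    static ThreadPoolExecutor buffered(int workers, int queueSize) {
        return new ThreadPoolExecutor(workers, workers, 60, TimeUnit.SECONDS, new ArrayBlockingQueue<>(queueSize));
    }
}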
Use of java.util.concurrent.ThreadPoolExecutor in project storm by apache.
The class SimpleTransportPlugin, method getServer().
@Override
public TServer getServer(TProcessor processor) throws IOException, TTransportException {
    int port = type.getPort(storm_conf);
    TNonblockingServerSocket serverTransport = new TNonblockingServerSocket(port);
    int numWorkerThreads = type.getNumThreads(storm_conf);
    int maxBufferSize = type.getMaxBufferSize(storm_conf);
    Integer queueSize = type.getQueueSize(storm_conf);
    THsHaServer.Args server_args = new THsHaServer.Args(serverTransport)
            .processor(new SimpleWrapProcessor(processor))
            .maxWorkerThreads(numWorkerThreads)
            .protocolFactory(new TBinaryProtocol.Factory(false, true, maxBufferSize, -1));
    server_args.maxReadBufferBytes = maxBufferSize;
    if (queueSize != null) {
        server_args.executorService(new ThreadPoolExecutor(numWorkerThreads, numWorkerThreads, 60, TimeUnit.SECONDS,
                new ArrayBlockingQueue(queueSize)));
    }
    // construct THsHaServer
    return new THsHaServer(server_args);
}
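As in the SASL variant, once the bounded ArrayBlockingQueue fills and all workers are busy, ThreadPoolExecutor's default AbortPolicy throws RejectedExecutionException. If back-pressure on the submitting thread is preferred over outright rejection, CallerRunsPolicy is one common alternative; a hedged sketch of that option (the BackPressurePool class is illustrative, not what Storm configures here):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BackPressurePool {
    public static ThreadPoolExecutor create(int workers, int queueSize) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                workers, workers, 60, TimeUnit.SECONDS, new ArrayBlockingQueue<>(queueSize));
        // When the queue is full, run the task on the submitting thread instead of rejecting it.
        pool.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
        return pool;
    }
}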
Use of java.util.concurrent.ThreadPoolExecutor in project tomcat by apache.
The class ContainerBase, method reconfigureStartStopExecutor().
/*
* Implementation note: If there is a demand for more control than this then
* it is likely that the best solution will be to reference an external
* executor.
*/
private void reconfigureStartStopExecutor(int threads) {
    if (threads == 1) {
        if (!(startStopExecutor instanceof InlineExecutorService)) {
            startStopExecutor = new InlineExecutorService();
        }
    } else {
        if (startStopExecutor instanceof ThreadPoolExecutor) {
            ((ThreadPoolExecutor) startStopExecutor).setMaximumPoolSize(threads);
            ((ThreadPoolExecutor) startStopExecutor).setCorePoolSize(threads);
        } else {
            BlockingQueue<Runnable> startStopQueue = new LinkedBlockingQueue<>();
            ThreadPoolExecutor tpe = new ThreadPoolExecutor(threads, threads, 10, TimeUnit.SECONDS, startStopQueue,
                    new StartStopThreadFactory(getName() + "-startStop-"));
            tpe.allowCoreThreadTimeOut(true);
            startStopExecutor = tpe;
        }
    }
}
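Setting core == max and then allowCoreThreadTimeOut(true) gives a pool that is capped at threads workers yet shrinks to zero threads after the 10-second idle timeout. A generic sketch of the same pattern (the ElasticFixedPool class is illustrative, not Tomcat code):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ElasticFixedPool {
    public static ThreadPoolExecutor create(int threads) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                threads, threads, 10, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        // Core threads may also time out, so the pool drops to zero threads after 10s of inactivity.
        pool.allowCoreThreadTimeOut(true);
        return pool;
    }
}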
Use of java.util.concurrent.ThreadPoolExecutor in project hbase by apache.
The class HBaseInterClusterReplicationEndpoint, method init().
@Override
public void init(Context context) throws IOException {
    super.init(context);
    this.conf = HBaseConfiguration.create(ctx.getConfiguration());
    decorateConf();
    this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300);
    this.socketTimeoutMultiplier = this.conf.getInt("replication.source.socketTimeoutMultiplier", maxRetriesMultiplier);
    // A Replicator job is bound by the RPC timeout. We will wait this long for all Replicator
    // tasks to terminate when doStop() is called.
    long maxTerminationWaitMultiplier = this.conf.getLong("replication.source.maxterminationmultiplier",
            DEFAULT_MAX_TERMINATION_WAIT_MULTIPLIER);
    this.maxTerminationWait = maxTerminationWaitMultiplier
            * this.conf.getLong(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
    // TODO: This connection is replication specific or we should make it particular to
    // replication and make replication specific settings such as compression or codec to use
    // passing Cells.
    this.conn = (ClusterConnection) ConnectionFactory.createConnection(this.conf);
    this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000);
    this.metrics = context.getMetrics();
    // ReplicationQueueInfo parses the peerId out of the znode for us
    this.replicationSinkMgr = new ReplicationSinkManager(conn, ctx.getPeerId(), this, this.conf);
    // per sink thread pool
    this.maxThreads = this.conf.getInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY,
            HConstants.REPLICATION_SOURCE_MAXTHREADS_DEFAULT);
    this.exec = new ThreadPoolExecutor(maxThreads, maxThreads, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    this.exec.allowCoreThreadTimeOut(true);
    this.abortable = ctx.getAbortable();
    this.replicationBulkLoadDataEnabled = conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY,
            HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT);
    if (this.replicationBulkLoadDataEnabled) {
        replicationClusterId = this.conf.get(HConstants.REPLICATION_CLUSTER_ID);
    }
    // Construct base namespace directory and hfile archive directory path
    Path rootDir = FSUtils.getRootDir(conf);
    Path baseNSDir = new Path(HConstants.BASE_NAMESPACE_DIR);
    baseNamespaceDir = new Path(rootDir, baseNSDir);
    hfileArchiveDir = new Path(rootDir, new Path(HConstants.HFILE_ARCHIVE_DIRECTORY, baseNSDir));
}
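The exec pool created here is sized at maxThreads, uses an unbounded LinkedBlockingQueue, and lets its core threads time out so it costs nothing while replication is idle. A common way to fan a batch of tasks out over such a pool and collect results as they finish is an ExecutorCompletionService; the sketch below illustrates that pattern in isolation (the FanOutDemo class and its dummy tasks are assumptions, not the HBase code itself):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class FanOutDemo {
    public static void main(String[] args) throws Exception {
        ThreadPoolExecutor exec = new ThreadPoolExecutor(4, 4, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        exec.allowCoreThreadTimeOut(true);
        CompletionService<Integer> completion = new ExecutorCompletionService<>(exec);
        List<Callable<Integer>> tasks = Arrays.asList(() -> 1, () -> 2, () -> 3); // placeholder work
        for (Callable<Integer> task : tasks) {
            completion.submit(task);
        }
        int total = 0;
        for (int i = 0; i < tasks.size(); i++) {
            total += completion.take().get(); // blocks until the next task finishes
        }
        System.out.println("completed, total=" + total);
        exec.shutdown();
    }
}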