Use of java.util.concurrent.SynchronousQueue in project geode by Apache.
The class TcpServer, method createExecutor.
private static PooledExecutorWithDMStats createExecutor(PoolStatHelper poolHelper, final ThreadGroup threadGroup) {
  ThreadFactory factory = new ThreadFactory() {
    private final AtomicInteger threadNum = new AtomicInteger();

    public Thread newThread(Runnable r) {
      Thread thread = new Thread(threadGroup, r, "locator request thread[" + threadNum.incrementAndGet() + "]");
      thread.setDaemon(true);
      return thread;
    }
  };
  // Direct handoff: a SynchronousQueue buffers nothing, so the pool grows to
  // MAX_POOL_SIZE and then falls back to CallerRunsPolicy.
  return new PooledExecutorWithDMStats(new SynchronousQueue<Runnable>(), MAX_POOL_SIZE, poolHelper, factory, POOL_IDLE_TIMEOUT, new ThreadPoolExecutor.CallerRunsPolicy());
}
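The essential behavior here is the combination of a SynchronousQueue with CallerRunsPolicy: every execute() either hands the task directly to an idle worker or forces the pool to create a new thread up to the maximum, and beyond that the submitting thread runs the task itself, which naturally throttles the caller. A minimal, self-contained sketch of that pattern (the class name and pool parameters below are illustrative, not Geode's):

import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class DirectHandoffPoolSketch {
  public static void main(String[] args) throws InterruptedException {
    int maxPoolSize = 4; // stand-in for MAX_POOL_SIZE
    long idleTimeout = 60L; // seconds; stand-in for POOL_IDLE_TIMEOUT
    ThreadPoolExecutor pool = new ThreadPoolExecutor(0, maxPoolSize, idleTimeout, TimeUnit.SECONDS,
        new SynchronousQueue<Runnable>(), new ThreadPoolExecutor.CallerRunsPolicy());
    for (int i = 0; i < 8; i++) {
      final int n = i;
      // Once all maxPoolSize workers are busy, CallerRunsPolicy runs the
      // task on the submitting thread, throttling the caller.
      pool.execute(() -> System.out.println("task " + n + " on " + Thread.currentThread().getName()));
    }
    pool.shutdown();
    pool.awaitTermination(10, TimeUnit.SECONDS);
  }
}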
Use of java.util.concurrent.SynchronousQueue in project geode by Apache.
The class TCPConduit, method startAcceptor.
/**
 * binds the server socket and gets threads going
 */
private void startAcceptor() throws ConnectionException {
  int localPort;
  int p = this.port;
  InetAddress ba = this.address;
  {
    ThreadPoolExecutor tmp_hsPool = null;
    String gName = "P2P-Handshaker " + ba + ":" + p;
    final ThreadGroup socketThreadGroup = LoggingThreadGroup.createThreadGroup(gName, logger);
    ThreadFactory socketThreadFactory = new ThreadFactory() {
      int connNum = -1;

      public Thread newThread(Runnable command) {
        int tnum;
        synchronized (this) {
          tnum = ++connNum;
        }
        String tName = socketThreadGroup.getName() + " Thread " + tnum;
        return new Thread(socketThreadGroup, command, tName);
      }
    };
    try {
      final BlockingQueue<Runnable> bq = new SynchronousQueue<>();
      // When the pool is saturated, block on the handoff queue until a worker
      // frees up rather than dropping the handshake.
      final RejectedExecutionHandler reh = new RejectedExecutionHandler() {
        public void rejectedExecution(Runnable r, ThreadPoolExecutor pool) {
          try {
            bq.put(r);
          } catch (InterruptedException ex) {
            // preserve the interrupt state
            Thread.currentThread().interrupt();
            throw new RejectedExecutionException(LocalizedStrings.TCPConduit_INTERRUPTED.toLocalizedString(), ex);
          }
        }
      };
      tmp_hsPool = new ThreadPoolExecutor(1, HANDSHAKE_POOL_SIZE, HANDSHAKE_POOL_KEEP_ALIVE_TIME, TimeUnit.SECONDS, bq, socketThreadFactory, reh);
    } catch (IllegalArgumentException poolInitException) {
      throw new ConnectionException(LocalizedStrings.TCPConduit_WHILE_CREATING_HANDSHAKE_POOL.toLocalizedString(), poolInitException);
    }
    this.hsPool = tmp_hsPool;
  }
  createServerSocket();
  try {
    localPort = socket.getLocalPort();
    id = new InetSocketAddress(socket.getInetAddress(), localPort);
    stopped = false;
    ThreadGroup group = LoggingThreadGroup.createThreadGroup("P2P Listener Threads", logger);
    thread = new Thread(group, this, "P2P Listener Thread " + id);
    thread.setDaemon(true);
    try {
      thread.setPriority(thread.getThreadGroup().getMaxPriority());
    } catch (Exception e) {
      logger.info(LocalizedMessage.create(LocalizedStrings.TCPConduit_UNABLE_TO_SET_LISTENER_PRIORITY__0, e.getMessage()));
    }
    if (!Boolean.getBoolean("p2p.test.inhibitAcceptor")) {
      thread.start();
    } else {
      logger.fatal(LocalizedMessage.create(LocalizedStrings.TCPConduit_INHIBITACCEPTOR));
      socket.close();
      this.hsPool.shutdownNow();
    }
  } catch (IOException io) {
    String s = "While creating ServerSocket on port " + p;
    throw new ConnectionException(s, io);
  }
  this.port = localPort;
}
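The rejection handler above is a block-on-reject pattern: when all HANDSHAKE_POOL_SIZE workers are busy, bq.put(r) parks the accepting thread on the same SynchronousQueue the workers poll, so the task is handed off as soon as any worker finishes. Saturation becomes backpressure instead of a dropped connection. A standalone sketch of the same idea (class name and pool sizes are mine, not Geode's):

import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BlockOnRejectSketch {
  public static void main(String[] args) throws InterruptedException {
    final SynchronousQueue<Runnable> queue = new SynchronousQueue<>();
    // On saturation, park the submitter on the handoff queue until a worker
    // finishes its current task and polls the queue again.
    RejectedExecutionHandler blockOnReject = (r, exec) -> {
      try {
        queue.put(r);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RejectedExecutionException("interrupted while waiting for a worker", e);
      }
    };
    ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 2, 30, TimeUnit.SECONDS, queue, blockOnReject);
    for (int i = 0; i < 6; i++) {
      final int n = i;
      pool.execute(() -> {
        try {
          Thread.sleep(100); // simulate a handshake
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
        System.out.println("handled " + n);
      });
    }
    pool.shutdown();
    pool.awaitTermination(5, TimeUnit.SECONDS);
  }
}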
Use of java.util.concurrent.SynchronousQueue in project lucene-solr by Apache.
The class BasicDistributedZkTest, method testStopAndStartCoresInOneInstance.
private void testStopAndStartCoresInOneInstance() throws Exception {
  SolrClient client = clients.get(0);
  String url3 = getBaseUrl(client);
  try (final HttpSolrClient httpSolrClient = getHttpSolrClient(url3)) {
    httpSolrClient.setConnectionTimeout(15000);
    httpSolrClient.setSoTimeout(60000);
    ThreadPoolExecutor executor = null;
    try {
      executor = new ExecutorUtil.MDCAwareThreadPoolExecutor(0, Integer.MAX_VALUE, 5, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(), new DefaultSolrThreadFactory("testExecutor"));
      int cnt = 3;
      // create the cores
      createCores(httpSolrClient, executor, "multiunload2", 1, cnt);
    } finally {
      if (executor != null) {
        ExecutorUtil.shutdownAndAwaitTermination(executor);
      }
    }
  }
  ChaosMonkey.stop(cloudJettys.get(0).jetty);
  printLayout();
  Thread.sleep(5000);
  ChaosMonkey.start(cloudJettys.get(0).jetty);
  cloudClient.getZkStateReader().forceUpdateCollection("multiunload2");
  try {
    cloudClient.getZkStateReader().getLeaderRetry("multiunload2", "shard1", 30000);
  } catch (SolrException e) {
    printLayout();
    throw e;
  }
  printLayout();
}
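Apart from the shorter 5-second keep-alive and the MDC-aware subclass, the executor configured above has the same shape as a JDK cached thread pool: zero core threads, an unbounded maximum, and a SynchronousQueue handoff, so idle threads disappear and bursts of work spin up fresh ones. A minimal sketch of the equivalence (the class name below is mine):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class CachedPoolShapeSketch {
  public static void main(String[] args) {
    // Executors.newCachedThreadPool() is defined in the JDK as exactly this:
    ExecutorService cached = new ThreadPoolExecutor(0, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS,
        new SynchronousQueue<Runnable>());
    cached.execute(() -> System.out.println("ran on " + Thread.currentThread().getName()));
    cached.shutdown();
  }
}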
Use of java.util.concurrent.SynchronousQueue in project lucene-solr by Apache.
The class OverseerTaskProcessor, method run.
@Override
public void run() {
  log.debug("Process current queue of overseer operations");
  LeaderStatus isLeader = amILeader();
  while (isLeader == LeaderStatus.DONT_KNOW) {
    log.debug("am_i_leader unclear {}", isLeader);
    // not a no, not a yes: ask again
    isLeader = amILeader();
  }
  String oldestItemInWorkQueue = null;
  // hasLeftOverItems - used for avoiding re-execution of async tasks that were processed by a previous Overseer.
  // This variable is set in case there's any task found on the workQueue when the OCP starts up, and
  // the id of the queue tail is used as a marker to check for the task in the completed/failed map in zk.
  // Beyond the marker, all tasks can safely be assumed to have never been executed.
  boolean hasLeftOverItems = true;
  try {
    oldestItemInWorkQueue = workQueue.getTailId();
  } catch (KeeperException e) {
    // We don't need to handle this. This is just a fail-safe which comes in handy in skipping already processed
    // async calls.
    SolrException.log(log, "", e);
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
  }
  if (oldestItemInWorkQueue == null)
    hasLeftOverItems = false;
  else
    log.debug("Found already existing elements in the work-queue. Last element: {}", oldestItemInWorkQueue);
  try {
    prioritizer.prioritizeOverseerNodes(myId);
  } catch (Exception e) {
    if (!zkStateReader.getZkClient().isClosed()) {
      log.error("Unable to prioritize overseer ", e);
    }
  }
  // TODO: Make maxThreads configurable.
  this.tpe = new ExecutorUtil.MDCAwareThreadPoolExecutor(5, MAX_PARALLEL_TASKS, 0L, TimeUnit.MILLISECONDS, new SynchronousQueue<Runnable>(), new DefaultSolrThreadFactory("OverseerThreadFactory"));
  try {
    while (!this.isClosed) {
      try {
        isLeader = amILeader();
        if (LeaderStatus.NO == isLeader) {
          break;
        } else if (LeaderStatus.YES != isLeader) {
          log.debug("am_i_leader unclear {}", isLeader);
          // not a no, not a yes: ask again
          continue;
        }
        log.debug("Cleaning up work-queue. #Running tasks: {}", runningTasks.size());
        cleanUpWorkQueue();
        printTrackingMaps();
        boolean waited = false;
        while (runningTasks.size() > MAX_PARALLEL_TASKS) {
          synchronized (waitLock) {
            // wait for 100 ms or till a task is complete
            waitLock.wait(100);
          }
          waited = true;
        }
        if (waited)
          cleanUpWorkQueue();
        ArrayList<QueueEvent> heads = new ArrayList<>(blockedTasks.size() + MAX_PARALLEL_TASKS);
        heads.addAll(blockedTasks.values());
        // If the blocked-task list is already full, clear out at least a few
        // items in the queue before reading more items.
        if (heads.size() < MAX_BLOCKED_TASKS) {
          // instead of always reading MAX_PARALLEL_TASKS items, only fetch as many as we can execute
          int toFetch = Math.min(MAX_BLOCKED_TASKS - heads.size(), MAX_PARALLEL_TASKS - runningTasks.size());
          List<QueueEvent> newTasks = workQueue.peekTopN(toFetch, excludedTasks, 2000L);
          log.debug("Got {} tasks from work-queue : [{}]", newTasks.size(), newTasks);
          heads.addAll(newTasks);
        } else {
          // Prevent free-spinning this loop.
          Thread.sleep(1000);
        }
        if (isClosed)
          break;
        if (heads.isEmpty()) {
          continue;
        }
        // clear it now; may get refilled below.
        blockedTasks.clear();
        taskBatch.batchId++;
        boolean tooManyTasks = false;
        for (QueueEvent head : heads) {
          if (!tooManyTasks) {
            synchronized (runningTasks) {
              tooManyTasks = runningTasks.size() >= MAX_PARALLEL_TASKS;
            }
          }
          if (tooManyTasks) {
            // Too many tasks are running, just shove the rest into the "blocked" queue.
            if (blockedTasks.size() < MAX_BLOCKED_TASKS)
              blockedTasks.put(head.getId(), head);
            continue;
          }
          if (runningZKTasks.contains(head.getId()))
            continue;
          final ZkNodeProps message = ZkNodeProps.load(head.getBytes());
          OverseerMessageHandler messageHandler = selector.selectOverseerMessageHandler(message);
          final String asyncId = message.getStr(ASYNC);
          if (hasLeftOverItems) {
            if (head.getId().equals(oldestItemInWorkQueue))
              hasLeftOverItems = false;
            if (asyncId != null && (completedMap.contains(asyncId) || failureMap.contains(asyncId))) {
              log.debug("Found already processed task in workQueue, cleaning up. AsyncId [{}]", asyncId);
              workQueue.remove(head);
              continue;
            }
          }
          String operation = message.getStr(Overseer.QUEUE_OPERATION);
          OverseerMessageHandler.Lock lock = messageHandler.lockTask(message, taskBatch);
          if (lock == null) {
            log.debug("Exclusivity check failed for [{}]", message.toString());
            // we may end up crossing MAX_BLOCKED_TASKS here; that is fine
            if (blockedTasks.size() < MAX_BLOCKED_TASKS)
              blockedTasks.put(head.getId(), head);
            continue;
          }
          try {
            markTaskAsRunning(head, asyncId);
            log.debug("Marked task [{}] as running", head.getId());
          } catch (KeeperException.NodeExistsException e) {
            lock.unlock();
            // This should never happen
            log.error("Tried to pick up task [{}] when it was already running!", head.getId());
            continue;
          } catch (InterruptedException e) {
            lock.unlock();
            log.error("Thread interrupted while trying to pick task [{}] for execution.", head.getId());
            Thread.currentThread().interrupt();
            continue;
          }
          log.debug(messageHandler.getName() + ": Get the message id:" + head.getId() + " message:" + message.toString());
          Runner runner = new Runner(messageHandler, message, operation, head, lock);
          tpe.execute(runner);
        }
      } catch (KeeperException e) {
        if (e.code() == KeeperException.Code.SESSIONEXPIRED) {
          log.warn("Overseer cannot talk to ZK");
          return;
        }
        SolrException.log(log, "", e);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return;
      } catch (Exception e) {
        SolrException.log(log, "", e);
      }
    }
  } finally {
    this.close();
  }
}
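Note that this loop never leans on the executor's rejection policy: it tracks runningTasks itself and waits on waitLock before fetching more work, so tpe.execute() on the SynchronousQueue-backed pool should always find a worker or room to create one. A compact way to express the same throttle-before-submit idea, as a sketch rather than Solr's code (names such as maxParallelTasks and ThrottledSubmitSketch are mine), is a semaphore gating execute():

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Semaphore;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ThrottledSubmitSketch {
  public static void main(String[] args) throws InterruptedException {
    final int maxParallelTasks = 10; // analogous to MAX_PARALLEL_TASKS
    final Semaphore permits = new Semaphore(maxParallelTasks);
    // Unbounded maximum so execute() never rejects; the semaphore is the real limit.
    ExecutorService tpe = new ThreadPoolExecutor(0, Integer.MAX_VALUE, 5L, TimeUnit.SECONDS,
        new SynchronousQueue<Runnable>());
    for (int i = 0; i < 100; i++) {
      final int taskId = i;
      permits.acquire(); // block here instead of letting the pool reject
      tpe.execute(() -> {
        try {
          System.out.println("running task " + taskId);
        } finally {
          permits.release();
        }
      });
    }
    tpe.shutdown();
    tpe.awaitTermination(1, TimeUnit.MINUTES);
  }
}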
Use of java.util.concurrent.SynchronousQueue in project rt.equinox.framework by Eclipse.
The class TestModuleContainer, method testUsesTimeout.
// DISABLED, see bug 498064
// @Test
public void testUsesTimeout() throws BundleException {
  // Always want to go to zero threads when idle
  int coreThreads = 0;
  // use the number of processors - 1 because we use the current thread when rejected
  int maxThreads = Math.max(Runtime.getRuntime().availableProcessors() - 1, 1);
  // idle timeout; make it short to get rid of threads quickly after resolve
  int idleTimeout = 5;
  // use a synchronous queue to force thread creation
  BlockingQueue<Runnable> queue = new SynchronousQueue<Runnable>();
  // try to name the threads with a useful name
  ThreadFactory threadFactory = new ThreadFactory() {
    @Override
    public Thread newThread(Runnable r) {
      Thread t = new Thread(r, "Resolver thread - UNIT TEST"); //$NON-NLS-1$
      t.setDaemon(true);
      return t;
    }
  };
  // use a rejection policy that simply runs the task in the current thread once the max threads are reached
  RejectedExecutionHandler rejectHandler = new RejectedExecutionHandler() {
    @Override
    public void rejectedExecution(Runnable r, ThreadPoolExecutor exe) {
      r.run();
    }
  };
  ExecutorService executor = new ThreadPoolExecutor(coreThreads, maxThreads, idleTimeout, TimeUnit.SECONDS, queue, threadFactory, rejectHandler);
  ScheduledExecutorService timeoutExecutor = new ScheduledThreadPoolExecutor(1);
  Map<String, String> configuration = new HashMap<String, String>();
  configuration.put(EquinoxConfiguration.PROP_RESOLVER_BATCH_TIMEOUT, "5000");
  Map<String, String> debugOpts = Collections.emptyMap();
  DummyContainerAdaptor adaptor = new DummyContainerAdaptor(new DummyCollisionHook(false), configuration, new DummyResolverHookFactory(), new DummyDebugOptions(debugOpts));
  adaptor.setResolverExecutor(executor);
  adaptor.setTimeoutExecutor(timeoutExecutor);
  ModuleContainer container = adaptor.getContainer();
  for (int i = 1; i <= 1000; i++) {
    for (Map<String, String> manifest : getUsesTimeoutManifests("test" + i)) {
      installDummyModule(manifest, manifest.get(Constants.BUNDLE_SYMBOLICNAME), container);
    }
  }
  ResolutionReport report = container.resolve(container.getModules(), true);
  Assert.assertNull("Found resolution errors.", report.getResolutionException());
  for (Module module : container.getModules()) {
    Assert.assertEquals("Wrong state of module: " + module, State.RESOLVED, module.getState());
  }
  executor.shutdown();
  timeoutExecutor.shutdown();
  System.gc();
  System.gc();
  System.gc();
}
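The short idle timeout is what makes the "get rid of threads quickly after resolve" comment work: with zero core threads, every worker terminates once it has been idle that long, so the pool shrinks back to nothing between resolves. A standalone sketch (class name and the 1-second timeout are mine, chosen to make the shrink observable):

import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class IdleShrinkSketch {
  public static void main(String[] args) throws InterruptedException {
    ThreadPoolExecutor pool = new ThreadPoolExecutor(0, 4, 1, TimeUnit.SECONDS, // 1s idle timeout for the demo
        new SynchronousQueue<Runnable>(), (r, exe) -> r.run()); // caller-runs fallback, as in the test above
    for (int i = 0; i < 4; i++) {
      pool.execute(() -> {
      });
    }
    System.out.println("threads after burst: " + pool.getPoolSize()); // up to 4
    Thread.sleep(2000); // longer than the idle timeout
    System.out.println("threads after idling: " + pool.getPoolSize()); // expected 0
    pool.shutdown();
  }
}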