use of java.util.concurrent.BlockingQueue in project HugeCollections-OLD by peter-lawrey.
the class LocalJSR166TestCase method testEmptyFull.
/**
* Queue transitions from empty to full when elements added
*/
@Test
public void testEmptyFull() {
BlockingQueue q = new LocalConcurrentBlockingObjectQueue(2);
assertTrue(q.isEmpty());
assertEquals(2, q.remainingCapacity());
q.add(one);
assertFalse(q.isEmpty());
q.add(two);
assertFalse(q.isEmpty());
assertEquals(0, q.remainingCapacity());
assertFalse(q.offer(three));
}
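For comparison, the same empty-to-full transition can be shown with the standard bounded ArrayBlockingQueue from java.util.concurrent; this is a minimal, self-contained sketch, and the integer values stand in for the test fixture's one/two/three constants.
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
public class EmptyFullSketch {
    public static void main(String[] args) {
        // A bounded queue with capacity 2, mirroring the test above.
        BlockingQueue<Integer> q = new ArrayBlockingQueue<>(2);
        System.out.println(q.isEmpty() + " " + q.remainingCapacity()); // true 2
        q.add(1);                                  // no longer empty
        q.add(2);                                  // now full
        System.out.println(q.remainingCapacity()); // 0
        System.out.println(q.offer(3));            // false: offer fails on a full queue
    }
}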
use of java.util.concurrent.BlockingQueue in project HugeCollections-OLD by peter-lawrey.
the class LocalJSR166TestCase method testDrainToWithActivePut.
/**
* drainTo empties full queue, unblocking a waiting put.
*/
@Ignore
@Test
public void testDrainToWithActivePut() throws InterruptedException {
final BlockingQueue q = populatedQueue(SIZE);
Thread t = new Thread(new CheckedRunnable() {
public void realRun() throws InterruptedException {
q.put(new Integer(SIZE + 1));
}
});
t.start();
ArrayList l = new ArrayList();
q.drainTo(l);
assertTrue(l.size() >= SIZE);
for (int i = 0; i < SIZE; ++i) assertEquals(l.get(i), new Integer(i));
t.join();
assertTrue(q.size() + l.size() >= SIZE);
}
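The drainTo-unblocks-put behaviour the test checks can also be reproduced with a plain ArrayBlockingQueue. This is a hedged sketch that does not use the project's populatedQueue/CheckedRunnable helpers:
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
public class DrainToUnblocksPut {
    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<Integer> q = new ArrayBlockingQueue<>(3);
        for (int i = 0; i < 3; i++) q.put(i);              // fill the queue
        Thread producer = new Thread(() -> {
            try {
                q.put(3);                                  // blocks until space is freed
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        producer.start();
        List<Integer> drained = new ArrayList<>();
        q.drainTo(drained);                                // empties the queue, unblocking the put
        producer.join();
        // All four elements end up either in the drained list or back in the queue.
        System.out.println(drained.size() + q.size());     // 4
    }
}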
use of java.util.concurrent.BlockingQueue in project opennms by OpenNMS.
the class Scheduler method run.
/**
* The main method of the scheduler. This method is responsible for
* checking the runnable queues for ready objects and then enqueuing them
* into the thread pool for execution.
*/
@Override
public void run() {
synchronized (this) {
m_status = RUNNING;
}
LOG.debug("run: scheduler running");
//
for (; ; ) {
//
synchronized (this) {
if (m_status != RUNNING && m_status != PAUSED && m_status != PAUSE_PENDING && m_status != RESUME_PENDING) {
LOG.debug("run: status = {}, time to exit", m_status);
break;
}
if (m_scheduled == 0) {
try {
LOG.debug("run: no interfaces scheduled, waiting...");
wait();
} catch (InterruptedException ex) {
break;
}
}
}
// cycle through the queues checking for
// what's ready to run. The queues are keyed
// by the interval, but the mapped elements
// are peekable fifo queues.
//
int runned = 0;
synchronized (m_queues) {
//
for (Entry<Long, BlockingQueue<ReadyRunnable>> entry : m_queues.entrySet()) {
// Peek for Runnable objects until
// there are no more ready runnables
//
// Also, only go through each queue once!
// If we didn't add a count then it would
// be possible to starve other queues.
//
Long key = entry.getKey();
BlockingQueue<ReadyRunnable> in = m_queues.get(key);
if (in == null || in.isEmpty()) {
continue;
}
ReadyRunnable readyRun = null;
int maxLoops = in.size();
do {
try {
readyRun = in.peek();
if (readyRun != null) {
// Pop the interface/readyRunnable from the
// queue for execution.
//
in.take();
if (readyRun.isReady()) {
LOG.debug("run: runnable {}, executing", readyRun.getInfo());
// Add runnable to the execution queue
m_runner.execute(readyRun);
++runned;
// Increment the execution counter
++m_numTasksExecuted;
// Thread Pool Statistics
if (m_runner instanceof ThreadPoolExecutor) {
ThreadPoolExecutor e = (ThreadPoolExecutor) m_runner;
String ratio = String.format("%.3f", e.getTaskCount() > 0 ? new Double(e.getCompletedTaskCount()) / new Double(e.getTaskCount()) : 0);
LOG.debug("thread pool statistics: activeCount={}, taskCount={}, completedTaskCount={}, completedRatio={}, poolSize={}", e.getActiveCount(), e.getTaskCount(), e.getCompletedTaskCount(), ratio, e.getPoolSize());
}
} else {
in.add(readyRun);
}
}
} catch (InterruptedException ex) {
// jump all the way out
return;
}
} while (--maxLoops > 0);
}
}
//
synchronized (this) {
m_scheduled -= runned;
if (runned == 0) {
try {
wait(1000);
} catch (InterruptedException ex) {
// exit for loop
break;
}
}
}
}
// end for(;;)
LOG.debug("run: scheduler exiting, state = STOPPED");
synchronized (this) {
m_status = STOPPED;
}
}
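The heart of the loop is a bounded peek/take pass over each interval's queue, re-queuing entries that are not yet ready so that no single queue can starve the others. A reduced, self-contained sketch of that pattern follows; the ReadyRunnable interface here is a stand-in for the OpenNMS one.
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Executor;
public class ReadyPassSketch {
    interface ReadyRunnable extends Runnable {
        boolean isReady();
    }
    // One bounded pass per queue: take each head element once, run it if ready,
    // otherwise put it back at the tail so other queues are not starved.
    static int runReadyTasks(Map<Long, BlockingQueue<ReadyRunnable>> queues, Executor pool)
            throws InterruptedException {
        int executed = 0;
        for (BlockingQueue<ReadyRunnable> q : queues.values()) {
            int maxLoops = q.size();
            while (maxLoops-- > 0) {
                ReadyRunnable head = q.peek();
                if (head == null) break;
                q.take();                    // remove the element just peeked (single-consumer loop)
                if (head.isReady()) {
                    pool.execute(head);
                    executed++;
                } else {
                    q.add(head);             // not ready yet: requeue at the tail
                }
            }
        }
        return executed;
    }
}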
use of java.util.concurrent.BlockingQueue in project geode by apache.
the class ConnectionTable method createThreadPoolForIO.
private Executor createThreadPoolForIO(boolean conserveSockets) {
Executor executor = null;
final ThreadGroup connectionRWGroup = LoggingThreadGroup.createThreadGroup("P2P Reader Threads", logger);
if (conserveSockets) {
executor = new Executor() {
@Override
public void execute(Runnable command) {
Thread th = new Thread(connectionRWGroup, command);
th.setDaemon(true);
th.start();
}
};
} else {
BlockingQueue synchronousQueue = new SynchronousQueue();
ThreadFactory tf = new ThreadFactory() {
public Thread newThread(final Runnable command) {
Thread thread = new Thread(connectionRWGroup, command);
thread.setDaemon(true);
return thread;
}
};
executor = new ThreadPoolExecutor(1, Integer.MAX_VALUE, READER_POOL_KEEP_ALIVE_TIME, TimeUnit.SECONDS, synchronousQueue, tf);
}
return executor;
}
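The non-conserving branch uses a common construction: a ThreadPoolExecutor fed by a SynchronousQueue with an effectively unbounded maximum pool size, so each submitted task is handed directly to an idle or newly created thread instead of being queued. A minimal sketch of the same construction, with an illustrative 60-second keep-alive rather than Geode's READER_POOL_KEEP_ALIVE_TIME:
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
public class DirectHandoffPool {
    public static ThreadPoolExecutor newReaderPool() {
        // core=1, max=unbounded, direct hand-off queue: a new thread is created
        // whenever no idle thread is available, and idle threads die after 60s.
        return new ThreadPoolExecutor(
                1, Integer.MAX_VALUE,
                60L, TimeUnit.SECONDS,
                new SynchronousQueue<>(),
                r -> {
                    Thread t = new Thread(r, "reader-thread");
                    t.setDaemon(true);
                    return t;
                });
    }
}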
use of java.util.concurrent.BlockingQueue in project geode by apache.
the class ParallelGatewaySenderQueue method put.
public boolean put(Object object) throws InterruptedException, CacheException {
final boolean isDebugEnabled = logger.isDebugEnabled();
boolean putDone = false;
// Can this region ever be null? Should we work with regionName and not with the region
// instance?
// It can't be null, as the put is happening on the region and it is still being processed.
GatewaySenderEventImpl value = (GatewaySenderEventImpl) object;
boolean isDREvent = isDREvent(value);
Region region = value.getRegion();
String regionPath = null;
if (isDREvent) {
regionPath = region.getFullPath();
} else {
regionPath = ColocationHelper.getLeaderRegion((PartitionedRegion) region).getFullPath();
}
if (isDebugEnabled) {
logger.debug("Put is for the region {}", region);
}
if (!this.userRegionNameToshadowPRMap.containsKey(regionPath)) {
if (isDebugEnabled) {
logger.debug("The userRegionNameToshadowPRMap is {}", userRegionNameToshadowPRMap);
}
logger.warn(LocalizedMessage.create(LocalizedStrings.NOT_QUEUING_AS_USERPR_IS_NOT_YET_CONFIGURED, value));
// does not put into queue
return false;
}
PartitionedRegion prQ = this.userRegionNameToshadowPRMap.get(regionPath);
int bucketId = value.getBucketId();
Object key = null;
if (!isDREvent) {
key = value.getShadowKey();
if ((Long) key == -1) {
// through listener, so return.
if (isDebugEnabled) {
logger.debug("ParallelGatewaySenderOrderedQueue not putting key {} : Value : {}", key, value);
}
// does not put into queue
return false;
}
} else {
key = value.getEventId();
}
if (isDebugEnabled) {
logger.debug("ParallelGatewaySenderOrderedQueue putting key {} : Value : {}", key, value);
}
AbstractBucketRegionQueue brq = (AbstractBucketRegionQueue) prQ.getDataStore().getLocalBucketById(bucketId);
try {
if (brq == null) {
// Set the threadInitLevel to BEFORE_INITIAL_IMAGE.
int oldLevel = LocalRegion.setThreadInitLevelRequirement(LocalRegion.BEFORE_INITIAL_IMAGE);
try {
// Full path of the bucket:
final String bucketFullPath = Region.SEPARATOR + PartitionedRegionHelper.PR_ROOT_REGION_NAME + Region.SEPARATOR + prQ.getBucketName(bucketId);
brq = (AbstractBucketRegionQueue) prQ.getCache().getRegionByPath(bucketFullPath);
if (isDebugEnabled) {
logger.debug("ParallelGatewaySenderOrderedQueue : The bucket in the cache is bucketRegionName : {} bucket : {}", bucketFullPath, brq);
}
if (brq != null) {
brq.getInitializationLock().readLock().lock();
try {
putIntoBucketRegionQueue(brq, key, value);
putDone = true;
} finally {
brq.getInitializationLock().readLock().unlock();
}
} else if (isDREvent) {
// In the case of a DR with a parallel gateway sender, if the shadow bucket is
// not found even after the above search, the bucket is not intended for this
// node, so do not add this event to the temp queue the way we do for
// PR events.
// does not put onto the queue
} else {
// If the shadow bucket has already been destroyed, we don't want to store this event.
if (((PartitionedRegion) prQ.getColocatedWithRegion()).getRegionAdvisor().getBucketAdvisor(bucketId).getShadowBucketDestroyed()) {
if (isDebugEnabled) {
logger.debug("ParallelGatewaySenderOrderedQueue not putting key {} : Value : {} as shadowPR bucket is destroyed.", key, value);
}
// does not put onto the queue
} else {
/*
* This is to prevent data loss, in the scenario when bucket is not available in the
* cache but we know that it will be created.
*/
BlockingQueue tempQueue = null;
synchronized (this.bucketToTempQueueMap) {
tempQueue = this.bucketToTempQueueMap.get(bucketId);
if (tempQueue == null) {
tempQueue = new LinkedBlockingQueue();
this.bucketToTempQueueMap.put(bucketId, tempQueue);
}
}
synchronized (tempQueue) {
brq = (AbstractBucketRegionQueue) prQ.getCache().getRegionByPath(bucketFullPath);
if (brq != null) {
brq.getInitializationLock().readLock().lock();
try {
putIntoBucketRegionQueue(brq, key, value);
putDone = true;
} finally {
brq.getInitializationLock().readLock().unlock();
}
} else {
tempQueue.add(value);
putDone = true;
// For debugging purposes.
if (isDebugEnabled) {
logger.debug("The value {} is enqueued to the tempQueue for the BucketRegionQueue.", value);
}
}
}
}
}
} finally {
LocalRegion.setThreadInitLevelRequirement(oldLevel);
}
} else {
boolean thisbucketDestroyed = false;
if (!isDREvent) {
thisbucketDestroyed = ((PartitionedRegion) prQ.getColocatedWithRegion()).getRegionAdvisor().getBucketAdvisor(bucketId).getShadowBucketDestroyed() || brq.isDestroyed();
} else {
thisbucketDestroyed = brq.isDestroyed();
}
if (!thisbucketDestroyed) {
putIntoBucketRegionQueue(brq, key, value);
putDone = true;
} else {
if (isDebugEnabled) {
logger.debug("ParallelGatewaySenderOrderedQueue not putting key {} : Value : {} as shadowPR bucket is destroyed.", key, value);
}
// does not put onto the queue
}
}
} finally {
notifyEventProcessorIfRequired();
}
return putDone;
}
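The fallback path above buffers events in a per-bucket LinkedBlockingQueue while the shadow bucket is still being created: the temp queue is created lazily under the map lock, and the bucket is looked up again under the temp queue's own lock before deciding whether to deliver directly or buffer. A generic, hedged sketch of that double-checked hand-off (the Target and deliver names are illustrative, not Geode APIs):
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.function.Supplier;
public class TempQueueBuffer<K, V> {
    public interface Target<V> {
        void deliver(V value);   // stand-in for putIntoBucketRegionQueue
    }
    private final Map<K, BlockingQueue<V>> tempQueues = new ConcurrentHashMap<>();
    // Delivers directly when the target exists, otherwise buffers the value in a
    // per-key LinkedBlockingQueue until the target is created.
    public void putOrBuffer(K key, V value, Supplier<Target<V>> lookup) {
        Target<V> target = lookup.get();
        if (target != null) {
            target.deliver(value);
            return;
        }
        BlockingQueue<V> tempQueue =
                tempQueues.computeIfAbsent(key, k -> new LinkedBlockingQueue<>());
        synchronized (tempQueue) {
            // Re-check under the temp queue's lock, mirroring the second
            // getRegionByPath lookup in the Geode code above.
            target = lookup.get();
            if (target != null) {
                target.deliver(value);
            } else {
                tempQueue.add(value);   // buffer until the target bucket is created
            }
        }
    }
}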