Use of java.util.concurrent.PriorityBlockingQueue in project hbase by apache.
Class ReplicationSource, method run:
@Override
public void run() {
  // mark we are running now
  this.sourceRunning = true;
  try {
    // start the endpoint, connect to the cluster
    Service.State state = replicationEndpoint.start().get();
    if (state != Service.State.RUNNING) {
      LOG.warn("ReplicationEndpoint was not started. Exiting");
      uninitialize();
      return;
    }
  } catch (Exception ex) {
    LOG.warn("Error starting ReplicationEndpoint, exiting", ex);
    throw new RuntimeException(ex);
  }
  // get the WALEntryFilter from ReplicationEndpoint and add it to default filters
  ArrayList<WALEntryFilter> filters =
      Lists.newArrayList((WALEntryFilter) new SystemTableWALEntryFilter());
  WALEntryFilter filterFromEndpoint = this.replicationEndpoint.getWALEntryfilter();
  if (filterFromEndpoint != null) {
    filters.add(filterFromEndpoint);
  }
  this.walEntryFilter = new ChainWALEntryFilter(filters);
  int sleepMultiplier = 1;
  // delay this until we are in an asynchronous thread
  while (this.isSourceActive() && this.peerClusterId == null) {
    this.peerClusterId = replicationEndpoint.getPeerUUID();
    if (this.isSourceActive() && this.peerClusterId == null) {
      if (sleepForRetries("Cannot contact the peer's zk ensemble", sleepMultiplier)) {
        sleepMultiplier++;
      }
    }
  }
  // In a rare case the ZooKeeper setting may be messed up, which leads to an incorrect
  // peerClusterId value that is the same as the source clusterId
  if (clusterId.equals(peerClusterId) && !replicationEndpoint.canReplicateToSameCluster()) {
    this.terminate("ClusterId " + clusterId + " is replicating to itself: peerClusterId "
        + peerClusterId + " which is not allowed by ReplicationEndpoint:"
        + replicationEndpoint.getClass().getName(), null, false);
    this.manager.closeQueue(this);
    return;
  }
  LOG.info("Replicating " + clusterId + " -> " + peerClusterId);
  // start workers
  for (Map.Entry<String, PriorityBlockingQueue<Path>> entry : queues.entrySet()) {
    String walGroupId = entry.getKey();
    PriorityBlockingQueue<Path> queue = entry.getValue();
    final ReplicationSourceShipperThread worker =
        new ReplicationSourceShipperThread(walGroupId, queue, replicationQueueInfo, this);
    ReplicationSourceShipperThread extant = workerThreads.putIfAbsent(walGroupId, worker);
    if (extant != null) {
      LOG.debug("Someone has beat us to start a worker thread for wal group " + walGroupId);
    } else {
      LOG.debug("Starting up worker for wal group " + walGroupId);
      worker.startup();
    }
  }
}
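
In the HBase source above, each WAL group owns a PriorityBlockingQueue<Path> that keeps WAL files in age order, and a dedicated shipper thread drains it. The following is a minimal, self-contained sketch of that producer/worker pattern; the names (WalShipperSketch, byFileName) are illustrative stand-ins, and plain strings with natural ordering replace HBase's Path objects and timestamp comparator.

import java.util.Comparator;
import java.util.concurrent.PriorityBlockingQueue;

public class WalShipperSketch {

    public static void main(String[] args) throws InterruptedException {
        // In HBase the queue holds Paths ordered by the timestamp encoded in the
        // WAL file name; plain strings with natural ordering stand in here.
        Comparator<String> byFileName = Comparator.naturalOrder();
        PriorityBlockingQueue<String> queue = new PriorityBlockingQueue<>(16, byFileName);

        // Worker thread: blocks on take() until a WAL path is available, oldest first.
        Thread shipper = new Thread(() -> {
            try {
                while (!Thread.currentThread().isInterrupted()) {
                    String wal = queue.take();
                    System.out.println("shipping " + wal);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        shipper.start();

        // Producer side: new WAL files can be offered from any thread.
        queue.offer("wal.1700000002");
        queue.offer("wal.1700000001");

        Thread.sleep(200);
        shipper.interrupt();
    }
}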
Use of java.util.concurrent.PriorityBlockingQueue in project elasticsearch by elastic.
Class PrioritizedExecutorsTests, method testPriorityQueue:
public void testPriorityQueue() throws Exception {
    PriorityBlockingQueue<Priority> queue = new PriorityBlockingQueue<>();
    List<Priority> priorities = Arrays.asList(Priority.values());
    Collections.shuffle(priorities, random());
    for (Priority priority : priorities) {
        queue.add(priority);
    }
    Priority prevPriority = null;
    while (!queue.isEmpty()) {
        if (prevPriority == null) {
            prevPriority = queue.poll();
        } else {
            assertThat(queue.poll().after(prevPriority), is(true));
        }
    }
}
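
The test works because Priority is Comparable, so the no-argument PriorityBlockingQueue constructor can fall back to natural ordering. A short illustration of the property being exercised, using plain integers: only poll()/take() honour the ordering, while iteration (and printing) order is unspecified.

import java.util.concurrent.PriorityBlockingQueue;

public class PollOrderDemo {

    public static void main(String[] args) {
        PriorityBlockingQueue<Integer> queue = new PriorityBlockingQueue<>();
        queue.add(3);
        queue.add(1);
        queue.add(2);

        System.out.println(queue);           // iteration/printing order is not guaranteed to be sorted
        while (!queue.isEmpty()) {
            System.out.print(queue.poll());  // prints 123: poll() follows natural ordering
        }
        System.out.println();
    }
}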
Use of java.util.concurrent.PriorityBlockingQueue in project ACS by ACS-Community.
Class CharacteristicComponentImpl, method execute:
/**
 * Execute action.
 * If the maximum pool size or queue size is bounded,
 * then it is possible for incoming execute requests to block.
 * <code>BACIExecutor</code> uses the default 'Run' blocking policy:
 * the thread making the execute request runs the task itself. This policy helps guard against lockup.
 * @param action action to execute.
 * @return <code>true</code> on success.
 */
public boolean execute(PrioritizedRunnable action) {
    try {
        if (threadPool == null) {
            // TODO make PriorityBlockingQueue bounded!!! (to MAX_REQUESTS)
            // TODO should I use PooledExecutorWithWaitInNewThreadWhenBlocked...?
            threadPool = new ThreadPoolExecutor(
                MAX_POOL_THREADS, MAX_POOL_THREADS,
                1, TimeUnit.MINUTES,
                new PriorityBlockingQueue<Runnable>(MAX_REQUESTS,
                    new PrioritizedRunnableComparator<Runnable>()),
                m_containerServices.getThreadFactory());
            threadPool.allowCoreThreadTimeOut(true);
        }
        threadPool.execute(action);
        return true;
    } catch (Throwable th) {
        return false;
    }
}
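
Note that the capacity passed to the PriorityBlockingQueue constructor is only an initial capacity; the queue itself stays unbounded, which is what the first TODO in the snippet is about. As a rough sketch of the same idea outside ACS, the following builds a small prioritized pool around a comparator over runnables; PrioritizedTask and its comparator are illustrative stand-ins for ACS's PrioritizedRunnable and PrioritizedRunnableComparator.

import java.util.Comparator;
import java.util.concurrent.PriorityBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class PriorityPoolSketch {

    // Illustrative task type carrying its own priority (lower value = more urgent).
    static class PrioritizedTask implements Runnable {
        final int priority;
        final String name;

        PrioritizedTask(int priority, String name) {
            this.priority = priority;
            this.name = name;
        }

        @Override
        public void run() {
            System.out.println("running " + name + " (priority " + priority + ")");
            try {
                Thread.sleep(100); // keep the worker busy so later tasks queue up
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
    }

    public static void main(String[] args) {
        // The int argument is only the initial capacity; the queue remains unbounded.
        Comparator<Runnable> byPriority =
                Comparator.comparingInt(r -> ((PrioritizedTask) r).priority);
        PriorityBlockingQueue<Runnable> queue = new PriorityBlockingQueue<>(11, byPriority);

        ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 1, 1, TimeUnit.MINUTES, queue);
        pool.allowCoreThreadTimeOut(true);

        // While "first" occupies the single worker, the other two wait in the
        // queue and are taken in priority order ("high" before "low").
        // Use execute(), not submit(): submit() wraps tasks in FutureTasks that
        // the cast in the comparator above cannot handle.
        pool.execute(new PrioritizedTask(0, "first"));
        pool.execute(new PrioritizedTask(5, "low"));
        pool.execute(new PrioritizedTask(1, "high"));

        pool.shutdown();
    }
}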
Use of java.util.concurrent.PriorityBlockingQueue in project hive by apache.
Class TestTaskExecutorService, method testPreemptionQueueComparator:
@Test(timeout = 5000)
public void testPreemptionQueueComparator() throws InterruptedException {
  TaskWrapper r1 = createTaskWrapper(createSubmitWorkRequestProto(1, 2, 100, 200), false, 100000);
  TaskWrapper r2 = createTaskWrapper(createSubmitWorkRequestProto(2, 4, 200, 300), false, 100000);
  TaskWrapper r3 = createTaskWrapper(createSubmitWorkRequestProto(3, 6, 300, 400), false, 1000000);
  TaskWrapper r4 = createTaskWrapper(createSubmitWorkRequestProto(4, 8, 400, 500), false, 1000000);
  BlockingQueue<TaskWrapper> queue =
      new PriorityBlockingQueue<>(4, new TaskExecutorService.PreemptionQueueComparator());

  queue.offer(r1);
  assertEquals(r1, queue.peek());
  queue.offer(r2);
  assertEquals(r1, queue.peek());
  queue.offer(r3);
  assertEquals(r1, queue.peek());
  queue.offer(r4);

  assertEquals(r1, queue.take());
  assertEquals(r2, queue.take());
  assertEquals(r3, queue.take());
  assertEquals(r4, queue.take());
}
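
The offer/peek/take pattern above is a convenient way to unit-test a custom comparator in isolation, without the surrounding executor. A minimal sketch under assumed names (Job and byStartTime are illustrative, not Hive classes):

import static org.junit.Assert.assertEquals;

import java.util.Comparator;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.PriorityBlockingQueue;
import org.junit.Test;

public class ComparatorQueueTest {

    // Illustrative element type; not part of Hive.
    static class Job {
        final String id;
        final long startTime;

        Job(String id, long startTime) {
            this.id = id;
            this.startTime = startTime;
        }
    }

    @Test
    public void takesJobsInStartTimeOrder() throws InterruptedException {
        Comparator<Job> byStartTime = Comparator.comparingLong(j -> j.startTime);
        BlockingQueue<Job> queue = new PriorityBlockingQueue<>(4, byStartTime);

        Job late = new Job("late", 200);
        Job early = new Job("early", 100);

        queue.offer(late);
        assertEquals(late, queue.peek());   // only element so far
        queue.offer(early);
        assertEquals(early, queue.peek());  // comparator moves the earlier job to the head

        assertEquals(early, queue.take());
        assertEquals(late, queue.take());
    }
}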
Use of java.util.concurrent.PriorityBlockingQueue in project druid by druid-io.
Class PrioritizedListenableFutureTask, method create:
public static PrioritizedExecutorService create(Lifecycle lifecycle, DruidProcessingConfig config) {
  final PrioritizedExecutorService service = new PrioritizedExecutorService(
      new ThreadPoolExecutor(
          config.getNumThreads(), config.getNumThreads(),
          0L, TimeUnit.MILLISECONDS,
          new PriorityBlockingQueue<Runnable>(),
          new ThreadFactoryBuilder().setDaemon(true).setNameFormat(config.getFormatString()).build()),
      config);
  lifecycle.addHandler(new Lifecycle.Handler() {

    @Override
    public void start() throws Exception {
    }

    @Override
    public void stop() {
      service.shutdownNow();
    }
  });
  return service;
}
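
A caveat this kind of wrapper has to deal with: a plain ThreadPoolExecutor over a comparator-less PriorityBlockingQueue<Runnable> requires every queued Runnable to be Comparable, and the FutureTask produced by submit() is not. The general workaround (and roughly what the surrounding Druid code does with its prioritized future tasks) is to wrap the work in a comparable future task and hand it to execute(). A hedged sketch with illustrative names (PriorityExecutorSketch, PriorityFutureTask):

import java.util.concurrent.Callable;
import java.util.concurrent.FutureTask;
import java.util.concurrent.PriorityBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class PriorityExecutorSketch {

    // A FutureTask that carries a priority so a comparator-less
    // PriorityBlockingQueue can order it; higher priority runs earlier.
    static class PriorityFutureTask<T> extends FutureTask<T>
            implements Comparable<PriorityFutureTask<?>> {
        final int priority;

        PriorityFutureTask(Callable<T> callable, int priority) {
            super(callable);
            this.priority = priority;
        }

        @Override
        public int compareTo(PriorityFutureTask<?> other) {
            return Integer.compare(other.priority, this.priority);
        }
    }

    public static void main(String[] args) throws Exception {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                1, 1, 0L, TimeUnit.MILLISECONDS, new PriorityBlockingQueue<Runnable>());

        PriorityFutureTask<String> low = new PriorityFutureTask<>(() -> "low", 1);
        PriorityFutureTask<String> high = new PriorityFutureTask<>(() -> "high", 10);

        // execute(), not submit(): submit() would re-wrap the task in a plain,
        // non-Comparable FutureTask, and the queue would fail with a
        // ClassCastException as soon as it had to compare it.
        pool.execute(low);
        pool.execute(high);

        System.out.println(low.get() + " " + high.get());
        pool.shutdown();
    }
}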