Use of java.util.concurrent.LinkedBlockingQueue in project flink by apache: class AkkaKvStateLocationLookupServiceTest, method testUnexpectedResponseType.
@Test
public void testUnexpectedResponseType() throws Exception {
    TestingLeaderRetrievalService leaderRetrievalService = new TestingLeaderRetrievalService();
    Queue<LookupKvStateLocation> received = new LinkedBlockingQueue<>();
    AkkaKvStateLocationLookupService lookupService = new AkkaKvStateLocationLookupService(
            leaderRetrievalService,
            testActorSystem,
            TIMEOUT,
            new AkkaKvStateLocationLookupService.DisabledLookupRetryStrategyFactory());
    lookupService.start();
    // Create a test actor that answers lookups with an unexpected response type
    String expected = "unexpected-response-type";
    ActorRef testActor = LookupResponseActor.create(received, null, expected);
    String testActorAddress = AkkaUtils.getAkkaURL(testActorSystem, testActor);
    leaderRetrievalService.notifyListener(testActorAddress, null);
    try {
        Await.result(lookupService.getKvStateLookupInfo(new JobID(), "spicy"), TIMEOUT);
        fail("Did not throw expected Exception");
    } catch (Throwable ignored) {
        // Expected
    }
}
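In this test the queue is used only as a thread-safe collection point for messages received by the test actor, which the test thread can then poll. A minimal, self-contained sketch of that pattern, with no Flink or Akka dependencies and purely illustrative names, might look like this:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class QueueAsTestMailbox {
    public static void main(String[] args) throws InterruptedException {
        LinkedBlockingQueue<String> received = new LinkedBlockingQueue<>();
        // Background "actor" pushes whatever it receives into the queue.
        Thread worker = new Thread(() -> received.offer("unexpected-response-type"));
        worker.start();
        // The test thread blocks (with a timeout) until a message arrives.
        String message = received.poll(5, TimeUnit.SECONDS);
        if (message == null) {
            throw new AssertionError("Did not receive a message in time");
        }
        System.out.println("received: " + message);
        worker.join();
    }
}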
Use of java.util.concurrent.LinkedBlockingQueue in project hadoop by apache: class NMClientAsyncImpl, method serviceStart.
@Override
protected void serviceStart() throws Exception {
    client.start();
    ThreadFactory tf = new ThreadFactoryBuilder()
            .setNameFormat(this.getClass().getName() + " #%d")
            .setDaemon(true)
            .build();
    // Start with a default core-pool size and change it dynamically.
    int initSize = Math.min(INITIAL_THREAD_POOL_SIZE, maxThreadPoolSize);
    threadPool = new ThreadPoolExecutor(initSize, Integer.MAX_VALUE, 1, TimeUnit.HOURS,
            new LinkedBlockingQueue<Runnable>(), tf);
    eventDispatcherThread = new Thread() {
        @Override
        public void run() {
            ContainerEvent event = null;
            Set<String> allNodes = new HashSet<String>();
            while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
                try {
                    event = events.take();
                } catch (InterruptedException e) {
                    if (!stopped.get()) {
                        LOG.error("Returning, thread interrupted", e);
                    }
                    return;
                }
                allNodes.add(event.getNodeId().toString());
                int threadPoolSize = threadPool.getCorePoolSize();
                // We can increase the pool size only if we haven't reached the
                // maximum limit yet.
                if (threadPoolSize != maxThreadPoolSize) {
                    // nodeNum is the number of nodes where containers will run at
                    // *this* point of time. This is *not* the cluster size and
                    // doesn't need to be.
                    int nodeNum = allNodes.size();
                    int idealThreadPoolSize = Math.min(maxThreadPoolSize, nodeNum);
                    if (threadPoolSize < idealThreadPoolSize) {
                        // Bump up the pool size to idealThreadPoolSize +
                        // INITIAL_THREAD_POOL_SIZE; the latter is just a buffer so
                        // we are not always increasing the pool size.
                        int newThreadPoolSize = Math.min(maxThreadPoolSize,
                                idealThreadPoolSize + INITIAL_THREAD_POOL_SIZE);
                        LOG.info("Set NMClientAsync thread pool size to " + newThreadPoolSize
                                + " as the number of nodes to talk to is " + nodeNum);
                        threadPool.setCorePoolSize(newThreadPoolSize);
                    }
                }
                // The events from the queue are handled in parallel by the thread pool.
                threadPool.execute(getContainerEventProcessor(event));
                // TODO: Group launching of multiple containers to a single
                // NodeManager into a single connection
            }
        }
    };
    eventDispatcherThread.setName("Container Event Dispatcher");
    eventDispatcherThread.setDaemon(false);
    eventDispatcherThread.start();
    super.serviceStart();
}
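The core pattern here, shared with the CommitterEventHandler snippet below, is a single dispatcher thread blocking on a LinkedBlockingQueue and fanning events out to a thread pool. Stripped of the YARN-specific types and dynamic pool sizing, a minimal sketch of that dispatcher loop (all names hypothetical) could look like this:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;

public class EventDispatcherSketch {
    private final BlockingQueue<Runnable> events = new LinkedBlockingQueue<>();
    private final AtomicBoolean stopped = new AtomicBoolean(false);
    private final ExecutorService workers = Executors.newFixedThreadPool(4);

    // Dispatcher thread: block on the queue, hand each event to the pool.
    private final Thread dispatcher = new Thread(() -> {
        while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
            Runnable event;
            try {
                event = events.take();  // blocks until an event is available
            } catch (InterruptedException e) {
                return;                 // shutting down
            }
            workers.execute(event);     // events are processed in parallel
        }
    }, "Event Dispatcher");

    public void start() { dispatcher.start(); }

    public void submit(Runnable event) { events.add(event); }

    public void stop() {
        stopped.set(true);
        dispatcher.interrupt();
        workers.shutdown();
    }
}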
Use of java.util.concurrent.LinkedBlockingQueue in project hadoop by apache: class CommitterEventHandler, method serviceStart.
@Override
protected void serviceStart() throws Exception {
    ThreadFactoryBuilder tfBuilder = new ThreadFactoryBuilder()
            .setNameFormat("CommitterEvent Processor #%d");
    if (jobClassLoader != null) {
        // if the job classloader is enabled, we need to use the job classloader
        // as the thread context classloader (TCCL) of these threads in case the
        // committer needs to load another class via TCCL
        ThreadFactory backingTf = new ThreadFactory() {
            @Override
            public Thread newThread(Runnable r) {
                Thread thread = new Thread(r);
                thread.setContextClassLoader(jobClassLoader);
                return thread;
            }
        };
        tfBuilder.setThreadFactory(backingTf);
    }
    ThreadFactory tf = tfBuilder.build();
    launcherPool = new HadoopThreadPoolExecutor(5, 5, 1, TimeUnit.HOURS,
            new LinkedBlockingQueue<Runnable>(), tf);
    eventHandlingThread = new Thread(new Runnable() {
        @Override
        public void run() {
            CommitterEvent event = null;
            while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
                try {
                    event = eventQueue.take();
                } catch (InterruptedException e) {
                    if (!stopped.get()) {
                        LOG.error("Returning, interrupted : " + e);
                    }
                    return;
                }
                // the events from the queue are handled in parallel
                // using a thread pool
                launcherPool.execute(new EventProcessor(event));
            }
        }
    });
    eventHandlingThread.setName("CommitterEvent Handler");
    eventHandlingThread.start();
    super.serviceStart();
}
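The notable detail in this snippet is the backing ThreadFactory that sets the job classloader as each new thread's context classloader. A self-contained sketch of that wrapping technique using only the JDK (the classloader and task here are stand-ins, not Hadoop code) might look like this:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

public class TcclThreadFactorySketch {
    public static void main(String[] args) {
        // Hypothetical classloader standing in for the job classloader;
        // the system classloader is used purely for demonstration.
        ClassLoader jobClassLoader = ClassLoader.getSystemClassLoader();

        AtomicInteger counter = new AtomicInteger();
        ThreadFactory tf = r -> {
            Thread t = new Thread(r, "CommitterEvent Processor #" + counter.getAndIncrement());
            // Every task run by this pool sees jobClassLoader as its TCCL.
            t.setContextClassLoader(jobClassLoader);
            return t;
        };

        ExecutorService pool = Executors.newFixedThreadPool(2, tf);
        pool.execute(() -> System.out.println(
                Thread.currentThread().getName() + " TCCL = "
                        + Thread.currentThread().getContextClassLoader()));
        pool.shutdown();
    }
}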
Use of java.util.concurrent.LinkedBlockingQueue in project Mycat-Server by MyCATApache: class DataMigrator, method migrateData.
private void migrateData() throws SQLException {
    executor = new ThreadPoolExecutor(margs.getThreadCount(), margs.getThreadCount(),
            0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(),
            new ThreadPoolExecutor.CallerRunsPolicy());
    for (TableMigrateInfo table : migrateTables) {
        if (!table.isError()) {
            // skip sharded tables that have already hit an error
            List<DataNodeMigrateInfo> detailList = table.getDataNodesDetail();
            for (DataNodeMigrateInfo info : detailList) {
                executor.execute(new DataMigrateRunner(table, info.getSrc(), info.getTarget(),
                        table.getTableName(), info.getTempFile()));
            }
        }
    }
    executor.shutdown();
    // Poll until all migration tasks have finished.
    while (true) {
        if (executor.isTerminated()) {
            break;
        }
        try {
            Thread.sleep(200);
        } catch (InterruptedException e) {
            LOGGER.error("error", e);
        }
    }
}
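The busy-wait loop at the end of migrateData can also be expressed with ExecutorService.awaitTermination, which blocks until the pool drains or a timeout elapses. A minimal sketch of that alternative, with illustrative task bodies rather than Mycat code:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AwaitTerminationSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 8; i++) {
            final int task = i;
            executor.execute(() -> System.out.println("migrating chunk " + task));
        }
        executor.shutdown();
        // Block until every submitted task has completed (or the timeout expires),
        // instead of polling isTerminated() in a sleep loop.
        if (!executor.awaitTermination(1, TimeUnit.HOURS)) {
            System.err.println("migration did not finish within the timeout");
        }
    }
}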
Use of java.util.concurrent.LinkedBlockingQueue in project qi4j-sdk by Qi4j: class SchedulerMixin, method activateService.
@Override
public void activateService() throws Exception {
    // Handle configuration defaults
    SchedulerConfiguration configuration = config.get();
    Integer workersCount = configuration.workersCount().get();
    Integer workQueueSize = configuration.workQueueSize().get();
    if (workersCount == null) {
        workersCount = DEFAULT_WORKERS_COUNT;
        LOGGER.debug("Workers count absent from configuration, fell back to default: {} workers", DEFAULT_WORKERS_COUNT);
    }
    if (workQueueSize == null) {
        workQueueSize = DEFAULT_WORKQUEUE_SIZE;
        LOGGER.debug("WorkQueue size absent from configuration, fell back to default: {}", DEFAULT_WORKQUEUE_SIZE);
    }
    int corePoolSize = 2;
    if (workersCount > 4) {
        corePoolSize = workersCount / 4;
    }
    // Throws IllegalArgumentException if corePoolSize or keepAliveTime is less than zero,
    // if workersCount is less than or equal to zero, or if corePoolSize is greater than workersCount.
    taskExecutor = new ThreadPoolExecutor(corePoolSize, workersCount, 0, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(workQueueSize), threadFactory, rejectionHandler);
    taskExecutor.prestartAllCoreThreads();
    managementExecutor = new ScheduledThreadPoolExecutor(2, threadFactory, rejectionHandler);
    loadSchedules();
    LOGGER.debug("Activated");
}
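Unlike the earlier snippets, this one bounds the LinkedBlockingQueue with workQueueSize, so once all workersCount threads are busy and the queue is full, further submissions go to the configured RejectedExecutionHandler. A minimal sketch of that bounded-queue behaviour, with made-up sizes and CallerRunsPolicy standing in for whatever handler the scheduler actually configures:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedQueueSketch {
    public static void main(String[] args) throws InterruptedException {
        // 2 workers, at most 4 queued tasks; overflow runs on the caller's thread.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                2, 2, 0, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<Runnable>(4),
                new ThreadPoolExecutor.CallerRunsPolicy());
        for (int i = 0; i < 10; i++) {
            final int task = i;
            pool.execute(() -> System.out.println(
                    "task " + task + " on " + Thread.currentThread().getName()));
        }
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.MINUTES);
    }
}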