Use of java.util.concurrent.LinkedBlockingQueue in project flink by apache.
From the class AkkaKvStateLocationLookupServiceTest, the method testUnexpectedResponseType:
@Test
public void testUnexpectedResponseType() throws Exception {
    TestingLeaderRetrievalService leaderRetrievalService = new TestingLeaderRetrievalService();
    Queue<LookupKvStateLocation> received = new LinkedBlockingQueue<>();
    AkkaKvStateLocationLookupService lookupService = new AkkaKvStateLocationLookupService(
        leaderRetrievalService, testActorSystem, TIMEOUT,
        new AkkaKvStateLocationLookupService.DisabledLookupRetryStrategyFactory());
    lookupService.start();
    // Create a test actor that responds with an unexpected type (a plain String)
    String expected = "unexpected-response-type";
    ActorRef testActor = LookupResponseActor.create(received, null, expected);
    String testActorAddress = AkkaUtils.getAkkaURL(testActorSystem, testActor);
    leaderRetrievalService.notifyListener(testActorAddress, null);
    try {
        Await.result(lookupService.getKvStateLookupInfo(new JobID(), "spicy"), TIMEOUT);
        fail("Did not throw expected Exception");
    } catch (Throwable ignored) {
        // Expected
    }
}
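The test above relies on the LinkedBlockingQueue as a thread-safe mailbox: the test actor offers received messages from the actor system's threads while the test thread reads them. Below is a rough, self-contained sketch of that pattern using only JDK classes; ReceivedMessageCollector, the responder thread, and the message strings are made up for illustration and are not Flink code.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class ReceivedMessageCollector {

    public static void main(String[] args) throws InterruptedException {
        // Thread-safe mailbox: the responder thread offers messages, the main thread polls them.
        BlockingQueue<String> received = new LinkedBlockingQueue<>();

        // Stand-in for the test actor replying on another thread.
        Thread responder = new Thread(() -> received.offer("unexpected-response-type"));
        responder.start();

        // poll(timeout) blocks instead of busy-waiting, which keeps the check deterministic.
        String message = received.poll(5, TimeUnit.SECONDS);
        System.out.println("received: " + message);
        responder.join();
    }
}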
Use of java.util.concurrent.LinkedBlockingQueue in project hadoop by apache.
From the class TestCallQueueManager, the method testSchedulerWithoutFCQ:
@Test
public void testSchedulerWithoutFCQ() throws InterruptedException {
    Configuration conf = new Configuration();
    // Test DecayRpcScheduler without FairCallQueue (FCQ): ensure the default
    // LinkedBlockingQueue works with DecayRpcScheduler.
    final String ns = CommonConfigurationKeys.IPC_NAMESPACE + ".0";
    final String schedulerClassName = "org.apache.hadoop.ipc.DecayRpcScheduler";
    conf.setStrings(ns + "." + CommonConfigurationKeys.IPC_SCHEDULER_IMPL_KEY, schedulerClassName);
    Class<? extends BlockingQueue<FakeCall>> queue =
        (Class<? extends BlockingQueue<FakeCall>>) getQueueClass(ns, conf);
    assertTrue(queue.getCanonicalName().equals("java.util.concurrent.LinkedBlockingQueue"));
    manager = new CallQueueManager<FakeCall>(queue, Server.getSchedulerClass(ns, conf), false, 3, "", conf);
    // A LinkedBlockingQueue with a capacity of 3 accepts 3 calls...
    assertCanPut(manager, 3, 3);
    // ...but cannot accept a 4th call once it is full.
    assertCanPut(manager, 0, 1);
}
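The two assertCanPut calls come down to the bounded-capacity semantics of LinkedBlockingQueue itself: once the capacity given to the constructor is reached, a non-blocking offer fails. A minimal sketch with plain JDK classes follows; BoundedQueueDemo and its call strings are hypothetical names, not Hadoop code.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class BoundedQueueDemo {

    public static void main(String[] args) {
        // A LinkedBlockingQueue constructed with a capacity behaves as a bounded queue.
        BlockingQueue<String> calls = new LinkedBlockingQueue<>(3);

        for (int i = 1; i <= 4; i++) {
            // offer(...) returns false instead of blocking when the queue is full,
            // mirroring the "can put 3, cannot put a 4th" assertions above.
            boolean accepted = calls.offer("call-" + i);
            System.out.println("call-" + i + " accepted: " + accepted);
        }
    }
}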
Use of java.util.concurrent.LinkedBlockingQueue in project hadoop by apache.
From the class ContainerLauncherImpl, the method serviceStart:
protected void serviceStart() throws Exception {
    ThreadFactory tf = new ThreadFactoryBuilder()
        .setNameFormat("ContainerLauncher #%d").setDaemon(true).build();
    // Start with a default core-pool size of 10 and change it dynamically.
    launcherPool = new HadoopThreadPoolExecutor(initialPoolSize, Integer.MAX_VALUE, 1,
        TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
    eventHandlingThread = new Thread() {
        @Override
        public void run() {
            ContainerLauncherEvent event = null;
            Set<String> allNodes = new HashSet<String>();
            while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
                try {
                    event = eventQueue.take();
                } catch (InterruptedException e) {
                    if (!stopped.get()) {
                        LOG.error("Returning, interrupted : " + e);
                    }
                    return;
                }
                allNodes.add(event.getContainerMgrAddress());
                int poolSize = launcherPool.getCorePoolSize();
                // Grow the pool only if it hasn't reached the maximum limit yet.
                if (poolSize != limitOnPoolSize) {
                    // allNodes is the set of nodes where containers will run at *this*
                    // point of time. This is *not* the cluster size and doesn't need to be.
                    int numNodes = allNodes.size();
                    int idealPoolSize = Math.min(limitOnPoolSize, numNodes);
                    if (poolSize < idealPoolSize) {
                        // Bump up the pool size to idealPoolSize + initialPoolSize; the
                        // latter is just a buffer so we are not always increasing the
                        // pool size.
                        int newPoolSize = Math.min(limitOnPoolSize, idealPoolSize + initialPoolSize);
                        LOG.info("Setting ContainerLauncher pool size to " + newPoolSize
                            + " as number-of-nodes to talk to is " + numNodes);
                        launcherPool.setCorePoolSize(newPoolSize);
                    }
                }
                // The events from the queue are handled in parallel using a thread pool.
                launcherPool.execute(createEventProcessor(event));
                // TODO: Group launching of multiple containers to a single
                // NodeManager into a single connection
            }
        }
    };
    eventHandlingThread.setName("ContainerLauncher Event Handler");
    eventHandlingThread.start();
    super.serviceStart();
}
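One detail worth noting: because the work queue here is an unbounded LinkedBlockingQueue, the executor never grows past its core pool size on its own, which is presumably why the code above calls setCorePoolSize explicitly rather than relying on the maximum pool size. A stripped-down sketch of that behavior with the plain java.util.concurrent.ThreadPoolExecutor follows; GrowableCorePool and the task numbers are illustrative, not part of Hadoop.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class GrowableCorePool {

    public static void main(String[] args) throws InterruptedException {
        // With an unbounded LinkedBlockingQueue, a ThreadPoolExecutor never creates more
        // than corePoolSize threads, so growth has to happen via setCorePoolSize(...).
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            2, Integer.MAX_VALUE, 1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>());

        for (int i = 0; i < 10; i++) {
            final int task = i;
            pool.execute(() -> System.out.println(Thread.currentThread().getName() + " runs task " + task));
        }

        // Bump the core size when the observed workload grows, as the launcher does above.
        pool.setCorePoolSize(4);

        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }
}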
Use of java.util.concurrent.LinkedBlockingQueue in project hadoop by apache.
From the class CommitterEventHandler, the method serviceStart:
@Override
protected void serviceStart() throws Exception {
    ThreadFactoryBuilder tfBuilder = new ThreadFactoryBuilder()
        .setNameFormat("CommitterEvent Processor #%d");
    if (jobClassLoader != null) {
        // If the job classloader is enabled, we need to use the job classloader
        // as the thread context classloader (TCCL) of these threads in case the
        // committer needs to load another class via TCCL.
        ThreadFactory backingTf = new ThreadFactory() {
            @Override
            public Thread newThread(Runnable r) {
                Thread thread = new Thread(r);
                thread.setContextClassLoader(jobClassLoader);
                return thread;
            }
        };
        tfBuilder.setThreadFactory(backingTf);
    }
    ThreadFactory tf = tfBuilder.build();
    launcherPool = new HadoopThreadPoolExecutor(5, 5, 1, TimeUnit.HOURS,
        new LinkedBlockingQueue<Runnable>(), tf);
    eventHandlingThread = new Thread(new Runnable() {
        @Override
        public void run() {
            CommitterEvent event = null;
            while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
                try {
                    event = eventQueue.take();
                } catch (InterruptedException e) {
                    if (!stopped.get()) {
                        LOG.error("Returning, interrupted : " + e);
                    }
                    return;
                }
                // The events from the queue are handled in parallel using a thread pool.
                launcherPool.execute(new EventProcessor(event));
            }
        }
    });
    eventHandlingThread.setName("CommitterEvent Handler");
    eventHandlingThread.start();
    super.serviceStart();
}
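Both serviceStart methods follow the same shape: a dedicated thread blocks on eventQueue.take() and fans each event out to a worker pool, returning cleanly when interrupted during shutdown. Below is a self-contained sketch of that event-loop pattern using only JDK classes; EventLoopSketch and its event strings are invented for illustration, not Hadoop code.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class EventLoopSketch {

    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> eventQueue = new LinkedBlockingQueue<>();
        ExecutorService workerPool = Executors.newFixedThreadPool(5);
        AtomicBoolean stopped = new AtomicBoolean(false);

        // One thread blocks on take() and hands events to the worker pool,
        // the same shape as the serviceStart() methods above.
        Thread eventHandlingThread = new Thread(() -> {
            while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
                String event;
                try {
                    event = eventQueue.take();
                } catch (InterruptedException e) {
                    return; // interrupted during shutdown
                }
                workerPool.execute(() -> System.out.println("processed " + event));
            }
        });
        eventHandlingThread.start();

        eventQueue.put("commit-job");
        eventQueue.put("commit-task");

        Thread.sleep(200);
        stopped.set(true);
        eventHandlingThread.interrupt();
        eventHandlingThread.join();
        workerPool.shutdown();
        workerPool.awaitTermination(5, TimeUnit.SECONDS);
    }
}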
Use of java.util.concurrent.LinkedBlockingQueue in project hadoop by apache.
From the class TestAsyncDispatcher, the method testDrainDispatcherDrainEventsOnStop:
// Test if drain dispatcher drains events on stop.
@SuppressWarnings({ "rawtypes" })
@Test(timeout = 10000)
public void testDrainDispatcherDrainEventsOnStop() throws Exception {
    YarnConfiguration conf = new YarnConfiguration();
    conf.setInt(YarnConfiguration.DISPATCHER_DRAIN_EVENTS_TIMEOUT, 2000);
    BlockingQueue<Event> queue = new LinkedBlockingQueue<Event>();
    DrainDispatcher disp = new DrainDispatcher(queue);
    disp.init(conf);
    disp.register(DummyType.class, new DummyHandler());
    disp.setDrainEventsOnStop();
    disp.start();
    disp.waitForEventThreadToWait();
    dispatchDummyEvents(disp, 2);
    disp.close();
    assertEquals(0, queue.size());
}
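The final assertion checks that the backing LinkedBlockingQueue is empty after close(), i.e. queued events are processed before the dispatcher stops rather than being dropped. A minimal sketch of draining a LinkedBlockingQueue on shutdown, using only JDK classes, is shown below; DrainOnStopSketch and the event strings are made-up names, not the DrainDispatcher implementation.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class DrainOnStopSketch {

    public static void main(String[] args) {
        BlockingQueue<String> queue = new LinkedBlockingQueue<>();
        queue.offer("event-1");
        queue.offer("event-2");

        // On stop, hand every already-queued event to its handler before
        // shutting down, so nothing queued is silently dropped.
        String event;
        while ((event = queue.poll()) != null) {
            System.out.println("handled " + event);
        }

        // After the drain the queue is empty, matching assertEquals(0, queue.size()) above.
        System.out.println("remaining: " + queue.size());
    }
}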