use of io.druid.java.util.common.lifecycle.Lifecycle in project druid by druid-io.
the class BaseJettyTest method setup.
@Before
public void setup() throws Exception {
  setProperties();
  Injector injector = setupInjector();
  final DruidNode node = injector.getInstance(Key.get(DruidNode.class, Self.class));
  port = node.getPort();
  lifecycle = injector.getInstance(Lifecycle.class);
  lifecycle.start();
  ClientHolder holder = injector.getInstance(ClientHolder.class);
  server = injector.getInstance(Server.class);
  client = holder.getClient();
}
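A setup like this is normally paired with a teardown that stops the lifecycle so the embedded Jetty Server and other managed resources are released. A minimal sketch of such a teardown, assuming the lifecycle field assigned in setup(); the actual @After method in BaseJettyTest may differ:

@After
public void teardown() throws Exception {
  // Stopping the lifecycle shuts down everything registered with it,
  // including the Jetty Server obtained from the injector above.
  if (lifecycle != null) {
    lifecycle.stop();
  }
}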
use of io.druid.java.util.common.lifecycle.Lifecycle in project druid by druid-io.
the class NamespacedExtractorModuleTest method setUp.
@Before
public void setUp() throws Exception {
  final Map<Class<? extends ExtractionNamespace>, ExtractionNamespaceCacheFactory<?>> factoryMap =
      ImmutableMap.<Class<? extends ExtractionNamespace>, ExtractionNamespaceCacheFactory<?>>of(
          URIExtractionNamespace.class,
          new URIExtractionNamespaceCacheFactory(
              ImmutableMap.<String, SearchableVersionedDataFinder>of("file", new LocalFileTimestampVersionFinder())
          ),
          JDBCExtractionNamespace.class,
          new JDBCExtractionNamespaceCacheFactory()
      );
  lifecycle = new Lifecycle();
  lifecycle.start();
  NoopServiceEmitter noopServiceEmitter = new NoopServiceEmitter();
  scheduler = new CacheScheduler(
      noopServiceEmitter,
      factoryMap,
      new OnHeapNamespaceExtractionCacheManager(lifecycle, noopServiceEmitter)
  );
}
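The bare new Lifecycle() created here only does useful work because components such as OnHeapNamespaceExtractionCacheManager register handlers with it: start() runs each handler's start method and stop() unwinds them. A minimal sketch of that handler contract, assuming the Lifecycle.Handler API from io.druid.java.util.common.lifecycle (the handler body is illustrative):

Lifecycle lifecycle = new Lifecycle();
lifecycle.addHandler(
    new Lifecycle.Handler()
    {
      @Override
      public void start() throws Exception {
        // acquire resources, start background threads, etc.
      }

      @Override
      public void stop() {
        // release them when the lifecycle shuts down
      }
    }
);
lifecycle.start();  // invokes the handler's start()
lifecycle.stop();   // invokes the handler's stop()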
use of io.druid.java.util.common.lifecycle.Lifecycle in project druid by druid-io.
the class NamespaceExtractionCacheManagersTest method setUp.
@Before
public void setUp() throws Exception {
  lifecycle = new Lifecycle();
  lifecycle.start();
  manager = createCacheManager.apply(lifecycle);
}
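Here createCacheManager comes from the test's parameterization. A plausible shape for one parameter entry, assuming it is a Guava Function over the Lifecycle and reusing the OnHeapNamespaceExtractionCacheManager constructor shown in the previous example (not necessarily the test's exact wiring):

Function<Lifecycle, NamespaceExtractionCacheManager> createCacheManager =
    new Function<Lifecycle, NamespaceExtractionCacheManager>()
    {
      @Override
      public NamespaceExtractionCacheManager apply(Lifecycle lifecycle) {
        // Mirrors the constructor call in NamespacedExtractorModuleTest.setUp() above.
        return new OnHeapNamespaceExtractionCacheManager(lifecycle, new NoopServiceEmitter());
      }
    };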
use of io.druid.java.util.common.lifecycle.Lifecycle in project druid by druid-io.
the class ChainedExecutionQueryRunnerTest method testQueryTimeout.
@Test(timeout = 60000)
public void testQueryTimeout() throws Exception {
  ExecutorService exec = PrioritizedExecutorService.create(
      new Lifecycle(),
      new DruidProcessingConfig()
      {
        @Override
        public String getFormatString() {
          return "test";
        }

        @Override
        public int getNumThreads() {
          return 2;
        }
      }
  );
  final CountDownLatch queriesStarted = new CountDownLatch(2);
  final CountDownLatch queriesInterrupted = new CountDownLatch(2);
  final CountDownLatch queryIsRegistered = new CountDownLatch(1);
  Capture<ListenableFuture> capturedFuture = new Capture<>();
  QueryWatcher watcher = EasyMock.createStrictMock(QueryWatcher.class);
  watcher.registerQuery(
      EasyMock.<Query>anyObject(),
      EasyMock.and(EasyMock.<ListenableFuture>anyObject(), EasyMock.capture(capturedFuture))
  );
  EasyMock.expectLastCall().andAnswer(
      new IAnswer<Void>()
      {
        @Override
        public Void answer() throws Throwable {
          queryIsRegistered.countDown();
          return null;
        }
      }
  ).once();
  EasyMock.replay(watcher);
  ArrayBlockingQueue<DyingQueryRunner> interrupted = new ArrayBlockingQueue<>(3);
  Set<DyingQueryRunner> runners = Sets.newHashSet(
      new DyingQueryRunner(queriesStarted, queriesInterrupted, interrupted),
      new DyingQueryRunner(queriesStarted, queriesInterrupted, interrupted),
      new DyingQueryRunner(queriesStarted, queriesInterrupted, interrupted)
  );
  ChainedExecutionQueryRunner chainedRunner = new ChainedExecutionQueryRunner<>(
      exec,
      watcher,
      Lists.<QueryRunner<Integer>>newArrayList(runners)
  );
  HashMap<String, Object> context = new HashMap<String, Object>();
  final Sequence seq = chainedRunner.run(
      Druids.newTimeseriesQueryBuilder()
            .dataSource("test")
            .intervals("2014/2015")
            .aggregators(Lists.<AggregatorFactory>newArrayList(new CountAggregatorFactory("count")))
            .context(ImmutableMap.<String, Object>of(QueryContextKeys.TIMEOUT, 100, "queryId", "test"))
            .build(),
      context
  );
  Future resultFuture = Executors.newFixedThreadPool(1).submit(
      new Runnable()
      {
        @Override
        public void run() {
          Sequences.toList(seq, Lists.newArrayList());
        }
      }
  );
  // wait for query to register and start
  queryIsRegistered.await();
  queriesStarted.await();
  Assert.assertTrue(capturedFuture.hasCaptured());
  ListenableFuture future = capturedFuture.getValue();
  // wait for query to time out
  QueryInterruptedException cause = null;
  try {
    resultFuture.get();
  } catch (ExecutionException e) {
    Assert.assertTrue(e.getCause() instanceof QueryInterruptedException);
    Assert.assertEquals("Query timeout", ((QueryInterruptedException) e.getCause()).getErrorCode());
    cause = (QueryInterruptedException) e.getCause();
  }
  queriesInterrupted.await();
  Assert.assertNotNull(cause);
  Assert.assertTrue(future.isCancelled());
  DyingQueryRunner interrupted1 = interrupted.poll();
  synchronized (interrupted1) {
    Assert.assertTrue("runner 1 started", interrupted1.hasStarted);
    Assert.assertTrue("runner 1 interrupted", interrupted1.interrupted);
  }
  DyingQueryRunner interrupted2 = interrupted.poll();
  synchronized (interrupted2) {
    Assert.assertTrue("runner 2 started", interrupted2.hasStarted);
    Assert.assertTrue("runner 2 interrupted", interrupted2.interrupted);
  }
  runners.remove(interrupted1);
  runners.remove(interrupted2);
  DyingQueryRunner remainingRunner = runners.iterator().next();
  synchronized (remainingRunner) {
    Assert.assertTrue(
        "runner 3 should be interrupted or not have started",
        !remainingRunner.hasStarted || remainingRunner.interrupted
    );
  }
  Assert.assertFalse("runner 1 not completed", interrupted1.hasCompleted);
  Assert.assertFalse("runner 2 not completed", interrupted2.hasCompleted);
  Assert.assertFalse("runner 3 not completed", remainingRunner.hasCompleted);
  EasyMock.verify(watcher);
}
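In this test the Lifecycle handed to PrioritizedExecutorService.create is never started or stopped, which is fine for a short-lived unit test. Outside of tests, the same call is typically made with a lifecycle that is started before the pool is used and stopped afterward, the expectation being that stopping the lifecycle also tears down the executor. A minimal sketch of that pattern, assuming only the APIs shown above (the format string value is illustrative):

Lifecycle lifecycle = new Lifecycle();
ExecutorService exec = PrioritizedExecutorService.create(
    lifecycle,
    new DruidProcessingConfig()
    {
      @Override
      public String getFormatString() {
        return "processing-%d";  // thread-name format, illustrative value
      }

      @Override
      public int getNumThreads() {
        return 2;
      }
    }
);
lifecycle.start();   // after this the pool is ready for prioritized work
// ... submit work to exec ...
lifecycle.stop();    // expected to release the pool's threads along with the lifecycle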
use of io.druid.java.util.common.lifecycle.Lifecycle in project druid by druid-io.
the class LifecycleModule method getLifecycle.
@Provides
@LazySingleton
public Lifecycle getLifecycle(final Injector injector) {
  final Key<Set<KeyHolder>> keyHolderKey = Key.get(
      new TypeLiteral<Set<KeyHolder>>() {},
      Names.named("lifecycle")
  );
  final Set<KeyHolder> eagerClasses = injector.getInstance(keyHolderKey);
  Lifecycle lifecycle = new Lifecycle()
  {
    @Override
    public void start() throws Exception {
      for (KeyHolder<?> holder : eagerClasses) {
        // Pull the key so as to "eagerly" load up the class.
        injector.getInstance(holder.getKey());
      }
      super.start();
    }
  };
  scope.setLifecycle(lifecycle);
  lastScope.setLifecycle(lifecycle);
  return lifecycle;
}
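This provider is the source of the Lifecycle injected in the BaseJettyTest example at the top of the page: bindings added to the Set<KeyHolder> named "lifecycle" (for instance via LifecycleModule's registration helpers) are eagerly instantiated when start() is called. Consuming it follows the same pattern shown there; a minimal sketch, assuming an injector configured with this module:

Lifecycle lifecycle = injector.getInstance(Lifecycle.class);
lifecycle.start();   // instantiates the registered "lifecycle" keys, then starts handlers
try {
  // use injector-provided components while the lifecycle is running
} finally {
  lifecycle.stop();
}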