use of io.druid.java.util.common.lifecycle.Lifecycle in project druid by druid-io.
the class JDBCExtractionNamespaceTest method setup.
@Before
public void setup() throws Exception
{
  lifecycle = new Lifecycle();
  updates = new AtomicLong(0L);
  updateLock = new ReentrantLock(true);
  closer = Closer.create();
  setupTeardownService = MoreExecutors.listeningDecorator(Execs.multiThreaded(2, "JDBCExtractionNamespaceTeardown--%s"));
  final ListenableFuture<Handle> setupFuture = setupTeardownService.submit(new Callable<Handle>() {
    @Override
    public Handle call() {
      final Handle handle = derbyConnectorRule.getConnector().getDBI().open();
      // Create the lookup table and make sure it starts out empty
      Assert.assertEquals(
          0,
          handle.createStatement(
              String.format(
                  "CREATE TABLE %s (%s TIMESTAMP, %s VARCHAR(64), %s VARCHAR(64))",
                  tableName, tsColumn_, keyName, valName
              )
          ).setQueryTimeout(1).execute()
      );
      handle.createStatement(String.format("TRUNCATE TABLE %s", tableName)).setQueryTimeout(1).execute();
      handle.commit();
      // Teardown: drop the table, then close the handle on the setup/teardown executor
      closer.register(new Closeable() {
        @Override
        public void close() throws IOException {
          handle.createStatement("DROP TABLE " + tableName).setQueryTimeout(1).execute();
          final ListenableFuture future = setupTeardownService.submit(new Runnable() {
            @Override
            public void run() {
              handle.close();
            }
          });
          try (Closeable closeable = new Closeable() {
            @Override
            public void close() throws IOException {
              future.cancel(true);
            }
          }) {
            future.get(10, TimeUnit.SECONDS);
          } catch (InterruptedException | ExecutionException | TimeoutException e) {
            throw new IOException("Error closing handle", e);
          }
        }
      });
      // Teardown: verify the scheduler has no active cache entries left behind
      closer.register(new Closeable() {
        @Override
        public void close() throws IOException {
          if (scheduler == null) {
            return;
          }
          Assert.assertEquals(0, scheduler.getActiveEntries());
        }
      });
      // Seed the lookup table with the expected key/value pairs
      for (Map.Entry<String, String> entry : renames.entrySet()) {
        try {
          insertValues(handle, entry.getKey(), entry.getValue(), "2015-01-01 00:00:00");
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw Throwables.propagate(e);
        }
      }
      NoopServiceEmitter noopServiceEmitter = new NoopServiceEmitter();
      // Wrap the JDBC cache factory so the test can gate and count cache updates
      scheduler = new CacheScheduler(
          noopServiceEmitter,
          ImmutableMap.<Class<? extends ExtractionNamespace>, ExtractionNamespaceCacheFactory<?>>of(
              JDBCExtractionNamespace.class,
              new ExtractionNamespaceCacheFactory<JDBCExtractionNamespace>() {
                private final JDBCExtractionNamespaceCacheFactory delegate = new JDBCExtractionNamespaceCacheFactory();

                @Override
                public CacheScheduler.VersionedCache populateCache(
                    final JDBCExtractionNamespace namespace,
                    final CacheScheduler.EntryImpl<JDBCExtractionNamespace> id,
                    final String lastVersion,
                    final CacheScheduler scheduler
                ) throws InterruptedException {
                  updateLock.lockInterruptibly();
                  try {
                    log.debug("Running cache populator");
                    try {
                      return delegate.populateCache(namespace, id, lastVersion, scheduler);
                    } finally {
                      updates.incrementAndGet();
                    }
                  } finally {
                    updateLock.unlock();
                  }
                }
              }
          ),
          new OnHeapNamespaceExtractionCacheManager(lifecycle, noopServiceEmitter)
      );
      try {
        lifecycle.start();
      } catch (Exception e) {
        throw Throwables.propagate(e);
      }
      // Teardown: stop the lifecycle on the setup/teardown executor, cancelling on timeout
      closer.register(new Closeable() {
        @Override
        public void close() throws IOException {
          final ListenableFuture future = setupTeardownService.submit(new Runnable() {
            @Override
            public void run() {
              lifecycle.stop();
            }
          });
          try (final Closeable closeable = new Closeable() {
            @Override
            public void close() throws IOException {
              future.cancel(true);
            }
          }) {
            future.get(30, TimeUnit.SECONDS);
          } catch (InterruptedException | ExecutionException | TimeoutException e) {
            throw new IOException("Error stopping lifecycle", e);
          }
        }
      });
      return handle;
    }
  });
  try (final Closeable closeable = new Closeable() {
    @Override
    public void close() throws IOException {
      if (!setupFuture.isDone() && !setupFuture.cancel(true) && !setupFuture.isDone()) {
        throw new IOException("Unable to stop future");
      }
    }
  }) {
    handleRef = setupFuture.get(10, TimeUnit.SECONDS);
  }
  Assert.assertNotNull(handleRef);
}
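The setup above follows one pattern throughout: every resource created in @Before, including the Lifecycle itself, is paired with a Closeable registered on a Guava Closer so teardown runs in reverse order. A minimal sketch of that pairing, assuming only the Lifecycle start()/stop() contract and an addHandler(Lifecycle.Handler) registration as exposed by this version of druid's java-util; the class name and print statements are purely illustrative:

import com.google.common.io.Closer;
import io.druid.java.util.common.lifecycle.Lifecycle;

import java.io.Closeable;
import java.io.IOException;

public class LifecycleCloserSketch
{
  public static void main(String[] args) throws Exception
  {
    final Closer closer = Closer.create();
    final Lifecycle lifecycle = new Lifecycle();

    // Handlers must be registered before start(); stop() later runs them in reverse order.
    lifecycle.addHandler(
        new Lifecycle.Handler()
        {
          @Override
          public void start()
          {
            System.out.println("resource started");
          }

          @Override
          public void stop()
          {
            System.out.println("resource stopped");
          }
        }
    );

    lifecycle.start();

    // Mirror the test's teardown style: stopping the lifecycle is just another Closeable.
    closer.register(new Closeable()
    {
      @Override
      public void close() throws IOException
      {
        lifecycle.stop();
      }
    });

    // Closer.close() runs registered Closeables in reverse registration order.
    closer.close();
  }
}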
use of io.druid.java.util.common.lifecycle.Lifecycle in project druid by druid-io.
the class PrioritizedExecutorServiceTest method setUp.
@Before
public void setUp() throws Exception
{
  // Create a single-threaded prioritized executor bound to a fresh Lifecycle
  exec = PrioritizedExecutorService.create(
      new Lifecycle(),
      new DruidProcessingConfig() {
        @Override
        public String getFormatString() {
          return "test";
        }

        @Override
        public int getNumThreads() {
          return 1;
        }

        @Override
        public boolean isFifo() {
          return useFifo;
        }
      }
  );
  latch = new CountDownLatch(1);
  finishLatch = new CountDownLatch(3);
}
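PrioritizedExecutorService.create takes the Lifecycle so that the thread pool's shutdown can be tied to it; the test above simply passes a throwaway Lifecycle it never drives. A rough sketch of the coupling when the lifecycle is actually started and stopped, under the assumption that create() registers a stop handler on the lifecycle it is given; the import paths, class name, and config values are assumptions for illustration only:

import io.druid.java.util.common.lifecycle.Lifecycle;
import io.druid.query.DruidProcessingConfig;
import io.druid.query.PrioritizedExecutorService;

public class PrioritizedExecutorLifecycleSketch
{
  public static void main(String[] args) throws Exception
  {
    final Lifecycle lifecycle = new Lifecycle();

    // The executor is created against the lifecycle it should be shut down with.
    final PrioritizedExecutorService exec = PrioritizedExecutorService.create(
        lifecycle,
        new DruidProcessingConfig()
        {
          @Override
          public String getFormatString()
          {
            return "sketch-%s"; // thread-name format; illustrative value
          }

          @Override
          public int getNumThreads()
          {
            return 1;
          }
        }
    );

    lifecycle.start();
    try {
      // ... submit prioritized work against exec here ...
    }
    finally {
      // Stopping the lifecycle is what shuts the executor down (assumed behaviour of create()).
      lifecycle.stop();
    }
  }
}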
use of io.druid.java.util.common.lifecycle.Lifecycle in project druid by druid-io.
the class AsyncQueryForwardingServletTest method setup.
@Before
public void setup() throws Exception
{
  setProperties();
  Injector injector = setupInjector();
  final DruidNode node = injector.getInstance(Key.get(DruidNode.class, Self.class));
  port = node.getPort();
  // Pick two additional free ports for the proxy targets
  port1 = SocketUtil.findOpenPortFrom(port + 1);
  port2 = SocketUtil.findOpenPortFrom(port1 + 1);
  lifecycle = injector.getInstance(Lifecycle.class);
  lifecycle.start();
  ClientHolder holder = injector.getInstance(ClientHolder.class);
  client = holder.getClient();
}
use of io.druid.java.util.common.lifecycle.Lifecycle in project druid by druid-io.
the class GuiceRunnable method initLifecycle.
public Lifecycle initLifecycle(Injector injector)
{
  try {
    LogLevelAdjuster.register();
    final Lifecycle lifecycle = injector.getInstance(Lifecycle.class);
    final StartupLoggingConfig startupLoggingConfig = injector.getInstance(StartupLoggingConfig.class);
    log.info(
        "Starting up with processors[%,d], memory[%,d].",
        Runtime.getRuntime().availableProcessors(),
        Runtime.getRuntime().totalMemory()
    );
    if (startupLoggingConfig.isLogProperties()) {
      // Log all properties in sorted order, masking any whose name matches a configured mask
      final Set<String> maskProperties = Sets.newHashSet(startupLoggingConfig.getMaskProperties());
      final Properties props = injector.getInstance(Properties.class);
      for (String propertyName : Ordering.natural().sortedCopy(props.stringPropertyNames())) {
        String property = props.getProperty(propertyName);
        for (String masked : maskProperties) {
          if (propertyName.contains(masked)) {
            property = "<masked>";
            break;
          }
        }
        log.info("* %s: %s", propertyName, property);
      }
    }
    try {
      lifecycle.start();
    } catch (Throwable t) {
      log.error(t, "Error when starting up. Failing.");
      System.exit(1);
    }
    return lifecycle;
  } catch (Exception e) {
    throw Throwables.propagate(e);
  }
}
use of io.druid.java.util.common.lifecycle.Lifecycle in project druid by druid-io.
the class ServerRunnable method run.
@Override
public void run()
{
  final Injector injector = makeInjector();
  final Lifecycle lifecycle = initLifecycle(injector);
  try {
    // Block the main thread until the lifecycle is stopped
    lifecycle.join();
  } catch (Exception e) {
    throw Throwables.propagate(e);
  }
}