Use of org.apache.druid.query.lookup.namespace.CacheGenerator in project druid by druid-io.
From the class NamespacedExtractorModuleTest, method setUp:
@Before
public void setUp() throws Exception {
final Map<Class<? extends ExtractionNamespace>, CacheGenerator<?>> factoryMap = ImmutableMap.of(
    UriExtractionNamespace.class,
    new UriCacheGenerator(ImmutableMap.of("file", new LocalFileTimestampVersionFinder())),
    JdbcExtractionNamespace.class,
    new JdbcCacheGenerator()
);
lifecycle = new Lifecycle();
lifecycle.start();
NoopServiceEmitter noopServiceEmitter = new NoopServiceEmitter();
scheduler = new CacheScheduler(
    noopServiceEmitter,
    factoryMap,
    new OnHeapNamespaceExtractionCacheManager(lifecycle, noopServiceEmitter, new NamespaceExtractionConfig())
);
}
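Here the factory map decides which CacheGenerator serves each ExtractionNamespace subtype. For orientation, a generator only has to implement generateCache and publish a fresh versioned cache on every run. The sketch below is illustrative only (the variable name and key/value literals are placeholders) and mirrors the anonymous generator shown in CacheSchedulerTest further down this page:
// Illustrative sketch of a CacheGenerator implementation; the literals are placeholders.
final CacheGenerator<UriExtractionNamespace> staticGenerator = new CacheGenerator<UriExtractionNamespace>() {
  @Override
  public CacheScheduler.VersionedCache generateCache(
      final UriExtractionNamespace namespace,
      final CacheScheduler.EntryImpl<UriExtractionNamespace> id,
      final String lastVersion,
      final CacheScheduler scheduler
  ) throws InterruptedException {
    // Each run creates a new versioned cache that the scheduler then exposes for this entry.
    final CacheScheduler.VersionedCache versionedCache =
        scheduler.createVersionedCache(id, Long.toString(System.currentTimeMillis()));
    versionedCache.getCache().put("someKey", "someValue");
    return versionedCache;
  }
};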
Use of org.apache.druid.query.lookup.namespace.CacheGenerator in project druid by druid-io.
From the class JdbcExtractionNamespaceTest, method setup:
@Before
public void setup() throws Exception {
lifecycle = new Lifecycle();
updates = new AtomicLong(0L);
updateLock = new ReentrantLock(true);
closer = Closer.create();
setupTeardownService = MoreExecutors.listeningDecorator(Execs.multiThreaded(2, "JDBCExtractionNamespaceTeardown--%s"));
final ListenableFuture<Handle> setupFuture = setupTeardownService.submit(new Callable<Handle>() {
@Override
public Handle call() {
final Handle handle = derbyConnectorRule.getConnector().getDBI().open();
Assert.assertEquals(
    0,
    handle.createStatement(
        StringUtils.format(
            "CREATE TABLE %s (%s TIMESTAMP, %s VARCHAR(64), %s VARCHAR(64), %s VARCHAR(64))",
            TABLE_NAME, TS_COLUMN, FILTER_COLUMN, KEY_NAME, VAL_NAME
        )
    ).setQueryTimeout(1).execute()
);
handle.createStatement(StringUtils.format("TRUNCATE TABLE %s", TABLE_NAME)).setQueryTimeout(1).execute();
handle.commit();
closer.register(new Closeable() {
@Override
public void close() throws IOException {
handle.createStatement("DROP TABLE " + TABLE_NAME).setQueryTimeout(1).execute();
final ListenableFuture future = setupTeardownService.submit(new Runnable() {
@Override
public void run() {
handle.close();
}
});
try (Closeable closeable = new Closeable() {
@Override
public void close() {
future.cancel(true);
}
}) {
future.get(10, TimeUnit.SECONDS);
} catch (InterruptedException | ExecutionException | TimeoutException e) {
throw new IOException("Error closing handle", e);
}
}
});
closer.register(new Closeable() {
@Override
public void close() {
if (scheduler == null) {
return;
}
Assert.assertEquals(0, scheduler.getActiveEntries());
}
});
for (Map.Entry<String, String[]> entry : RENAMES.entrySet()) {
try {
String key = entry.getKey();
String value = entry.getValue()[0];
String filter = entry.getValue()[1];
insertValues(handle, key, value, filter, "2015-01-01 00:00:00");
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
}
NoopServiceEmitter noopServiceEmitter = new NoopServiceEmitter();
scheduler = new CacheScheduler(
    noopServiceEmitter,
    ImmutableMap.of(JdbcExtractionNamespace.class, new CacheGenerator<JdbcExtractionNamespace>() {
      private final JdbcCacheGenerator delegate = new JdbcCacheGenerator();

      @Override
      public CacheScheduler.VersionedCache generateCache(
          final JdbcExtractionNamespace namespace,
          final CacheScheduler.EntryImpl<JdbcExtractionNamespace> id,
          final String lastVersion,
          final CacheScheduler scheduler
      ) throws InterruptedException {
        updateLock.lockInterruptibly();
        try {
          log.debug("Running cache generator");
          try {
            return delegate.generateCache(namespace, id, lastVersion, scheduler);
          } finally {
            updates.incrementAndGet();
          }
        } finally {
          updateLock.unlock();
        }
      }
    }),
    new OnHeapNamespaceExtractionCacheManager(lifecycle, noopServiceEmitter, new NamespaceExtractionConfig())
);
try {
lifecycle.start();
} catch (Exception e) {
throw new RuntimeException(e);
}
closer.register(new Closeable() {
@Override
public void close() throws IOException {
final ListenableFuture future = setupTeardownService.submit(new Runnable() {
@Override
public void run() {
lifecycle.stop();
}
});
try (final Closeable closeable = new Closeable() {
@Override
public void close() {
future.cancel(true);
}
}) {
future.get(30, TimeUnit.SECONDS);
} catch (InterruptedException | ExecutionException | TimeoutException e) {
throw new IOException("Error stopping lifecycle", e);
}
}
});
return handle;
}
});
try (final Closeable ignore = () -> setupFuture.cancel(true)) {
handleRef = setupFuture.get(10, TimeUnit.SECONDS);
}
Assert.assertNotNull(handleRef);
}
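The insertValues helper used in the RENAMES loop above is not shown on this page. Judging from the CREATE TABLE statement and the call site, it presumably issues a simple INSERT through the same JDBI handle; the sketch below is an assumption about its shape (column order, quoting, and the updateLock handling are guesses), not the actual Druid code:
// Assumed shape of the helper; the real implementation lives in JdbcExtractionNamespaceTest.
private void insertValues(final Handle handle, final String key, final String val, final String filter, final String updateTs)
    throws InterruptedException
{
  updateLock.lockInterruptibly(); // assumption: test writes are serialized against cache generation
  try {
    handle.createStatement(
        StringUtils.format(
            "INSERT INTO %s (%s, %s, %s, %s) VALUES ('%s', '%s', '%s', '%s')",
            TABLE_NAME, TS_COLUMN, FILTER_COLUMN, KEY_NAME, VAL_NAME,
            updateTs, filter, key, val
        )
    ).setQueryTimeout(1).execute();
    handle.commit();
  } finally {
    updateLock.unlock();
  }
}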
Use of org.apache.druid.query.lookup.namespace.CacheGenerator in project druid by druid-io.
From the class CacheSchedulerTest, method setUp:
@Before
public void setUp() throws Exception {
lifecycle = new Lifecycle();
lifecycle.start();
cacheManager = createCacheManager.apply(lifecycle);
final Path tmpDir = temporaryFolder.newFolder().toPath();
final CacheGenerator<UriExtractionNamespace> cacheGenerator = new CacheGenerator<UriExtractionNamespace>() {
  @Override
  public CacheScheduler.VersionedCache generateCache(
      final UriExtractionNamespace extractionNamespace,
      final CacheScheduler.EntryImpl<UriExtractionNamespace> id,
      final String lastVersion,
      final CacheScheduler scheduler
  ) throws InterruptedException {
    // To make absolutely sure there is a unique currentTimeMillis
    Thread.sleep(2);
    String version = Long.toString(System.currentTimeMillis());
    CacheScheduler.VersionedCache versionedCache = scheduler.createVersionedCache(id, version);
    // Don't actually read off disk because TravisCI doesn't like that
    versionedCache.getCache().put(KEY, VALUE);
    return versionedCache;
  }
};
scheduler = new CacheScheduler(
    new NoopServiceEmitter(),
    ImmutableMap.of(UriExtractionNamespace.class, cacheGenerator),
    cacheManager
);
tmpFile = Files.createTempFile(tmpDir, "druidTestURIExtractionNS", ".dat").toFile();
try (OutputStream ostream = new FileOutputStream(tmpFile)) {
try (OutputStreamWriter out = new OutputStreamWriter(ostream, StandardCharsets.UTF_8)) {
// Since Travis sucks with disk related stuff, we override the disk reading part above.
// This is safe and should shake out any problem areas that accidentally read the file.
out.write("SHOULDN'T TRY TO PARSE");
out.flush();
}
}
}
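With the scheduler built in this setUp, a test would typically schedule a UriExtractionNamespace pointing at tmpFile, wait for the generator above to run once, and then read KEY back from the swapped-in cache. The lines below are a hedged sketch of that flow, not code from this page: scheduleAndWait, Entry.getCache(), and Entry.close() are assumed from the CacheScheduler API, and construction of the namespace is omitted.
// Hedged usage sketch; 'namespace' stands for a UriExtractionNamespace built around tmpFile.
CacheScheduler.Entry entry = scheduler.scheduleAndWait(namespace, 10_000); // assumed method name
Assert.assertNotNull(entry);
Assert.assertEquals(VALUE, entry.getCache().get(KEY)); // served by the generator, not read from disk
Assert.assertEquals(1, scheduler.getActiveEntries());
entry.close(); // assumed: closing the entry deactivates the namespace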