Use of io.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.
The class NamespaceExtractionCacheManagerExecutorsTest, method setUp:
@Before
public void setUp() throws Exception
{
  lifecycle = new Lifecycle();
  lifecycle.start();
  cacheManager = createCacheManager.apply(lifecycle);
  final Path tmpDir = temporaryFolder.newFolder().toPath();
  final ExtractionNamespaceCacheFactory<URIExtractionNamespace> cachePopulator =
      new ExtractionNamespaceCacheFactory<URIExtractionNamespace>()
      {
        @Override
        public CacheScheduler.VersionedCache populateCache(
            final URIExtractionNamespace extractionNamespace,
            final CacheScheduler.EntryImpl<URIExtractionNamespace> id,
            final String lastVersion,
            final CacheScheduler scheduler
        ) throws InterruptedException
        {
          // Sleep briefly so each populate call gets a unique currentTimeMillis-based version
          Thread.sleep(2);
          String version = Long.toString(System.currentTimeMillis());
          CacheScheduler.VersionedCache versionedCache = scheduler.createVersionedCache(id, version);
          // Don't actually read off disk, because TravisCI is unreliable with disk I/O
          versionedCache.getCache().put(KEY, VALUE);
          return versionedCache;
        }
      };
  scheduler = new CacheScheduler(
      new NoopServiceEmitter(),
      ImmutableMap.<Class<? extends ExtractionNamespace>, ExtractionNamespaceCacheFactory<?>>of(
          URIExtractionNamespace.class,
          cachePopulator
      ),
      cacheManager
  );
  tmpFile = Files.createTempFile(tmpDir, "druidTestURIExtractionNS", ".dat").toFile();
  try (OutputStream ostream = new FileOutputStream(tmpFile);
       OutputStreamWriter out = new OutputStreamWriter(ostream)) {
    // The populateCache override above never touches the file, so write deliberately
    // unparseable content: any code path that accidentally reads the file will fail loudly.
    out.write("SHOULDN'T TRY TO PARSE");
    out.flush();
  }
}
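The matching tearDown is not shown on this page; a minimal counterpart would simply stop what setUp started (a sketch, assuming lifecycle is a field of the test class, as setUp implies):

  @After
  public void tearDown() throws Exception
  {
    // Stopping the lifecycle shuts down the cache manager created in setUp
    lifecycle.stop();
  }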
Use of io.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.
The class StaticMapExtractionNamespaceCacheFactoryTest, method setup:
@Before
public void setup() throws Exception
{
  lifecycle = new Lifecycle();
  lifecycle.start();
  NoopServiceEmitter noopServiceEmitter = new NoopServiceEmitter();
  // A scheduler with no registered cache factories, backed by an on-heap cache manager;
  // the no-op emitter silently discards the cache manager's monitoring metrics.
  scheduler = new CacheScheduler(
      noopServiceEmitter,
      Collections.<Class<? extends ExtractionNamespace>, ExtractionNamespaceCacheFactory<?>>emptyMap(),
      new OnHeapNamespaceExtractionCacheManager(lifecycle, noopServiceEmitter)
  );
}
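For reference, NoopServiceEmitter itself is a ServiceEmitter whose methods do nothing, roughly along these lines (a from-memory sketch of the class, not quoted source; the exact set of overrides may differ):

  public class NoopServiceEmitter extends ServiceEmitter
  {
    public NoopServiceEmitter()
    {
      super("", "", null);
    }

    @Override
    public void start()
    {
    }

    @Override
    public void emit(Event event)
    {
      // Intentionally drop all events: tests don't need real metric emission
    }

    @Override
    public void close() throws IOException
    {
    }
  }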
Use of io.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.
The class URIExtractionNamespaceCacheFactoryTest, method getParameters:
@Parameterized.Parameters(name = "{0}")
public static Iterable<Object[]> getParameters() throws NoSuchMethodException
{
  // One writer per compression format: plain ".dat" files and GZIP-compressed ".gz" files
  final List<Object[]> compressionParams = ImmutableList.of(
      new Object[]{
          ".dat",
          new Function<File, OutputStream>()
          {
            @Nullable
            @Override
            public OutputStream apply(@Nullable File outFile)
            {
              try {
                return new FileOutputStream(outFile);
              }
              catch (IOException ex) {
                throw Throwables.propagate(ex);
              }
            }
          }
      },
      new Object[]{
          ".gz",
          new Function<File, OutputStream>()
          {
            @Nullable
            @Override
            public OutputStream apply(@Nullable File outFile)
            {
              try {
                final FileOutputStream fos = new FileOutputStream(outFile);
                return new GZIPOutputStream(fos)
                {
                  @Override
                  public void close() throws IOException
                  {
                    // Make sure the underlying stream is closed even if writing the GZIP footer fails
                    try {
                      super.close();
                    }
                    finally {
                      fos.close();
                    }
                  }
                };
              }
              catch (IOException ex) {
                throw Throwables.propagate(ex);
              }
            }
          }
      }
  );
  // One factory per cache manager implementation under test: on-heap and off-heap
  final List<Function<Lifecycle, NamespaceExtractionCacheManager>> cacheManagerCreators = ImmutableList.of(
      new Function<Lifecycle, NamespaceExtractionCacheManager>()
      {
        @Override
        public NamespaceExtractionCacheManager apply(Lifecycle lifecycle)
        {
          return new OnHeapNamespaceExtractionCacheManager(lifecycle, new NoopServiceEmitter());
        }
      },
      new Function<Lifecycle, NamespaceExtractionCacheManager>()
      {
        @Override
        public NamespaceExtractionCacheManager apply(Lifecycle lifecycle)
        {
          return new OffHeapNamespaceExtractionCacheManager(lifecycle, new NoopServiceEmitter());
        }
      }
  );
  // Lazily produce the Cartesian product of compression formats and cache manager creators,
  // yielding one parameter set per (compression, cache manager) pair
  return new Iterable<Object[]>()
  {
    @Override
    public Iterator<Object[]> iterator()
    {
      return new Iterator<Object[]>()
      {
        Iterator<Object[]> compressionIt = compressionParams.iterator();

        Iterator<Function<Lifecycle, NamespaceExtractionCacheManager>> cacheManagerCreatorsIt =
            cacheManagerCreators.iterator();

        Object[] compressions = compressionIt.next();

        @Override
        public boolean hasNext()
        {
          return compressionIt.hasNext() || cacheManagerCreatorsIt.hasNext();
        }

        @Override
        public Object[] next()
        {
          if (cacheManagerCreatorsIt.hasNext()) {
            Function<Lifecycle, NamespaceExtractionCacheManager> cacheManagerCreator = cacheManagerCreatorsIt.next();
            return new Object[]{compressions[0], compressions[1], cacheManagerCreator};
          } else {
            // Current compression exhausted: restart the inner iterator, advance to the next compression
            cacheManagerCreatorsIt = cacheManagerCreators.iterator();
            compressions = compressionIt.next();
            return next();
          }
        }

        @Override
        public void remove()
        {
          throw new UOE("Cannot remove");
        }
      };
    }
  };
}
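The hand-rolled iterator above is just a lazy Cartesian product of compressionParams and cacheManagerCreators. For comparison, an eager equivalent (a sketch, not code from the repo; assumes java.util.ArrayList is imported) fits in a few lines:

  final List<Object[]> params = new ArrayList<>();
  for (Object[] compression : compressionParams) {
    for (Function<Lifecycle, NamespaceExtractionCacheManager> creator : cacheManagerCreators) {
      // Pair each compression format with each cache manager creator
      params.add(new Object[]{compression[0], compression[1], creator});
    }
  }
  return params;

The lazy form defers no expensive work here, since the creators are plain factory functions, so the main difference is readability.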
Use of io.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.
The class JDBCExtractionNamespaceTest, method setup:
@Before
public void setup() throws Exception
{
  lifecycle = new Lifecycle();
  updates = new AtomicLong(0L);
  updateLock = new ReentrantLock(true);
  closer = Closer.create();
  setupTeardownService =
      MoreExecutors.listeningDecorator(Execs.multiThreaded(2, "JDBCExtractionNamespaceTeardown--%s"));
  final ListenableFuture<Handle> setupFuture = setupTeardownService.submit(
      new Callable<Handle>()
      {
        @Override
        public Handle call()
        {
          final Handle handle = derbyConnectorRule.getConnector().getDBI().open();
          Assert.assertEquals(
              0,
              handle.createStatement(
                  String.format(
                      "CREATE TABLE %s (%s TIMESTAMP, %s VARCHAR(64), %s VARCHAR(64))",
                      tableName,
                      tsColumn_,
                      keyName,
                      valName
                  )
              ).setQueryTimeout(1).execute()
          );
          handle.createStatement(String.format("TRUNCATE TABLE %s", tableName)).setQueryTimeout(1).execute();
          handle.commit();
          // Drop the table and close the handle on teardown, cancelling the close task if it hangs
          closer.register(new Closeable()
          {
            @Override
            public void close() throws IOException
            {
              handle.createStatement("DROP TABLE " + tableName).setQueryTimeout(1).execute();
              final ListenableFuture future = setupTeardownService.submit(new Runnable()
              {
                @Override
                public void run()
                {
                  handle.close();
                }
              });
              try (Closeable closeable = new Closeable()
              {
                @Override
                public void close() throws IOException
                {
                  future.cancel(true);
                }
              }) {
                future.get(10, TimeUnit.SECONDS);
              }
              catch (InterruptedException | ExecutionException | TimeoutException e) {
                throw new IOException("Error closing handle", e);
              }
            }
          });
          // Verify that every scheduler entry has been closed by the time the test tears down
          closer.register(new Closeable()
          {
            @Override
            public void close() throws IOException
            {
              if (scheduler == null) {
                return;
              }
              Assert.assertEquals(0, scheduler.getActiveEntries());
            }
          });
          for (Map.Entry<String, String> entry : renames.entrySet()) {
            try {
              insertValues(handle, entry.getKey(), entry.getValue(), "2015-01-01 00:00:00");
            }
            catch (InterruptedException e) {
              Thread.currentThread().interrupt();
              throw Throwables.propagate(e);
            }
          }
          NoopServiceEmitter noopServiceEmitter = new NoopServiceEmitter();
          scheduler = new CacheScheduler(
              noopServiceEmitter,
              ImmutableMap.<Class<? extends ExtractionNamespace>, ExtractionNamespaceCacheFactory<?>>of(
                  JDBCExtractionNamespace.class,
                  new ExtractionNamespaceCacheFactory<JDBCExtractionNamespace>()
                  {
                    private final JDBCExtractionNamespaceCacheFactory delegate =
                        new JDBCExtractionNamespaceCacheFactory();

                    @Override
                    public CacheScheduler.VersionedCache populateCache(
                        final JDBCExtractionNamespace namespace,
                        final CacheScheduler.EntryImpl<JDBCExtractionNamespace> id,
                        final String lastVersion,
                        final CacheScheduler scheduler
                    ) throws InterruptedException
                    {
                      // Serialize populator runs and count them so tests can await cache updates
                      updateLock.lockInterruptibly();
                      try {
                        log.debug("Running cache populator");
                        try {
                          return delegate.populateCache(namespace, id, lastVersion, scheduler);
                        }
                        finally {
                          updates.incrementAndGet();
                        }
                      }
                      finally {
                        updateLock.unlock();
                      }
                    }
                  }
              ),
              new OnHeapNamespaceExtractionCacheManager(lifecycle, noopServiceEmitter)
          );
          try {
            lifecycle.start();
          }
          catch (Exception e) {
            throw Throwables.propagate(e);
          }
          // Stop the lifecycle on teardown from the executor, with the same cancel-on-timeout guard
          closer.register(new Closeable()
          {
            @Override
            public void close() throws IOException
            {
              final ListenableFuture future = setupTeardownService.submit(new Runnable()
              {
                @Override
                public void run()
                {
                  lifecycle.stop();
                }
              });
              try (final Closeable closeable = new Closeable()
              {
                @Override
                public void close() throws IOException
                {
                  future.cancel(true);
                }
              }) {
                future.get(30, TimeUnit.SECONDS);
              }
              catch (InterruptedException | ExecutionException | TimeoutException e) {
                throw new IOException("Error stopping lifecycle", e);
              }
            }
          });
          return handle;
        }
      }
  );
  // If the setup task doesn't finish in time, cancel it rather than leak it
  try (final Closeable closeable = new Closeable()
  {
    @Override
    public void close() throws IOException
    {
      if (!setupFuture.isDone() && !setupFuture.cancel(true) && !setupFuture.isDone()) {
        throw new IOException("Unable to stop future");
      }
    }
  }) {
    handleRef = setupFuture.get(10, TimeUnit.SECONDS);
  }
  Assert.assertNotNull(handleRef);
}
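The same guard appears twice above: submit work to the teardown executor, wait with a timeout, and cancel the future if the wait fails. A reusable form of that pattern could look like the following (a hypothetical helper, not present in the repo; the name getWithCancellation is illustrative):

  static <T> T getWithCancellation(final Future<T> future, long timeout, TimeUnit unit) throws IOException
  {
    // Closing the guard cancels the future; cancel() is a no-op if it already completed normally
    try (Closeable canceler = new Closeable()
    {
      @Override
      public void close()
      {
        future.cancel(true);
      }
    }) {
      return future.get(timeout, unit);
    }
    catch (InterruptedException | ExecutionException | TimeoutException e) {
      throw new IOException("Error waiting on future", e);
    }
  }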
Use of io.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.
The class QueryResourceTest, method testDenySecuredGetServer:
@Test(timeout = 60_000L)
public void testDenySecuredGetServer() throws Exception
{
  final CountDownLatch waitForCancellationLatch = new CountDownLatch(1);
  final CountDownLatch waitFinishLatch = new CountDownLatch(2);
  final CountDownLatch startAwaitLatch = new CountDownLatch(1);
  EasyMock.expect(testServletRequest.getAttribute(EasyMock.anyString())).andReturn(
      new AuthorizationInfo()
      {
        @Override
        public Access isAuthorized(Resource resource, Action action)
        {
          // READ is the query itself; WRITE corresponds to cancellation of the query
          if (action.equals(Action.READ)) {
            try {
              // Hold the query open until the cancellation attempt has been denied
              waitForCancellationLatch.await();
            }
            catch (InterruptedException e) {
              Throwables.propagate(e);
            }
            return new Access(true);
          } else {
            // Deny access to cancel the query
            return new Access(false);
          }
        }
      }
  ).times(2);
  EasyMock.replay(testServletRequest);
  queryResource = new QueryResource(
      warehouse,
      serverConfig,
      jsonMapper,
      jsonMapper,
      testSegmentWalker,
      new NoopServiceEmitter(),
      new NoopRequestLogger(),
      queryManager,
      new AuthConfig(true)
  );
  final String queryString = "{\"queryType\":\"timeBoundary\", \"dataSource\":\"allow\","
                             + "\"context\":{\"queryId\":\"id_1\"}}";
  ObjectMapper mapper = new DefaultObjectMapper();
  Query query = mapper.readValue(queryString, Query.class);
  ListenableFuture future = MoreExecutors.listeningDecorator(
      Execs.singleThreaded("test_query_resource_%s")
  ).submit(
      new Runnable()
      {
        @Override
        public void run()
        {
          try {
            startAwaitLatch.countDown();
            Response response = queryResource.doPost(
                new ByteArrayInputStream(queryString.getBytes("UTF-8")),
                null,
                testServletRequest
            );
            Assert.assertEquals(Response.Status.OK.getStatusCode(), response.getStatus());
          }
          catch (IOException e) {
            Throwables.propagate(e);
          }
          waitFinishLatch.countDown();
        }
      }
  );
  queryManager.registerQuery(query, future);
  startAwaitLatch.await();
  // Attempt to cancel the running query; the AuthorizationInfo above denies WRITE, so expect 403
  Executors.newSingleThreadExecutor().submit(
      new Runnable()
      {
        @Override
        public void run()
        {
          Response response = queryResource.getServer("id_1", testServletRequest);
          Assert.assertEquals(Response.Status.FORBIDDEN.getStatusCode(), response.getStatus());
          waitForCancellationLatch.countDown();
          waitFinishLatch.countDown();
        }
      }
  );
  waitFinishLatch.await();
}
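For contrast with the deny-on-WRITE stub above, an allow-all AuthorizationInfo is a one-liner (a sketch reusing the types from the snippet, not code from the repo):

  AuthorizationInfo allowAll = new AuthorizationInfo()
  {
    @Override
    public Access isAuthorized(Resource resource, Action action)
    {
      // Permit every action; useful for the happy-path variants of these tests
      return new Access(true);
    }
  };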