Use of org.apache.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.
The class NamespacedExtractorModuleTest, method setUp.
@Before
public void setUp() throws Exception
{
  final Map<Class<? extends ExtractionNamespace>, CacheGenerator<?>> factoryMap = ImmutableMap.of(
      UriExtractionNamespace.class,
      new UriCacheGenerator(ImmutableMap.of("file", new LocalFileTimestampVersionFinder())),
      JdbcExtractionNamespace.class,
      new JdbcCacheGenerator()
  );
  lifecycle = new Lifecycle();
  lifecycle.start();
  NoopServiceEmitter noopServiceEmitter = new NoopServiceEmitter();
  scheduler = new CacheScheduler(
      noopServiceEmitter,
      factoryMap,
      new OnHeapNamespaceExtractionCacheManager(lifecycle, noopServiceEmitter, new NamespaceExtractionConfig())
  );
}
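For context, NoopServiceEmitter is Druid's test-support emitter: it accepts every event and silently drops it, which lets tests like the one above satisfy a ServiceEmitter dependency without wiring up a real metrics pipeline. A minimal sketch of its shape, assuming the druid-io implementation (the real class may override additional emit variants):

public class NoopServiceEmitter extends ServiceEmitter
{
  public NoopServiceEmitter()
  {
    super("", "", null);
  }

  @Override
  public void emit(Event event)
  {
    // deliberately drop the event
  }

  @Override
  public void start()
  {
    // no-op: there is no underlying emitter to start
  }

  @Override
  public void flush()
  {
    // no-op
  }

  @Override
  public void close()
  {
    // no-op
  }
}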
Use of org.apache.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.
The class JdbcExtractionNamespaceTest, method setup.
@Before
public void setup() throws Exception
{
  lifecycle = new Lifecycle();
  updates = new AtomicLong(0L);
  updateLock = new ReentrantLock(true);
  closer = Closer.create();
  setupTeardownService = MoreExecutors.listeningDecorator(
      Execs.multiThreaded(2, "JDBCExtractionNamespaceTeardown--%s")
  );
  final ListenableFuture<Handle> setupFuture = setupTeardownService.submit(new Callable<Handle>() {
    @Override
    public Handle call()
    {
      final Handle handle = derbyConnectorRule.getConnector().getDBI().open();
      Assert.assertEquals(
          0,
          handle.createStatement(
              StringUtils.format(
                  "CREATE TABLE %s (%s TIMESTAMP, %s VARCHAR(64), %s VARCHAR(64), %s VARCHAR(64))",
                  TABLE_NAME, TS_COLUMN, FILTER_COLUMN, KEY_NAME, VAL_NAME
              )
          ).setQueryTimeout(1).execute()
      );
      handle.createStatement(StringUtils.format("TRUNCATE TABLE %s", TABLE_NAME)).setQueryTimeout(1).execute();
      handle.commit();
      closer.register(new Closeable() {
        @Override
        public void close() throws IOException
        {
          handle.createStatement("DROP TABLE " + TABLE_NAME).setQueryTimeout(1).execute();
          final ListenableFuture<?> future = setupTeardownService.submit(new Runnable() {
            @Override
            public void run()
            {
              handle.close();
            }
          });
          try (Closeable closeable = new Closeable() {
            @Override
            public void close()
            {
              future.cancel(true);
            }
          }) {
            future.get(10, TimeUnit.SECONDS);
          }
          catch (InterruptedException | ExecutionException | TimeoutException e) {
            throw new IOException("Error closing handle", e);
          }
        }
      });
      closer.register(new Closeable() {
        @Override
        public void close()
        {
          if (scheduler == null) {
            return;
          }
          Assert.assertEquals(0, scheduler.getActiveEntries());
        }
      });
      for (Map.Entry<String, String[]> entry : RENAMES.entrySet()) {
        try {
          String key = entry.getKey();
          String value = entry.getValue()[0];
          String filter = entry.getValue()[1];
          insertValues(handle, key, value, filter, "2015-01-01 00:00:00");
        }
        catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw new RuntimeException(e);
        }
      }
      NoopServiceEmitter noopServiceEmitter = new NoopServiceEmitter();
      scheduler = new CacheScheduler(
          noopServiceEmitter,
          ImmutableMap.of(
              JdbcExtractionNamespace.class,
              new CacheGenerator<JdbcExtractionNamespace>() {
                private final JdbcCacheGenerator delegate = new JdbcCacheGenerator();

                @Override
                public CacheScheduler.VersionedCache generateCache(
                    final JdbcExtractionNamespace namespace,
                    final CacheScheduler.EntryImpl<JdbcExtractionNamespace> id,
                    final String lastVersion,
                    final CacheScheduler scheduler
                ) throws InterruptedException
                {
                  updateLock.lockInterruptibly();
                  try {
                    log.debug("Running cache generator");
                    try {
                      return delegate.generateCache(namespace, id, lastVersion, scheduler);
                    }
                    finally {
                      updates.incrementAndGet();
                    }
                  }
                  finally {
                    updateLock.unlock();
                  }
                }
              }
          ),
          new OnHeapNamespaceExtractionCacheManager(lifecycle, noopServiceEmitter, new NamespaceExtractionConfig())
      );
      try {
        lifecycle.start();
      }
      catch (Exception e) {
        throw new RuntimeException(e);
      }
      closer.register(new Closeable() {
        @Override
        public void close() throws IOException
        {
          final ListenableFuture<?> future = setupTeardownService.submit(new Runnable() {
            @Override
            public void run()
            {
              lifecycle.stop();
            }
          });
          try (final Closeable closeable = new Closeable() {
            @Override
            public void close()
            {
              future.cancel(true);
            }
          }) {
            future.get(30, TimeUnit.SECONDS);
          }
          catch (InterruptedException | ExecutionException | TimeoutException e) {
            throw new IOException("Error stopping lifecycle", e);
          }
        }
      });
      return handle;
    }
  });
  try (final Closeable ignore = () -> setupFuture.cancel(true)) {
    handleRef = setupFuture.get(10, TimeUnit.SECONDS);
  }
  Assert.assertNotNull(handleRef);
}
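The insertValues helper called in the loop above is not shown on this page. A plausible sketch of it, purely hypothetical (the real test may bind parameters instead of formatting the SQL): it takes the same updateLock as the cache generator, so test inserts and cache generations never interleave.

// Hypothetical reconstruction of the insertValues helper referenced above.
private void insertValues(Handle handle, String key, String val, String filter, String updateTs)
    throws InterruptedException
{
  updateLock.lockInterruptibly();
  try {
    handle.createStatement(
        StringUtils.format(
            "INSERT INTO %s (%s, %s, %s, %s) VALUES ('%s', '%s', '%s', '%s')",
            TABLE_NAME, TS_COLUMN, FILTER_COLUMN, KEY_NAME, VAL_NAME,
            updateTs, filter, key, val
        )
    ).setQueryTimeout(1).execute();
    handle.commit();
  }
  finally {
    updateLock.unlock();
  }
}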
Use of org.apache.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.
The class CachingClusteredClientBenchmark, method setup.
@Setup(Level.Trial)
public void setup()
{
  final String schemaName = "basic";
  parallelCombine = parallelism > 0;
  GeneratorSchemaInfo schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schemaName);
  Map<DataSegment, QueryableIndex> queryableIndexes = Maps.newHashMapWithExpectedSize(numServers);
  for (int i = 0; i < numServers; i++) {
    final DataSegment dataSegment = DataSegment.builder()
                                               .dataSource(DATA_SOURCE)
                                               .interval(schemaInfo.getDataInterval())
                                               .version("1")
                                               .shardSpec(new LinearShardSpec(i))
                                               .size(0)
                                               .build();
    final SegmentGenerator segmentGenerator = closer.register(new SegmentGenerator());
    LOG.info("Starting benchmark setup using cacheDir[%s], rows[%,d].", segmentGenerator.getCacheDir(), rowsPerSegment);
    final QueryableIndex index = segmentGenerator.generate(dataSegment, schemaInfo, Granularities.NONE, rowsPerSegment);
    queryableIndexes.put(dataSegment, index);
  }
  final DruidProcessingConfig processingConfig = new DruidProcessingConfig() {
    @Override
    public String getFormatString()
    {
      return null;
    }

    @Override
    public int intermediateComputeSizeBytes()
    {
      return PROCESSING_BUFFER_SIZE;
    }

    @Override
    public int getNumMergeBuffers()
    {
      return 1;
    }

    @Override
    public int getNumThreads()
    {
      return numProcessingThreads;
    }

    @Override
    public boolean useParallelMergePool()
    {
      return true;
    }
  };
  conglomerate = new DefaultQueryRunnerFactoryConglomerate(
      ImmutableMap.<Class<? extends Query>, QueryRunnerFactory>builder()
          .put(
              TimeseriesQuery.class,
              new TimeseriesQueryRunnerFactory(
                  new TimeseriesQueryQueryToolChest(),
                  new TimeseriesQueryEngine(),
                  QueryRunnerTestHelper.NOOP_QUERYWATCHER
              )
          )
          .put(
              TopNQuery.class,
              new TopNQueryRunnerFactory(
                  new StupidPool<>("TopNQueryRunnerFactory-bufferPool", () -> ByteBuffer.allocate(PROCESSING_BUFFER_SIZE)),
                  new TopNQueryQueryToolChest(new TopNQueryConfig()),
                  QueryRunnerTestHelper.NOOP_QUERYWATCHER
              )
          )
          .put(
              GroupByQuery.class,
              makeGroupByQueryRunnerFactory(
                  GroupByQueryRunnerTest.DEFAULT_MAPPER,
                  new GroupByQueryConfig() {
                    @Override
                    public String getDefaultStrategy()
                    {
                      return GroupByStrategySelector.STRATEGY_V2;
                    }
                  },
                  processingConfig
              )
          )
          .build()
  );
  toolChestWarehouse = new QueryToolChestWarehouse() {
    @Override
    public <T, QueryType extends Query<T>> QueryToolChest<T, QueryType> getToolChest(final QueryType query)
    {
      return conglomerate.findFactory(query).getToolchest();
    }
  };
  SimpleServerView serverView = new SimpleServerView();
  int serverSuffix = 1;
  for (Entry<DataSegment, QueryableIndex> entry : queryableIndexes.entrySet()) {
    serverView.addServer(createServer(serverSuffix++), entry.getKey(), entry.getValue());
  }
  processingPool = Execs.multiThreaded(processingConfig.getNumThreads(), "caching-clustered-client-benchmark");
  forkJoinPool = new ForkJoinPool(
      (int) Math.ceil(Runtime.getRuntime().availableProcessors() * 0.75),
      ForkJoinPool.defaultForkJoinWorkerThreadFactory,
      null,
      true
  );
  cachingClusteredClient = new CachingClusteredClient(
      toolChestWarehouse,
      serverView,
      MapCache.create(0),
      JSON_MAPPER,
      new ForegroundCachePopulator(JSON_MAPPER, new CachePopulatorStats(), 0),
      new CacheConfig(),
      new DruidHttpClientConfig(),
      processingConfig,
      forkJoinPool,
      QueryStackTests.DEFAULT_NOOP_SCHEDULER,
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      new NoopServiceEmitter()
  );
}
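With the client assembled, a benchmark method only needs to decorate the cluster runner the way a broker would. A sketch of such a driver, assuming a query built elsewhere in the benchmark and glossing over the unchecked generics casts the real code needs (CachingClusteredClient implements QuerySegmentWalker, which supplies getQueryRunnerForIntervals):

// Sketch: run a query through the caching clustered client and collect results.
private <T> List<T> runQuery(Query<T> query)
{
  // Apply the broker-side pre-merge, merge, and post-merge decorations.
  QueryRunner<T> runner = new FluentQueryRunnerBuilder<>(toolChestWarehouse.getToolChest(query))
      .create(cachingClusteredClient.getQueryRunnerForIntervals(query, query.getIntervals()))
      .applyPreMergeDecoration()
      .mergeResults()
      .applyPostMergeDecoration();
  return runner.run(QueryPlus.wrap(query), ResponseContext.createEmpty()).toList();
}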
Use of org.apache.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.
The class UriCacheGeneratorTest, method getParameters.
@Parameterized.Parameters(name = "{0}")
public static Iterable<Object[]> getParameters()
{
  final List<Object[]> compressionParams = ImmutableList.of(
      new Object[] {
          ".dat",
          new Function<File, OutputStream>() {
            @Nullable
            @Override
            public OutputStream apply(@Nullable File outFile)
            {
              try {
                return new FileOutputStream(outFile);
              }
              catch (IOException ex) {
                throw new RuntimeException(ex);
              }
            }
          }
      },
      new Object[] {
          ".gz",
          new Function<File, OutputStream>() {
            @Nullable
            @Override
            public OutputStream apply(@Nullable File outFile)
            {
              try {
                final FileOutputStream fos = new FileOutputStream(outFile);
                return new GZIPOutputStream(fos) {
                  @Override
                  public void close() throws IOException
                  {
                    try {
                      super.close();
                    }
                    finally {
                      fos.close();
                    }
                  }
                };
              }
              catch (IOException ex) {
                throw new RuntimeException(ex);
              }
            }
          }
      }
  );
  final List<Function<Lifecycle, NamespaceExtractionCacheManager>> cacheManagerCreators = ImmutableList.of(
      new Function<Lifecycle, NamespaceExtractionCacheManager>() {
        @Override
        public NamespaceExtractionCacheManager apply(Lifecycle lifecycle)
        {
          return new OnHeapNamespaceExtractionCacheManager(lifecycle, new NoopServiceEmitter(), new NamespaceExtractionConfig());
        }
      },
      new Function<Lifecycle, NamespaceExtractionCacheManager>() {
        @Override
        public NamespaceExtractionCacheManager apply(Lifecycle lifecycle)
        {
          return new OffHeapNamespaceExtractionCacheManager(lifecycle, new NoopServiceEmitter(), new NamespaceExtractionConfig());
        }
      }
  );
  return new Iterable<Object[]>() {
    @Override
    public Iterator<Object[]> iterator()
    {
      return new Iterator<Object[]>() {
        Iterator<Object[]> compressionIt = compressionParams.iterator();
        Iterator<Function<Lifecycle, NamespaceExtractionCacheManager>> cacheManagerCreatorsIt = cacheManagerCreators.iterator();
        Object[] compressions = compressionIt.next();

        @Override
        public boolean hasNext()
        {
          return compressionIt.hasNext() || cacheManagerCreatorsIt.hasNext();
        }

        @Override
        public Object[] next()
        {
          if (cacheManagerCreatorsIt.hasNext()) {
            Function<Lifecycle, NamespaceExtractionCacheManager> cacheManagerCreator = cacheManagerCreatorsIt.next();
            return new Object[] {compressions[0], compressions[1], cacheManagerCreator};
          } else {
            cacheManagerCreatorsIt = cacheManagerCreators.iterator();
            compressions = compressionIt.next();
            return next();
          }
        }

        @Override
        public void remove()
        {
          throw new UOE("Cannot remove");
        }
      };
    }
  };
}
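The hand-rolled lazy iterator above simply walks the Cartesian product of compression parameters and cache-manager creators, pairing each of .dat and .gz with both the on-heap and off-heap managers. An equivalent eager construction for the method's return value, shown only to make the pairing explicit (assumes java.util.ArrayList is imported):

// Equivalent to the lazy iterator: compression params crossed with cache managers.
final List<Object[]> params = new ArrayList<>();
for (Object[] compression : compressionParams) {
  for (Function<Lifecycle, NamespaceExtractionCacheManager> creator : cacheManagerCreators) {
    params.add(new Object[] {compression[0], compression[1], creator});
  }
}
return params;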
Use of org.apache.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.
The class StaticMapCacheGeneratorTest, method setup.
@Before
public void setup() throws Exception
{
  lifecycle = new Lifecycle();
  lifecycle.start();
  NoopServiceEmitter noopServiceEmitter = new NoopServiceEmitter();
  scheduler = new CacheScheduler(
      noopServiceEmitter,
      Collections.emptyMap(),
      new OnHeapNamespaceExtractionCacheManager(lifecycle, noopServiceEmitter, new NamespaceExtractionConfig())
  );
}
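This test, like NamespacedExtractorModuleTest above, starts a Lifecycle in setup, so each presumably pairs it with a teardown that is not shown on this page. A minimal sketch of what that would look like (assumed, not taken from the source):

@After
public void tearDown()
{
  // Stopping the lifecycle shuts down the cache manager the scheduler was built on.
  lifecycle.stop();
}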