Use of io.druid.client.cache.CacheConfig in project druid by druid-io.
The class ServerManagerTest, method setUp():
@Before
public void setUp() throws IOException {
  EmittingLogger.registerEmitter(new NoopServiceEmitter());
  queryWaitLatch = new CountDownLatch(1);
  queryWaitYieldLatch = new CountDownLatch(1);
  queryNotifyLatch = new CountDownLatch(1);
  factory = new MyQueryRunnerFactory(queryWaitLatch, queryWaitYieldLatch, queryNotifyLatch);
  serverManagerExec = Executors.newFixedThreadPool(2);
  serverManager = new ServerManager(
      new SegmentLoader() {
        @Override
        public boolean isSegmentLoaded(DataSegment segment) throws SegmentLoadingException {
          return false;
        }

        @Override
        public Segment getSegment(final DataSegment segment) {
          return new SegmentForTesting(
              MapUtils.getString(segment.getLoadSpec(), "version"),
              (Interval) segment.getLoadSpec().get("interval")
          );
        }

        @Override
        public File getSegmentFiles(DataSegment segment) throws SegmentLoadingException {
          throw new UnsupportedOperationException();
        }

        @Override
        public void cleanup(DataSegment segment) throws SegmentLoadingException {
        }
      },
      new QueryRunnerFactoryConglomerate() {
        @Override
        public <T, QueryType extends Query<T>> QueryRunnerFactory<T, QueryType> findFactory(QueryType query) {
          return (QueryRunnerFactory) factory;
        }
      },
      new NoopServiceEmitter(),
      serverManagerExec,
      MoreExecutors.sameThreadExecutor(),
      new DefaultObjectMapper(),
      new LocalCacheProvider().get(),
      new CacheConfig()
  );
  loadQueryable("test", "1", new Interval("P1d/2011-04-01"));
  loadQueryable("test", "1", new Interval("P1d/2011-04-02"));
  loadQueryable("test", "2", new Interval("P1d/2011-04-02"));
  loadQueryable("test", "1", new Interval("P1d/2011-04-03"));
  loadQueryable("test", "1", new Interval("P1d/2011-04-04"));
  loadQueryable("test", "1", new Interval("P1d/2011-04-05"));
  loadQueryable("test", "2", new Interval("PT1h/2011-04-04T01"));
  loadQueryable("test", "2", new Interval("PT1h/2011-04-04T02"));
  loadQueryable("test", "2", new Interval("PT1h/2011-04-04T03"));
  loadQueryable("test", "2", new Interval("PT1h/2011-04-04T05"));
  loadQueryable("test", "2", new Interval("PT1h/2011-04-04T06"));
  loadQueryable("test2", "1", new Interval("P1d/2011-04-01"));
  loadQueryable("test2", "1", new Interval("P1d/2011-04-02"));
}
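This test passes a plain new CacheConfig(), so the ServerManager runs with default caching behavior. If a test needed caching turned on, a minimal sketch would override the same two flags that the CachingQueryRunnerTest snippets below override (this mirrors the pattern used elsewhere on this page; it is not the ServerManagerTest code itself):

// Hedged sketch: enable caching by overriding the two flags these tests rely on.
CacheConfig cachingEnabled = new CacheConfig() {
  @Override
  public boolean isPopulateCache() {
    return true; // write query results into the cache
  }

  @Override
  public boolean isUseCache() {
    return true; // serve query results from the cache when present
  }
};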
Use of io.druid.client.cache.CacheConfig in project druid by druid-io.
The class KafkaIndexTaskTest, method makeToolboxFactory():
private void makeToolboxFactory() throws IOException {
  directory = tempFolder.newFolder();
  final TestUtils testUtils = new TestUtils();
  final ObjectMapper objectMapper = testUtils.getTestObjectMapper();
  for (Module module : new KafkaIndexTaskModule().getJacksonModules()) {
    objectMapper.registerModule(module);
  }
  final TaskConfig taskConfig = new TaskConfig(
      new File(directory, "taskBaseDir").getPath(), null, null, 50000, null, false, null, null
  );
  final TestDerbyConnector derbyConnector = derby.getConnector();
  derbyConnector.createDataSourceTable();
  derbyConnector.createPendingSegmentsTable();
  derbyConnector.createSegmentTable();
  derbyConnector.createRulesTable();
  derbyConnector.createConfigTable();
  derbyConnector.createTaskTables();
  derbyConnector.createAuditTable();
  taskStorage = new MetadataTaskStorage(
      derbyConnector,
      new TaskStorageConfig(null),
      new SQLMetadataStorageActionHandlerFactory(
          derbyConnector, derby.metadataTablesConfigSupplier().get(), objectMapper
      )
  );
  metadataStorageCoordinator = new IndexerSQLMetadataStorageCoordinator(
      testUtils.getTestObjectMapper(), derby.metadataTablesConfigSupplier().get(), derbyConnector
  );
  taskLockbox = new TaskLockbox(taskStorage);
  final TaskActionToolbox taskActionToolbox = new TaskActionToolbox(
      taskLockbox, metadataStorageCoordinator, emitter, new SupervisorManager(null)
  );
  final TaskActionClientFactory taskActionClientFactory = new LocalTaskActionClientFactory(
      taskStorage, taskActionToolbox
  );
  final SegmentHandoffNotifierFactory handoffNotifierFactory = new SegmentHandoffNotifierFactory() {
    @Override
    public SegmentHandoffNotifier createSegmentHandoffNotifier(String dataSource) {
      return new SegmentHandoffNotifier() {
        @Override
        public boolean registerSegmentHandoffCallback(SegmentDescriptor descriptor, Executor exec, Runnable handOffRunnable) {
          if (doHandoff) {
            // Simulate immediate handoff
            exec.execute(handOffRunnable);
          }
          return true;
        }

        @Override
        public void start() {
          // Noop
        }

        @Override
        public void close() {
          // Noop
        }
      };
    }
  };
  final LocalDataSegmentPusherConfig dataSegmentPusherConfig = new LocalDataSegmentPusherConfig();
  dataSegmentPusherConfig.storageDirectory = getSegmentDirectory();
  final DataSegmentPusher dataSegmentPusher = new LocalDataSegmentPusher(dataSegmentPusherConfig, objectMapper);
  toolboxFactory = new TaskToolboxFactory(
      taskConfig,
      taskActionClientFactory,
      emitter,
      dataSegmentPusher,
      new TestDataSegmentKiller(),
      null, // DataSegmentMover
      null, // DataSegmentArchiver
      new TestDataSegmentAnnouncer(),
      handoffNotifierFactory,
      makeTimeseriesOnlyConglomerate(),
      MoreExecutors.sameThreadExecutor(), // queryExecutorService
      EasyMock.createMock(MonitorScheduler.class),
      new SegmentLoaderFactory(
          new SegmentLoaderLocalCacheManager(
              null,
              new SegmentLoaderConfig() {
                @Override
                public List<StorageLocationConfig> getLocations() {
                  return Lists.newArrayList();
                }
              },
              testUtils.getTestObjectMapper()
          )
      ),
      testUtils.getTestObjectMapper(),
      testUtils.getTestIndexMerger(),
      testUtils.getTestIndexIO(),
      MapCache.create(1024),
      new CacheConfig(),
      testUtils.getTestIndexMergerV9()
  );
}
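The notifier factory above simulates immediate handoff: when doHandoff is set, the callback runs on the supplied executor as soon as it is registered. A minimal sketch of exercising that behavior, using only types and calls that appear in this snippet (the data source name and segment descriptor are illustrative):

// Hedged sketch: with doHandoff = true, registration fires the callback right away.
SegmentHandoffNotifier notifier = handoffNotifierFactory.createSegmentHandoffNotifier("test");
notifier.registerSegmentHandoffCallback(
    new SegmentDescriptor(new Interval("2011/2012"), "version", 0),
    MoreExecutors.sameThreadExecutor(), // run the callback on the calling thread
    new Runnable() {
      @Override
      public void run() {
        // reached immediately, because the notifier executes the runnable on registration
      }
    }
);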
Use of io.druid.client.cache.CacheConfig in project druid by druid-io.
The class CachingQueryRunnerTest, method testCloseAndPopulate():
private void testCloseAndPopulate(
    List<Result> expectedRes,
    List<Result> expectedCacheRes,
    Query query,
    QueryToolChest toolchest
) throws Exception {
  final AssertingClosable closable = new AssertingClosable();
  final Sequence resultSeq = Sequences.wrap(
      Sequences.simple(expectedRes),
      new SequenceWrapper() {
        @Override
        public void before() {
          Assert.assertFalse(closable.isClosed());
        }

        @Override
        public void after(boolean isDone, Throwable thrown) throws Exception {
          closable.close();
        }
      }
  );
  final CountDownLatch cacheMustBePutOnce = new CountDownLatch(1);
  Cache cache = new Cache() {
    private final Map<NamedKey, byte[]> baseMap = new ConcurrentHashMap<>();

    @Override
    public byte[] get(NamedKey key) {
      return baseMap.get(key);
    }

    @Override
    public void put(NamedKey key, byte[] value) {
      baseMap.put(key, value);
      cacheMustBePutOnce.countDown();
    }

    @Override
    public Map<NamedKey, byte[]> getBulk(Iterable<NamedKey> keys) {
      return null;
    }

    @Override
    public void close(String namespace) {
    }

    @Override
    public CacheStats getStats() {
      return null;
    }

    @Override
    public boolean isLocal() {
      return true;
    }

    @Override
    public void doMonitor(ServiceEmitter emitter) {
    }
  };
  String segmentIdentifier = "segment";
  SegmentDescriptor segmentDescriptor = new SegmentDescriptor(new Interval("2011/2012"), "version", 0);
  DefaultObjectMapper objectMapper = new DefaultObjectMapper();
  CachingQueryRunner runner = new CachingQueryRunner(
      segmentIdentifier,
      segmentDescriptor,
      objectMapper,
      cache,
      toolchest,
      new QueryRunner() {
        @Override
        public Sequence run(Query query, Map responseContext) {
          return resultSeq;
        }
      },
      backgroundExecutorService,
      new CacheConfig() {
        @Override
        public boolean isPopulateCache() {
          return true;
        }

        @Override
        public boolean isUseCache() {
          return true;
        }
      }
  );
  CacheStrategy cacheStrategy = toolchest.getCacheStrategy(query);
  Cache.NamedKey cacheKey = CacheUtil.computeSegmentCacheKey(
      segmentIdentifier, segmentDescriptor, cacheStrategy.computeCacheKey(query)
  );
  HashMap<String, Object> context = new HashMap<String, Object>();
  Sequence res = runner.run(query, context);
  // The base sequence is not closed yet.
  Assert.assertFalse("sequence must not be closed", closable.isClosed());
  Assert.assertNull("cache must be empty", cache.get(cacheKey));
  ArrayList results = Sequences.toList(res, new ArrayList());
  Assert.assertTrue(closable.isClosed());
  Assert.assertEquals(expectedRes.toString(), results.toString());
  // Wait for background caching to finish; give up after 10 seconds so a hang
  // cannot block the overall test run.
  Assert.assertTrue("cache must be populated", cacheMustBePutOnce.await(10, TimeUnit.SECONDS));
  byte[] cacheValue = cache.get(cacheKey);
  Assert.assertNotNull(cacheValue);
  Function<Object, Result> fn = cacheStrategy.pullFromCache();
  List<Result> cacheResults = Lists.newArrayList(
      Iterators.transform(
          objectMapper.readValues(
              objectMapper.getFactory().createParser(cacheValue),
              cacheStrategy.getCacheObjectClazz()
          ),
          fn
      )
  );
  Assert.assertEquals(expectedCacheRes.toString(), cacheResults.toString());
}
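The per-segment cache key combines the segment identifier, the segment descriptor, and the query fingerprint produced by the tool chest's cache strategy. A short sketch of the key derivation, using the same calls as the test above (the identifier and version are illustrative):

// Hedged sketch: derive a segment cache key and probe the cache for it.
CacheStrategy strategy = toolchest.getCacheStrategy(query);
Cache.NamedKey key = CacheUtil.computeSegmentCacheKey(
    "mySegment",                                               // segment identifier (illustrative)
    new SegmentDescriptor(new Interval("2011/2012"), "v1", 0), // interval, version, partition
    strategy.computeCacheKey(query)                            // query fingerprint
);
byte[] cached = cache.get(key); // null until the background populate has run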
Use of io.druid.client.cache.CacheConfig in project druid by druid-io.
The class CachingQueryRunnerTest, method testUseCache():
private void testUseCache(List<Result> expectedResults, Query query, QueryToolChest toolchest) throws Exception {
  DefaultObjectMapper objectMapper = new DefaultObjectMapper();
  String segmentIdentifier = "segment";
  SegmentDescriptor segmentDescriptor = new SegmentDescriptor(new Interval("2011/2012"), "version", 0);
  CacheStrategy cacheStrategy = toolchest.getCacheStrategy(query);
  Cache.NamedKey cacheKey = CacheUtil.computeSegmentCacheKey(
      segmentIdentifier, segmentDescriptor, cacheStrategy.computeCacheKey(query)
  );
  Cache cache = MapCache.create(1024 * 1024);
  CacheUtil.populate(
      cache, objectMapper, cacheKey, Iterables.transform(expectedResults, cacheStrategy.prepareForCache())
  );
  CachingQueryRunner runner = new CachingQueryRunner(
      segmentIdentifier,
      segmentDescriptor,
      objectMapper,
      cache,
      toolchest,
      // Return an empty sequence, since results should get pulled from cache.
      new QueryRunner() {
        @Override
        public Sequence run(Query query, Map responseContext) {
          return Sequences.empty();
        }
      },
      backgroundExecutorService,
      new CacheConfig() {
        @Override
        public boolean isPopulateCache() {
          return true;
        }

        @Override
        public boolean isUseCache() {
          return true;
        }
      }
  );
  HashMap<String, Object> context = new HashMap<String, Object>();
  List<Result> results = Sequences.toList(runner.run(query, context), new ArrayList());
  Assert.assertEquals(expectedResults.toString(), results.toString());
}
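Because the wrapped QueryRunner returns an empty sequence, any results that come back must have been read from the cache rather than computed. One way to sanity-check the pre-seeding step before running the query, sketched with only the calls already used in this test:

// Hedged sketch: confirm the entry landed in the cache before querying.
CacheUtil.populate(cache, objectMapper, cacheKey, Iterables.transform(expectedResults, cacheStrategy.prepareForCache()));
Assert.assertNotNull("cache should be pre-seeded", cache.get(cacheKey));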
Use of io.druid.client.cache.CacheConfig in project druid by druid-io.
The class RealtimeIndexTaskTest, method makeToolbox():
private TaskToolbox makeToolbox(
    final Task task,
    final TaskStorage taskStorage,
    final IndexerMetadataStorageCoordinator mdc,
    final File directory
) {
  final TaskConfig taskConfig = new TaskConfig(directory.getPath(), null, null, 50000, null, false, null, null);
  final TaskLockbox taskLockbox = new TaskLockbox(taskStorage);
  try {
    taskStorage.insert(task, TaskStatus.running(task.getId()));
  } catch (EntryExistsException e) {
    // suppress
  }
  taskLockbox.syncFromStorage();
  final TaskActionToolbox taskActionToolbox = new TaskActionToolbox(
      taskLockbox, mdc, emitter, EasyMock.createMock(SupervisorManager.class)
  );
  final TaskActionClientFactory taskActionClientFactory = new LocalTaskActionClientFactory(
      taskStorage, taskActionToolbox
  );
  final QueryRunnerFactoryConglomerate conglomerate = new DefaultQueryRunnerFactoryConglomerate(
      ImmutableMap.<Class<? extends Query>, QueryRunnerFactory>of(
          TimeseriesQuery.class,
          new TimeseriesQueryRunnerFactory(
              new TimeseriesQueryQueryToolChest(
                  new IntervalChunkingQueryRunnerDecorator(null, null, null) {
                    @Override
                    public <T> QueryRunner<T> decorate(QueryRunner<T> delegate, QueryToolChest<T, ? extends Query<T>> toolChest) {
                      return delegate;
                    }
                  }
              ),
              new TimeseriesQueryEngine(),
              new QueryWatcher() {
                @Override
                public void registerQuery(Query query, ListenableFuture future) {
                  // do nothing
                }
              }
          )
      )
  );
  handOffCallbacks = Maps.newConcurrentMap();
  final SegmentHandoffNotifierFactory handoffNotifierFactory = new SegmentHandoffNotifierFactory() {
    @Override
    public SegmentHandoffNotifier createSegmentHandoffNotifier(String dataSource) {
      return new SegmentHandoffNotifier() {
        @Override
        public boolean registerSegmentHandoffCallback(SegmentDescriptor descriptor, Executor exec, Runnable handOffRunnable) {
          handOffCallbacks.put(descriptor, new Pair<>(exec, handOffRunnable));
          return true;
        }

        @Override
        public void start() {
          // Noop
        }

        @Override
        public void close() {
          // Noop
        }

        Map<SegmentDescriptor, Pair<Executor, Runnable>> getHandOffCallbacks() {
          return handOffCallbacks;
        }
      };
    }
  };
  final TestUtils testUtils = new TestUtils();
  final TaskToolboxFactory toolboxFactory = new TaskToolboxFactory(
      taskConfig,
      taskActionClientFactory,
      emitter,
      new TestDataSegmentPusher(),
      new TestDataSegmentKiller(),
      null, // DataSegmentMover
      null, // DataSegmentArchiver
      new TestDataSegmentAnnouncer(),
      handoffNotifierFactory,
      conglomerate,
      MoreExecutors.sameThreadExecutor(), // queryExecutorService
      EasyMock.createMock(MonitorScheduler.class),
      new SegmentLoaderFactory(
          new SegmentLoaderLocalCacheManager(
              null,
              new SegmentLoaderConfig() {
                @Override
                public List<StorageLocationConfig> getLocations() {
                  return Lists.newArrayList();
                }
              },
              testUtils.getTestObjectMapper()
          )
      ),
      testUtils.getTestObjectMapper(),
      testUtils.getTestIndexMerger(),
      testUtils.getTestIndexIO(),
      MapCache.create(1024),
      new CacheConfig(),
      testUtils.getTestIndexMergerV9()
  );
  return toolboxFactory.build(task);
}
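Unlike the Kafka test above, this notifier records each callback in handOffCallbacks instead of running it, so the test controls exactly when handoff "happens". A hedged sketch of how a test could later simulate handoff by draining that map (this assumes druid's Pair exposes its elements as public lhs/rhs fields):

// Hedged sketch: fire every registered handoff callback on its recorded executor.
for (Pair<Executor, Runnable> callback : handOffCallbacks.values()) {
  callback.lhs.execute(callback.rhs); // lhs = the executor, rhs = the handoff runnable
}
handOffCallbacks.clear();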