Use of io.druid.client.cache.LocalCacheProvider in project druid by druid-io.
From the class ServerManagerTest, method setUp:
@Before
public void setUp() throws IOException
{
  EmittingLogger.registerEmitter(new NoopServiceEmitter());

  // Latches used to coordinate the test with MyQueryRunnerFactory.
  queryWaitLatch = new CountDownLatch(1);
  queryWaitYieldLatch = new CountDownLatch(1);
  queryNotifyLatch = new CountDownLatch(1);
  factory = new MyQueryRunnerFactory(queryWaitLatch, queryWaitYieldLatch, queryNotifyLatch);
  serverManagerExec = Executors.newFixedThreadPool(2);

  serverManager = new ServerManager(
      // Stub SegmentLoader that builds SegmentForTesting instances from the load spec.
      new SegmentLoader()
      {
        @Override
        public boolean isSegmentLoaded(DataSegment segment) throws SegmentLoadingException
        {
          return false;
        }

        @Override
        public Segment getSegment(final DataSegment segment)
        {
          return new SegmentForTesting(
              MapUtils.getString(segment.getLoadSpec(), "version"),
              (Interval) segment.getLoadSpec().get("interval")
          );
        }

        @Override
        public File getSegmentFiles(DataSegment segment) throws SegmentLoadingException
        {
          throw new UnsupportedOperationException();
        }

        @Override
        public void cleanup(DataSegment segment) throws SegmentLoadingException
        {
        }
      },
      // Conglomerate that always resolves to the latch-driven test factory.
      new QueryRunnerFactoryConglomerate()
      {
        @Override
        public <T, QueryType extends Query<T>> QueryRunnerFactory<T, QueryType> findFactory(QueryType query)
        {
          return (QueryRunnerFactory) factory;
        }
      },
      new NoopServiceEmitter(),
      serverManagerExec,
      MoreExecutors.sameThreadExecutor(),
      new DefaultObjectMapper(),
      new LocalCacheProvider().get(),
      new CacheConfig()
  );

  loadQueryable("test", "1", new Interval("P1d/2011-04-01"));
  loadQueryable("test", "1", new Interval("P1d/2011-04-02"));
  loadQueryable("test", "2", new Interval("P1d/2011-04-02"));
  loadQueryable("test", "1", new Interval("P1d/2011-04-03"));
  loadQueryable("test", "1", new Interval("P1d/2011-04-04"));
  loadQueryable("test", "1", new Interval("P1d/2011-04-05"));
  loadQueryable("test", "2", new Interval("PT1h/2011-04-04T01"));
  loadQueryable("test", "2", new Interval("PT1h/2011-04-04T02"));
  loadQueryable("test", "2", new Interval("PT1h/2011-04-04T03"));
  loadQueryable("test", "2", new Interval("PT1h/2011-04-04T05"));
  loadQueryable("test", "2", new Interval("PT1h/2011-04-04T06"));
  loadQueryable("test2", "1", new Interval("P1d/2011-04-01"));
  loadQueryable("test2", "1", new Interval("P1d/2011-04-02"));
}
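Both test setups build their ServerManager with new LocalCacheProvider().get(), which yields an in-heap Cache backed by a byte-counting LRU map. Below is a minimal sketch of exercising such a cache directly, assuming the NamedKey-based put/get API of io.druid.client.cache.Cache; the namespace and payloads are illustrative, and a default-configured provider has a sizeInBytes of 0, so lookups will typically miss.

import io.druid.client.cache.Cache;
import io.druid.client.cache.LocalCacheProvider;

import java.nio.charset.StandardCharsets;

public class LocalCacheSketch
{
  public static void main(String[] args)
  {
    // Default-configured provider, as in the tests above. sizeInBytes defaults
    // to 0, so this cache effectively stores nothing; the tests only need a
    // Cache instance to satisfy the ServerManager constructor.
    Cache cache = new LocalCacheProvider().get();

    // Keys are namespaced byte arrays; values are opaque byte arrays.
    Cache.NamedKey key = new Cache.NamedKey("test", "segment-key".getBytes(StandardCharsets.UTF_8));
    cache.put(key, "cached-result".getBytes(StandardCharsets.UTF_8));

    byte[] value = cache.get(key); // null on a miss
    System.out.println(value == null ? "miss" : new String(value, StandardCharsets.UTF_8));
  }
}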
Use of io.druid.client.cache.LocalCacheProvider in project druid by druid-io.
From the class ZkCoordinatorTest, method setUp:
@Before
public void setUp() throws Exception
{
  setupServerAndCurator();
  curator.start();
  curator.blockUntilConnected();

  try {
    infoDir = new File(File.createTempFile("blah", "blah2").getParent(), "ZkCoordinatorTest");
    infoDir.mkdirs();
    for (File file : infoDir.listFiles()) {
      file.delete();
    }
    log.info("Creating tmp test files in [%s]", infoDir);
  }
  catch (IOException e) {
    throw new RuntimeException(e);
  }

  scheduledRunnable = Lists.newArrayList();
  segmentLoader = new CacheTestSegmentLoader();
  serverManager = new ServerManager(
      segmentLoader,
      new NoopQueryRunnerFactoryConglomerate(),
      new NoopServiceEmitter(),
      MoreExecutors.sameThreadExecutor(),
      MoreExecutors.sameThreadExecutor(),
      new DefaultObjectMapper(),
      new LocalCacheProvider().get(),
      new CacheConfig()
  );

  final ZkPathsConfig zkPaths = new ZkPathsConfig()
  {
    @Override
    public String getBase()
    {
      return "/druid";
    }
  };

  segmentsAnnouncedByMe = new ConcurrentSkipListSet<>();
  announceCount = new AtomicInteger(0);

  announcer = new DataSegmentAnnouncer()
  {
    private final DataSegmentAnnouncer delegate = new BatchDataSegmentAnnouncer(
        me,
        new BatchDataSegmentAnnouncerConfig(),
        zkPaths,
        new Announcer(curator, Execs.singleThreaded("blah")),
        jsonMapper
    );

    @Override
    public void announceSegment(DataSegment segment) throws IOException
    {
      segmentsAnnouncedByMe.add(segment);
      announceCount.incrementAndGet();
      delegate.announceSegment(segment);
    }

    @Override
    public void unannounceSegment(DataSegment segment) throws IOException
    {
      segmentsAnnouncedByMe.remove(segment);
      announceCount.decrementAndGet();
      delegate.unannounceSegment(segment);
    }

    @Override
    public void announceSegments(Iterable<DataSegment> segments) throws IOException
    {
      for (DataSegment segment : segments) {
        segmentsAnnouncedByMe.add(segment);
      }
      announceCount.addAndGet(Iterables.size(segments));
      delegate.announceSegments(segments);
    }

    @Override
    public void unannounceSegments(Iterable<DataSegment> segments) throws IOException
    {
      for (DataSegment segment : segments) {
        segmentsAnnouncedByMe.remove(segment);
      }
      announceCount.addAndGet(-Iterables.size(segments));
      delegate.unannounceSegments(segments);
    }

    @Override
    public boolean isAnnounced(DataSegment segment)
    {
      return segmentsAnnouncedByMe.contains(segment);
    }
  };
  zkCoordinator = new ZkCoordinator(
      jsonMapper,
      new SegmentLoaderConfig()
      {
        @Override
        public File getInfoDir()
        {
          return infoDir;
        }

        @Override
        public int getNumLoadingThreads()
        {
          return 5;
        }

        @Override
        public int getAnnounceIntervalMillis()
        {
          return 50;
        }

        @Override
        public int getDropSegmentDelayMillis()
        {
          return 0;
        }
      },
      zkPaths,
      me,
      announcer,
      curator,
      serverManager,
      new ScheduledExecutorFactory()
      {
        @Override
        public ScheduledExecutorService create(int corePoolSize, String nameFormat)
        {
          /*
             Override normal behavior by adding the runnable to a list so that you can make sure
             all the scheduled runnables are executed by explicitly calling run() on each item in the list.
           */
          return new ScheduledThreadPoolExecutor(
              corePoolSize,
              new ThreadFactoryBuilder().setDaemon(true).setNameFormat(nameFormat).build()
          )
          {
            @Override
            public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit)
            {
              scheduledRunnable.add(command);
              return null;
            }
          };
        }
      }
  );
}
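The overridden schedule() above intentionally never hands anything to a real timer: it records each Runnable in scheduledRunnable so the test can fire them deterministically. A test body would then drain that list roughly as follows (a sketch grounded in the comment above; the exact assertions depend on the individual test):

// Execute everything the coordinator tried to schedule, without waiting
// on real timer threads, then verify the resulting announcements.
for (Runnable runnable : scheduledRunnable) {
  runnable.run();
}
scheduledRunnable.clear();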