Use of io.druid.query.NoopQueryRunnerFactoryConglomerate in project druid by druid-io.
The setUp method of the ZkCoordinatorTest class.
@Before
public void setUp() throws Exception {
  setupServerAndCurator();
  curator.start();
  curator.blockUntilConnected();
  try {
    infoDir = new File(File.createTempFile("blah", "blah2").getParent(), "ZkCoordinatorTest");
    infoDir.mkdirs();
    for (File file : infoDir.listFiles()) {
      file.delete();
    }
    log.info("Creating tmp test files in [%s]", infoDir);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  scheduledRunnable = Lists.newArrayList();
  segmentLoader = new CacheTestSegmentLoader();
  // A no-op query runner factory conglomerate is sufficient here: the test exercises
  // segment loading and announcement, not query execution.
  serverManager = new ServerManager(
      segmentLoader,
      new NoopQueryRunnerFactoryConglomerate(),
      new NoopServiceEmitter(),
      MoreExecutors.sameThreadExecutor(),
      MoreExecutors.sameThreadExecutor(),
      new DefaultObjectMapper(),
      new LocalCacheProvider().get(),
      new CacheConfig()
  );
  final ZkPathsConfig zkPaths = new ZkPathsConfig() {
    @Override
    public String getBase() {
      return "/druid";
    }
  };
  segmentsAnnouncedByMe = new ConcurrentSkipListSet<>();
  announceCount = new AtomicInteger(0);
  // Wrap a real BatchDataSegmentAnnouncer so the test can track which segments are
  // currently announced and how many announce/unannounce calls have been made.
  announcer = new DataSegmentAnnouncer() {
    private final DataSegmentAnnouncer delegate = new BatchDataSegmentAnnouncer(
        me,
        new BatchDataSegmentAnnouncerConfig(),
        zkPaths,
        new Announcer(curator, Execs.singleThreaded("blah")),
        jsonMapper
    );

    @Override
    public void announceSegment(DataSegment segment) throws IOException {
      segmentsAnnouncedByMe.add(segment);
      announceCount.incrementAndGet();
      delegate.announceSegment(segment);
    }

    @Override
    public void unannounceSegment(DataSegment segment) throws IOException {
      segmentsAnnouncedByMe.remove(segment);
      announceCount.decrementAndGet();
      delegate.unannounceSegment(segment);
    }

    @Override
    public void announceSegments(Iterable<DataSegment> segments) throws IOException {
      for (DataSegment segment : segments) {
        segmentsAnnouncedByMe.add(segment);
      }
      announceCount.addAndGet(Iterables.size(segments));
      delegate.announceSegments(segments);
    }

    @Override
    public void unannounceSegments(Iterable<DataSegment> segments) throws IOException {
      for (DataSegment segment : segments) {
        segmentsAnnouncedByMe.remove(segment);
      }
      announceCount.addAndGet(-Iterables.size(segments));
      delegate.unannounceSegments(segments);
    }

    @Override
    public boolean isAnnounced(DataSegment segment) {
      return segmentsAnnouncedByMe.contains(segment);
    }
  };
  zkCoordinator = new ZkCoordinator(
      jsonMapper,
      new SegmentLoaderConfig() {
        @Override
        public File getInfoDir() {
          return infoDir;
        }

        @Override
        public int getNumLoadingThreads() {
          return 5;
        }

        @Override
        public int getAnnounceIntervalMillis() {
          return 50;
        }

        @Override
        public int getDropSegmentDelayMillis() {
          return 0;
        }
      },
      zkPaths,
      me,
      announcer,
      curator,
      serverManager,
      new ScheduledExecutorFactory() {
        @Override
        public ScheduledExecutorService create(int corePoolSize, String nameFormat) {
          /*
            Override the normal behavior by adding the runnable to a list so that you can make sure
            all the scheduled runnables are executed by explicitly calling run() on each item in the list.
          */
          return new ScheduledThreadPoolExecutor(
              corePoolSize,
              new ThreadFactoryBuilder().setDaemon(true).setNameFormat(nameFormat).build()
          ) {
            @Override
            public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
              scheduledRunnable.add(command);
              return null;
            }
          };
        }
      }
  );
}
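The executor factory above never hands work to a real scheduler; each Runnable is only captured in scheduledRunnable. A test body can then make the deferred announce/drop work happen deterministically by running the captured tasks itself. A minimal sketch, assuming the scheduledRunnable field from the setUp above (the helper name drainScheduledRunnables is illustrative and not part of the original test class):

private void drainScheduledRunnables() {
  // Execute everything the stubbed ScheduledExecutorService collected instead of scheduling,
  // then clear the list so later assertions start from a clean state.
  for (Runnable runnable : scheduledRunnable) {
    runnable.run();
  }
  scheduledRunnable.clear();
}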