Example usage of org.apache.druid.guice.ServerTypeConfig in the druid-io/druid project, taken from the setup method of the SegmentLoadDropHandlerCacheTest class.
@Before
public void setup() throws IOException {
  // Register a no-op emitter first so alerting calls in the code under test are safe.
  EmittingLogger.registerEmitter(new NoopServiceEmitter());

  storageLoc = new TestStorageLocation(temporaryFolder);

  // A single storage location capped at MAX_SIZE, with the info dir under the same root.
  final SegmentLoaderConfig loaderConfig = new SegmentLoaderConfig()
      .withLocations(Collections.singletonList(storageLoc.toStorageLocationConfig(MAX_SIZE, null)))
      .withInfoDir(storageLoc.getInfoDir());

  // The mapper must know the test load-spec and segmentizer subtypes to round-trip test segments.
  objectMapper = TestHelper.makeJsonMapper();
  objectMapper.registerSubtypes(TestLoadSpec.class, TestSegmentizerFactory.class);

  final SegmentCacheManager localCacheManager = new SegmentLocalCacheManager(loaderConfig, objectMapper);
  final SegmentManager manager = new SegmentManager(
      new SegmentLocalCacheLoader(localCacheManager, TestIndex.INDEX_IO, objectMapper)
  );

  segmentAnnouncer = Mockito.mock(DataSegmentAnnouncer.class);
  loadDropHandler = new SegmentLoadDropHandler(
      objectMapper,
      loaderConfig,
      segmentAnnouncer,
      Mockito.mock(DataSegmentServerAnnouncer.class),
      manager,
      localCacheManager,
      new ServerTypeConfig(ServerType.HISTORICAL)
  );
}
Example usage of org.apache.druid.guice.ServerTypeConfig in the druid-io/druid project, taken from the testProcessBatchDuplicateLoadRequestsWhenFirstRequestFailsSecondRequestShouldSucceed method of the SegmentLoadDropHandlerTest class.
@Test(timeout = 60_000L)
public void testProcessBatchDuplicateLoadRequestsWhenFirstRequestFailsSecondRequestShouldSucceed() throws Exception {
  // The first load attempt throws; a duplicate request for the same segment
  // must not be short-circuited by the failure and should succeed on retry.
  final SegmentManager segmentManager = Mockito.mock(SegmentManager.class);
  Mockito.when(segmentManager.loadSegment(ArgumentMatchers.any(), ArgumentMatchers.anyBoolean(), ArgumentMatchers.any()))
         .thenThrow(new RuntimeException("segment loading failure test"))
         .thenReturn(true);

  final SegmentLoadDropHandler handler = new SegmentLoadDropHandler(
      jsonMapper,
      segmentLoaderConfig,
      announcer,
      Mockito.mock(DataSegmentServerAnnouncer.class),
      segmentManager,
      segmentCacheManager,
      scheduledExecutorFactory.create(5, "SegmentLoadDropHandlerTest-[%d]"),
      new ServerTypeConfig(ServerType.HISTORICAL)
  );
  handler.start();

  final DataSegment segment = makeSegment("batchtest1", "1", Intervals.of("P1d/2011-04-01"));
  final List<DataSegmentChangeRequest> requests = ImmutableList.of(new SegmentChangeRequestLoad(segment));

  // First attempt: the mocked loadSegment throws, so the request must report FAILED.
  ListenableFuture<List<DataSegmentChangeRequestAndStatus>> future = handler.processBatch(requests);
  for (Runnable queued : scheduledRunnable) {
    queued.run();
  }
  Assert.assertEquals(STATE.FAILED, future.get().get(0).getStatus().getState());

  // Second, identical attempt: the mock now returns true, so it must report SUCCESS.
  future = handler.processBatch(requests);
  for (Runnable queued : scheduledRunnable) {
    queued.run();
  }
  Assert.assertEquals(STATE.SUCCESS, future.get().get(0).getStatus().getState());

  handler.stop();
}
Example usage of org.apache.druid.guice.ServerTypeConfig in the druid-io/druid project, taken from the testLoadDrop method of the ZkCoordinatorTest class.
@Test(timeout = 60_000L)
public void testLoadDrop() throws Exception {
// Exercises the full ZK-driven segment lifecycle: a load request written to the
// load-queue znode must invoke addSegment (loadLatch), after which the znode is
// removed; a subsequent drop request must invoke removeSegment (dropLatch).
EmittingLogger.registerEmitter(new NoopServiceEmitter());
DataSegment segment = new DataSegment("test", Intervals.of("P1d/2011-04-02"), "v0", ImmutableMap.of("version", "v0", "interval", Intervals.of("P1d/2011-04-02"), "cacheDir", "/no"), Arrays.asList("dim1", "dim2", "dim3"), Arrays.asList("metric1", "metric2"), NoneShardSpec.instance(), IndexIO.CURRENT_VERSION_ID, 123L);
// Latches released by the overridden add/remove callbacks below; awaited to
// confirm that each change request actually reached the handler.
CountDownLatch loadLatch = new CountDownLatch(1);
CountDownLatch dropLatch = new CountDownLatch(1);
// Handler built with an inline config (fast announce interval, zero drop delay)
// and nice mocks for every collaborator; add/remove are overridden so the test
// observes the calls without doing any real segment I/O.
SegmentLoadDropHandler segmentLoadDropHandler = new SegmentLoadDropHandler(ServerTestHelper.MAPPER, new SegmentLoaderConfig() {
@Override
public File getInfoDir() {
return infoDir;
}
@Override
public int getNumLoadingThreads() {
return 5;
}
@Override
public int getAnnounceIntervalMillis() {
// Short interval so the test does not wait long for announcements.
return 50;
}
@Override
public List<StorageLocationConfig> getLocations() {
return locations;
}
@Override
public int getDropSegmentDelayMillis() {
// Drop immediately; the test polls for znode removal right after.
return 0;
}
}, EasyMock.createNiceMock(DataSegmentAnnouncer.class), EasyMock.createNiceMock(DataSegmentServerAnnouncer.class), EasyMock.createNiceMock(SegmentManager.class), EasyMock.createNiceMock(SegmentCacheManager.class), EasyMock.createNiceMock(ScheduledExecutorService.class), new ServerTypeConfig(ServerType.HISTORICAL)) {
@Override
public void addSegment(DataSegment s, DataSegmentChangeCallback callback) {
// Only react to the segment under test; signal the latch, then complete
// the callback so the coordinator can clean up the request znode.
if (segment.getId().equals(s.getId())) {
loadLatch.countDown();
callback.execute();
}
}
@Override
public void removeSegment(DataSegment s, DataSegmentChangeCallback callback) {
if (segment.getId().equals(s.getId())) {
dropLatch.countDown();
callback.execute();
}
}
};
zkCoordinator = new ZkCoordinator(segmentLoadDropHandler, jsonMapper, zkPaths, me, curator, new SegmentLoaderConfig());
zkCoordinator.start();
// Write a LOAD request to this server's load-queue path, wait for the handler
// to see it, then poll until the coordinator deletes the processed znode.
String segmentZkPath = ZKPaths.makePath(zkPaths.getLoadQueuePath(), me.getName(), segment.getId().toString());
curator.create().creatingParentsIfNeeded().withMode(CreateMode.EPHEMERAL).forPath(segmentZkPath, jsonMapper.writeValueAsBytes(new SegmentChangeRequestLoad(segment)));
loadLatch.await();
while (curator.checkExists().forPath(segmentZkPath) != null) {
Thread.sleep(100);
}
// Same sequence for the DROP request.
curator.create().creatingParentsIfNeeded().withMode(CreateMode.EPHEMERAL).forPath(segmentZkPath, jsonMapper.writeValueAsBytes(new SegmentChangeRequestDrop(segment)));
dropLatch.await();
while (curator.checkExists().forPath(segmentZkPath) != null) {
Thread.sleep(100);
}
zkCoordinator.stop();
}
Example usage of org.apache.druid.guice.ServerTypeConfig in the druid-io/druid project, taken from the getModules method of the CliBroker class.
@Override
protected List<? extends Module> getModules() {
// Assembles the Guice modules for the Broker service: query processing,
// segment wrangling/joins, caching, tier/server selection, and the HTTP
// resources the broker exposes. NOTE(review): registration order of
// LifecycleModule entries affects lifecycle start order — preserve it.
return ImmutableList.of(new BrokerProcessingModule(), new QueryableModule(), new QueryRunnerFactoryModule(), new SegmentWranglerModule(), new JoinableFactoryModule(), new BrokerServiceModule(), binder -> {
// Service identity and ports (8082 plaintext, 8282 TLS).
binder.bindConstant().annotatedWith(Names.named("serviceName")).to(TieredBrokerConfig.DEFAULT_BROKER_SERVICE_NAME);
binder.bindConstant().annotatedWith(Names.named("servicePort")).to(8082);
binder.bindConstant().annotatedWith(Names.named("tlsServicePort")).to(8282);
// Brokers do not need load specs; prune them from received segment metadata.
binder.bindConstant().annotatedWith(PruneLoadSpec.class).to(true);
binder.bind(ResponseContextConfig.class).toInstance(ResponseContextConfig.newConfig(false));
// Cluster view: the broker's picture of segment locations across the cluster.
binder.bind(CachingClusteredClient.class).in(LazySingleton.class);
LifecycleModule.register(binder, BrokerServerView.class);
binder.bind(TimelineServerView.class).to(BrokerServerView.class).in(LazySingleton.class);
// Result cache configuration.
JsonConfigProvider.bind(binder, "druid.broker.cache", CacheConfig.class);
binder.install(new CacheModule());
// Tier/server selection and retry strategies for routing queries.
JsonConfigProvider.bind(binder, "druid.broker.select", TierSelectorStrategy.class);
JsonConfigProvider.bind(binder, "druid.broker.select.tier.custom", CustomTierSelectorStrategyConfig.class);
JsonConfigProvider.bind(binder, "druid.broker.balancer", ServerSelectorStrategy.class);
JsonConfigProvider.bind(binder, "druid.broker.retryPolicy", RetryQueryRunnerConfig.class);
JsonConfigProvider.bind(binder, "druid.broker.segment", BrokerSegmentWatcherConfig.class);
JsonConfigProvider.bind(binder, "druid.broker.internal.query.config", BrokerInternalQueryConfig.class);
binder.bind(QuerySegmentWalker.class).to(ClientQuerySegmentWalker.class).in(LazySingleton.class);
// HTTP server and query/info resources.
binder.bind(JettyServerInitializer.class).to(QueryJettyServerInitializer.class).in(LazySingleton.class);
binder.bind(BrokerQueryResource.class).in(LazySingleton.class);
Jerseys.addResource(binder, BrokerQueryResource.class);
binder.bind(QueryCountStatsProvider.class).to(BrokerQueryResource.class).in(LazySingleton.class);
Jerseys.addResource(binder, BrokerResource.class);
Jerseys.addResource(binder, ClientInfoResource.class);
LifecycleModule.register(binder, BrokerQueryResource.class);
Jerseys.addResource(binder, HttpServerInventoryViewResource.class);
LifecycleModule.register(binder, Server.class);
// Segment-serving wiring: this broker is announced with ServerType.BROKER,
// allowing it to participate in segment loading paths (historical resources).
binder.bind(SegmentManager.class).in(LazySingleton.class);
binder.bind(ZkCoordinator.class).in(ManageLifecycle.class);
binder.bind(ServerTypeConfig.class).toInstance(new ServerTypeConfig(ServerType.BROKER));
Jerseys.addResource(binder, HistoricalResource.class);
Jerseys.addResource(binder, SegmentListerResource.class);
// ZkCoordinator only participates in the lifecycle when ZK is enabled.
if (isZkEnabled) {
LifecycleModule.register(binder, ZkCoordinator.class);
}
bindAnnouncer(binder, DiscoverySideEffectsProvider.withLegacyAnnouncer());
Jerseys.addResource(binder, SelfDiscoveryResource.class);
LifecycleModule.registerKey(binder, Key.get(SelfDiscoveryResource.class));
}, new LookupModule(), new SqlModule());
}
Example usage of org.apache.druid.guice.ServerTypeConfig in the druid-io/druid project, taken from the getModules method of the CliHistorical class.
@Override
protected List<? extends Module> getModules() {
// Assembles the Guice modules for the Historical service: query processing,
// joins, segment management, caching, and the HTTP resources historicals
// expose. NOTE(review): registration order of LifecycleModule entries affects
// lifecycle start order — preserve it.
return ImmutableList.of(new DruidProcessingModule(), new QueryableModule(), new QueryRunnerFactoryModule(), new JoinableFactoryModule(), new HistoricalServiceModule(), binder -> {
// Service identity and ports (8083 plaintext, 8283 TLS).
binder.bindConstant().annotatedWith(Names.named("serviceName")).to("druid/historical");
binder.bindConstant().annotatedWith(Names.named("servicePort")).to(8083);
binder.bindConstant().annotatedWith(Names.named("tlsServicePort")).to(8283);
// Historicals do not need lastCompactionState; prune it from segment metadata.
binder.bindConstant().annotatedWith(PruneLastCompactionState.class).to(true);
binder.bind(ResponseContextConfig.class).toInstance(ResponseContextConfig.newConfig(true));
// register Server before binding ZkCoordinator to ensure HTTP endpoints are available immediately
LifecycleModule.register(binder, Server.class);
// Core segment-serving components; announced with ServerType.HISTORICAL.
binder.bind(ServerManager.class).in(LazySingleton.class);
binder.bind(SegmentManager.class).in(LazySingleton.class);
binder.bind(ZkCoordinator.class).in(ManageLifecycle.class);
bindQuerySegmentWalker(binder);
binder.bind(ServerTypeConfig.class).toInstance(new ServerTypeConfig(ServerType.HISTORICAL));
// HTTP server and query/segment resources.
binder.bind(JettyServerInitializer.class).to(QueryJettyServerInitializer.class).in(LazySingleton.class);
binder.bind(QueryCountStatsProvider.class).to(QueryResource.class);
Jerseys.addResource(binder, QueryResource.class);
Jerseys.addResource(binder, SegmentListerResource.class);
Jerseys.addResource(binder, HistoricalResource.class);
LifecycleModule.register(binder, QueryResource.class);
// ZkCoordinator only participates in the lifecycle when ZK is enabled.
if (isZkEnabled) {
LifecycleModule.register(binder, ZkCoordinator.class);
}
// Query result cache.
JsonConfigProvider.bind(binder, "druid.historical.cache", CacheConfig.class);
binder.install(new CacheModule());
bindAnnouncer(binder, DiscoverySideEffectsProvider.create());
Jerseys.addResource(binder, SelfDiscoveryResource.class);
LifecycleModule.registerKey(binder, Key.get(SelfDiscoveryResource.class));
}, new LookupModule());
}
End of aggregated ServerTypeConfig usage examples.