Use of org.apache.druid.server.initialization.ZkPathsConfig in project druid by druid-io.
From the class CliCoordinator, method getModules:
@Override
protected List<? extends Module> getModules() {
  List<Module> modules = new ArrayList<>();
  modules.add(JettyHttpClientModule.global());
  modules.add(new Module() {
    @Override
    public void configure(Binder binder) {
      binder.bindConstant().annotatedWith(Names.named("serviceName")).to(TieredBrokerConfig.DEFAULT_COORDINATOR_SERVICE_NAME);
      binder.bindConstant().annotatedWith(Names.named("servicePort")).to(8081);
      binder.bindConstant().annotatedWith(Names.named("tlsServicePort")).to(8281);
      ConfigProvider.bind(binder, DruidCoordinatorConfig.class);
      binder.bind(MetadataStorage.class).toProvider(MetadataStorageProvider.class);
      JsonConfigProvider.bind(binder, "druid.manager.segments", SegmentsMetadataManagerConfig.class);
      JsonConfigProvider.bind(binder, "druid.manager.rules", MetadataRuleManagerConfig.class);
      JsonConfigProvider.bind(binder, "druid.manager.lookups", LookupCoordinatorManagerConfig.class);
      JsonConfigProvider.bind(binder, "druid.coordinator.balancer", BalancerStrategyFactory.class);
      JsonConfigProvider.bind(binder, "druid.coordinator.segment", CoordinatorSegmentWatcherConfig.class);
      JsonConfigProvider.bind(binder, "druid.coordinator.balancer.cachingCost", CachingCostBalancerStrategyConfig.class);
      binder.bind(RedirectFilter.class).in(LazySingleton.class);
      if (beOverlord) {
        binder.bind(RedirectInfo.class).to(CoordinatorOverlordRedirectInfo.class).in(LazySingleton.class);
      } else {
        binder.bind(RedirectInfo.class).to(CoordinatorRedirectInfo.class).in(LazySingleton.class);
      }
      binder.bind(SegmentsMetadataManager.class).toProvider(SegmentsMetadataManagerProvider.class).in(ManageLifecycle.class);
      binder.bind(MetadataRuleManager.class).toProvider(MetadataRuleManagerProvider.class).in(ManageLifecycle.class);
      binder.bind(AuditManager.class).toProvider(AuditManagerProvider.class).in(ManageLifecycle.class);
      binder.bind(IndexingServiceClient.class).to(HttpIndexingServiceClient.class).in(LazySingleton.class);
      binder.bind(LookupCoordinatorManager.class).in(LazySingleton.class);
      binder.bind(CoordinatorServerView.class);
      binder.bind(DruidCoordinator.class);
      LifecycleModule.register(binder, CoordinatorServerView.class);
      LifecycleModule.register(binder, MetadataStorage.class);
      LifecycleModule.register(binder, DruidCoordinator.class);
      binder.bind(JettyServerInitializer.class).to(CoordinatorJettyServerInitializer.class);
      Jerseys.addResource(binder, CoordinatorResource.class);
      Jerseys.addResource(binder, CompactionResource.class);
      Jerseys.addResource(binder, CoordinatorDynamicConfigsResource.class);
      Jerseys.addResource(binder, CoordinatorCompactionConfigsResource.class);
      Jerseys.addResource(binder, TiersResource.class);
      Jerseys.addResource(binder, RulesResource.class);
      Jerseys.addResource(binder, ServersResource.class);
      Jerseys.addResource(binder, DataSourcesResource.class);
      Jerseys.addResource(binder, MetadataResource.class);
      Jerseys.addResource(binder, IntervalsResource.class);
      Jerseys.addResource(binder, LookupCoordinatorResource.class);
      Jerseys.addResource(binder, ClusterResource.class);
      Jerseys.addResource(binder, HttpServerInventoryViewResource.class);
      LifecycleModule.register(binder, Server.class);
      LifecycleModule.register(binder, DataSourcesResource.class);
      // Binding for the Set of indexing service coordinator duties
      final ConditionalMultibind<CoordinatorDuty> conditionalIndexingServiceDutyMultibind = ConditionalMultibind.create(
          properties,
          binder,
          CoordinatorDuty.class,
          CoordinatorIndexingServiceDuty.class
      );
      if (conditionalIndexingServiceDutyMultibind.matchCondition("druid.coordinator.merge.on", Predicates.equalTo("true"))) {
        throw new UnsupportedOperationException(
            "'druid.coordinator.merge.on' is not supported anymore. "
            + "Please consider using Coordinator's automatic compaction instead. "
            + "See https://druid.apache.org/docs/latest/operations/segment-optimization.html and "
            + "https://druid.apache.org/docs/latest/operations/api-reference.html#compaction-configuration "
            + "for more details about compaction."
        );
      }
      conditionalIndexingServiceDutyMultibind.addConditionBinding(
          "druid.coordinator.kill.on",
          "true",
          Predicates.equalTo("true"),
          KillUnusedSegments.class
      );
      conditionalIndexingServiceDutyMultibind.addConditionBinding(
          "druid.coordinator.kill.pendingSegments.on",
          "true",
          Predicates.equalTo("true"),
          KillStalePendingSegments.class
      );
      // Binding for the Set of metadata store management coordinator duties
      final ConditionalMultibind<CoordinatorDuty> conditionalMetadataStoreManagementDutyMultibind = ConditionalMultibind.create(
          properties,
          binder,
          CoordinatorDuty.class,
          CoordinatorMetadataStoreManagementDuty.class
      );
      conditionalMetadataStoreManagementDutyMultibind.addConditionBinding(
          "druid.coordinator.kill.supervisor.on",
          "true",
          Predicates.equalTo("true"),
          KillSupervisors.class
      );
      conditionalMetadataStoreManagementDutyMultibind.addConditionBinding(
          "druid.coordinator.kill.audit.on",
          "true",
          Predicates.equalTo("true"),
          KillAuditLog.class
      );
      conditionalMetadataStoreManagementDutyMultibind.addConditionBinding(
          "druid.coordinator.kill.rule.on",
          "true",
          Predicates.equalTo("true"),
          KillRules.class
      );
      conditionalMetadataStoreManagementDutyMultibind.addConditionBinding(
          "druid.coordinator.kill.datasource.on",
          "true",
          Predicates.equalTo("true"),
          KillDatasourceMetadata.class
      );
      conditionalMetadataStoreManagementDutyMultibind.addConditionBinding(
          "druid.coordinator.kill.compaction.on",
          Predicates.equalTo("true"),
          KillCompactionConfig.class
      );
      bindAnnouncer(binder, Coordinator.class, DiscoverySideEffectsProvider.create());
      Jerseys.addResource(binder, SelfDiscoveryResource.class);
      LifecycleModule.registerKey(binder, Key.get(SelfDiscoveryResource.class));
      if (!beOverlord) {
        // These are needed to deserialize SupervisorSpec for Supervisor Auto Cleanup
        binder.bind(TaskStorage.class).toProvider(Providers.of(null));
        binder.bind(TaskMaster.class).toProvider(Providers.of(null));
        binder.bind(RowIngestionMetersFactory.class).toProvider(Providers.of(null));
      }
      binder.bind(CoordinatorCustomDutyGroups.class).toProvider(new CoordinatorCustomDutyGroupsProvider()).in(LazySingleton.class);
    }

    @Provides
    @LazySingleton
    public LoadQueueTaskMaster getLoadQueueTaskMaster(
        Provider<CuratorFramework> curatorFrameworkProvider,
        ObjectMapper jsonMapper,
        ScheduledExecutorFactory factory,
        DruidCoordinatorConfig config,
        @EscalatedGlobal HttpClient httpClient,
        ZkPathsConfig zkPaths,
        Lifecycle lifecycle
    ) {
      // The HTTP load queue peon needs only one callback thread; the Curator-based
      // peon fans callbacks out over a configurable pool.
      boolean useHttpLoadQueuePeon = "http".equalsIgnoreCase(config.getLoadQueuePeonType());
      ExecutorService callBackExec;
      if (useHttpLoadQueuePeon) {
        callBackExec = Execs.singleThreaded("LoadQueuePeon-callbackexec--%d");
      } else {
        callBackExec = Execs.multiThreaded(config.getNumCuratorCallBackThreads(), "LoadQueuePeon-callbackexec--%d");
      }
      ExecutorServices.manageLifecycle(lifecycle, callBackExec);
      return new LoadQueueTaskMaster(
          curatorFrameworkProvider,
          jsonMapper,
          factory.create(1, "Master-PeonExec--%d"),
          callBackExec,
          config,
          httpClient,
          zkPaths
      );
    }
  });
  if (beOverlord) {
    modules.addAll(new CliOverlord().getModules(false));
  } else {
    // Only add LookupSerdeModule if !beOverlord, since CliOverlord includes it, and having two copies causes
    // the injector to get confused due to having multiple bindings for the same classes.
    modules.add(new LookupSerdeModule());
  }
  return modules;
}
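The module above is plain Guice wiring. For readers unfamiliar with the binder DSL, here is a minimal, self-contained sketch of the named-constant pattern used for servicePort (the class name and retrieval code are illustrative, not part of Druid):

import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Key;
import com.google.inject.name.Names;

public class NamedConstantSketch {
  public static void main(String[] args) {
    Injector injector = Guice.createInjector(new AbstractModule() {
      @Override
      protected void configure() {
        // Same shape as the servicePort binding in the module above.
        bindConstant().annotatedWith(Names.named("servicePort")).to(8081);
      }
    });
    // Constants bound with bindConstant() are retrieved by type plus annotation.
    int port = injector.getInstance(Key.get(Integer.class, Names.named("servicePort")));
    System.out.println(port); // prints 8081
  }
}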
Use of org.apache.druid.server.initialization.ZkPathsConfig in project druid by druid-io.
From the class DruidCoordinatorTest, method setUp:
@Before
public void setUp() throws Exception {
  druidServer = EasyMock.createMock(DruidServer.class);
  serverInventoryView = EasyMock.createMock(BatchServerInventoryView.class);
  segmentsMetadataManager = EasyMock.createNiceMock(SegmentsMetadataManager.class);
  dataSourcesSnapshot = EasyMock.createNiceMock(DataSourcesSnapshot.class);
  coordinatorRuntimeParams = EasyMock.createNiceMock(DruidCoordinatorRuntimeParams.class);
  metadataRuleManager = EasyMock.createNiceMock(MetadataRuleManager.class);
  JacksonConfigManager configManager = EasyMock.createNiceMock(JacksonConfigManager.class);
  EasyMock.expect(
      configManager.watch(
          EasyMock.eq(CoordinatorDynamicConfig.CONFIG_KEY),
          EasyMock.anyObject(Class.class),
          EasyMock.anyObject()
      )
  ).andReturn(new AtomicReference<>(CoordinatorDynamicConfig.builder().build())).anyTimes();
  EasyMock.expect(
      configManager.watch(
          EasyMock.eq(CoordinatorCompactionConfig.CONFIG_KEY),
          EasyMock.anyObject(Class.class),
          EasyMock.anyObject()
      )
  ).andReturn(new AtomicReference<>(CoordinatorCompactionConfig.empty())).anyTimes();
  EasyMock.replay(configManager);
  setupServerAndCurator();
  curator.start();
  curator.blockUntilConnected();
  curator.create().creatingParentsIfNeeded().forPath(LOADPATH);
  objectMapper = new DefaultObjectMapper();
  druidCoordinatorConfig = new TestDruidCoordinatorConfig(
      new Duration(COORDINATOR_START_DELAY),
      new Duration(COORDINATOR_PERIOD),
      null, null, null,
      new Duration(COORDINATOR_PERIOD),
      null, null, null, null, null, null, null, null, null, null,
      10,
      new Duration("PT0s")
  );
  pathChildrenCache = new PathChildrenCache(
      curator,
      LOADPATH,
      true,
      true,
      Execs.singleThreaded("coordinator_test_path_children_cache-%d")
  );
  loadQueuePeon = new CuratorLoadQueuePeon(
      curator,
      LOADPATH,
      objectMapper,
      Execs.scheduledSingleThreaded("coordinator_test_load_queue_peon_scheduled-%d"),
      Execs.singleThreaded("coordinator_test_load_queue_peon-%d"),
      druidCoordinatorConfig
  );
  loadQueuePeon.start();
  druidNode = new DruidNode("hey", "what", false, 1234, null, true, false);
  loadManagementPeons = new ConcurrentHashMap<>();
  scheduledExecutorFactory = new ScheduledExecutorFactory() {
    @Override
    public ScheduledExecutorService create(int corePoolSize, final String nameFormat) {
      return Executors.newSingleThreadScheduledExecutor();
    }
  };
  leaderAnnouncerLatch = new CountDownLatch(1);
  leaderUnannouncerLatch = new CountDownLatch(1);
  coordinator = new DruidCoordinator(
      druidCoordinatorConfig,
      new ZkPathsConfig() {
        @Override
        public String getBase() {
          return "druid";
        }
      },
      configManager,
      segmentsMetadataManager,
      serverInventoryView,
      metadataRuleManager,
      () -> curator,
      serviceEmitter,
      scheduledExecutorFactory,
      null,
      null,
      new NoopServiceAnnouncer() {
        @Override
        public void announce(DruidNode node) {
          // count down when this coordinator becomes the leader
          leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node) {
          leaderUnannouncerLatch.countDown();
        }
      },
      druidNode,
      loadManagementPeons,
      null,
      new HashSet<>(),
      new CoordinatorCustomDutyGroups(ImmutableSet.of()),
      new CostBalancerStrategyFactory(),
      EasyMock.createNiceMock(LookupCoordinatorManager.class),
      new TestDruidLeaderSelector(),
      null,
      ZkEnablementConfig.ENABLED
  );
}
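The configManager stubbing above follows EasyMock's record/replay protocol. A minimal, self-contained sketch of that protocol (the ConfigWatcher interface and values are illustrative stand-ins, not Druid types):

import org.easymock.EasyMock;

public class RecordReplaySketch {
  // Illustrative stand-in for JacksonConfigManager.
  public interface ConfigWatcher {
    String watch(String key);
  }

  public static void main(String[] args) {
    ConfigWatcher watcher = EasyMock.createNiceMock(ConfigWatcher.class);
    // Record phase: any number of watch("dynamic") calls return a canned value.
    EasyMock.expect(watcher.watch("dynamic")).andReturn("{}").anyTimes();
    // Replay phase: the mock now serves the recorded expectations.
    EasyMock.replay(watcher);
    System.out.println(watcher.watch("dynamic")); // prints {}
  }
}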
Use of org.apache.druid.server.initialization.ZkPathsConfig in project druid by druid-io.
From the class DruidCoordinatorTest, method testCompactSegmentsDutyWhenCustomDutyGroupEmpty:
@Test
public void testCompactSegmentsDutyWhenCustomDutyGroupEmpty() {
  CoordinatorCustomDutyGroups emptyCustomDutyGroups = new CoordinatorCustomDutyGroups(ImmutableSet.of());
  coordinator = new DruidCoordinator(
      druidCoordinatorConfig,
      new ZkPathsConfig() {
        @Override
        public String getBase() {
          return "druid";
        }
      },
      null,
      segmentsMetadataManager,
      serverInventoryView,
      metadataRuleManager,
      () -> curator,
      serviceEmitter,
      scheduledExecutorFactory,
      null,
      null,
      new NoopServiceAnnouncer() {
        @Override
        public void announce(DruidNode node) {
          // count down when this coordinator becomes the leader
          leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node) {
          leaderUnannouncerLatch.countDown();
        }
      },
      druidNode,
      loadManagementPeons,
      ImmutableSet.of(),
      new HashSet<>(),
      emptyCustomDutyGroups,
      new CostBalancerStrategyFactory(),
      EasyMock.createNiceMock(LookupCoordinatorManager.class),
      new TestDruidLeaderSelector(),
      null,
      ZkEnablementConfig.ENABLED
  );
  // Since CompactSegments is not enabled in a Custom Duty Group, CompactSegments must be created in IndexingServiceDuties
  List<CoordinatorDuty> indexingDuties = coordinator.makeIndexingServiceDuties();
  Assert.assertTrue(indexingDuties.stream().anyMatch(coordinatorDuty -> coordinatorDuty instanceof CompactSegments));
  // CompactSegments should not exist in any Custom Duty Group
  List<CompactSegments> compactSegmentsDutyFromCustomGroups = coordinator.getCompactSegmentsDutyFromCustomGroups();
  Assert.assertTrue(compactSegmentsDutyFromCustomGroups.isEmpty());
  // CompactSegments returned by this method should be created using the DruidCoordinatorConfig in the DruidCoordinator
  CompactSegments duty = coordinator.initializeCompactSegmentsDuty();
  Assert.assertNotNull(duty);
  Assert.assertEquals(druidCoordinatorConfig.getCompactionSkipLockedIntervals(), duty.isSkipLockedIntervals());
}
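Taken together with the companion test below, these assertions pin down a simple fallback rule for initializeCompactSegmentsDuty. A hypothetical sketch of that rule, not Druid's actual implementation:

import java.util.List;
import java.util.function.Supplier;

class DutySelectionSketch {
  // Prefer a CompactSegments-style duty supplied by a custom duty group;
  // otherwise fall back to one built from the coordinator's own config.
  static <T> T initializeDuty(List<T> fromCustomGroups, Supplier<T> fromCoordinatorConfig) {
    return fromCustomGroups.isEmpty() ? fromCoordinatorConfig.get() : fromCustomGroups.get(0);
  }
}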
Use of org.apache.druid.server.initialization.ZkPathsConfig in project druid by druid-io.
From the class DruidCoordinatorTest, method testInitializeCompactSegmentsDutyWhenCustomDutyGroupContainsCompactSegments:
@Test
public void testInitializeCompactSegmentsDutyWhenCustomDutyGroupContainsCompactSegments() {
  DruidCoordinatorConfig differentConfigUsedInCustomGroup = new TestDruidCoordinatorConfig(
      new Duration(COORDINATOR_START_DELAY),
      new Duration(COORDINATOR_PERIOD),
      null, null, null,
      new Duration(COORDINATOR_PERIOD),
      null, null, null, null, null, null, null, null, null, null,
      10,
      new Duration("PT0s"),
      false
  );
  CoordinatorCustomDutyGroup compactSegmentCustomGroup = new CoordinatorCustomDutyGroup(
      "group1",
      Duration.standardSeconds(1),
      ImmutableList.of(new CompactSegments(differentConfigUsedInCustomGroup, null, null))
  );
  CoordinatorCustomDutyGroups customDutyGroups = new CoordinatorCustomDutyGroups(ImmutableSet.of(compactSegmentCustomGroup));
  coordinator = new DruidCoordinator(
      druidCoordinatorConfig,
      new ZkPathsConfig() {
        @Override
        public String getBase() {
          return "druid";
        }
      },
      null,
      segmentsMetadataManager,
      serverInventoryView,
      metadataRuleManager,
      () -> curator,
      serviceEmitter,
      scheduledExecutorFactory,
      null,
      null,
      new NoopServiceAnnouncer() {
        @Override
        public void announce(DruidNode node) {
          // count down when this coordinator becomes the leader
          leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node) {
          leaderUnannouncerLatch.countDown();
        }
      },
      druidNode,
      loadManagementPeons,
      ImmutableSet.of(),
      new HashSet<>(),
      customDutyGroups,
      new CostBalancerStrategyFactory(),
      EasyMock.createNiceMock(LookupCoordinatorManager.class),
      new TestDruidLeaderSelector(),
      null,
      ZkEnablementConfig.ENABLED
  );
  // Since CompactSegments is enabled in a Custom Duty Group, CompactSegments must not be created in IndexingServiceDuties
  List<CoordinatorDuty> indexingDuties = coordinator.makeIndexingServiceDuties();
  Assert.assertTrue(indexingDuties.stream().noneMatch(coordinatorDuty -> coordinatorDuty instanceof CompactSegments));
  // CompactSegments should exist in the Custom Duty Group
  List<CompactSegments> compactSegmentsDutyFromCustomGroups = coordinator.getCompactSegmentsDutyFromCustomGroups();
  Assert.assertFalse(compactSegmentsDutyFromCustomGroups.isEmpty());
  Assert.assertEquals(1, compactSegmentsDutyFromCustomGroups.size());
  Assert.assertNotNull(compactSegmentsDutyFromCustomGroups.get(0));
  Assert.assertTrue(compactSegmentsDutyFromCustomGroups.get(0) instanceof CompactSegments);
  // CompactSegments returned by this method should be from the Custom Duty Group
  CompactSegments duty = coordinator.initializeCompactSegmentsDuty();
  Assert.assertNotNull(duty);
  Assert.assertNotEquals(druidCoordinatorConfig.getCompactionSkipLockedIntervals(), duty.isSkipLockedIntervals());
  // We should get the CompactSegments from the custom duty group, which was created with a different config
  // than the config in DruidCoordinator
  Assert.assertEquals(differentConfigUsedInCustomGroup.getCompactionSkipLockedIntervals(), duty.isSkipLockedIntervals());
}
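Outside of tests, such a custom duty group would be declared through runtime properties rather than constructed directly. A hedged sketch mirroring the "group1" group above (the property keys and duty name follow the Apache Druid documentation on custom coordinator duties; treat them as assumptions to verify against your Druid version):

druid.coordinator.dutyGroups=["group1"]
druid.coordinator.group1.duties=["compactSegments"]
druid.coordinator.group1.period=PT1S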
Use of org.apache.druid.server.initialization.ZkPathsConfig in project druid by druid-io.
From the class BatchDataSegmentAnnouncerTest, method setUp:
@Before
public void setUp() throws Exception {
  testingCluster = new TestingCluster(1);
  testingCluster.start();
  cf = CuratorFrameworkFactory.builder()
      .connectString(testingCluster.getConnectString())
      .retryPolicy(new ExponentialBackoffRetry(1, 10))
      .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
      .build();
  cf.start();
  cf.blockUntilConnected();
  cf.create().creatingParentsIfNeeded().forPath(TEST_BASE_PATH);
  jsonMapper = TestHelper.makeJsonMapper();
  announcer = new TestAnnouncer(cf, Execs.directExecutor());
  announcer.start();
  segmentReader = new SegmentReader(cf, jsonMapper);
  skipDimensionsAndMetrics = false;
  skipLoadSpec = false;
  segmentAnnouncer = new BatchDataSegmentAnnouncer(
      new DruidServerMetadata("id", "host", null, Long.MAX_VALUE, ServerType.HISTORICAL, "tier", 0),
      new BatchDataSegmentAnnouncerConfig() {
        @Override
        public int getSegmentsPerNode() {
          return 50;
        }

        @Override
        public long getMaxBytesPerNode() {
          return maxBytesPerNode.get();
        }

        @Override
        public boolean isSkipDimensionsAndMetrics() {
          return skipDimensionsAndMetrics;
        }

        @Override
        public boolean isSkipLoadSpec() {
          return skipLoadSpec;
        }
      },
      new ZkPathsConfig() {
        @Override
        public String getBase() {
          return TEST_BASE_PATH;
        }
      },
      announcer,
      jsonMapper
  );
  testSegments = new HashSet<>();
  for (int i = 0; i < 100; i++) {
    testSegments.add(makeSegment(i));
  }
  exec = Execs.multiThreaded(NUM_THREADS, "BatchDataSegmentAnnouncerTest-%d");
}
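The Curator bootstrap above (builder, start, blockUntilConnected, create with parents) is the standard client setup against an in-process TestingCluster. A minimal standalone sketch of the same sequence (the class name and znode path are illustrative):

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.curator.test.TestingCluster;

public class CuratorBootstrapSketch {
  public static void main(String[] args) throws Exception {
    try (TestingCluster cluster = new TestingCluster(1)) {
      cluster.start();
      CuratorFramework cf = CuratorFrameworkFactory.builder()
          .connectString(cluster.getConnectString())
          .retryPolicy(new ExponentialBackoffRetry(1, 10))
          .build();
      cf.start();
      cf.blockUntilConnected();
      // creatingParentsIfNeeded() creates intermediate znodes as required.
      cf.create().creatingParentsIfNeeded().forPath("/druid/test");
      cf.close();
    }
  }
}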