Search in sources:

Example 1 with MetadataRuleManager

Use of org.apache.druid.metadata.MetadataRuleManager in project druid by druid-io.

The class DruidCoordinatorTest, method testInitializeCompactSegmentsDutyWhenCustomDutyGroupDoesNotContainsCompactSegments.

@Test
public void testInitializeCompactSegmentsDutyWhenCustomDutyGroupDoesNotContainsCompactSegments() {
    CoordinatorCustomDutyGroup group = new CoordinatorCustomDutyGroup("group1", Duration.standardSeconds(1), ImmutableList.of(new KillSupervisorsCustomDuty(new Duration("PT1S"), null)));
    CoordinatorCustomDutyGroups customDutyGroups = new CoordinatorCustomDutyGroups(ImmutableSet.of(group));
    coordinator = new DruidCoordinator(druidCoordinatorConfig, new ZkPathsConfig() {

        @Override
        public String getBase() {
            return "druid";
        }
    }, null, segmentsMetadataManager, serverInventoryView, metadataRuleManager, () -> curator, serviceEmitter, scheduledExecutorFactory, null, null, new NoopServiceAnnouncer() {

        @Override
        public void announce(DruidNode node) {
            // count down when this coordinator becomes the leader
            leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node) {
            leaderUnannouncerLatch.countDown();
        }
    }, druidNode, loadManagementPeons, ImmutableSet.of(), new HashSet<>(), customDutyGroups, new CostBalancerStrategyFactory(), EasyMock.createNiceMock(LookupCoordinatorManager.class), new TestDruidLeaderSelector(), null, ZkEnablementConfig.ENABLED);
    // Since CompactSegments is not enabled in the custom duty group, CompactSegments must be created among the indexing service duties
    List<CoordinatorDuty> indexingDuties = coordinator.makeIndexingServiceDuties();
    Assert.assertTrue(indexingDuties.stream().anyMatch(coordinatorDuty -> coordinatorDuty instanceof CompactSegments));
    // CompactSegments should not exist in Custom Duty Group
    List<CompactSegments> compactSegmentsDutyFromCustomGroups = coordinator.getCompactSegmentsDutyFromCustomGroups();
    Assert.assertTrue(compactSegmentsDutyFromCustomGroups.isEmpty());
    // CompactSegments returned by this method should be created using the DruidCoordinatorConfig in the DruidCoordinator
    CompactSegments duty = coordinator.initializeCompactSegmentsDuty();
    Assert.assertNotNull(duty);
    Assert.assertEquals(druidCoordinatorConfig.getCompactionSkipLockedIntervals(), duty.isSkipLockedIntervals());
}
Also used : LookupCoordinatorManager(org.apache.druid.server.lookup.cache.LookupCoordinatorManager) IntervalLoadRule(org.apache.druid.server.coordinator.rules.IntervalLoadRule) DruidServer(org.apache.druid.client.DruidServer) JacksonConfigManager(org.apache.druid.common.config.JacksonConfigManager) ForeverBroadcastDistributionRule(org.apache.druid.server.coordinator.rules.ForeverBroadcastDistributionRule) Event(org.apache.druid.java.util.emitter.core.Event) After(org.junit.After) Map(java.util.Map) NoopServiceAnnouncer(org.apache.druid.curator.discovery.NoopServiceAnnouncer) ServerType(org.apache.druid.server.coordination.ServerType) ForeverLoadRule(org.apache.druid.server.coordinator.rules.ForeverLoadRule) CoordinatorCustomDutyGroups(org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroups) ImmutableSet(com.google.common.collect.ImmutableSet) Execs(org.apache.druid.java.util.common.concurrent.Execs) ImmutableMap(com.google.common.collect.ImmutableMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) CoordinatorCustomDutyGroup(org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroup) Rule(org.apache.druid.server.coordinator.rules.Rule) CuratorUtils(org.apache.druid.curator.CuratorUtils) Executors(java.util.concurrent.Executors) CountDownLatch(java.util.concurrent.CountDownLatch) List(java.util.List) CuratorFramework(org.apache.curator.framework.CuratorFramework) KillSupervisorsCustomDuty(org.apache.druid.server.coordinator.duty.KillSupervisorsCustomDuty) ServiceEmitter(org.apache.druid.java.util.emitter.service.ServiceEmitter) PathChildrenCacheEvent(org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent) DruidLeaderSelector(org.apache.druid.discovery.DruidLeaderSelector) DataSegment(org.apache.druid.timeline.DataSegment) CoordinatorDuty(org.apache.druid.server.coordinator.duty.CoordinatorDuty) SegmentId(org.apache.druid.timeline.SegmentId) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) DruidDataSource(org.apache.druid.client.DruidDataSource) MetadataRuleManager(org.apache.druid.metadata.MetadataRuleManager) Intervals(org.apache.druid.java.util.common.Intervals) ZkPathsConfig(org.apache.druid.server.initialization.ZkPathsConfig) Duration(org.joda.time.Duration) ZkEnablementConfig(org.apache.druid.curator.ZkEnablementConfig) DataSourcesSnapshot(org.apache.druid.client.DataSourcesSnapshot) AtomicReference(java.util.concurrent.atomic.AtomicReference) ScheduledExecutorFactory(org.apache.druid.java.util.common.concurrent.ScheduledExecutorFactory) ConcurrentMap(java.util.concurrent.ConcurrentMap) HashSet(java.util.HashSet) Object2LongMap(it.unimi.dsi.fastutil.objects.Object2LongMap) ImmutableList(com.google.common.collect.ImmutableList) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) CompactSegments(org.apache.druid.server.coordinator.duty.CompactSegments) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) Nullable(javax.annotation.Nullable) Before(org.junit.Before) Capture(org.easymock.Capture) SegmentsMetadataManager(org.apache.druid.metadata.SegmentsMetadataManager) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Test(org.junit.Test) EasyMock(org.easymock.EasyMock) DefaultObjectMapper(org.apache.druid.jackson.DefaultObjectMapper) CuratorTestBase(org.apache.druid.curator.CuratorTestBase) 
Object2IntMap(it.unimi.dsi.fastutil.objects.Object2IntMap) CoordinatorCustomDuty(org.apache.druid.server.coordinator.duty.CoordinatorCustomDuty) DruidNode(org.apache.druid.server.DruidNode) PathChildrenCache(org.apache.curator.framework.recipes.cache.PathChildrenCache) Assert(org.junit.Assert) Collections(java.util.Collections) BatchServerInventoryView(org.apache.druid.client.BatchServerInventoryView) KillSupervisorsCustomDuty(org.apache.druid.server.coordinator.duty.KillSupervisorsCustomDuty) Duration(org.joda.time.Duration) CompactSegments(org.apache.druid.server.coordinator.duty.CompactSegments) CoordinatorDuty(org.apache.druid.server.coordinator.duty.CoordinatorDuty) ZkPathsConfig(org.apache.druid.server.initialization.ZkPathsConfig) DruidNode(org.apache.druid.server.DruidNode) CoordinatorCustomDutyGroup(org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroup) CoordinatorCustomDutyGroups(org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroups) NoopServiceAnnouncer(org.apache.druid.curator.discovery.NoopServiceAnnouncer) HashSet(java.util.HashSet) Test(org.junit.Test)

Example 2 with MetadataRuleManager

Use of org.apache.druid.metadata.MetadataRuleManager in project druid by druid-io.

The class RunRules, method run.

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    replicatorThrottler.updateParams(coordinator.getDynamicConfigs().getReplicationThrottleLimit(), coordinator.getDynamicConfigs().getReplicantLifetime(), false);
    CoordinatorStats stats = new CoordinatorStats();
    DruidCluster cluster = params.getDruidCluster();
    if (cluster.isEmpty()) {
        log.warn("Uh... I have no servers. Not assigning anything...");
        return params;
    }
    // Get used segments which are overshadowed by other used segments. Those would not need to be loaded and
    // eventually will be unloaded from Historical servers. Segments overshadowed by *served* used segments are marked
    // as unused in MarkAsUnusedOvershadowedSegments, and then eventually Coordinator sends commands to Historical nodes
    // to unload such segments in UnloadUnusedSegments.
    Set<SegmentId> overshadowed = params.getDataSourcesSnapshot().getOvershadowedSegments();
    for (String tier : cluster.getTierNames()) {
        replicatorThrottler.updateReplicationState(tier);
    }
    DruidCoordinatorRuntimeParams paramsWithReplicationManager = params.buildFromExistingWithoutSegmentsMetadata().withReplicationManager(replicatorThrottler).build();
    // Run through all matched rules for used segments
    DateTime now = DateTimes.nowUtc();
    MetadataRuleManager databaseRuleManager = paramsWithReplicationManager.getDatabaseRuleManager();
    final List<SegmentId> segmentsWithMissingRules = Lists.newArrayListWithCapacity(MAX_MISSING_RULES);
    int missingRules = 0;
    final Set<String> broadcastDatasources = new HashSet<>();
    for (ImmutableDruidDataSource dataSource : params.getDataSourcesSnapshot().getDataSourcesMap().values()) {
        List<Rule> rules = databaseRuleManager.getRulesWithDefault(dataSource.getName());
        for (Rule rule : rules) {
            // A datasource is considered a broadcast datasource if it has any broadcast rules.
            // The set of broadcast datasources is used by BalanceSegments, so it's important that RunRules
            // executes before BalanceSegments.
            if (rule instanceof BroadcastDistributionRule) {
                broadcastDatasources.add(dataSource.getName());
                break;
            }
        }
    }
    for (DataSegment segment : params.getUsedSegments()) {
        if (overshadowed.contains(segment.getId())) {
            // Skipping overshadowed segments
            continue;
        }
        List<Rule> rules = databaseRuleManager.getRulesWithDefault(segment.getDataSource());
        boolean foundMatchingRule = false;
        for (Rule rule : rules) {
            if (rule.appliesTo(segment, now)) {
                if (stats.getGlobalStat("totalNonPrimaryReplicantsLoaded") >= paramsWithReplicationManager.getCoordinatorDynamicConfig().getMaxNonPrimaryReplicantsToLoad() && !paramsWithReplicationManager.getReplicationManager().isLoadPrimaryReplicantsOnly()) {
                    log.info("Maximum number of non-primary replicants [%d] have been loaded for the current RunRules execution. Only loading primary replicants from here on for this coordinator run cycle.", paramsWithReplicationManager.getCoordinatorDynamicConfig().getMaxNonPrimaryReplicantsToLoad());
                    paramsWithReplicationManager.getReplicationManager().setLoadPrimaryReplicantsOnly(true);
                }
                stats.accumulate(rule.run(coordinator, paramsWithReplicationManager, segment));
                foundMatchingRule = true;
                break;
            }
        }
        if (!foundMatchingRule) {
            if (segmentsWithMissingRules.size() < MAX_MISSING_RULES) {
                segmentsWithMissingRules.add(segment.getId());
            }
            missingRules++;
        }
    }
    if (!segmentsWithMissingRules.isEmpty()) {
        log.makeAlert("Unable to find matching rules!").addData("segmentsWithMissingRulesCount", missingRules).addData("segmentsWithMissingRules", segmentsWithMissingRules).emit();
    }
    return params.buildFromExisting().withCoordinatorStats(stats).withBroadcastDatasources(broadcastDatasources).build();
}
Also used : DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) MetadataRuleManager(org.apache.druid.metadata.MetadataRuleManager) ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource) SegmentId(org.apache.druid.timeline.SegmentId) DruidCluster(org.apache.druid.server.coordinator.DruidCluster) DataSegment(org.apache.druid.timeline.DataSegment) DateTime(org.joda.time.DateTime) BroadcastDistributionRule(org.apache.druid.server.coordinator.rules.BroadcastDistributionRule) BroadcastDistributionRule(org.apache.druid.server.coordinator.rules.BroadcastDistributionRule) Rule(org.apache.druid.server.coordinator.rules.Rule) HashSet(java.util.HashSet)
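
The loop above gives rules "first match wins" semantics: rules are evaluated in the order returned by MetadataRuleManager.getRulesWithDefault, and only the first rule whose appliesTo check passes is run for a segment. A minimal, hypothetical helper isolating just that selection step is sketched below; RuleMatcher and findMatchingRule are illustrative names, not Druid API, while getRulesWithDefault, appliesTo, and getDataSource are the calls already used in the code above.

import java.util.List;
import java.util.Optional;

import org.apache.druid.metadata.MetadataRuleManager;
import org.apache.druid.server.coordinator.rules.Rule;
import org.apache.druid.timeline.DataSegment;
import org.joda.time.DateTime;

final class RuleMatcher {

    private RuleMatcher() {
    }

    // Returns the first rule that applies to the segment at the given time, mirroring the
    // "first matching rule wins" behavior of RunRules. An empty result corresponds to a
    // segment that RunRules would count as having a missing rule.
    static Optional<Rule> findMatchingRule(MetadataRuleManager ruleManager, DataSegment segment, DateTime now) {
        List<Rule> rules = ruleManager.getRulesWithDefault(segment.getDataSource());
        for (Rule rule : rules) {
            if (rule.appliesTo(segment, now)) {
                // Later rules are never consulted once a match is found.
                return Optional.of(rule);
            }
        }
        return Optional.empty();
    }
}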

Example 3 with MetadataRuleManager

Use of org.apache.druid.metadata.MetadataRuleManager in project druid by druid-io.

The class DataSourcesResourceTest, method testIsHandOffComplete.

@Test
public void testIsHandOffComplete() {
    MetadataRuleManager databaseRuleManager = EasyMock.createMock(MetadataRuleManager.class);
    Rule loadRule = new IntervalLoadRule(Intervals.of("2013-01-02T00:00:00Z/2013-01-03T00:00:00Z"), null);
    Rule dropRule = new IntervalDropRule(Intervals.of("2013-01-01T00:00:00Z/2013-01-02T00:00:00Z"));
    DataSourcesResource dataSourcesResource = new DataSourcesResource(inventoryView, null, databaseRuleManager, null, null, null);
    // test dropped
    EasyMock.expect(databaseRuleManager.getRulesWithDefault("dataSource1")).andReturn(ImmutableList.of(loadRule, dropRule)).once();
    EasyMock.replay(databaseRuleManager);
    String interval1 = "2013-01-01T01:00:00Z/2013-01-01T02:00:00Z";
    Response response1 = dataSourcesResource.isHandOffComplete("dataSource1", interval1, 1, "v1");
    Assert.assertTrue((boolean) response1.getEntity());
    EasyMock.verify(databaseRuleManager);
    // test isn't dropped and no timeline found
    EasyMock.reset(databaseRuleManager);
    EasyMock.expect(databaseRuleManager.getRulesWithDefault("dataSource1")).andReturn(ImmutableList.of(loadRule, dropRule)).once();
    EasyMock.expect(inventoryView.getTimeline(new TableDataSource("dataSource1"))).andReturn(null).once();
    EasyMock.replay(inventoryView, databaseRuleManager);
    String interval2 = "2013-01-02T01:00:00Z/2013-01-02T02:00:00Z";
    Response response2 = dataSourcesResource.isHandOffComplete("dataSource1", interval2, 1, "v1");
    Assert.assertFalse((boolean) response2.getEntity());
    EasyMock.verify(inventoryView, databaseRuleManager);
    // test isn't dropped and timeline exist
    String interval3 = "2013-01-02T02:00:00Z/2013-01-02T03:00:00Z";
    SegmentLoadInfo segmentLoadInfo = new SegmentLoadInfo(createSegment(Intervals.of(interval3), "v1", 1));
    segmentLoadInfo.addServer(createHistoricalServerMetadata("test"));
    VersionedIntervalTimeline<String, SegmentLoadInfo> timeline = new VersionedIntervalTimeline<String, SegmentLoadInfo>(null) {

        @Override
        public List<TimelineObjectHolder<String, SegmentLoadInfo>> lookupWithIncompletePartitions(Interval interval) {
            PartitionHolder<SegmentLoadInfo> partitionHolder = new PartitionHolder<>(new NumberedPartitionChunk<>(1, 1, segmentLoadInfo));
            List<TimelineObjectHolder<String, SegmentLoadInfo>> ret = new ArrayList<>();
            ret.add(new TimelineObjectHolder<>(Intervals.of(interval3), "v1", partitionHolder));
            return ret;
        }
    };
    EasyMock.reset(inventoryView, databaseRuleManager);
    EasyMock.expect(databaseRuleManager.getRulesWithDefault("dataSource1")).andReturn(ImmutableList.of(loadRule, dropRule)).once();
    EasyMock.expect(inventoryView.getTimeline(new TableDataSource("dataSource1"))).andReturn(timeline).once();
    EasyMock.replay(inventoryView, databaseRuleManager);
    Response response3 = dataSourcesResource.isHandOffComplete("dataSource1", interval3, 1, "v1");
    Assert.assertTrue((boolean) response3.getEntity());
    EasyMock.verify(inventoryView, databaseRuleManager);
}
Also used : PartitionHolder(org.apache.druid.timeline.partition.PartitionHolder) MetadataRuleManager(org.apache.druid.metadata.MetadataRuleManager) IntervalLoadRule(org.apache.druid.server.coordinator.rules.IntervalLoadRule) SegmentLoadInfo(org.apache.druid.client.SegmentLoadInfo) ImmutableSegmentLoadInfo(org.apache.druid.client.ImmutableSegmentLoadInfo) ArrayList(java.util.ArrayList) IntervalDropRule(org.apache.druid.server.coordinator.rules.IntervalDropRule) Response(javax.ws.rs.core.Response) TimelineObjectHolder(org.apache.druid.timeline.TimelineObjectHolder) TableDataSource(org.apache.druid.query.TableDataSource) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) IntervalLoadRule(org.apache.druid.server.coordinator.rules.IntervalLoadRule) Rule(org.apache.druid.server.coordinator.rules.Rule) IntervalDropRule(org.apache.druid.server.coordinator.rules.IntervalDropRule) Interval(org.joda.time.Interval) Test(org.junit.Test)
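
The test above repeats one small EasyMock cycle three times: record an expectation, replay, exercise the resource, verify, then reset before the next scenario. A stripped-down sketch of that cycle follows; the data source name and the single load rule are illustrative, and the calls themselves (createMock, expect, andReturn, once, replay, verify, reset) are the ones the test already uses.

MetadataRuleManager ruleManager = EasyMock.createMock(MetadataRuleManager.class);
Rule loadRule = new IntervalLoadRule(Intervals.of("2013-01-02T00:00:00Z/2013-01-03T00:00:00Z"), null);
// Record: getRulesWithDefault("dataSource1") must be called exactly once and returns our rule.
EasyMock.expect(ruleManager.getRulesWithDefault("dataSource1")).andReturn(ImmutableList.of(loadRule)).once();
EasyMock.replay(ruleManager);
// ... exercise the code under test, which consults ruleManager here ...
EasyMock.verify(ruleManager);
// reset clears the recorded expectations so the next scenario can stub the same mock again.
EasyMock.reset(ruleManager);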

Example 4 with MetadataRuleManager

Use of org.apache.druid.metadata.MetadataRuleManager in project druid by druid-io.

The class DruidCoordinatorTest, method setUp.

@Before
public void setUp() throws Exception {
    druidServer = EasyMock.createMock(DruidServer.class);
    serverInventoryView = EasyMock.createMock(BatchServerInventoryView.class);
    segmentsMetadataManager = EasyMock.createNiceMock(SegmentsMetadataManager.class);
    dataSourcesSnapshot = EasyMock.createNiceMock(DataSourcesSnapshot.class);
    coordinatorRuntimeParams = EasyMock.createNiceMock(DruidCoordinatorRuntimeParams.class);
    metadataRuleManager = EasyMock.createNiceMock(MetadataRuleManager.class);
    JacksonConfigManager configManager = EasyMock.createNiceMock(JacksonConfigManager.class);
    EasyMock.expect(configManager.watch(EasyMock.eq(CoordinatorDynamicConfig.CONFIG_KEY), EasyMock.anyObject(Class.class), EasyMock.anyObject())).andReturn(new AtomicReference(CoordinatorDynamicConfig.builder().build())).anyTimes();
    EasyMock.expect(configManager.watch(EasyMock.eq(CoordinatorCompactionConfig.CONFIG_KEY), EasyMock.anyObject(Class.class), EasyMock.anyObject())).andReturn(new AtomicReference(CoordinatorCompactionConfig.empty())).anyTimes();
    EasyMock.replay(configManager);
    setupServerAndCurator();
    curator.start();
    curator.blockUntilConnected();
    curator.create().creatingParentsIfNeeded().forPath(LOADPATH);
    objectMapper = new DefaultObjectMapper();
    druidCoordinatorConfig = new TestDruidCoordinatorConfig(new Duration(COORDINATOR_START_DELAY), new Duration(COORDINATOR_PERIOD), null, null, null, new Duration(COORDINATOR_PERIOD), null, null, null, null, null, null, null, null, null, null, 10, new Duration("PT0s"));
    pathChildrenCache = new PathChildrenCache(curator, LOADPATH, true, true, Execs.singleThreaded("coordinator_test_path_children_cache-%d"));
    loadQueuePeon = new CuratorLoadQueuePeon(curator, LOADPATH, objectMapper, Execs.scheduledSingleThreaded("coordinator_test_load_queue_peon_scheduled-%d"), Execs.singleThreaded("coordinator_test_load_queue_peon-%d"), druidCoordinatorConfig);
    loadQueuePeon.start();
    druidNode = new DruidNode("hey", "what", false, 1234, null, true, false);
    loadManagementPeons = new ConcurrentHashMap<>();
    scheduledExecutorFactory = new ScheduledExecutorFactory() {

        @Override
        public ScheduledExecutorService create(int corePoolSize, final String nameFormat) {
            return Executors.newSingleThreadScheduledExecutor();
        }
    };
    leaderAnnouncerLatch = new CountDownLatch(1);
    leaderUnannouncerLatch = new CountDownLatch(1);
    coordinator = new DruidCoordinator(druidCoordinatorConfig, new ZkPathsConfig() {

        @Override
        public String getBase() {
            return "druid";
        }
    }, configManager, segmentsMetadataManager, serverInventoryView, metadataRuleManager, () -> curator, serviceEmitter, scheduledExecutorFactory, null, null, new NoopServiceAnnouncer() {

        @Override
        public void announce(DruidNode node) {
            // count down when this coordinator becomes the leader
            leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node) {
            leaderUnannouncerLatch.countDown();
        }
    }, druidNode, loadManagementPeons, null, new HashSet<>(), new CoordinatorCustomDutyGroups(ImmutableSet.of()), new CostBalancerStrategyFactory(), EasyMock.createNiceMock(LookupCoordinatorManager.class), new TestDruidLeaderSelector(), null, ZkEnablementConfig.ENABLED);
}
Also used : SegmentsMetadataManager(org.apache.druid.metadata.SegmentsMetadataManager) BatchServerInventoryView(org.apache.druid.client.BatchServerInventoryView) MetadataRuleManager(org.apache.druid.metadata.MetadataRuleManager) ZkPathsConfig(org.apache.druid.server.initialization.ZkPathsConfig) CoordinatorCustomDutyGroups(org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroups) HashSet(java.util.HashSet) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) JacksonConfigManager(org.apache.druid.common.config.JacksonConfigManager) DruidServer(org.apache.druid.client.DruidServer) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) AtomicReference(java.util.concurrent.atomic.AtomicReference) Duration(org.joda.time.Duration) CountDownLatch(java.util.concurrent.CountDownLatch) ScheduledExecutorFactory(org.apache.druid.java.util.common.concurrent.ScheduledExecutorFactory) PathChildrenCache(org.apache.curator.framework.recipes.cache.PathChildrenCache) DefaultObjectMapper(org.apache.druid.jackson.DefaultObjectMapper) DruidNode(org.apache.druid.server.DruidNode) DataSourcesSnapshot(org.apache.druid.client.DataSourcesSnapshot) NoopServiceAnnouncer(org.apache.druid.curator.discovery.NoopServiceAnnouncer) Before(org.junit.Before)
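
The two latches wired into the NoopServiceAnnouncer above are what individual test bodies block on to observe leadership changes. The sketch below shows that typical consumption pattern; it assumes, as the tests in this class do, that leader election succeeds once the coordinator is started, and that DruidCoordinator exposes start(), stop(), and isLeader().

coordinator.start();
// announce() fires once this coordinator wins leadership, counting the latch down.
leaderAnnouncerLatch.await();
Assert.assertTrue(coordinator.isLeader());
coordinator.stop();
// unannounce() fires when leadership is relinquished during shutdown.
leaderUnannouncerLatch.await();
Assert.assertFalse(coordinator.isLeader());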

Example 5 with MetadataRuleManager

Use of org.apache.druid.metadata.MetadataRuleManager in project druid by druid-io.

The class DruidCoordinatorTest, method testCompactSegmentsDutyWhenCustomDutyGroupEmpty.

@Test
public void testCompactSegmentsDutyWhenCustomDutyGroupEmpty() {
    CoordinatorCustomDutyGroups emptyCustomDutyGroups = new CoordinatorCustomDutyGroups(ImmutableSet.of());
    coordinator = new DruidCoordinator(druidCoordinatorConfig, new ZkPathsConfig() {

        @Override
        public String getBase() {
            return "druid";
        }
    }, null, segmentsMetadataManager, serverInventoryView, metadataRuleManager, () -> curator, serviceEmitter, scheduledExecutorFactory, null, null, new NoopServiceAnnouncer() {

        @Override
        public void announce(DruidNode node) {
            // count down when this coordinator becomes the leader
            leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node) {
            leaderUnannouncerLatch.countDown();
        }
    }, druidNode, loadManagementPeons, ImmutableSet.of(), new HashSet<>(), emptyCustomDutyGroups, new CostBalancerStrategyFactory(), EasyMock.createNiceMock(LookupCoordinatorManager.class), new TestDruidLeaderSelector(), null, ZkEnablementConfig.ENABLED);
    // Since CompactSegments is not enabled in the custom duty group, CompactSegments must be created among the indexing service duties
    List<CoordinatorDuty> indexingDuties = coordinator.makeIndexingServiceDuties();
    Assert.assertTrue(indexingDuties.stream().anyMatch(coordinatorDuty -> coordinatorDuty instanceof CompactSegments));
    // CompactSegments should not exist in Custom Duty Group
    List<CompactSegments> compactSegmentsDutyFromCustomGroups = coordinator.getCompactSegmentsDutyFromCustomGroups();
    Assert.assertTrue(compactSegmentsDutyFromCustomGroups.isEmpty());
    // CompactSegments returned by this method should be created using the DruidCoordinatorConfig in the DruidCoordinator
    CompactSegments duty = coordinator.initializeCompactSegmentsDuty();
    Assert.assertNotNull(duty);
    Assert.assertEquals(druidCoordinatorConfig.getCompactionSkipLockedIntervals(), duty.isSkipLockedIntervals());
}
Also used : LookupCoordinatorManager(org.apache.druid.server.lookup.cache.LookupCoordinatorManager) IntervalLoadRule(org.apache.druid.server.coordinator.rules.IntervalLoadRule) DruidServer(org.apache.druid.client.DruidServer) JacksonConfigManager(org.apache.druid.common.config.JacksonConfigManager) ForeverBroadcastDistributionRule(org.apache.druid.server.coordinator.rules.ForeverBroadcastDistributionRule) Event(org.apache.druid.java.util.emitter.core.Event) After(org.junit.After) Map(java.util.Map) NoopServiceAnnouncer(org.apache.druid.curator.discovery.NoopServiceAnnouncer) ServerType(org.apache.druid.server.coordination.ServerType) ForeverLoadRule(org.apache.druid.server.coordinator.rules.ForeverLoadRule) CoordinatorCustomDutyGroups(org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroups) ImmutableSet(com.google.common.collect.ImmutableSet) Execs(org.apache.druid.java.util.common.concurrent.Execs) ImmutableMap(com.google.common.collect.ImmutableMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) CoordinatorCustomDutyGroup(org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroup) Rule(org.apache.druid.server.coordinator.rules.Rule) CuratorUtils(org.apache.druid.curator.CuratorUtils) Executors(java.util.concurrent.Executors) CountDownLatch(java.util.concurrent.CountDownLatch) List(java.util.List) CuratorFramework(org.apache.curator.framework.CuratorFramework) KillSupervisorsCustomDuty(org.apache.druid.server.coordinator.duty.KillSupervisorsCustomDuty) ServiceEmitter(org.apache.druid.java.util.emitter.service.ServiceEmitter) PathChildrenCacheEvent(org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent) DruidLeaderSelector(org.apache.druid.discovery.DruidLeaderSelector) DataSegment(org.apache.druid.timeline.DataSegment) CoordinatorDuty(org.apache.druid.server.coordinator.duty.CoordinatorDuty) SegmentId(org.apache.druid.timeline.SegmentId) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) DruidDataSource(org.apache.druid.client.DruidDataSource) MetadataRuleManager(org.apache.druid.metadata.MetadataRuleManager) Intervals(org.apache.druid.java.util.common.Intervals) ZkPathsConfig(org.apache.druid.server.initialization.ZkPathsConfig) Duration(org.joda.time.Duration) ZkEnablementConfig(org.apache.druid.curator.ZkEnablementConfig) DataSourcesSnapshot(org.apache.druid.client.DataSourcesSnapshot) AtomicReference(java.util.concurrent.atomic.AtomicReference) ScheduledExecutorFactory(org.apache.druid.java.util.common.concurrent.ScheduledExecutorFactory) ConcurrentMap(java.util.concurrent.ConcurrentMap) HashSet(java.util.HashSet) Object2LongMap(it.unimi.dsi.fastutil.objects.Object2LongMap) ImmutableList(com.google.common.collect.ImmutableList) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) CompactSegments(org.apache.druid.server.coordinator.duty.CompactSegments) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) Nullable(javax.annotation.Nullable) Before(org.junit.Before) Capture(org.easymock.Capture) SegmentsMetadataManager(org.apache.druid.metadata.SegmentsMetadataManager) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Test(org.junit.Test) EasyMock(org.easymock.EasyMock) DefaultObjectMapper(org.apache.druid.jackson.DefaultObjectMapper) CuratorTestBase(org.apache.druid.curator.CuratorTestBase) 
Object2IntMap(it.unimi.dsi.fastutil.objects.Object2IntMap) CoordinatorCustomDuty(org.apache.druid.server.coordinator.duty.CoordinatorCustomDuty) DruidNode(org.apache.druid.server.DruidNode) PathChildrenCache(org.apache.curator.framework.recipes.cache.PathChildrenCache) Assert(org.junit.Assert) Collections(java.util.Collections) BatchServerInventoryView(org.apache.druid.client.BatchServerInventoryView) CompactSegments(org.apache.druid.server.coordinator.duty.CompactSegments) CoordinatorDuty(org.apache.druid.server.coordinator.duty.CoordinatorDuty) ZkPathsConfig(org.apache.druid.server.initialization.ZkPathsConfig) DruidNode(org.apache.druid.server.DruidNode) CoordinatorCustomDutyGroups(org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroups) NoopServiceAnnouncer(org.apache.druid.curator.discovery.NoopServiceAnnouncer) HashSet(java.util.HashSet) Test(org.junit.Test)

Aggregations

MetadataRuleManager (org.apache.druid.metadata.MetadataRuleManager): 7 usages
HashSet (java.util.HashSet): 5 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 5 usages
AtomicReference (java.util.concurrent.atomic.AtomicReference): 5 usages
PathChildrenCache (org.apache.curator.framework.recipes.cache.PathChildrenCache): 5 usages
DataSourcesSnapshot (org.apache.druid.client.DataSourcesSnapshot): 5 usages
JacksonConfigManager (org.apache.druid.common.config.JacksonConfigManager): 5 usages
NoopServiceAnnouncer (org.apache.druid.curator.discovery.NoopServiceAnnouncer): 5 usages
DefaultObjectMapper (org.apache.druid.jackson.DefaultObjectMapper): 5 usages
ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService): 4 usages
BatchServerInventoryView (org.apache.druid.client.BatchServerInventoryView): 4 usages
DruidServer (org.apache.druid.client.DruidServer): 4 usages
ImmutableDruidDataSource (org.apache.druid.client.ImmutableDruidDataSource): 4 usages
ImmutableDruidServer (org.apache.druid.client.ImmutableDruidServer): 4 usages
ScheduledExecutorFactory (org.apache.druid.java.util.common.concurrent.ScheduledExecutorFactory): 4 usages
SegmentsMetadataManager (org.apache.druid.metadata.SegmentsMetadataManager): 4 usages
DruidNode (org.apache.druid.server.DruidNode): 4 usages
CoordinatorCustomDutyGroups (org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroups): 4 usages
Rule (org.apache.druid.server.coordinator.rules.Rule): 4 usages
ZkPathsConfig (org.apache.druid.server.initialization.ZkPathsConfig): 4 usages