
Example 6 with DruidNode

Use of org.apache.druid.server.DruidNode in project druid by druid-io.

Class DruidCoordinatorTest, method testInitializeCompactSegmentsDutyWhenCustomDutyGroupDoesNotContainsCompactSegments.

@Test
public void testInitializeCompactSegmentsDutyWhenCustomDutyGroupDoesNotContainsCompactSegments() {
    CoordinatorCustomDutyGroup group = new CoordinatorCustomDutyGroup("group1", Duration.standardSeconds(1), ImmutableList.of(new KillSupervisorsCustomDuty(new Duration("PT1S"), null)));
    CoordinatorCustomDutyGroups customDutyGroups = new CoordinatorCustomDutyGroups(ImmutableSet.of(group));
    coordinator = new DruidCoordinator(druidCoordinatorConfig, new ZkPathsConfig() {

        @Override
        public String getBase() {
            return "druid";
        }
    }, null, segmentsMetadataManager, serverInventoryView, metadataRuleManager, () -> curator, serviceEmitter, scheduledExecutorFactory, null, null, new NoopServiceAnnouncer() {

        @Override
        public void announce(DruidNode node) {
            // count down when this coordinator becomes the leader
            leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node) {
            leaderUnannouncerLatch.countDown();
        }
    }, druidNode, loadManagementPeons, ImmutableSet.of(), new HashSet<>(), customDutyGroups, new CostBalancerStrategyFactory(), EasyMock.createNiceMock(LookupCoordinatorManager.class), new TestDruidLeaderSelector(), null, ZkEnablementConfig.ENABLED);
    // Since CompactSegments is not enabled in any custom duty group, it must be created in the indexing-service duties
    List<CoordinatorDuty> indexingDuties = coordinator.makeIndexingServiceDuties();
    Assert.assertTrue(indexingDuties.stream().anyMatch(coordinatorDuty -> coordinatorDuty instanceof CompactSegments));
    // CompactSegments should not exist in any custom duty group
    List<CompactSegments> compactSegmentsDutyFromCustomGroups = coordinator.getCompactSegmentsDutyFromCustomGroups();
    Assert.assertTrue(compactSegmentsDutyFromCustomGroups.isEmpty());
    // CompactSegments returned by this method should be created using the DruidCoordinatorConfig in the DruidCoordinator
    CompactSegments duty = coordinator.initializeCompactSegmentsDuty();
    Assert.assertNotNull(duty);
    Assert.assertEquals(druidCoordinatorConfig.getCompactionSkipLockedIntervals(), duty.isSkipLockedIntervals());
}
Also used : KillSupervisorsCustomDuty(org.apache.druid.server.coordinator.duty.KillSupervisorsCustomDuty) Duration(org.joda.time.Duration) CompactSegments(org.apache.druid.server.coordinator.duty.CompactSegments) CoordinatorDuty(org.apache.druid.server.coordinator.duty.CoordinatorDuty) ZkPathsConfig(org.apache.druid.server.initialization.ZkPathsConfig) DruidNode(org.apache.druid.server.DruidNode) CoordinatorCustomDutyGroup(org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroup) CoordinatorCustomDutyGroups(org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroups) NoopServiceAnnouncer(org.apache.druid.curator.discovery.NoopServiceAnnouncer) HashSet(java.util.HashSet) Test(org.junit.Test)
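
The anonymous NoopServiceAnnouncer above recurs in several of these tests: it turns leader election events into latch countdowns. A minimal sketch of how that pattern could be factored out; the class name is hypothetical and not part of the Druid codebase, but it overrides the same two methods the snippets override:

import java.util.concurrent.CountDownLatch;
import org.apache.druid.curator.discovery.NoopServiceAnnouncer;
import org.apache.druid.server.DruidNode;

// Hypothetical helper: counts down one latch when the coordinator announces itself
// (i.e. becomes the leader) and another when it unannounces (stops being leader).
class LatchingServiceAnnouncer extends NoopServiceAnnouncer {
    private final CountDownLatch announced;
    private final CountDownLatch unannounced;

    LatchingServiceAnnouncer(CountDownLatch announced, CountDownLatch unannounced) {
        this.announced = announced;
        this.unannounced = unannounced;
    }

    @Override
    public void announce(DruidNode node) {
        announced.countDown();
    }

    @Override
    public void unannounce(DruidNode node) {
        unannounced.countDown();
    }
}

With such a helper, the constructor call above could pass new LatchingServiceAnnouncer(leaderAnnouncerLatch, leaderUnannouncerLatch) instead of repeating the anonymous subclass in each test.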

Example 7 with DruidNode

Use of org.apache.druid.server.DruidNode in project druid by druid-io.

Class DruidCoordinatorTest, method testCoordinatorCustomDutyGroupsRunAsExpected.

@Test(timeout = 3000)
public void testCoordinatorCustomDutyGroupsRunAsExpected() throws Exception {
    // Some necessary setup to start the Coordinator
    JacksonConfigManager configManager = EasyMock.createNiceMock(JacksonConfigManager.class);
    EasyMock.expect(configManager.watch(EasyMock.eq(CoordinatorDynamicConfig.CONFIG_KEY), EasyMock.anyObject(Class.class), EasyMock.anyObject())).andReturn(new AtomicReference(CoordinatorDynamicConfig.builder().build())).anyTimes();
    EasyMock.expect(configManager.watch(EasyMock.eq(CoordinatorCompactionConfig.CONFIG_KEY), EasyMock.anyObject(Class.class), EasyMock.anyObject())).andReturn(new AtomicReference(CoordinatorCompactionConfig.empty())).anyTimes();
    EasyMock.replay(configManager);
    EasyMock.expect(segmentsMetadataManager.isPollingDatabasePeriodically()).andReturn(true).anyTimes();
    DruidDataSource dataSource = new DruidDataSource("dataSource1", Collections.emptyMap());
    DataSegment dataSegment = new DataSegment("dataSource1", Intervals.of("2010-01-01/P1D"), "v1", null, null, null, null, 0x9, 0);
    dataSource.addSegment(dataSegment);
    DataSourcesSnapshot dataSourcesSnapshot = new DataSourcesSnapshot(ImmutableMap.of(dataSource.getName(), dataSource.toImmutableDruidDataSource()));
    EasyMock.expect(segmentsMetadataManager.getSnapshotOfDataSourcesWithAllUsedSegments()).andReturn(dataSourcesSnapshot).anyTimes();
    EasyMock.replay(segmentsMetadataManager);
    EasyMock.expect(serverInventoryView.isStarted()).andReturn(true).anyTimes();
    EasyMock.replay(serverInventoryView);
    // Create CoordinatorCustomDutyGroups
    // We will have two groups and each group has one duty
    CountDownLatch latch1 = new CountDownLatch(1);
    CoordinatorCustomDuty duty1 = new CoordinatorCustomDuty() {

        @Override
        public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
            latch1.countDown();
            return params;
        }
    };
    CoordinatorCustomDutyGroup group1 = new CoordinatorCustomDutyGroup("group1", Duration.standardSeconds(1), ImmutableList.of(duty1));
    CountDownLatch latch2 = new CountDownLatch(1);
    CoordinatorCustomDuty duty2 = new CoordinatorCustomDuty() {

        @Override
        public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
            latch2.countDown();
            return params;
        }
    };
    CoordinatorCustomDutyGroup group2 = new CoordinatorCustomDutyGroup("group2", Duration.standardSeconds(1), ImmutableList.of(duty2));
    CoordinatorCustomDutyGroups groups = new CoordinatorCustomDutyGroups(ImmutableSet.of(group1, group2));
    coordinator = new DruidCoordinator(druidCoordinatorConfig, new ZkPathsConfig() {

        @Override
        public String getBase() {
            return "druid";
        }
    }, configManager, segmentsMetadataManager, serverInventoryView, metadataRuleManager, () -> curator, serviceEmitter, scheduledExecutorFactory, null, null, new NoopServiceAnnouncer() {

        @Override
        public void announce(DruidNode node) {
            // count down when this coordinator becomes the leader
            leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node) {
            leaderUnannouncerLatch.countDown();
        }
    }, druidNode, loadManagementPeons, null, new HashSet<>(), groups, new CostBalancerStrategyFactory(), EasyMock.createNiceMock(LookupCoordinatorManager.class), new TestDruidLeaderSelector(), null, ZkEnablementConfig.ENABLED);
    coordinator.start();
    // Wait until the group 1 duty has run and counted down latch1
    latch1.await();
    // Wait until the group 2 duty has run and counted down latch2
    latch2.await();
}
Also used : JacksonConfigManager(org.apache.druid.common.config.JacksonConfigManager) AtomicReference(java.util.concurrent.atomic.AtomicReference) CountDownLatch(java.util.concurrent.CountDownLatch) DruidDataSource(org.apache.druid.client.DruidDataSource) ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource) DataSegment(org.apache.druid.timeline.DataSegment) ZkPathsConfig(org.apache.druid.server.initialization.ZkPathsConfig) CoordinatorCustomDuty(org.apache.druid.server.coordinator.duty.CoordinatorCustomDuty) DruidNode(org.apache.druid.server.DruidNode) CoordinatorCustomDutyGroup(org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroup) DataSourcesSnapshot(org.apache.druid.client.DataSourcesSnapshot) CoordinatorCustomDutyGroups(org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroups) NoopServiceAnnouncer(org.apache.druid.curator.discovery.NoopServiceAnnouncer) HashSet(java.util.HashSet) Test(org.junit.Test)
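
The latch1.await() and latch2.await() calls block indefinitely, leaving the @Test(timeout = 3000) annotation as the only guard against a duty that never runs. A minimal, hypothetical variant (the helper name and timeout are assumptions, not part of the original test) that fails with a message instead of hanging:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.junit.Assert;

final class LatchAssertions {
    private LatchAssertions() {
    }

    // Wait a bounded amount of time for the latch, failing the test with a
    // descriptive message if it never counts down.
    static void awaitOrFail(CountDownLatch latch, String what) throws InterruptedException {
        Assert.assertTrue(what + " did not run in time", latch.await(3, TimeUnit.SECONDS));
    }
}

The end of the test would then read awaitOrFail(latch1, "group1 duty"); awaitOrFail(latch2, "group2 duty"); in place of the bare await() calls.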

Example 8 with DruidNode

Use of org.apache.druid.server.DruidNode in project druid by druid-io.

Class CuratorDruidCoordinatorTest, method setupView.

private void setupView() throws Exception {
    baseView = new BatchServerInventoryView(zkPathsConfig, curator, jsonMapper, Predicates.alwaysTrue(), "test") {

        @Override
        public void registerSegmentCallback(Executor exec, final SegmentCallback callback) {
            super.registerSegmentCallback(exec, new SegmentCallback() {

                @Override
                public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
                    CallbackAction res = callback.segmentAdded(server, segment);
                    segmentAddedLatch.countDown();
                    return res;
                }

                @Override
                public CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment) {
                    CallbackAction res = callback.segmentRemoved(server, segment);
                    segmentRemovedLatch.countDown();
                    return res;
                }

                @Override
                public CallbackAction segmentViewInitialized() {
                    CallbackAction res = callback.segmentViewInitialized();
                    segmentViewInitLatch.countDown();
                    return res;
                }
            });
        }
    };
    serverView = new CoordinatorServerView(baseView, new CoordinatorSegmentWatcherConfig());
    baseView.start();
    sourceLoadQueuePeon.start();
    destinationLoadQueuePeon.start();
    coordinator = new DruidCoordinator(druidCoordinatorConfig, new ZkPathsConfig() {

        @Override
        public String getBase() {
            return "druid";
        }
    }, configManager, segmentsMetadataManager, baseView, metadataRuleManager, () -> curator, new NoopServiceEmitter(), scheduledExecutorFactory, null, null, new NoopServiceAnnouncer() {

        @Override
        public void announce(DruidNode node) {
            // count down when this coordinator becomes the leader
            leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node) {
            leaderUnannouncerLatch.countDown();
        }
    }, druidNode, loadManagementPeons, null, null, new CoordinatorCustomDutyGroups(ImmutableSet.of()), new CostBalancerStrategyFactory(), EasyMock.createNiceMock(LookupCoordinatorManager.class), new TestDruidLeaderSelector(), null, ZkEnablementConfig.ENABLED);
}
Also used : BatchServerInventoryView(org.apache.druid.client.BatchServerInventoryView) NoopServiceEmitter(org.apache.druid.server.metrics.NoopServiceEmitter) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) DataSegment(org.apache.druid.timeline.DataSegment) CoordinatorSegmentWatcherConfig(org.apache.druid.client.CoordinatorSegmentWatcherConfig) Executor(java.util.concurrent.Executor) ZkPathsConfig(org.apache.druid.server.initialization.ZkPathsConfig) DruidNode(org.apache.druid.server.DruidNode) CoordinatorServerView(org.apache.druid.client.CoordinatorServerView) NoopServiceAnnouncer(org.apache.druid.curator.discovery.NoopServiceAnnouncer) CoordinatorCustomDutyGroups(org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroups)
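
setupView() only wires the latches; the test bodies that call it are not shown here. A hypothetical fragment of such a body, assuming segmentViewInitLatch and segmentAddedLatch are plain CountDownLatch fields on the test class (as the callbacks above imply) and that java.util.concurrent.TimeUnit and org.junit.Assert are imported:

// Hypothetical test-body fragment (field names taken from the callbacks above;
// counts and timeouts are illustrative):
segmentViewInitLatch = new CountDownLatch(1);
segmentAddedLatch = new CountDownLatch(2);      // e.g. expecting two segment announcements
setupView();
// ... announce test servers and segments in ZooKeeper here ...
Assert.assertTrue("inventory view never initialized",
        segmentViewInitLatch.await(30, TimeUnit.SECONDS));
Assert.assertTrue("segment announcements not observed",
        segmentAddedLatch.await(30, TimeUnit.SECONDS));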

Example 9 with DruidNode

Use of org.apache.druid.server.DruidNode in project druid by druid-io.

Class TieredBrokerHostSelectorTest, method setUp.

@Before
public void setUp() {
    druidNodeDiscoveryProvider = EasyMock.createStrictMock(DruidNodeDiscoveryProvider.class);
    node1 = new DiscoveryDruidNode(new DruidNode("hotBroker", "hotHost", false, 8080, null, true, false), NodeRole.BROKER, ImmutableMap.of());
    node2 = new DiscoveryDruidNode(new DruidNode("coldBroker", "coldHost1", false, 8080, null, true, false), NodeRole.BROKER, ImmutableMap.of());
    node3 = new DiscoveryDruidNode(new DruidNode("coldBroker", "coldHost2", false, 8080, null, true, false), NodeRole.BROKER, ImmutableMap.of());
    druidNodeDiscovery = new DruidNodeDiscovery() {

        @Override
        public Collection<DiscoveryDruidNode> getAllNodes() {
            return ImmutableSet.of(node1, node2, node3);
        }

        @Override
        public void registerListener(Listener listener) {
            listener.nodesAdded(ImmutableList.of(node1, node2, node3));
            listener.nodeViewInitialized();
        }
    };
    EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.BROKER)).andReturn(druidNodeDiscovery);
    EasyMock.replay(druidNodeDiscoveryProvider);
    brokerSelector = new TieredBrokerHostSelector(new TestRuleManager(null, null), new TieredBrokerConfig() {

        @Override
        public LinkedHashMap<String, String> getTierToBrokerMap() {
            return new LinkedHashMap<String, String>(ImmutableMap.of("hot", "hotBroker", "medium", "mediumBroker", DruidServer.DEFAULT_TIER, "coldBroker"));
        }

        @Override
        public String getDefaultBrokerServiceName() {
            return "hotBroker";
        }
    }, druidNodeDiscoveryProvider, Arrays.asList(new ManualTieredBrokerSelectorStrategy(null), new TimeBoundaryTieredBrokerSelectorStrategy(), new PriorityTieredBrokerSelectorStrategy(0, 1)));
    brokerSelector.start();
}
Also used : DruidNodeDiscovery(org.apache.druid.discovery.DruidNodeDiscovery) LinkedHashMap(java.util.LinkedHashMap) DiscoveryDruidNode(org.apache.druid.discovery.DiscoveryDruidNode) DruidNodeDiscoveryProvider(org.apache.druid.discovery.DruidNodeDiscoveryProvider) Collection(java.util.Collection) DiscoveryDruidNode(org.apache.druid.discovery.DiscoveryDruidNode) DruidNode(org.apache.druid.server.DruidNode) Before(org.junit.Before)
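
Each DiscoveryDruidNode above wraps a seven-argument DruidNode. A sketch of that call with the argument roles annotated; the roles are inferred from how these snippets use it, so treat them as assumptions and check DruidNode's source for the authoritative signature:

// Argument roles inferred from the call sites above (assumptions, not the
// documented signature):
DruidNode node = new DruidNode(
        "hotBroker",   // service name the node announces itself under
        "hotHost",     // host, with no port embedded
        false,         // bind-on-host flag (assumed)
        8080,          // plaintext port
        null,          // TLS port (unused in these tests)
        true,          // plaintext enabled
        false);        // TLS disabled

The TieredBrokerHostSelector then matches brokers by that service name ("hotBroker", "coldBroker") against the map returned by getTierToBrokerMap().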

Example 10 with DruidNode

Use of org.apache.druid.server.DruidNode in project druid by druid-io.

Class CuratorDruidLeaderSelectorTest, method testSimple.

@Test(timeout = 60_000L)
public void testSimple() throws Exception {
    curator.start();
    curator.blockUntilConnected();
    AtomicReference<String> currLeader = new AtomicReference<>();
    String latchPath = "/testlatchPath";
    CuratorDruidLeaderSelector leaderSelector1 = new CuratorDruidLeaderSelector(curator, new DruidNode("s1", "h1", false, 8080, null, true, false), latchPath);
    leaderSelector1.registerListener(new DruidLeaderSelector.Listener() {

        @Override
        public void becomeLeader() {
            logger.info("listener1.becomeLeader().");
            currLeader.set("h1:8080");
            throw new RuntimeException("I am Rogue.");
        }

        @Override
        public void stopBeingLeader() {
            logger.info("listener1.stopBeingLeader().");
            throw new RuntimeException("I said I am Rogue.");
        }
    });
    while (!"h1:8080".equals(currLeader.get())) {
        logger.info("current leader = [%s]", currLeader.get());
        Thread.sleep(100);
    }
    Assert.assertTrue(leaderSelector1.localTerm() >= 1);
    CuratorDruidLeaderSelector leaderSelector2 = new CuratorDruidLeaderSelector(curator, new DruidNode("s2", "h2", false, 8080, null, true, false), latchPath);
    leaderSelector2.registerListener(new DruidLeaderSelector.Listener() {

        private AtomicInteger attemptCount = new AtomicInteger(0);

        @Override
        public void becomeLeader() {
            logger.info("listener2.becomeLeader().");
            if (attemptCount.getAndIncrement() < 1) {
                throw new RuntimeException("will become leader on next attempt.");
            }
            currLeader.set("h2:8080");
        }

        @Override
        public void stopBeingLeader() {
            logger.info("listener2.stopBeingLeader().");
            throw new RuntimeException("I am broken.");
        }
    });
    while (!"h2:8080".equals(currLeader.get())) {
        logger.info("current leader = [%s]", currLeader.get());
        Thread.sleep(100);
    }
    Assert.assertTrue(leaderSelector2.isLeader());
    Assert.assertEquals("http://h2:8080", leaderSelector1.getCurrentLeader());
    Assert.assertEquals(2, leaderSelector2.localTerm());
    CuratorDruidLeaderSelector leaderSelector3 = new CuratorDruidLeaderSelector(curator, new DruidNode("s3", "h3", false, 8080, null, true, false), latchPath);
    leaderSelector3.registerListener(new DruidLeaderSelector.Listener() {

        @Override
        public void becomeLeader() {
            logger.info("listener3.becomeLeader().");
            currLeader.set("h3:8080");
        }

        @Override
        public void stopBeingLeader() {
            logger.info("listener3.stopBeingLeader().");
        }
    });
    leaderSelector2.unregisterListener();
    while (!"h3:8080".equals(currLeader.get())) {
        logger.info("current leader = [%s]", currLeader.get());
        Thread.sleep(100);
    }
    Assert.assertTrue(leaderSelector3.isLeader());
    Assert.assertEquals("http://h3:8080", leaderSelector1.getCurrentLeader());
    Assert.assertEquals(1, leaderSelector3.localTerm());
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) AtomicReference(java.util.concurrent.atomic.AtomicReference) DruidNode(org.apache.druid.server.DruidNode) DruidLeaderSelector(org.apache.druid.discovery.DruidLeaderSelector) Test(org.junit.Test)
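
The test ends with two of the three selectors still registered against the same latch path. A hypothetical teardown sketch, using only calls already visible in the snippet plus CuratorFramework.close(), on the assumption that no base-class @After method already releases them:

// Hypothetical cleanup (not part of the original test):
leaderSelector1.unregisterListener();
leaderSelector3.unregisterListener();   // leaderSelector2 was already unregistered above
curator.close();                        // releases the ZooKeeper connection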

Aggregations

DruidNode (org.apache.druid.server.DruidNode): 61 usages
Test (org.junit.Test): 41 usages
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 25 usages
Self (org.apache.druid.guice.annotations.Self): 19 usages
ImmutableList (com.google.common.collect.ImmutableList): 18 usages
List (java.util.List): 18 usages
DefaultObjectMapper (org.apache.druid.jackson.DefaultObjectMapper): 16 usages
ZkPathsConfig (org.apache.druid.server.initialization.ZkPathsConfig): 16 usages
Binder (com.google.inject.Binder): 15 usages
Injector (com.google.inject.Injector): 15 usages
AtomicReference (java.util.concurrent.atomic.AtomicReference): 15 usages
DiscoveryDruidNode (org.apache.druid.discovery.DiscoveryDruidNode): 14 usages
HashSet (java.util.HashSet): 12 usages
ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService): 12 usages
DruidNodeDiscoveryProvider (org.apache.druid.discovery.DruidNodeDiscoveryProvider): 12 usages
TaskStatus (org.apache.druid.indexer.TaskStatus): 12 usages
Module (com.google.inject.Module): 11 usages
CuratorFramework (org.apache.curator.framework.CuratorFramework): 11 usages
NoopTask (org.apache.druid.indexing.common.task.NoopTask): 11 usages
Task (org.apache.druid.indexing.common.task.Task): 11 usages