Example 1 with DruidCluster

Use of io.druid.server.coordinator.DruidCluster in project druid by druid-io.

From the class DruidCoordinatorCleanupUnneeded, method run:

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    CoordinatorStats stats = new CoordinatorStats();
    Set<DataSegment> availableSegments = params.getAvailableSegments();
    DruidCluster cluster = params.getDruidCluster();
    // Skip the cleanup when availableSegments is empty. This prevents a race condition in which
    // the coordinator would drop all segments if it started running cleanup before it finished
    // polling the metadata storage for available segments for the first time.
    if (!availableSegments.isEmpty()) {
        for (MinMaxPriorityQueue<ServerHolder> serverHolders : cluster.getSortedServersByTier()) {
            for (ServerHolder serverHolder : serverHolders) {
                ImmutableDruidServer server = serverHolder.getServer();
                for (ImmutableDruidDataSource dataSource : server.getDataSources()) {
                    for (DataSegment segment : dataSource.getSegments()) {
                        if (!availableSegments.contains(segment)) {
                            LoadQueuePeon queuePeon = params.getLoadManagementPeons().get(server.getName());
                            if (!queuePeon.getSegmentsToDrop().contains(segment)) {
                                queuePeon.dropSegment(segment, new LoadPeonCallback() {

                                    // No-op callback: nothing further needs to happen once the drop completes.
                                    @Override
                                    public void execute() {
                                    }
                                });
                                stats.addToTieredStat("unneededCount", server.getTier(), 1);
                            }
                        }
                    }
                }
            }
        }
    } else {
        log.info("Found 0 availableSegments, skipping the cleanup of segments from historicals. This is done to prevent a race condition in which the coordinator would drop all segments if it started running cleanup before it finished polling the metadata storage for available segments for the first time.");
    }
    return params.buildFromExisting().withCoordinatorStats(stats).build();
}
Also used: CoordinatorStats (io.druid.server.coordinator.CoordinatorStats), LoadPeonCallback (io.druid.server.coordinator.LoadPeonCallback), ImmutableDruidDataSource (io.druid.client.ImmutableDruidDataSource), ServerHolder (io.druid.server.coordinator.ServerHolder), LoadQueuePeon (io.druid.server.coordinator.LoadQueuePeon), DruidCluster (io.druid.server.coordinator.DruidCluster), DataSegment (io.druid.timeline.DataSegment), ImmutableDruidServer (io.druid.client.ImmutableDruidServer)
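
The nested loop above is the canonical way to walk a DruidCluster: tier, then server, then data source, then segment. As a minimal sketch, relying only on the accessors the snippet itself exercises (getSortedServersByTier, getServer, getDataSources, getSegments, getTier) and using a hypothetical class name, the same traversal can tally how many segments each tier is serving:

import com.google.common.collect.MinMaxPriorityQueue;
import io.druid.client.ImmutableDruidDataSource;
import io.druid.client.ImmutableDruidServer;
import io.druid.server.coordinator.DruidCluster;
import io.druid.server.coordinator.ServerHolder;
import io.druid.timeline.DataSegment;
import java.util.HashMap;
import java.util.Map;

public class TierSegmentCounter {

    // Walks tier -> server -> data source -> segment, exactly as the cleanup duty
    // above does, and tallies the number of served segments per tier.
    public static Map<String, Long> countServedSegmentsPerTier(DruidCluster cluster) {
        Map<String, Long> counts = new HashMap<>();
        for (MinMaxPriorityQueue<ServerHolder> serverHolders : cluster.getSortedServersByTier()) {
            for (ServerHolder serverHolder : serverHolders) {
                ImmutableDruidServer server = serverHolder.getServer();
                long served = 0;
                for (DataSegment ignored : collectSegments(server)) {
                    served++;
                }
                Long previous = counts.get(server.getTier());
                counts.put(server.getTier(), (previous == null ? 0L : previous) + served);
            }
        }
        return counts;
    }

    // Flattens a server's data sources into one iterable of segments.
    private static Iterable<DataSegment> collectSegments(ImmutableDruidServer server) {
        java.util.List<DataSegment> segments = new java.util.ArrayList<>();
        for (ImmutableDruidDataSource dataSource : server.getDataSources()) {
            for (DataSegment segment : dataSource.getSegments()) {
                segments.add(segment);
            }
        }
        return segments;
    }
}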

Example 2 with DruidCluster

Use of io.druid.server.coordinator.DruidCluster in project druid by druid-io.

From the class DruidCoordinatorRuleRunner, method run:

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    replicatorThrottler.updateParams(coordinator.getDynamicConfigs().getReplicationThrottleLimit(), coordinator.getDynamicConfigs().getReplicantLifetime());
    CoordinatorStats stats = new CoordinatorStats();
    DruidCluster cluster = params.getDruidCluster();
    if (cluster.isEmpty()) {
        log.warn("Uh... I have no servers. Not assigning anything...");
        return params;
    }
    // find available segments which are not overshadowed by other segments in DB
    // only those would need to be loaded/dropped
    // anything overshadowed by served segments is dropped automatically by DruidCoordinatorCleanupOvershadowed
    Map<String, VersionedIntervalTimeline<String, DataSegment>> timelines = new HashMap<>();
    for (DataSegment segment : params.getAvailableSegments()) {
        VersionedIntervalTimeline<String, DataSegment> timeline = timelines.get(segment.getDataSource());
        if (timeline == null) {
            timeline = new VersionedIntervalTimeline<>(Comparators.comparable());
            timelines.put(segment.getDataSource(), timeline);
        }
        timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segment));
    }
    Set<DataSegment> overshadowed = new HashSet<>();
    for (VersionedIntervalTimeline<String, DataSegment> timeline : timelines.values()) {
        for (TimelineObjectHolder<String, DataSegment> holder : timeline.findOvershadowed()) {
            for (DataSegment dataSegment : holder.getObject().payloads()) {
                overshadowed.add(dataSegment);
            }
        }
    }
    Set<DataSegment> nonOvershadowed = new HashSet<>();
    for (DataSegment dataSegment : params.getAvailableSegments()) {
        if (!overshadowed.contains(dataSegment)) {
            nonOvershadowed.add(dataSegment);
        }
    }
    for (String tier : cluster.getTierNames()) {
        replicatorThrottler.updateReplicationState(tier);
    }
    DruidCoordinatorRuntimeParams paramsWithReplicationManager = params
        .buildFromExistingWithoutAvailableSegments()
        .withReplicationManager(replicatorThrottler)
        .withAvailableSegments(nonOvershadowed)
        .build();
    // Run through all matched rules for available segments
    DateTime now = new DateTime();
    MetadataRuleManager databaseRuleManager = paramsWithReplicationManager.getDatabaseRuleManager();
    final List<String> segmentsWithMissingRules = Lists.newArrayListWithCapacity(MAX_MISSING_RULES);
    int missingRules = 0;
    for (DataSegment segment : paramsWithReplicationManager.getAvailableSegments()) {
        List<Rule> rules = databaseRuleManager.getRulesWithDefault(segment.getDataSource());
        boolean foundMatchingRule = false;
        for (Rule rule : rules) {
            if (rule.appliesTo(segment, now)) {
                stats.accumulate(rule.run(coordinator, paramsWithReplicationManager, segment));
                foundMatchingRule = true;
                break;
            }
        }
        if (!foundMatchingRule) {
            if (segmentsWithMissingRules.size() < MAX_MISSING_RULES) {
                segmentsWithMissingRules.add(segment.getIdentifier());
            }
            missingRules++;
        }
    }
    if (!segmentsWithMissingRules.isEmpty()) {
        log.makeAlert("Unable to find matching rules!")
           .addData("segmentsWithMissingRulesCount", missingRules)
           .addData("segmentsWithMissingRules", segmentsWithMissingRules)
           .emit();
    }
    return paramsWithReplicationManager
        .buildFromExistingWithoutAvailableSegments()
        .withCoordinatorStats(stats)
        .withAvailableSegments(params.getAvailableSegments())
        .build();
}
Also used: DruidCoordinatorRuntimeParams (io.druid.server.coordinator.DruidCoordinatorRuntimeParams), CoordinatorStats (io.druid.server.coordinator.CoordinatorStats), MetadataRuleManager (io.druid.metadata.MetadataRuleManager), HashMap (java.util.HashMap), DruidCluster (io.druid.server.coordinator.DruidCluster), DataSegment (io.druid.timeline.DataSegment), DateTime (org.joda.time.DateTime), VersionedIntervalTimeline (io.druid.timeline.VersionedIntervalTimeline), Rule (io.druid.server.coordinator.rules.Rule), HashSet (java.util.HashSet)
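
The middle of this method, building a per-datasource timeline and collecting overshadowed segments, is a self-contained computation. A minimal sketch of just that step, assuming only the timeline calls the snippet itself makes; the class name is hypothetical, and Guava's Ordering.natural() stands in for the Comparators.comparable() used above, whose package is not listed in the imports:

import com.google.common.collect.Ordering;
import io.druid.timeline.DataSegment;
import io.druid.timeline.TimelineObjectHolder;
import io.druid.timeline.VersionedIntervalTimeline;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class OvershadowedSegments {

    // Builds one timeline per data source and returns every segment that is
    // overshadowed by a newer version covering the same interval.
    public static Set<DataSegment> find(Iterable<DataSegment> segments) {
        Map<String, VersionedIntervalTimeline<String, DataSegment>> timelines = new HashMap<>();
        for (DataSegment segment : segments) {
            VersionedIntervalTimeline<String, DataSegment> timeline = timelines.get(segment.getDataSource());
            if (timeline == null) {
                // Assumption: Ordering.natural() matches the natural string ordering
                // that Comparators.comparable() imposes in the coordinator code.
                timeline = new VersionedIntervalTimeline<>(Ordering.<String>natural());
                timelines.put(segment.getDataSource(), timeline);
            }
            timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segment));
        }
        Set<DataSegment> overshadowed = new HashSet<>();
        for (VersionedIntervalTimeline<String, DataSegment> timeline : timelines.values()) {
            for (TimelineObjectHolder<String, DataSegment> holder : timeline.findOvershadowed()) {
                for (DataSegment dataSegment : holder.getObject().payloads()) {
                    overshadowed.add(dataSegment);
                }
            }
        }
        return overshadowed;
    }
}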

Example 3 with DruidCluster

Use of io.druid.server.coordinator.DruidCluster in project druid by druid-io.

From the class DruidCoordinatorCleanupOvershadowedTest, method testRun:

@Test
public void testRun() {
    druidCoordinatorCleanupOvershadowed = new DruidCoordinatorCleanupOvershadowed(coordinator);
    availableSegments = ImmutableList.of(segmentV1, segmentV0, segmentV2);
    druidCluster = new DruidCluster(
        ImmutableMap.of(
            "normal",
            MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse())
                .create(Arrays.asList(new ServerHolder(druidServer, mockPeon)))
        )
    );
    EasyMock.expect(druidServer.getDataSources()).andReturn(ImmutableList.of(druidDataSource)).anyTimes();
    EasyMock.expect(druidDataSource.getSegments()).andReturn(ImmutableSet.<DataSegment>of(segmentV1, segmentV2)).anyTimes();
    EasyMock.expect(druidDataSource.getName()).andReturn("test").anyTimes();
    coordinator.removeSegment(segmentV1);
    coordinator.removeSegment(segmentV0);
    EasyMock.expectLastCall();
    EasyMock.replay(mockPeon, coordinator, druidServer, druidDataSource);
    DruidCoordinatorRuntimeParams params = DruidCoordinatorRuntimeParams
        .newBuilder()
        .withAvailableSegments(availableSegments)
        .withCoordinatorStats(new CoordinatorStats())
        .withDruidCluster(druidCluster)
        .build();
    druidCoordinatorCleanupOvershadowed.run(params);
    EasyMock.verify(coordinator, druidDataSource, druidServer);
}
Also used: DruidCoordinatorRuntimeParams (io.druid.server.coordinator.DruidCoordinatorRuntimeParams), CoordinatorStats (io.druid.server.coordinator.CoordinatorStats), ServerHolder (io.druid.server.coordinator.ServerHolder), DruidCluster (io.druid.server.coordinator.DruidCluster), DataSegment (io.druid.timeline.DataSegment), Test (org.junit.Test)
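
The single-tier cluster construction in this test recurs almost verbatim in the two LoadRuleTest examples below. A hypothetical helper, sketched purely from the DruidCluster constructor and queue setup these tests already use, would keep that boilerplate in one place:

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.MinMaxPriorityQueue;
import com.google.common.collect.Ordering;
import io.druid.server.coordinator.DruidCluster;
import io.druid.server.coordinator.ServerHolder;
import java.util.Arrays;

public class TestClusterBuilder {

    // Hypothetical helper: wraps the holders of a single tier in the
    // reverse-natural-ordered MinMaxPriorityQueue that DruidCluster expects.
    public static DruidCluster singleTierCluster(String tier, ServerHolder... holders) {
        return new DruidCluster(
            ImmutableMap.of(
                tier,
                MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse())
                    .create(Arrays.asList(holders))
            )
        );
    }
}

With such a helper, the cluster above would read druidCluster = TestClusterBuilder.singleTierCluster("normal", new ServerHolder(druidServer, mockPeon));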

Example 4 with DruidCluster

Use of io.druid.server.coordinator.DruidCluster in project druid by druid-io.

From the class LoadRuleTest, method testDrop:

@Test
public void testDrop() throws Exception {
    mockPeon.dropSegment(EasyMock.<DataSegment>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    EasyMock.expect(mockPeon.getSegmentsToLoad()).andReturn(Sets.<DataSegment>newHashSet()).atLeastOnce();
    EasyMock.expect(mockPeon.getLoadQueueSize()).andReturn(0L).anyTimes();
    EasyMock.replay(mockPeon);
    LoadRule rule = new LoadRule() {

        private final Map<String, Integer> tiers = ImmutableMap.of("hot", 0, DruidServer.DEFAULT_TIER, 0);

        @Override
        public Map<String, Integer> getTieredReplicants() {
            return tiers;
        }

        @Override
        public int getNumReplicants(String tier) {
            return tiers.get(tier);
        }

        @Override
        public String getType() {
            return "test";
        }

        @Override
        public boolean appliesTo(DataSegment segment, DateTime referenceTimestamp) {
            return true;
        }

        @Override
        public boolean appliesTo(Interval interval, DateTime referenceTimestamp) {
            return true;
        }
    };
    DruidServer server1 = new DruidServer("serverHot", "hostHot", 1000, "historical", "hot", 0);
    server1.addDataSegment(segment.getIdentifier(), segment);
    DruidServer server2 = new DruidServer("serverNorm", "hostNorm", 1000, "historical", DruidServer.DEFAULT_TIER, 0);
    server2.addDataSegment(segment.getIdentifier(), segment);
    DruidCluster druidCluster = new DruidCluster(
        ImmutableMap.of(
            "hot",
            MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse())
                .create(Arrays.asList(new ServerHolder(server1.toImmutableDruidServer(), mockPeon))),
            DruidServer.DEFAULT_TIER,
            MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse())
                .create(Arrays.asList(new ServerHolder(server2.toImmutableDruidServer(), mockPeon)))
        )
    );
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
    CoordinatorStats stats = rule.run(
        null,
        DruidCoordinatorRuntimeParams.newBuilder()
            .withDruidCluster(druidCluster)
            .withSegmentReplicantLookup(SegmentReplicantLookup.make(druidCluster))
            .withReplicationManager(throttler)
            .withBalancerStrategy(balancerStrategy)
            .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
            .withAvailableSegments(Arrays.asList(segment))
            .build(),
        segment
    );
    Assert.assertTrue(stats.getPerTierStats().get("droppedCount").get("hot").get() == 1);
    Assert.assertTrue(stats.getPerTierStats().get("droppedCount").get(DruidServer.DEFAULT_TIER).get() == 1);
    exec.shutdown();
}
Also used: CoordinatorStats (io.druid.server.coordinator.CoordinatorStats), DruidServer (io.druid.client.DruidServer), DruidCluster (io.druid.server.coordinator.DruidCluster), DataSegment (io.druid.timeline.DataSegment), DateTime (org.joda.time.DateTime), BalancerStrategy (io.druid.server.coordinator.BalancerStrategy), ServerHolder (io.druid.server.coordinator.ServerHolder), CostBalancerStrategyFactory (io.druid.server.coordinator.CostBalancerStrategyFactory), ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService), Map (java.util.Map), ImmutableMap (com.google.common.collect.ImmutableMap), Interval (org.joda.time.Interval), Test (org.junit.Test)
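
One fragile detail in this test: if either assertion fails, the trailing exec.shutdown() never runs and the thread pool leaks. A minimal sketch of the tail of the test, assuming rule, throttler, druidCluster, and segment as built above, with the shutdown moved into a finally block:

    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    try {
        BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
        CoordinatorStats stats = rule.run(
            null,
            DruidCoordinatorRuntimeParams.newBuilder()
                .withDruidCluster(druidCluster)
                .withSegmentReplicantLookup(SegmentReplicantLookup.make(druidCluster))
                .withReplicationManager(throttler)
                .withBalancerStrategy(balancerStrategy)
                .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
                .withAvailableSegments(Arrays.asList(segment))
                .build(),
            segment
        );
        Assert.assertTrue(stats.getPerTierStats().get("droppedCount").get("hot").get() == 1);
        Assert.assertTrue(stats.getPerTierStats().get("droppedCount").get(DruidServer.DEFAULT_TIER).get() == 1);
    } finally {
        // Runs even when an assertion above throws, so the pool is always released.
        exec.shutdown();
    }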

Example 5 with DruidCluster

Use of io.druid.server.coordinator.DruidCluster in project druid by druid-io.

From the class LoadRuleTest, method testLoadWithNonExistentTier:

@Test
public void testLoadWithNonExistentTier() throws Exception {
    mockPeon.loadSegment(EasyMock.<DataSegment>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    EasyMock.expect(mockPeon.getSegmentsToLoad()).andReturn(Sets.<DataSegment>newHashSet()).atLeastOnce();
    EasyMock.expect(mockPeon.getLoadQueueSize()).andReturn(0L).atLeastOnce();
    EasyMock.replay(mockPeon);
    LoadRule rule = new LoadRule() {

        private final Map<String, Integer> tiers = ImmutableMap.of("nonExistentTier", 1, "hot", 1);

        @Override
        public Map<String, Integer> getTieredReplicants() {
            return tiers;
        }

        @Override
        public int getNumReplicants(String tier) {
            return tiers.get(tier);
        }

        @Override
        public String getType() {
            return "test";
        }

        @Override
        public boolean appliesTo(DataSegment segment, DateTime referenceTimestamp) {
            return true;
        }

        @Override
        public boolean appliesTo(Interval interval, DateTime referenceTimestamp) {
            return true;
        }
    };
    DruidCluster druidCluster = new DruidCluster(
        ImmutableMap.of(
            "hot",
            MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse())
                .create(Arrays.asList(new ServerHolder(
                    new DruidServer("serverHot", "hostHot", 1000, "historical", "hot", 0).toImmutableDruidServer(),
                    mockPeon
                )))
        )
    );
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
    CoordinatorStats stats = rule.run(
        null,
        DruidCoordinatorRuntimeParams.newBuilder()
            .withDruidCluster(druidCluster)
            .withSegmentReplicantLookup(SegmentReplicantLookup.make(new DruidCluster()))
            .withReplicationManager(throttler)
            .withBalancerStrategy(balancerStrategy)
            .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
            .withAvailableSegments(Arrays.asList(segment))
            .build(),
        segment
    );
    Assert.assertTrue(stats.getPerTierStats().get("assignedCount").get("hot").get() == 1);
    exec.shutdown();
}
Also used: CoordinatorStats (io.druid.server.coordinator.CoordinatorStats), DruidServer (io.druid.client.DruidServer), DruidCluster (io.druid.server.coordinator.DruidCluster), DataSegment (io.druid.timeline.DataSegment), DateTime (org.joda.time.DateTime), BalancerStrategy (io.druid.server.coordinator.BalancerStrategy), ServerHolder (io.druid.server.coordinator.ServerHolder), CostBalancerStrategyFactory (io.druid.server.coordinator.CostBalancerStrategyFactory), ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService), Map (java.util.Map), ImmutableMap (com.google.common.collect.ImmutableMap), Interval (org.joda.time.Interval), Test (org.junit.Test)
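
Both LoadRuleTest examples define near-identical anonymous LoadRule subclasses that differ only in their tier map. A hypothetical factory, sketched from nothing beyond the overrides shown in those tests, keeps the duplication in one place:

import io.druid.server.coordinator.rules.LoadRule;
import io.druid.timeline.DataSegment;
import java.util.Map;
import org.joda.time.DateTime;
import org.joda.time.Interval;

public class TestRules {

    // Hypothetical helper: a LoadRule that applies to every segment and interval
    // and replicates according to the given tier map, mirroring the inline
    // anonymous classes in the two tests above.
    public static LoadRule alwaysApplies(final Map<String, Integer> tiers) {
        return new LoadRule() {

            @Override
            public Map<String, Integer> getTieredReplicants() {
                return tiers;
            }

            @Override
            public int getNumReplicants(String tier) {
                return tiers.get(tier);
            }

            @Override
            public String getType() {
                return "test";
            }

            @Override
            public boolean appliesTo(DataSegment segment, DateTime referenceTimestamp) {
                return true;
            }

            @Override
            public boolean appliesTo(Interval interval, DateTime referenceTimestamp) {
                return true;
            }
        };
    }
}

Example 5's rule then collapses to LoadRule rule = TestRules.alwaysApplies(ImmutableMap.of("nonExistentTier", 1, "hot", 1));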

Aggregations

CoordinatorStats (io.druid.server.coordinator.CoordinatorStats): 9
DruidCluster (io.druid.server.coordinator.DruidCluster): 9
DataSegment (io.druid.timeline.DataSegment): 9
ServerHolder (io.druid.server.coordinator.ServerHolder): 8
Map (java.util.Map): 5
DateTime (org.joda.time.DateTime): 5
Test (org.junit.Test): 5
ImmutableMap (com.google.common.collect.ImmutableMap): 4
ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService): 4
DruidServer (io.druid.client.DruidServer): 4
BalancerStrategy (io.druid.server.coordinator.BalancerStrategy): 4
CostBalancerStrategyFactory (io.druid.server.coordinator.CostBalancerStrategyFactory): 4
Interval (org.joda.time.Interval): 4
ImmutableDruidServer (io.druid.client.ImmutableDruidServer): 3
ImmutableDruidDataSource (io.druid.client.ImmutableDruidDataSource): 2
DruidCoordinatorRuntimeParams (io.druid.server.coordinator.DruidCoordinatorRuntimeParams): 2
LoadQueuePeon (io.druid.server.coordinator.LoadQueuePeon): 2
VersionedIntervalTimeline (io.druid.timeline.VersionedIntervalTimeline): 2
ServiceEmitter (com.metamx.emitter.service.ServiceEmitter): 1
ServiceMetricEvent (com.metamx.emitter.service.ServiceMetricEvent): 1