
Example 1 with BalancerStrategy

Use of io.druid.server.coordinator.BalancerStrategy in project druid by druid-io.

From the class LoadRuleTest, method testDrop.

@Test
public void testDrop() throws Exception {
    mockPeon.dropSegment(EasyMock.<DataSegment>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    EasyMock.expect(mockPeon.getSegmentsToLoad()).andReturn(Sets.<DataSegment>newHashSet()).atLeastOnce();
    EasyMock.expect(mockPeon.getLoadQueueSize()).andReturn(0L).anyTimes();
    EasyMock.replay(mockPeon);
    LoadRule rule = new LoadRule() {

        private final Map<String, Integer> tiers = ImmutableMap.of("hot", 0, DruidServer.DEFAULT_TIER, 0);

        @Override
        public Map<String, Integer> getTieredReplicants() {
            return tiers;
        }

        @Override
        public int getNumReplicants(String tier) {
            return tiers.get(tier);
        }

        @Override
        public String getType() {
            return "test";
        }

        @Override
        public boolean appliesTo(DataSegment segment, DateTime referenceTimestamp) {
            return true;
        }

        @Override
        public boolean appliesTo(Interval interval, DateTime referenceTimestamp) {
            return true;
        }
    };
    DruidServer server1 = new DruidServer("serverHot", "hostHot", 1000, "historical", "hot", 0);
    server1.addDataSegment(segment.getIdentifier(), segment);
    DruidServer server2 = new DruidServer("serverNorm", "hostNorm", 1000, "historical", DruidServer.DEFAULT_TIER, 0);
    server2.addDataSegment(segment.getIdentifier(), segment);
    DruidCluster druidCluster = new DruidCluster(ImmutableMap.of("hot", MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse()).create(Arrays.asList(new ServerHolder(server1.toImmutableDruidServer(), mockPeon))), DruidServer.DEFAULT_TIER, MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse()).create(Arrays.asList(new ServerHolder(server2.toImmutableDruidServer(), mockPeon)))));
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
    CoordinatorStats stats = rule.run(
        null,
        DruidCoordinatorRuntimeParams.newBuilder()
            .withDruidCluster(druidCluster)
            .withSegmentReplicantLookup(SegmentReplicantLookup.make(druidCluster))
            .withReplicationManager(throttler)
            .withBalancerStrategy(balancerStrategy)
            .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
            .withAvailableSegments(Arrays.asList(segment))
            .build(),
        segment
    );
    Assert.assertTrue(stats.getPerTierStats().get("droppedCount").get("hot").get() == 1);
    Assert.assertTrue(stats.getPerTierStats().get("droppedCount").get(DruidServer.DEFAULT_TIER).get() == 1);
    exec.shutdown();
}
Also used: CoordinatorStats (io.druid.server.coordinator.CoordinatorStats), DruidServer (io.druid.client.DruidServer), DruidCluster (io.druid.server.coordinator.DruidCluster), DataSegment (io.druid.timeline.DataSegment), DateTime (org.joda.time.DateTime), BalancerStrategy (io.druid.server.coordinator.BalancerStrategy), ServerHolder (io.druid.server.coordinator.ServerHolder), CostBalancerStrategyFactory (io.druid.server.coordinator.CostBalancerStrategyFactory), ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService), Map (java.util.Map), ImmutableMap (com.google.common.collect.ImmutableMap), Interval (org.joda.time.Interval), Test (org.junit.Test)
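
All of the LoadRuleTest examples below share the same strategy setup and teardown. The following is a minimal sketch of just that wiring, assuming the io.druid 0.9.x-era APIs shown above; the class name BalancerStrategySetupSketch and its main method are illustrative only, not part of the project.

import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import io.druid.server.coordinator.BalancerStrategy;
import io.druid.server.coordinator.CostBalancerStrategyFactory;

import java.util.concurrent.Executors;

public class BalancerStrategySetupSketch {

    public static void main(String[] args) {
        // The cost-based factory needs a listening executor; the tests use a single-threaded pool.
        ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
        try {
            BalancerStrategy strategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
            // The strategy is then handed to the coordinator via
            // DruidCoordinatorRuntimeParams.newBuilder().withBalancerStrategy(strategy)...
            System.out.println("Created strategy: " + strategy.getClass().getSimpleName());
        } finally {
            // Always release the executor, exactly as the tests do after rule.run(...).
            exec.shutdown();
        }
    }
}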

Example 2 with BalancerStrategy

Use of io.druid.server.coordinator.BalancerStrategy in project druid by druid-io.

From the class LoadRuleTest, method testLoadWithNonExistentTier.

@Test
public void testLoadWithNonExistentTier() throws Exception {
    mockPeon.loadSegment(EasyMock.<DataSegment>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    EasyMock.expect(mockPeon.getSegmentsToLoad()).andReturn(Sets.<DataSegment>newHashSet()).atLeastOnce();
    EasyMock.expect(mockPeon.getLoadQueueSize()).andReturn(0L).atLeastOnce();
    EasyMock.replay(mockPeon);
    LoadRule rule = new LoadRule() {

        private final Map<String, Integer> tiers = ImmutableMap.of("nonExistentTier", 1, "hot", 1);

        @Override
        public Map<String, Integer> getTieredReplicants() {
            return tiers;
        }

        @Override
        public int getNumReplicants(String tier) {
            return tiers.get(tier);
        }

        @Override
        public String getType() {
            return "test";
        }

        @Override
        public boolean appliesTo(DataSegment segment, DateTime referenceTimestamp) {
            return true;
        }

        @Override
        public boolean appliesTo(Interval interval, DateTime referenceTimestamp) {
            return true;
        }
    };
    DruidCluster druidCluster = new DruidCluster(ImmutableMap.of("hot", MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse()).create(Arrays.asList(new ServerHolder(new DruidServer("serverHot", "hostHot", 1000, "historical", "hot", 0).toImmutableDruidServer(), mockPeon)))));
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
    CoordinatorStats stats = rule.run(
        null,
        DruidCoordinatorRuntimeParams.newBuilder()
            .withDruidCluster(druidCluster)
            .withSegmentReplicantLookup(SegmentReplicantLookup.make(new DruidCluster()))
            .withReplicationManager(throttler)
            .withBalancerStrategy(balancerStrategy)
            .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
            .withAvailableSegments(Arrays.asList(segment))
            .build(),
        segment
    );
    Assert.assertTrue(stats.getPerTierStats().get("assignedCount").get("hot").get() == 1);
    exec.shutdown();
}
Also used: CoordinatorStats (io.druid.server.coordinator.CoordinatorStats), DruidServer (io.druid.client.DruidServer), DruidCluster (io.druid.server.coordinator.DruidCluster), DataSegment (io.druid.timeline.DataSegment), DateTime (org.joda.time.DateTime), BalancerStrategy (io.druid.server.coordinator.BalancerStrategy), ServerHolder (io.druid.server.coordinator.ServerHolder), CostBalancerStrategyFactory (io.druid.server.coordinator.CostBalancerStrategyFactory), ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService), Map (java.util.Map), ImmutableMap (com.google.common.collect.ImmutableMap), Interval (org.joda.time.Interval), Test (org.junit.Test)

Example 3 with BalancerStrategy

Use of io.druid.server.coordinator.BalancerStrategy in project druid by druid-io.

From the class LoadRuleTest, method testLoad.

@Test
public void testLoad() throws Exception {
    mockPeon.loadSegment(EasyMock.<DataSegment>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    EasyMock.expect(mockPeon.getSegmentsToLoad()).andReturn(Sets.<DataSegment>newHashSet()).atLeastOnce();
    EasyMock.expect(mockPeon.getLoadQueueSize()).andReturn(0L).atLeastOnce();
    EasyMock.replay(mockPeon);
    LoadRule rule = new LoadRule() {

        private final Map<String, Integer> tiers = ImmutableMap.of("hot", 1, DruidServer.DEFAULT_TIER, 2);

        @Override
        public Map<String, Integer> getTieredReplicants() {
            return tiers;
        }

        @Override
        public int getNumReplicants(String tier) {
            return tiers.get(tier);
        }

        @Override
        public String getType() {
            return "test";
        }

        @Override
        public boolean appliesTo(DataSegment segment, DateTime referenceTimestamp) {
            return true;
        }

        @Override
        public boolean appliesTo(Interval interval, DateTime referenceTimestamp) {
            return true;
        }
    };
    DruidCluster druidCluster = new DruidCluster(ImmutableMap.of("hot", MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse()).create(Arrays.asList(new ServerHolder(new DruidServer("serverHot", "hostHot", 1000, "historical", "hot", 0).toImmutableDruidServer(), mockPeon))), DruidServer.DEFAULT_TIER, MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse()).create(Arrays.asList(new ServerHolder(new DruidServer("serverNorm", "hostNorm", 1000, "historical", DruidServer.DEFAULT_TIER, 0).toImmutableDruidServer(), mockPeon)))));
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
    CoordinatorStats stats = rule.run(
        null,
        DruidCoordinatorRuntimeParams.newBuilder()
            .withDruidCluster(druidCluster)
            .withSegmentReplicantLookup(SegmentReplicantLookup.make(druidCluster))
            .withReplicationManager(throttler)
            .withBalancerStrategy(balancerStrategy)
            .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
            .withAvailableSegments(Arrays.asList(segment))
            .build(),
        segment
    );
    Assert.assertTrue(stats.getPerTierStats().get("assignedCount").get("hot").get() == 1);
    Assert.assertTrue(stats.getPerTierStats().get("assignedCount").get(DruidServer.DEFAULT_TIER).get() == 2);
    exec.shutdown();
}
Also used: CoordinatorStats (io.druid.server.coordinator.CoordinatorStats), DruidServer (io.druid.client.DruidServer), DruidCluster (io.druid.server.coordinator.DruidCluster), DataSegment (io.druid.timeline.DataSegment), DateTime (org.joda.time.DateTime), BalancerStrategy (io.druid.server.coordinator.BalancerStrategy), ServerHolder (io.druid.server.coordinator.ServerHolder), CostBalancerStrategyFactory (io.druid.server.coordinator.CostBalancerStrategyFactory), ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService), Map (java.util.Map), ImmutableMap (com.google.common.collect.ImmutableMap), Interval (org.joda.time.Interval), Test (org.junit.Test)
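
The anonymous LoadRule subclass is repeated nearly verbatim in each of these tests, differing only in its tier map. A small named helper like the one below could replace it. This is only a sketch built from the overrides shown above; the class name TestLoadRule, its constructor, and the assumption that LoadRule lives in io.druid.server.coordinator.rules are ours, not the project's.

import com.google.common.collect.ImmutableMap;
import io.druid.server.coordinator.rules.LoadRule;  // package assumed from the druid-io layout of this era
import io.druid.timeline.DataSegment;
import org.joda.time.DateTime;
import org.joda.time.Interval;

import java.util.Map;

class TestLoadRule extends LoadRule {

    private final Map<String, Integer> tiers;

    TestLoadRule(Map<String, Integer> tiers) {
        // Defensive copy so each test's tier-to-replicant map stays immutable.
        this.tiers = ImmutableMap.copyOf(tiers);
    }

    @Override
    public Map<String, Integer> getTieredReplicants() {
        return tiers;
    }

    @Override
    public int getNumReplicants(String tier) {
        return tiers.get(tier);
    }

    @Override
    public String getType() {
        return "test";
    }

    @Override
    public boolean appliesTo(DataSegment segment, DateTime referenceTimestamp) {
        return true;
    }

    @Override
    public boolean appliesTo(Interval interval, DateTime referenceTimestamp) {
        return true;
    }
}

With such a helper, testLoad could build its rule as new TestLoadRule(ImmutableMap.of("hot", 1, DruidServer.DEFAULT_TIER, 2)) instead of repeating the anonymous class.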

Example 4 with BalancerStrategy

Use of io.druid.server.coordinator.BalancerStrategy in project druid by druid-io.

From the class LoadRuleTest, method testDropWithNonExistentTier.

@Test
public void testDropWithNonExistentTier() throws Exception {
    mockPeon.dropSegment(EasyMock.<DataSegment>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    EasyMock.expect(mockPeon.getSegmentsToLoad()).andReturn(Sets.<DataSegment>newHashSet()).atLeastOnce();
    EasyMock.expect(mockPeon.getLoadQueueSize()).andReturn(0L).anyTimes();
    EasyMock.replay(mockPeon);
    LoadRule rule = new LoadRule() {

        private final Map<String, Integer> tiers = ImmutableMap.of("nonExistentTier", 1, "hot", 1);

        @Override
        public Map<String, Integer> getTieredReplicants() {
            return tiers;
        }

        @Override
        public int getNumReplicants(String tier) {
            return tiers.get(tier);
        }

        @Override
        public String getType() {
            return "test";
        }

        @Override
        public boolean appliesTo(DataSegment segment, DateTime referenceTimestamp) {
            return true;
        }

        @Override
        public boolean appliesTo(Interval interval, DateTime referenceTimestamp) {
            return true;
        }
    };
    DruidServer server1 = new DruidServer("serverHot", "hostHot", 1000, "historical", "hot", 0);
    DruidServer server2 = new DruidServer("serverHo2t", "hostHot2", 1000, "historical", "hot", 0);
    server1.addDataSegment(segment.getIdentifier(), segment);
    server2.addDataSegment(segment.getIdentifier(), segment);
    DruidCluster druidCluster = new DruidCluster(ImmutableMap.of("hot", MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse()).create(Arrays.asList(new ServerHolder(server1.toImmutableDruidServer(), mockPeon), new ServerHolder(server2.toImmutableDruidServer(), mockPeon)))));
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
    CoordinatorStats stats = rule.run(
        null,
        DruidCoordinatorRuntimeParams.newBuilder()
            .withDruidCluster(druidCluster)
            .withSegmentReplicantLookup(SegmentReplicantLookup.make(druidCluster))
            .withReplicationManager(throttler)
            .withBalancerStrategy(balancerStrategy)
            .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
            .withAvailableSegments(Arrays.asList(segment))
            .build(),
        segment
    );
    Assert.assertTrue(stats.getPerTierStats().get("droppedCount").get("hot").get() == 1);
    exec.shutdown();
}
Also used: CoordinatorStats (io.druid.server.coordinator.CoordinatorStats), DruidServer (io.druid.client.DruidServer), DruidCluster (io.druid.server.coordinator.DruidCluster), DataSegment (io.druid.timeline.DataSegment), DateTime (org.joda.time.DateTime), BalancerStrategy (io.druid.server.coordinator.BalancerStrategy), ServerHolder (io.druid.server.coordinator.ServerHolder), CostBalancerStrategyFactory (io.druid.server.coordinator.CostBalancerStrategyFactory), ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService), Map (java.util.Map), ImmutableMap (com.google.common.collect.ImmutableMap), Interval (org.joda.time.Interval), Test (org.junit.Test)

Example 5 with BalancerStrategy

Use of io.druid.server.coordinator.BalancerStrategy in project druid by druid-io.

From the class DruidCoordinatorBalancer, method run.

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    final CoordinatorStats stats = new CoordinatorStats();
    final BalancerStrategy strategy = params.getBalancerStrategy();
    final int maxSegmentsToMove = params.getCoordinatorDynamicConfig().getMaxSegmentsToMove();
    for (Map.Entry<String, MinMaxPriorityQueue<ServerHolder>> entry : params.getDruidCluster().getCluster().entrySet()) {
        String tier = entry.getKey();
        if (currentlyMovingSegments.get(tier) == null) {
            currentlyMovingSegments.put(tier, new ConcurrentHashMap<String, BalancerSegmentHolder>());
        }
        if (!currentlyMovingSegments.get(tier).isEmpty()) {
            reduceLifetimes(tier);
            log.info("[%s]: Still waiting on %,d segments to be moved", tier, currentlyMovingSegments.size());
            continue;
        }
        final List<ServerHolder> serverHolderList = Lists.newArrayList(entry.getValue());
        if (serverHolderList.size() <= 1) {
            log.info("[%s]: One or fewer servers found.  Cannot balance.", tier);
            continue;
        }
        int numSegments = 0;
        for (ServerHolder server : serverHolderList) {
            numSegments += server.getServer().getSegments().size();
        }
        if (numSegments == 0) {
            log.info("No segments found.  Cannot balance.");
            continue;
        }
        long unmoved = 0L;
        for (int iter = 0; iter < maxSegmentsToMove; iter++) {
            final BalancerSegmentHolder segmentToMove = strategy.pickSegmentToMove(serverHolderList);
            if (segmentToMove != null && params.getAvailableSegments().contains(segmentToMove.getSegment())) {
                final ServerHolder holder = strategy.findNewSegmentHomeBalancer(segmentToMove.getSegment(), serverHolderList);
                if (holder != null) {
                    moveSegment(segmentToMove, holder.getServer(), params);
                } else {
                    ++unmoved;
                }
            }
        }
        if (unmoved == maxSegmentsToMove) {
            // Cluster should be alive and constantly adjusting
            log.info("No good moves found in tier [%s]", tier);
        }
        stats.addToTieredStat("unmovedCount", tier, unmoved);
        stats.addToTieredStat("movedCount", tier, currentlyMovingSegments.get(tier).size());
        if (params.getCoordinatorDynamicConfig().emitBalancingStats()) {
            strategy.emitStats(tier, stats, serverHolderList);
        }
        log.info("[%s]: Segments Moved: [%d] Segments Let Alone: [%d]", tier, currentlyMovingSegments.get(tier).size(), unmoved);
    }
    return params.buildFromExisting().withCoordinatorStats(stats).build();
}
Also used: CoordinatorStats (io.druid.server.coordinator.CoordinatorStats), BalancerStrategy (io.druid.server.coordinator.BalancerStrategy), ServerHolder (io.druid.server.coordinator.ServerHolder), MinMaxPriorityQueue (com.google.common.collect.MinMaxPriorityQueue), BalancerSegmentHolder (io.druid.server.coordinator.BalancerSegmentHolder), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), Map (java.util.Map)
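
Stripped of the per-tier bookkeeping, the core of DruidCoordinatorBalancer.run is the pickSegmentToMove / findNewSegmentHomeBalancer loop. The sketch below condenses it using only the BalancerStrategy calls visible in Example 5; the method name balanceTier, and the omission of moveSegment and the availability check against params.getAvailableSegments(), are simplifications, not the project's code.

import io.druid.server.coordinator.BalancerSegmentHolder;
import io.druid.server.coordinator.BalancerStrategy;
import io.druid.server.coordinator.ServerHolder;

import java.util.List;

public class BalancerLoopSketch {

    // Returns how many move attempts found no better home (the "unmovedCount" stat in Example 5).
    static long balanceTier(BalancerStrategy strategy, List<ServerHolder> serverHolderList, int maxSegmentsToMove) {
        long unmoved = 0L;
        for (int iter = 0; iter < maxSegmentsToMove; iter++) {
            // Ask the strategy for a candidate segment to move off its current server.
            final BalancerSegmentHolder segmentToMove = strategy.pickSegmentToMove(serverHolderList);
            if (segmentToMove != null) {
                // Ask the strategy where that segment would be better placed.
                final ServerHolder destination =
                        strategy.findNewSegmentHomeBalancer(segmentToMove.getSegment(), serverHolderList);
                if (destination == null) {
                    ++unmoved;  // no better home found
                }
                // else: the real coordinator calls moveSegment(segmentToMove, destination.getServer(), params) here
            }
        }
        return unmoved;
    }
}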

Aggregations

BalancerStrategy (io.druid.server.coordinator.BalancerStrategy): 6 usages
CoordinatorStats (io.druid.server.coordinator.CoordinatorStats): 6 usages
ServerHolder (io.druid.server.coordinator.ServerHolder): 6 usages
Map (java.util.Map): 6 usages
DataSegment (io.druid.timeline.DataSegment): 5 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 4 usages
ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService): 4 usages
DruidServer (io.druid.client.DruidServer): 4 usages
CostBalancerStrategyFactory (io.druid.server.coordinator.CostBalancerStrategyFactory): 4 usages
DruidCluster (io.druid.server.coordinator.DruidCluster): 4 usages
DateTime (org.joda.time.DateTime): 4 usages
Interval (org.joda.time.Interval): 4 usages
Test (org.junit.Test): 4 usages
MinMaxPriorityQueue (com.google.common.collect.MinMaxPriorityQueue): 1 usage
BalancerSegmentHolder (io.druid.server.coordinator.BalancerSegmentHolder): 1 usage
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 1 usage