Search in sources :

Example 1 with ServerHolder

use of io.druid.server.coordinator.ServerHolder in project druid by druid-io.

In the class LoadRule, the method drop:

/**
 * Drops superfluous replicants of {@code segment} from tiers that hold more copies than this
 * rule expects. Does nothing until every tier has finished loading its required replicants
 * (i.e. every value in {@code loadStatus} is zero or less).
 *
 * Fix: removed the local {@code replicationManager} ({@code params.getReplicationManager()})
 * that was fetched but never used anywhere in this method.
 *
 * @param loadStatus remaining replicants still to be loaded, keyed by tier
 * @param segment    segment whose extra replicants should be dropped
 * @param params     coordinator runtime state (cluster topology, replicant lookup)
 * @return stats recording the number of drops issued per tier under droppedCount
 */
private CoordinatorStats drop(final Map<String, Integer> loadStatus, final DataSegment segment, final DruidCoordinatorRuntimeParams params) {
    CoordinatorStats stats = new CoordinatorStats();
    // Make sure we have enough loaded replicants in the correct tiers in the cluster before doing anything.
    for (Integer leftToLoad : loadStatus.values()) {
        if (leftToLoad > 0) {
            return stats;
        }
    }
    // Find all instances of this segment across tiers.
    Map<String, Integer> replicantsByTier = params.getSegmentReplicantLookup().getClusterTiers(segment.getIdentifier());
    for (Map.Entry<String, Integer> entry : replicantsByTier.entrySet()) {
        final String tier = entry.getKey();
        int loadedNumReplicantsForTier = entry.getValue();
        int expectedNumReplicantsForTier = getNumReplicants(tier);
        // Ensure the tier appears in the stats even when nothing is dropped below.
        stats.addToTieredStat(droppedCount, tier, 0);
        MinMaxPriorityQueue<ServerHolder> serverQueue = params.getDruidCluster().get(tier);
        if (serverQueue == null) {
            log.makeAlert("No holders found for tier[%s]", tier).emit();
            continue;
        }
        // Servers polled off the queue are remembered so they can be restored afterwards;
        // pollLast() takes from the tail of the tier's priority ordering.
        List<ServerHolder> droppedServers = Lists.newArrayList();
        while (loadedNumReplicantsForTier > expectedNumReplicantsForTier) {
            final ServerHolder holder = serverQueue.pollLast();
            if (holder == null) {
                log.warn("Wtf, holder was null?  I have no servers serving [%s]?", segment.getIdentifier());
                break;
            }
            if (holder.isServingSegment(segment)) {
                holder.getPeon().dropSegment(segment, null);
                --loadedNumReplicantsForTier;
                stats.addToTieredStat(droppedCount, tier, 1);
            }
            droppedServers.add(holder);
        }
        // Put the polled servers back so later processing still sees the full tier.
        serverQueue.addAll(droppedServers);
    }
    return stats;
}
Also used : CoordinatorStats(io.druid.server.coordinator.CoordinatorStats) ReplicationThrottler(io.druid.server.coordinator.ReplicationThrottler) ServerHolder(io.druid.server.coordinator.ServerHolder) Map(java.util.Map)

Example 2 with ServerHolder

use of io.druid.server.coordinator.ServerHolder in project druid by druid-io.

In the class LoadRule, the method assign:

/**
 * Loads additional replicants of the given segment onto servers in the given tier until the
 * tier reaches its expected replicant count, no eligible destination remains, or the
 * replication throttler refuses to create another replicant.
 *
 * @param replicationManager       throttler limiting concurrent replicant creation per tier
 * @param tier                     tier the new replicants are assigned to
 * @param totalReplicantsInCluster replicants of the segment currently loaded cluster-wide
 * @param expectedReplicantsInTier target replicant count for this tier
 * @param totalReplicantsInTier    replicants of the segment currently loaded in this tier
 * @param strategy                 balancer strategy used to choose a destination server
 * @param serverHolderList         candidate servers in the tier
 * @param segment                  segment to replicate
 * @return stats recording the number of assignments issued per tier under assignedCount
 */
private CoordinatorStats assign(final ReplicationThrottler replicationManager, final String tier, final int totalReplicantsInCluster, final int expectedReplicantsInTier, final int totalReplicantsInTier, final BalancerStrategy strategy, final List<ServerHolder> serverHolderList, final DataSegment segment) {
    final CoordinatorStats stats = new CoordinatorStats();
    // Ensure the tier appears in the stats even when nothing is assigned below.
    stats.addToTieredStat(assignedCount, tier, 0);
    int currReplicantsInTier = totalReplicantsInTier;
    int currTotalReplicantsInCluster = totalReplicantsInCluster;
    while (currReplicantsInTier < expectedReplicantsInTier) {
        // The very first copy cluster-wide is a plain load rather than a replication, so it
        // is exempt from the throttler; every subsequent copy must pass the throttle check.
        boolean replicate = currTotalReplicantsInCluster > 0;
        if (replicate && !replicationManager.canCreateReplicant(tier)) {
            break;
        }
        final ServerHolder holder = strategy.findNewSegmentHomeReplicator(segment, serverHolderList);
        if (holder == null) {
            log.warn("Not enough [%s] servers or node capacity to assign segment[%s]! Expected Replicants[%d]", tier, segment.getIdentifier(), expectedReplicantsInTier);
            break;
        }
        if (replicate) {
            // Register the in-flight replicant so the throttler counts it until the load completes.
            replicationManager.registerReplicantCreation(tier, segment.getIdentifier(), holder.getServer().getHost());
        }
        holder.getPeon().loadSegment(segment, new LoadPeonCallback() {

            @Override
            public void execute() {
                // Release the throttler slot when the peon invokes the callback.
                // NOTE(review): called unconditionally, even for the non-replicate first load;
                // unregistering an entry that was never registered appears to be a no-op — confirm.
                replicationManager.unregisterReplicantCreation(tier, segment.getIdentifier(), holder.getServer().getHost());
            }
        });
        stats.addToTieredStat(assignedCount, tier, 1);
        ++currReplicantsInTier;
        ++currTotalReplicantsInCluster;
    }
    return stats;
}
Also used : CoordinatorStats(io.druid.server.coordinator.CoordinatorStats) LoadPeonCallback(io.druid.server.coordinator.LoadPeonCallback) ServerHolder(io.druid.server.coordinator.ServerHolder)

Example 3 with ServerHolder

use of io.druid.server.coordinator.ServerHolder in project druid by druid-io.

In the class DruidCoordinatorCleanupUnneeded, the method run:

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    // Issues drop requests for every served segment that is no longer present in the
    // coordinator's set of available segments, and records the drops per tier.
    final CoordinatorStats stats = new CoordinatorStats();
    final Set<DataSegment> availableSegments = params.getAvailableSegments();

    if (availableSegments.isEmpty()) {
        // Guard: if cleanup ran before the first metadata-storage poll finished, the empty
        // available set would cause every segment in the cluster to be dropped.
        log.info("Found 0 availableSegments, skipping the cleanup of segments from historicals. This is done to prevent a race condition in which the coordinator would drop all segments if it started running cleanup before it finished polling the metadata storage for available segments for the first time.");
        return params.buildFromExisting().withCoordinatorStats(stats).build();
    }

    final DruidCluster cluster = params.getDruidCluster();
    for (MinMaxPriorityQueue<ServerHolder> tierServers : cluster.getSortedServersByTier()) {
        for (ServerHolder holder : tierServers) {
            final ImmutableDruidServer server = holder.getServer();
            for (ImmutableDruidDataSource dataSource : server.getDataSources()) {
                for (DataSegment segment : dataSource.getSegments()) {
                    if (availableSegments.contains(segment)) {
                        continue;
                    }
                    final LoadQueuePeon peon = params.getLoadManagementPeons().get(server.getName());
                    // Skip segments whose drop has already been queued on this server's peon.
                    if (!peon.getSegmentsToDrop().contains(segment)) {
                        peon.dropSegment(segment, new LoadPeonCallback() {

                            @Override
                            public void execute() {
                                // no-op: nothing further to do once the drop completes
                            }
                        });
                        stats.addToTieredStat("unneededCount", server.getTier(), 1);
                    }
                }
            }
        }
    }
    return params.buildFromExisting().withCoordinatorStats(stats).build();
}
Also used : CoordinatorStats(io.druid.server.coordinator.CoordinatorStats) LoadPeonCallback(io.druid.server.coordinator.LoadPeonCallback) ImmutableDruidDataSource(io.druid.client.ImmutableDruidDataSource) ServerHolder(io.druid.server.coordinator.ServerHolder) LoadQueuePeon(io.druid.server.coordinator.LoadQueuePeon) DruidCluster(io.druid.server.coordinator.DruidCluster) DataSegment(io.druid.timeline.DataSegment) ImmutableDruidServer(io.druid.client.ImmutableDruidServer)

Example 4 with ServerHolder

use of io.druid.server.coordinator.ServerHolder in project druid by druid-io.

In the class DruidCoordinatorCleanupOvershadowedTest, the method testRun:

@Test
public void testRun() {
    druidCoordinatorCleanupOvershadowed = new DruidCoordinatorCleanupOvershadowed(coordinator);
    availableSegments = ImmutableList.of(segmentV1, segmentV0, segmentV2);
    // Single-tier cluster ("normal") with one server holding the mock peon.
    druidCluster = new DruidCluster(ImmutableMap.of("normal", MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse()).create(Arrays.asList(new ServerHolder(druidServer, mockPeon)))));
    EasyMock.expect(druidServer.getDataSources()).andReturn(ImmutableList.of(druidDataSource)).anyTimes();
    EasyMock.expect(druidDataSource.getSegments()).andReturn(ImmutableSet.<DataSegment>of(segmentV1, segmentV2)).anyTimes();
    EasyMock.expect(druidDataSource.getName()).andReturn("test").anyTimes();
    // The run is expected to remove segmentV1 and segmentV0 from the coordinator
    // (presumably both are overshadowed by the newer segmentV2 — confirm against fixtures).
    coordinator.removeSegment(segmentV1);
    coordinator.removeSegment(segmentV0);
    EasyMock.expectLastCall();
    EasyMock.replay(mockPeon, coordinator, druidServer, druidDataSource);
    DruidCoordinatorRuntimeParams params = DruidCoordinatorRuntimeParams.newBuilder().withAvailableSegments(availableSegments).withCoordinatorStats(new CoordinatorStats()).withDruidCluster(druidCluster).build();
    druidCoordinatorCleanupOvershadowed.run(params);
    // Verify the expected removeSegment calls (and only those) actually happened.
    EasyMock.verify(coordinator, druidDataSource, druidServer);
}
Also used : DruidCoordinatorRuntimeParams(io.druid.server.coordinator.DruidCoordinatorRuntimeParams) CoordinatorStats(io.druid.server.coordinator.CoordinatorStats) ServerHolder(io.druid.server.coordinator.ServerHolder) DruidCluster(io.druid.server.coordinator.DruidCluster) DataSegment(io.druid.timeline.DataSegment) Test(org.junit.Test)

Example 5 with ServerHolder

use of io.druid.server.coordinator.ServerHolder in project druid by druid-io.

In the class LoadRuleTest, the method testDrop:

@Test
public void testDrop() throws Exception {
    // The shared mock peon must receive at least one dropSegment call during the run.
    mockPeon.dropSegment(EasyMock.<DataSegment>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    EasyMock.expect(mockPeon.getSegmentsToLoad()).andReturn(Sets.<DataSegment>newHashSet()).atLeastOnce();
    EasyMock.expect(mockPeon.getLoadQueueSize()).andReturn(0L).anyTimes();
    EasyMock.replay(mockPeon);
    // Rule expecting ZERO replicants in both tiers, so every loaded copy should be dropped.
    LoadRule rule = new LoadRule() {

        private final Map<String, Integer> tiers = ImmutableMap.of("hot", 0, DruidServer.DEFAULT_TIER, 0);

        @Override
        public Map<String, Integer> getTieredReplicants() {
            return tiers;
        }

        @Override
        public int getNumReplicants(String tier) {
            return tiers.get(tier);
        }

        @Override
        public String getType() {
            return "test";
        }

        @Override
        public boolean appliesTo(DataSegment segment, DateTime referenceTimestamp) {
            return true;
        }

        @Override
        public boolean appliesTo(Interval interval, DateTime referenceTimestamp) {
            return true;
        }
    };
    // One server per tier ("hot" and the default tier), each already serving the segment.
    DruidServer server1 = new DruidServer("serverHot", "hostHot", 1000, "historical", "hot", 0);
    server1.addDataSegment(segment.getIdentifier(), segment);
    DruidServer server2 = new DruidServer("serverNorm", "hostNorm", 1000, "historical", DruidServer.DEFAULT_TIER, 0);
    server2.addDataSegment(segment.getIdentifier(), segment);
    DruidCluster druidCluster = new DruidCluster(ImmutableMap.of("hot", MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse()).create(Arrays.asList(new ServerHolder(server1.toImmutableDruidServer(), mockPeon))), DruidServer.DEFAULT_TIER, MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse()).create(Arrays.asList(new ServerHolder(server2.toImmutableDruidServer(), mockPeon)))));
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
    CoordinatorStats stats = rule.run(null, DruidCoordinatorRuntimeParams.newBuilder().withDruidCluster(druidCluster).withSegmentReplicantLookup(SegmentReplicantLookup.make(druidCluster)).withReplicationManager(throttler).withBalancerStrategy(balancerStrategy).withBalancerReferenceTimestamp(new DateTime("2013-01-01")).withAvailableSegments(Arrays.asList(segment)).build(), segment);
    // Exactly one drop should be recorded per tier in the droppedCount stat.
    Assert.assertTrue(stats.getPerTierStats().get("droppedCount").get("hot").get() == 1);
    Assert.assertTrue(stats.getPerTierStats().get("droppedCount").get(DruidServer.DEFAULT_TIER).get() == 1);
    exec.shutdown();
}
Also used : CoordinatorStats(io.druid.server.coordinator.CoordinatorStats) DruidServer(io.druid.client.DruidServer) DruidCluster(io.druid.server.coordinator.DruidCluster) DataSegment(io.druid.timeline.DataSegment) DateTime(org.joda.time.DateTime) BalancerStrategy(io.druid.server.coordinator.BalancerStrategy) ServerHolder(io.druid.server.coordinator.ServerHolder) CostBalancerStrategyFactory(io.druid.server.coordinator.CostBalancerStrategyFactory) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) Interval(org.joda.time.Interval) Test(org.junit.Test)

Aggregations

ServerHolder (io.druid.server.coordinator.ServerHolder)13 CoordinatorStats (io.druid.server.coordinator.CoordinatorStats)12 DataSegment (io.druid.timeline.DataSegment)10 Map (java.util.Map)9 DruidCluster (io.druid.server.coordinator.DruidCluster)8 BalancerStrategy (io.druid.server.coordinator.BalancerStrategy)6 Test (org.junit.Test)5 ImmutableMap (com.google.common.collect.ImmutableMap)4 ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService)4 DruidServer (io.druid.client.DruidServer)4 ImmutableDruidServer (io.druid.client.ImmutableDruidServer)4 CostBalancerStrategyFactory (io.druid.server.coordinator.CostBalancerStrategyFactory)4 DateTime (org.joda.time.DateTime)4 Interval (org.joda.time.Interval)4 LoadPeonCallback (io.druid.server.coordinator.LoadPeonCallback)3 LoadQueuePeon (io.druid.server.coordinator.LoadQueuePeon)3 ImmutableDruidDataSource (io.druid.client.ImmutableDruidDataSource)2 ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)2 MinMaxPriorityQueue (com.google.common.collect.MinMaxPriorityQueue)1 ServiceEmitter (com.metamx.emitter.service.ServiceEmitter)1