Example 11 with DruidCluster

Use of org.apache.druid.server.coordinator.DruidCluster in project druid by druid-io.

Class RunRules, method run:

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    replicatorThrottler.updateParams(
        coordinator.getDynamicConfigs().getReplicationThrottleLimit(),
        coordinator.getDynamicConfigs().getReplicantLifetime(),
        false
    );
    CoordinatorStats stats = new CoordinatorStats();
    DruidCluster cluster = params.getDruidCluster();
    if (cluster.isEmpty()) {
        log.warn("Uh... I have no servers. Not assigning anything...");
        return params;
    }
    // Get used segments which are overshadowed by other used segments. Those would not need to be loaded and
    // eventually will be unloaded from Historical servers. Segments overshadowed by *served* used segments are marked
    // as unused in MarkAsUnusedOvershadowedSegments, and then eventually Coordinator sends commands to Historical nodes
    // to unload such segments in UnloadUnusedSegments.
    Set<SegmentId> overshadowed = params.getDataSourcesSnapshot().getOvershadowedSegments();
    for (String tier : cluster.getTierNames()) {
        replicatorThrottler.updateReplicationState(tier);
    }
    DruidCoordinatorRuntimeParams paramsWithReplicationManager = params
        .buildFromExistingWithoutSegmentsMetadata()
        .withReplicationManager(replicatorThrottler)
        .build();
    // Run through all matched rules for used segments
    DateTime now = DateTimes.nowUtc();
    MetadataRuleManager databaseRuleManager = paramsWithReplicationManager.getDatabaseRuleManager();
    final List<SegmentId> segmentsWithMissingRules = Lists.newArrayListWithCapacity(MAX_MISSING_RULES);
    int missingRules = 0;
    final Set<String> broadcastDatasources = new HashSet<>();
    for (ImmutableDruidDataSource dataSource : params.getDataSourcesSnapshot().getDataSourcesMap().values()) {
        List<Rule> rules = databaseRuleManager.getRulesWithDefault(dataSource.getName());
        for (Rule rule : rules) {
            // A datasource is considered a broadcast datasource if it has any broadcast rules.
            // The set of broadcast datasources is used by BalanceSegments, so it's important that RunRules
            // executes before BalanceSegments.
            if (rule instanceof BroadcastDistributionRule) {
                broadcastDatasources.add(dataSource.getName());
                break;
            }
        }
    }
    for (DataSegment segment : params.getUsedSegments()) {
        if (overshadowed.contains(segment.getId())) {
            // Skipping overshadowed segments
            continue;
        }
        List<Rule> rules = databaseRuleManager.getRulesWithDefault(segment.getDataSource());
        boolean foundMatchingRule = false;
        for (Rule rule : rules) {
            if (rule.appliesTo(segment, now)) {
                if (stats.getGlobalStat("totalNonPrimaryReplicantsLoaded")
                        >= paramsWithReplicationManager.getCoordinatorDynamicConfig().getMaxNonPrimaryReplicantsToLoad()
                        && !paramsWithReplicationManager.getReplicationManager().isLoadPrimaryReplicantsOnly()) {
                    log.info(
                        "Maximum number of non-primary replicants [%d] have been loaded for the current RunRules execution."
                            + " Only loading primary replicants from here on for this coordinator run cycle.",
                        paramsWithReplicationManager.getCoordinatorDynamicConfig().getMaxNonPrimaryReplicantsToLoad()
                    );
                    paramsWithReplicationManager.getReplicationManager().setLoadPrimaryReplicantsOnly(true);
                }
                stats.accumulate(rule.run(coordinator, paramsWithReplicationManager, segment));
                foundMatchingRule = true;
                break;
            }
        }
        if (!foundMatchingRule) {
            if (segmentsWithMissingRules.size() < MAX_MISSING_RULES) {
                segmentsWithMissingRules.add(segment.getId());
            }
            missingRules++;
        }
    }
    if (!segmentsWithMissingRules.isEmpty()) {
        log.makeAlert("Unable to find matching rules!")
            .addData("segmentsWithMissingRulesCount", missingRules)
            .addData("segmentsWithMissingRules", segmentsWithMissingRules)
            .emit();
    }
    return params.buildFromExisting().withCoordinatorStats(stats).withBroadcastDatasources(broadcastDatasources).build();
}
Also used : DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams), CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats), MetadataRuleManager(org.apache.druid.metadata.MetadataRuleManager), ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource), SegmentId(org.apache.druid.timeline.SegmentId), DruidCluster(org.apache.druid.server.coordinator.DruidCluster), DataSegment(org.apache.druid.timeline.DataSegment), DateTime(org.joda.time.DateTime), BroadcastDistributionRule(org.apache.druid.server.coordinator.rules.BroadcastDistributionRule), Rule(org.apache.druid.server.coordinator.rules.Rule), HashSet(java.util.HashSet)
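
The loop over each segment's rules applies first-match semantics: rules are evaluated in the order stored for the datasource, only the first rule whose appliesTo() returns true is run against the segment, and segments matching no rule at all are collected for the alert. A minimal, self-contained sketch of that dispatch, assuming simplified stand-in types (FirstMatchDemo and its nested Rule interface are hypothetical, not the Druid classes):

import java.util.List;

public class FirstMatchDemo {
    // Hypothetical stand-in for org.apache.druid.server.coordinator.rules.Rule.
    interface Rule {
        boolean appliesTo(String segmentId);
        void run(String segmentId);
    }

    // Runs only the first matching rule, mirroring the break in RunRules.run.
    static boolean applyFirstMatchingRule(String segmentId, List<Rule> rules) {
        for (Rule rule : rules) {
            if (rule.appliesTo(segmentId)) {
                rule.run(segmentId);
                return true;
            }
        }
        // No rule matched; the caller records this segment as missing a rule.
        return false;
    }

    public static void main(String[] args) {
        Rule loadEverything = new Rule() {
            @Override public boolean appliesTo(String id) { return true; }
            @Override public void run(String id) { System.out.println("load " + id); }
        };
        boolean matched = applyFirstMatchingRule("demo_segment_2024-01-01", List.of(loadEverything));
        System.out.println("matched = " + matched);
    }
}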

Example 12 with DruidCluster

Use of org.apache.druid.server.coordinator.DruidCluster in project druid by druid-io.

Class UnloadUnusedSegments, method run:

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    CoordinatorStats stats = new CoordinatorStats();
    Set<DataSegment> usedSegments = params.getUsedSegments();
    DruidCluster cluster = params.getDruidCluster();
    Map<String, Boolean> broadcastStatusByDatasource = new HashMap<>();
    for (String broadcastDatasource : params.getBroadcastDatasources()) {
        broadcastStatusByDatasource.put(broadcastDatasource, true);
    }
    for (SortedSet<ServerHolder> serverHolders : cluster.getSortedHistoricalsByTier()) {
        for (ServerHolder serverHolder : serverHolders) {
            handleUnusedSegmentsForServer(serverHolder, usedSegments, params, stats, false, broadcastStatusByDatasource);
        }
    }
    for (ServerHolder serverHolder : cluster.getBrokers()) {
        handleUnusedSegmentsForServer(serverHolder, usedSegments, params, stats, false, broadcastStatusByDatasource);
    }
    for (ServerHolder serverHolder : cluster.getRealtimes()) {
        handleUnusedSegmentsForServer(serverHolder, usedSegments, params, stats, true, broadcastStatusByDatasource);
    }
    return params.buildFromExisting().withCoordinatorStats(stats).build();
}
Also used : CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats), ServerHolder(org.apache.druid.server.coordinator.ServerHolder), HashMap(java.util.HashMap), DruidCluster(org.apache.druid.server.coordinator.DruidCluster), DataSegment(org.apache.druid.timeline.DataSegment)
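
The traversal order is the point here: historicals are visited tier by tier, then brokers, then realtime servers, and only the realtime pass receives true for the flag that restricts unloading to broadcast segments (named dropBroadcastOnly in the Druid source of this era). A toy sketch of that three-pass walk, with made-up server names:

import java.util.List;
import java.util.Map;

public class UnloadTraversalDemo {
    // Stand-in for handleUnusedSegmentsForServer; just prints the visit.
    static void handle(String server, boolean dropBroadcastOnly) {
        System.out.printf("checking %s (dropBroadcastOnly=%b)%n", server, dropBroadcastOnly);
    }

    public static void main(String[] args) {
        Map<String, List<String>> historicalsByTier =
            Map.of("hot", List.of("historical-1"), "_default_tier", List.of("historical-2"));
        List<String> brokers = List.of("broker-1");
        List<String> realtimes = List.of("indexer-1");

        // Same order as UnloadUnusedSegments.run: historicals, then brokers, then realtimes.
        historicalsByTier.values().forEach(tier -> tier.forEach(s -> handle(s, false)));
        brokers.forEach(s -> handle(s, false));
        // On realtime servers, only broadcast segments are candidates for unloading.
        realtimes.forEach(s -> handle(s, true));
    }
}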

Example 13 with DruidCluster

Use of org.apache.druid.server.coordinator.DruidCluster in project druid by druid-io.

Class LoadRule, method drop:

/**
 * @param stats {@link CoordinatorStats} to accumulate drop statistics.
 */
private void drop(final DruidCoordinatorRuntimeParams params, final DataSegment segment, final CoordinatorStats stats) {
    final DruidCluster druidCluster = params.getDruidCluster();
    // This enforces that loading is completed before we attempt to drop segments, as a safety measure.
    if (loadingInProgress(druidCluster)) {
        log.info("Loading in progress, skipping drop until loading is complete");
        return;
    }
    for (final Object2IntMap.Entry<String> entry : currentReplicants.object2IntEntrySet()) {
        final String tier = entry.getKey();
        final NavigableSet<ServerHolder> holders = druidCluster.getHistoricalsByTier(tier);
        final int numDropped;
        if (holders == null) {
            log.makeAlert("No holders found for tier[%s]", tier).emit();
            numDropped = 0;
        } else {
            final int currentReplicantsInTier = entry.getIntValue();
            final int numToDrop = currentReplicantsInTier - targetReplicants.getOrDefault(tier, 0);
            if (numToDrop > 0) {
                numDropped = dropForTier(numToDrop, holders, segment, params.getBalancerStrategy());
            } else {
                numDropped = 0;
            }
        }
        stats.addToTieredStat(DROPPED_COUNT, tier, numDropped);
    }
}
Also used : ServerHolder(org.apache.druid.server.coordinator.ServerHolder), Object2IntMap(it.unimi.dsi.fastutil.objects.Object2IntMap), DruidCluster(org.apache.druid.server.coordinator.DruidCluster)
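
The per-tier arithmetic is simple: for each tier that currently holds replicas, the surplus over the tier's target replicant count (defaulting to 0 when the tier has no target) is what gets dropped. A standalone sketch of just that computation, using illustrative tier maps rather than a real cluster:

import java.util.Map;

public class DropArithmeticDemo {
    public static void main(String[] args) {
        Map<String, Integer> currentReplicants = Map.of("hot", 3, "_default_tier", 2);
        Map<String, Integer> targetReplicants = Map.of("hot", 1); // no target for _default_tier

        currentReplicants.forEach((tier, current) -> {
            int target = targetReplicants.getOrDefault(tier, 0);
            int numToDrop = current - target;
            // Mirrors LoadRule.drop: only a positive surplus triggers drops.
            System.out.printf("tier=%s current=%d target=%d drop=%d%n",
                tier, current, target, Math.max(numToDrop, 0));
        });
    }
}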

Example 14 with DruidCluster

Use of org.apache.druid.server.coordinator.DruidCluster in project druid by druid-io.

Class LoadRuleTest, method testLoad:

@Test
public void testLoad() {
    EasyMock.expect(throttler.canCreateReplicant(EasyMock.anyString())).andReturn(true).anyTimes();
    final LoadQueuePeon mockPeon = createEmptyPeon();
    mockPeon.loadSegment(EasyMock.anyObject(), EasyMock.anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    LoadRule rule = createLoadRule(ImmutableMap.of("hot", 1, DruidServer.DEFAULT_TIER, 2));
    final DataSegment segment = createDataSegment("foo");
    throttler.registerReplicantCreation(DruidServer.DEFAULT_TIER, segment.getId(), "hostNorm");
    EasyMock.expectLastCall().once();
    EasyMock.expect(mockBalancerStrategy.findNewSegmentHomeReplicator(EasyMock.anyObject(), EasyMock.anyObject()))
        .andDelegateTo(balancerStrategy)
        .times(3);
    EasyMock.replay(throttler, mockPeon, mockBalancerStrategy);
    DruidCluster druidCluster = DruidClusterBuilder
        .newBuilder()
        .addTier(
            "hot",
            new ServerHolder(
                new DruidServer("serverHot", "hostHot", null, 1000, ServerType.HISTORICAL, "hot", 1).toImmutableDruidServer(),
                mockPeon
            )
        )
        .addTier(
            DruidServer.DEFAULT_TIER,
            new ServerHolder(
                new DruidServer("serverNorm", "hostNorm", null, 1000, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0).toImmutableDruidServer(),
                mockPeon
            )
        )
        .build();
    CoordinatorStats stats = rule.run(null, makeCoordinatorRuntimeParams(druidCluster, segment), segment);
    Assert.assertEquals(1L, stats.getTieredStat(LoadRule.ASSIGNED_COUNT, "hot"));
    Assert.assertEquals(1L, stats.getTieredStat(LoadRule.ASSIGNED_COUNT, DruidServer.DEFAULT_TIER));
    EasyMock.verify(throttler, mockPeon, mockBalancerStrategy);
}
Also used : CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats), ServerHolder(org.apache.druid.server.coordinator.ServerHolder), LoadQueuePeon(org.apache.druid.server.coordinator.LoadQueuePeon), DruidServer(org.apache.druid.client.DruidServer), ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer), DruidCluster(org.apache.druid.server.coordinator.DruidCluster), DataSegment(org.apache.druid.timeline.DataSegment), Test(org.junit.Test)
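
The test follows EasyMock's record/replay/verify lifecycle: expectations such as loadSegment(...) plus expectLastCall().atLeastOnce() are recorded first, replay(...) switches the mocks into playback mode, the code under test is exercised, and verify(...) fails the test if any recorded expectation went unmet. A minimal sketch of that lifecycle, assuming only EasyMock on the classpath (the Peon interface is a hypothetical stand-in):

import org.easymock.EasyMock;

public class EasyMockLifecycleDemo {
    public interface Peon {
        void loadSegment(String id);
    }

    public static void main(String[] args) {
        Peon mockPeon = EasyMock.createMock(Peon.class);
        mockPeon.loadSegment("seg-1");      // record: this exact call is expected
        EasyMock.expectLastCall().once();
        EasyMock.replay(mockPeon);          // switch from record mode to replay mode
        mockPeon.loadSegment("seg-1");      // exercise the mock as the code under test would
        EasyMock.verify(mockPeon);          // throws if any expectation was not satisfied
    }
}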

Example 15 with DruidCluster

Use of org.apache.druid.server.coordinator.DruidCluster in project druid by druid-io.

Class LoadRuleTest, method testLoadWithNonExistentTier:

@Test
public void testLoadWithNonExistentTier() {
    final LoadQueuePeon mockPeon = createEmptyPeon();
    mockPeon.loadSegment(EasyMock.anyObject(), EasyMock.anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    EasyMock.expect(mockBalancerStrategy.findNewSegmentHomeReplicator(EasyMock.anyObject(), EasyMock.anyObject()))
        .andDelegateTo(balancerStrategy)
        .times(1);
    EasyMock.replay(throttler, mockPeon, mockBalancerStrategy);
    LoadRule rule = createLoadRule(ImmutableMap.of("nonExistentTier", 1, "hot", 1));
    DruidCluster druidCluster = DruidClusterBuilder
        .newBuilder()
        .addTier(
            "hot",
            new ServerHolder(
                new DruidServer("serverHot", "hostHot", null, 1000, ServerType.HISTORICAL, "hot", 0).toImmutableDruidServer(),
                mockPeon
            )
        )
        .build();
    final DataSegment segment = createDataSegment("foo");
    CoordinatorStats stats = rule.run(
        null,
        CoordinatorRuntimeParamsTestHelpers
            .newBuilder()
            .withDruidCluster(druidCluster)
            .withSegmentReplicantLookup(SegmentReplicantLookup.make(new DruidCluster(), false))
            .withReplicationManager(throttler)
            .withBalancerStrategy(mockBalancerStrategy)
            .withUsedSegmentsInTest(segment)
            .build(),
        segment
    );
    Assert.assertEquals(1L, stats.getTieredStat(LoadRule.ASSIGNED_COUNT, "hot"));
    EasyMock.verify(throttler, mockPeon, mockBalancerStrategy);
}
Also used : CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats), ServerHolder(org.apache.druid.server.coordinator.ServerHolder), LoadQueuePeon(org.apache.druid.server.coordinator.LoadQueuePeon), DruidServer(org.apache.druid.client.DruidServer), ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer), DruidCluster(org.apache.druid.server.coordinator.DruidCluster), DataSegment(org.apache.druid.timeline.DataSegment), Test(org.junit.Test)
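
What this test pins down is that a replicant target for a tier with no servers is skipped rather than failing the coordinator run: only the "hot" tier records an assignment. An illustrative sketch of that skip behavior (not Druid's actual implementation, just the observable rule):

import java.util.Map;
import java.util.Set;

public class TierFilterDemo {
    public static void main(String[] args) {
        Map<String, Integer> targetReplicants = Map.of("nonExistentTier", 1, "hot", 1);
        Set<String> tiersWithServers = Set.of("hot");

        targetReplicants.forEach((tier, target) -> {
            if (!tiersWithServers.contains(tier)) {
                // No holders for this tier: nothing to assign to, so skip it.
                System.out.println("skipping tier without servers: " + tier);
                return;
            }
            System.out.printf("assign %d replicant(s) in tier %s%n", target, tier);
        });
    }
}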

Aggregations

DruidCluster (org.apache.druid.server.coordinator.DruidCluster): 22 uses
CoordinatorStats (org.apache.druid.server.coordinator.CoordinatorStats): 21 uses
DataSegment (org.apache.druid.timeline.DataSegment): 21 uses
ServerHolder (org.apache.druid.server.coordinator.ServerHolder): 20 uses
Test (org.junit.Test): 17 uses
DruidServer (org.apache.druid.client.DruidServer): 15 uses
LoadQueuePeon (org.apache.druid.server.coordinator.LoadQueuePeon): 14 uses
ImmutableDruidServer (org.apache.druid.client.ImmutableDruidServer): 13 uses
DruidCoordinatorRuntimeParams (org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams): 7 uses
HashMap (java.util.HashMap): 5 uses
LoadQueuePeonTester (org.apache.druid.server.coordinator.LoadQueuePeonTester): 5 uses
ArrayList (java.util.ArrayList): 3 uses
List (java.util.List): 3 uses
DateTimes (org.apache.druid.java.util.common.DateTimes): 3 uses
Intervals (org.apache.druid.java.util.common.Intervals): 3 uses
ServerType (org.apache.druid.server.coordination.ServerType): 3 uses
CoordinatorRuntimeParamsTestHelpers (org.apache.druid.server.coordinator.CoordinatorRuntimeParamsTestHelpers): 3 uses
DruidClusterBuilder (org.apache.druid.server.coordinator.DruidClusterBuilder): 3 uses
SegmentReplicantLookup (org.apache.druid.server.coordinator.SegmentReplicantLookup): 3 uses
NoneShardSpec (org.apache.druid.timeline.partition.NoneShardSpec): 3 uses