Use of org.apache.druid.server.coordinator.DruidCluster in project druid by druid-io: the class RunRules, method run.
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params)
{
  replicatorThrottler.updateParams(
      coordinator.getDynamicConfigs().getReplicationThrottleLimit(),
      coordinator.getDynamicConfigs().getReplicantLifetime(),
      false
  );

  CoordinatorStats stats = new CoordinatorStats();
  DruidCluster cluster = params.getDruidCluster();
  if (cluster.isEmpty()) {
    log.warn("Uh... I have no servers. Not assigning anything...");
    return params;
  }

  // Get used segments which are overshadowed by other used segments. Those do not need to be loaded and
  // will eventually be unloaded from Historical servers. Segments overshadowed by *served* used segments are
  // marked as unused in MarkAsUnusedOvershadowedSegments, and then the Coordinator eventually sends commands
  // to Historical nodes to unload such segments in UnloadUnusedSegments.
  Set<SegmentId> overshadowed = params.getDataSourcesSnapshot().getOvershadowedSegments();

  for (String tier : cluster.getTierNames()) {
    replicatorThrottler.updateReplicationState(tier);
  }

  DruidCoordinatorRuntimeParams paramsWithReplicationManager = params
      .buildFromExistingWithoutSegmentsMetadata()
      .withReplicationManager(replicatorThrottler)
      .build();

  // Run through all matched rules for used segments
  DateTime now = DateTimes.nowUtc();
  MetadataRuleManager databaseRuleManager = paramsWithReplicationManager.getDatabaseRuleManager();

  final List<SegmentId> segmentsWithMissingRules = Lists.newArrayListWithCapacity(MAX_MISSING_RULES);
  int missingRules = 0;

  final Set<String> broadcastDatasources = new HashSet<>();
  for (ImmutableDruidDataSource dataSource : params.getDataSourcesSnapshot().getDataSourcesMap().values()) {
    List<Rule> rules = databaseRuleManager.getRulesWithDefault(dataSource.getName());
    for (Rule rule : rules) {
      // The set of broadcast datasources is used by BalanceSegments, so it matters that RunRules
      // executes before BalanceSegments.
      if (rule instanceof BroadcastDistributionRule) {
        broadcastDatasources.add(dataSource.getName());
        break;
      }
    }
  }

  for (DataSegment segment : params.getUsedSegments()) {
    if (overshadowed.contains(segment.getId())) {
      // Skip overshadowed segments
      continue;
    }
    List<Rule> rules = databaseRuleManager.getRulesWithDefault(segment.getDataSource());
    boolean foundMatchingRule = false;
    for (Rule rule : rules) {
      if (rule.appliesTo(segment, now)) {
        if (stats.getGlobalStat("totalNonPrimaryReplicantsLoaded")
                >= paramsWithReplicationManager.getCoordinatorDynamicConfig().getMaxNonPrimaryReplicantsToLoad()
            && !paramsWithReplicationManager.getReplicationManager().isLoadPrimaryReplicantsOnly()) {
          log.info(
              "Maximum number of non-primary replicants [%d] have been loaded for the current RunRules execution."
              + " Only loading primary replicants from here on for this coordinator run cycle.",
              paramsWithReplicationManager.getCoordinatorDynamicConfig().getMaxNonPrimaryReplicantsToLoad()
          );
          paramsWithReplicationManager.getReplicationManager().setLoadPrimaryReplicantsOnly(true);
        }
        stats.accumulate(rule.run(coordinator, paramsWithReplicationManager, segment));
        foundMatchingRule = true;
        break;
      }
    }

    if (!foundMatchingRule) {
      if (segmentsWithMissingRules.size() < MAX_MISSING_RULES) {
        segmentsWithMissingRules.add(segment.getId());
      }
      missingRules++;
    }
  }

  if (!segmentsWithMissingRules.isEmpty()) {
    log.makeAlert("Unable to find matching rules!")
       .addData("segmentsWithMissingRulesCount", missingRules)
       .addData("segmentsWithMissingRules", segmentsWithMissingRules)
       .emit();
  }

  return params.buildFromExisting()
               .withCoordinatorStats(stats)
               .withBroadcastDatasources(broadcastDatasources)
               .build();
}
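The segment loop above applies at most one rule per segment: rules are evaluated in order, and the first rule whose appliesTo() returns true wins. A minimal sketch of that first-match semantics, using a simplified stand-in interface rather than Druid's actual Rule API:

import java.util.List;

// Simplified sketch (not Druid's real Rule interface) of the first-match rule
// evaluation in RunRules: rules are ordered, and only the first rule whose
// appliesTo() returns true is run for a given segment.
public class FirstMatchSketch {
  interface SimpleRule {
    boolean appliesTo(String segmentId);
    void run(String segmentId);
  }

  static boolean applyFirstMatch(List<SimpleRule> rules, String segmentId) {
    for (SimpleRule rule : rules) {
      if (rule.appliesTo(segmentId)) {
        rule.run(segmentId); // only the first matching rule runs
        return true;
      }
    }
    return false; // no match: RunRules counts this toward missingRules
  }

  public static void main(String[] args) {
    SimpleRule loadRecent = new SimpleRule() {
      @Override public boolean appliesTo(String id) { return id.startsWith("2024"); }
      @Override public void run(String id) { System.out.println("load " + id); }
    };
    SimpleRule dropRest = new SimpleRule() {
      @Override public boolean appliesTo(String id) { return true; }
      @Override public void run(String id) { System.out.println("drop " + id); }
    };
    applyFirstMatch(List.of(loadRecent, dropRest), "2024-01-01_foo"); // load
    applyFirstMatch(List.of(loadRecent, dropRest), "2019-01-01_foo"); // drop
  }
}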
Use of org.apache.druid.server.coordinator.DruidCluster in project druid by druid-io: the class UnloadUnusedSegments, method run.
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params)
{
  CoordinatorStats stats = new CoordinatorStats();
  Set<DataSegment> usedSegments = params.getUsedSegments();
  DruidCluster cluster = params.getDruidCluster();

  Map<String, Boolean> broadcastStatusByDatasource = new HashMap<>();
  for (String broadcastDatasource : params.getBroadcastDatasources()) {
    broadcastStatusByDatasource.put(broadcastDatasource, true);
  }

  for (SortedSet<ServerHolder> serverHolders : cluster.getSortedHistoricalsByTier()) {
    for (ServerHolder serverHolder : serverHolders) {
      handleUnusedSegmentsForServer(serverHolder, usedSegments, params, stats, false, broadcastStatusByDatasource);
    }
  }

  for (ServerHolder serverHolder : cluster.getBrokers()) {
    handleUnusedSegmentsForServer(serverHolder, usedSegments, params, stats, false, broadcastStatusByDatasource);
  }

  for (ServerHolder serverHolder : cluster.getRealtimes()) {
    handleUnusedSegmentsForServer(serverHolder, usedSegments, params, stats, true, broadcastStatusByDatasource);
  }

  return params.buildFromExisting().withCoordinatorStats(stats).build();
}
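handleUnusedSegmentsForServer itself is not shown here. A hypothetical simplification of its core idea, in plain Java rather than Druid classes, is below; the boolean flag (false for historicals and brokers, true for realtime servers) is passed through unchanged and ignored in this sketch.

import java.util.Set;

// Hypothetical simplification (not the real Druid method) of what
// handleUnusedSegmentsForServer does for one server: any served segment that
// no longer appears in the used set is queued for unload.
public class UnloadSketch {
  static int dropUnused(Set<String> servedSegmentIds, Set<String> usedSegmentIds) {
    int dropped = 0;
    for (String id : servedSegmentIds) {
      if (!usedSegmentIds.contains(id)) {
        System.out.println("unload " + id); // the real code goes through the server's load queue
        dropped++;
      }
    }
    return dropped;
  }

  public static void main(String[] args) {
    int n = dropUnused(Set.of("segA", "segB", "segC"), Set.of("segA"));
    System.out.println(n + " segments queued for unload"); // prints 2
  }
}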
Use of org.apache.druid.server.coordinator.DruidCluster in project druid by druid-io: the class LoadRule, method drop.
/**
 * @param stats {@link CoordinatorStats} to accumulate drop statistics.
 */
private void drop(
    final DruidCoordinatorRuntimeParams params,
    final DataSegment segment,
    final CoordinatorStats stats
)
{
  final DruidCluster druidCluster = params.getDruidCluster();

  // As a safety measure, enforce that loading has completed before we attempt to drop anything.
  if (loadingInProgress(druidCluster)) {
    log.info("Loading in progress, skipping drop until loading is complete");
    return;
  }

  for (final Object2IntMap.Entry<String> entry : currentReplicants.object2IntEntrySet()) {
    final String tier = entry.getKey();
    final NavigableSet<ServerHolder> holders = druidCluster.getHistoricalsByTier(tier);

    final int numDropped;
    if (holders == null) {
      log.makeAlert("No holders found for tier[%s]", tier).emit();
      numDropped = 0;
    } else {
      final int currentReplicantsInTier = entry.getIntValue();
      final int numToDrop = currentReplicantsInTier - targetReplicants.getOrDefault(tier, 0);
      if (numToDrop > 0) {
        numDropped = dropForTier(numToDrop, holders, segment, params.getBalancerStrategy());
      } else {
        numDropped = 0;
      }
    }

    stats.addToTieredStat(DROPPED_COUNT, tier, numDropped);
  }
}
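The per-tier arithmetic is simple: the number of replicas to drop is the current count minus the rule's target, floored at zero by the numToDrop > 0 check. A worked example with illustrative numbers (not taken from the method itself):

// Worked example of the drop arithmetic in LoadRule.drop(), using made-up numbers.
public class DropMathSketch {
  public static void main(String[] args) {
    int currentReplicantsInTier = 3; // copies of the segment currently in the tier
    int targetReplicants = 1;        // copies the matching rule wants in the tier
    int numToDrop = currentReplicantsInTier - targetReplicants;
    if (numToDrop > 0) {
      // dropForTier would now choose which two servers lose their copy
      System.out.println("drop " + numToDrop + " replicas"); // prints "drop 2 replicas"
    }
  }
}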
Use of org.apache.druid.server.coordinator.DruidCluster in project druid by druid-io: the class LoadRuleTest, method testLoad.
@Test
public void testLoad()
{
  EasyMock.expect(throttler.canCreateReplicant(EasyMock.anyString())).andReturn(true).anyTimes();
  final LoadQueuePeon mockPeon = createEmptyPeon();
  mockPeon.loadSegment(EasyMock.anyObject(), EasyMock.anyObject());
  EasyMock.expectLastCall().atLeastOnce();

  LoadRule rule = createLoadRule(ImmutableMap.of("hot", 1, DruidServer.DEFAULT_TIER, 2));
  final DataSegment segment = createDataSegment("foo");
  throttler.registerReplicantCreation(DruidServer.DEFAULT_TIER, segment.getId(), "hostNorm");
  EasyMock.expectLastCall().once();
  EasyMock.expect(mockBalancerStrategy.findNewSegmentHomeReplicator(EasyMock.anyObject(), EasyMock.anyObject()))
          .andDelegateTo(balancerStrategy)
          .times(3);
  EasyMock.replay(throttler, mockPeon, mockBalancerStrategy);

  DruidCluster druidCluster = DruidClusterBuilder
      .newBuilder()
      .addTier("hot", new ServerHolder(
          new DruidServer("serverHot", "hostHot", null, 1000, ServerType.HISTORICAL, "hot", 1)
              .toImmutableDruidServer(),
          mockPeon
      ))
      .addTier(DruidServer.DEFAULT_TIER, new ServerHolder(
          new DruidServer("serverNorm", "hostNorm", null, 1000, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0)
              .toImmutableDruidServer(),
          mockPeon
      ))
      .build();

  CoordinatorStats stats = rule.run(null, makeCoordinatorRuntimeParams(druidCluster, segment), segment);
  Assert.assertEquals(1L, stats.getTieredStat(LoadRule.ASSIGNED_COUNT, "hot"));
  Assert.assertEquals(1L, stats.getTieredStat(LoadRule.ASSIGNED_COUNT, DruidServer.DEFAULT_TIER));
  EasyMock.verify(throttler, mockPeon, mockBalancerStrategy);
}
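A hedged note on the numbers in this test (an assumed reading of the mocks, not a statement about Druid's internals): the balancer strategy appears to be consulted once per desired replica, 1 for hot plus 2 for the default tier, hence times(3), while a tier with a single server can hold at most one copy of a segment, hence ASSIGNED_COUNT of 1 in each tier. A tiny sketch of that accounting:

// Assumed replica accounting per tier: at most
// min(targetReplicants - currentReplicants, serversInTier) copies can land.
public class ReplicaMathSketch {
  static int assignable(int target, int current, int serversInTier) {
    return Math.max(0, Math.min(target - current, serversInTier));
  }

  public static void main(String[] args) {
    System.out.println(assignable(1, 0, 1)); // "hot": 1 assigned
    System.out.println(assignable(2, 0, 1)); // default tier: one server, so 1 assigned
  }
}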
Use of org.apache.druid.server.coordinator.DruidCluster in project druid by druid-io: the class LoadRuleTest, method testLoadWithNonExistentTier.
@Test
public void testLoadWithNonExistentTier()
{
  final LoadQueuePeon mockPeon = createEmptyPeon();
  mockPeon.loadSegment(EasyMock.anyObject(), EasyMock.anyObject());
  EasyMock.expectLastCall().atLeastOnce();
  EasyMock.expect(mockBalancerStrategy.findNewSegmentHomeReplicator(EasyMock.anyObject(), EasyMock.anyObject()))
          .andDelegateTo(balancerStrategy)
          .times(1);
  EasyMock.replay(throttler, mockPeon, mockBalancerStrategy);

  LoadRule rule = createLoadRule(ImmutableMap.of("nonExistentTier", 1, "hot", 1));
  DruidCluster druidCluster = DruidClusterBuilder
      .newBuilder()
      .addTier("hot", new ServerHolder(
          new DruidServer("serverHot", "hostHot", null, 1000, ServerType.HISTORICAL, "hot", 0)
              .toImmutableDruidServer(),
          mockPeon
      ))
      .build();

  final DataSegment segment = createDataSegment("foo");
  CoordinatorStats stats = rule.run(
      null,
      CoordinatorRuntimeParamsTestHelpers
          .newBuilder()
          .withDruidCluster(druidCluster)
          .withSegmentReplicantLookup(SegmentReplicantLookup.make(new DruidCluster(), false))
          .withReplicationManager(throttler)
          .withBalancerStrategy(mockBalancerStrategy)
          .withUsedSegmentsInTest(segment)
          .build(),
      segment
  );

  Assert.assertEquals(1L, stats.getTieredStat(LoadRule.ASSIGNED_COUNT, "hot"));
  EasyMock.verify(throttler, mockPeon, mockBalancerStrategy);
}
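This test exercises the null-tolerant tier lookup seen in LoadRule.drop above: a rule may name a tier with no servers, getHistoricalsByTier then returns null, and the rule must skip that tier rather than fail. A minimal sketch in plain Java collections, not Druid classes:

import java.util.Map;
import java.util.NavigableSet;
import java.util.TreeSet;

// Sketch of a null-tolerant per-tier lookup: "nonExistentTier" has no entry,
// so the lookup returns null and the tier is skipped.
public class TierLookupSketch {
  public static void main(String[] args) {
    Map<String, NavigableSet<String>> historicalsByTier =
        Map.of("hot", new TreeSet<>(java.util.List.of("serverHot")));

    for (String tier : new String[]{"nonExistentTier", "hot"}) {
      NavigableSet<String> holders = historicalsByTier.get(tier);
      if (holders == null || holders.isEmpty()) {
        System.out.println(tier + ": no servers, skipped"); // nonExistentTier lands here
      } else {
        System.out.println(tier + ": assign to " + holders.first());
      }
    }
  }
}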