Use of io.druid.server.coordinator.CoordinatorStats in project druid by druid-io.
Class LoadRule, method drop().
private CoordinatorStats drop(final Map<String, Integer> loadStatus, final DataSegment segment, final DruidCoordinatorRuntimeParams params)
{
  CoordinatorStats stats = new CoordinatorStats();

  // Make sure we have enough loaded replicants in the correct tiers in the cluster before doing anything
  for (Integer leftToLoad : loadStatus.values()) {
    if (leftToLoad > 0) {
      return stats;
    }
  }

  final ReplicationThrottler replicationManager = params.getReplicationManager();

  // Find all instances of this segment across tiers
  Map<String, Integer> replicantsByTier = params.getSegmentReplicantLookup().getClusterTiers(segment.getIdentifier());
  for (Map.Entry<String, Integer> entry : replicantsByTier.entrySet()) {
    final String tier = entry.getKey();
    int loadedNumReplicantsForTier = entry.getValue();
    int expectedNumReplicantsForTier = getNumReplicants(tier);
    stats.addToTieredStat(droppedCount, tier, 0);

    MinMaxPriorityQueue<ServerHolder> serverQueue = params.getDruidCluster().get(tier);
    if (serverQueue == null) {
      log.makeAlert("No holders found for tier[%s]", tier).emit();
      continue;
    }

    List<ServerHolder> droppedServers = Lists.newArrayList();
    while (loadedNumReplicantsForTier > expectedNumReplicantsForTier) {
      final ServerHolder holder = serverQueue.pollLast();
      if (holder == null) {
        log.warn("Wtf, holder was null? I have no servers serving [%s]?", segment.getIdentifier());
        break;
      }
      if (holder.isServingSegment(segment)) {
        holder.getPeon().dropSegment(segment, null);
        --loadedNumReplicantsForTier;
        stats.addToTieredStat(droppedCount, tier, 1);
      }
      droppedServers.add(holder);
    }
    serverQueue.addAll(droppedServers);
  }

  return stats;
}
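Both drop() above and assign() below share the same bookkeeping idiom: seed the tiered counter with a zero (stats.addToTieredStat(droppedCount, tier, 0)) so the tier appears in the emitted stats even when no action is taken, then add 1 per segment acted on; the rule runner later merges every rule's stats with accumulate(). A minimal, self-contained sketch of that idiom, where TieredStats is a hypothetical stand-in rather than Druid's CoordinatorStats:

import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-in for CoordinatorStats, showing the per-tier counting idiom.
public class TieredStats
{
  // statName -> (tier -> count)
  private final Map<String, Map<String, Long>> perTierStats = new HashMap<>();

  public void addToTieredStat(String statName, String tier, long value)
  {
    perTierStats.computeIfAbsent(statName, k -> new HashMap<>()).merge(tier, value, Long::sum);
  }

  // Merge another stats object into this one, as the rule runner does with
  // stats.accumulate(rule.run(...)).
  public void accumulate(TieredStats other)
  {
    for (Map.Entry<String, Map<String, Long>> stat : other.perTierStats.entrySet()) {
      for (Map.Entry<String, Long> tierCount : stat.getValue().entrySet()) {
        addToTieredStat(stat.getKey(), tierCount.getKey(), tierCount.getValue());
      }
    }
  }

  public static void main(String[] args)
  {
    TieredStats stats = new TieredStats();
    stats.addToTieredStat("droppedCount", "hot", 0); // seed: tier is reported even if nothing is dropped
    stats.addToTieredStat("droppedCount", "hot", 1); // one segment dropped
    System.out.println(stats.perTierStats); // {droppedCount={hot=1}}
  }
}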
Use of io.druid.server.coordinator.CoordinatorStats in project druid by druid-io.
Class LoadRule, method assign().
private CoordinatorStats assign(final ReplicationThrottler replicationManager, final String tier, final int totalReplicantsInCluster, final int expectedReplicantsInTier, final int totalReplicantsInTier, final BalancerStrategy strategy, final List<ServerHolder> serverHolderList, final DataSegment segment)
{
  final CoordinatorStats stats = new CoordinatorStats();
  stats.addToTieredStat(assignedCount, tier, 0);

  int currReplicantsInTier = totalReplicantsInTier;
  int currTotalReplicantsInCluster = totalReplicantsInCluster;
  while (currReplicantsInTier < expectedReplicantsInTier) {
    // Any copy beyond the first in the cluster counts as replication and is subject to throttling
    boolean replicate = currTotalReplicantsInCluster > 0;
    if (replicate && !replicationManager.canCreateReplicant(tier)) {
      break;
    }

    final ServerHolder holder = strategy.findNewSegmentHomeReplicator(segment, serverHolderList);
    if (holder == null) {
      log.warn("Not enough [%s] servers or node capacity to assign segment[%s]! Expected Replicants[%d]", tier, segment.getIdentifier(), expectedReplicantsInTier);
      break;
    }

    if (replicate) {
      replicationManager.registerReplicantCreation(tier, segment.getIdentifier(), holder.getServer().getHost());
    }

    holder.getPeon().loadSegment(segment, new LoadPeonCallback()
    {
      @Override
      public void execute()
      {
        replicationManager.unregisterReplicantCreation(tier, segment.getIdentifier(), holder.getServer().getHost());
      }
    });

    stats.addToTieredStat(assignedCount, tier, 1);
    ++currReplicantsInTier;
    ++currTotalReplicantsInCluster;
  }

  return stats;
}
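The subtle part of assign() is the throttler handshake: each replica load beyond the first copy in the cluster is registered as in flight before the peon starts it, and the LoadPeonCallback unregisters it once the historical finishes loading, freeing a slot under the replication throttle limit. A rough sketch of that accounting, under the assumption that the limit is a simple per-tier cap; InFlightReplicas is hypothetical, and Druid's real ReplicationThrottler also tracks how long each replicant has been in flight against the configured replicant lifetime:

import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Hypothetical sketch of the accounting behind canCreateReplicant /
// registerReplicantCreation / unregisterReplicantCreation.
public class InFlightReplicas
{
  private final int maxInFlightPerTier;

  // tier -> set of "segmentId_host" keys currently being replicated
  private final Map<String, Set<String>> inFlight = new HashMap<>();

  public InFlightReplicas(int maxInFlightPerTier)
  {
    this.maxInFlightPerTier = maxInFlightPerTier;
  }

  public synchronized boolean canCreateReplicant(String tier)
  {
    return inFlight.getOrDefault(tier, Collections.emptySet()).size() < maxInFlightPerTier;
  }

  public synchronized void registerReplicantCreation(String tier, String segmentId, String host)
  {
    inFlight.computeIfAbsent(tier, k -> new HashSet<>()).add(segmentId + "_" + host);
  }

  // Invoked from the LoadPeonCallback once the load completes, freeing a throttle slot.
  public synchronized void unregisterReplicantCreation(String tier, String segmentId, String host)
  {
    Set<String> tierSet = inFlight.get(tier);
    if (tierSet != null) {
      tierSet.remove(segmentId + "_" + host);
    }
  }
}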
Use of io.druid.server.coordinator.CoordinatorStats in project druid by druid-io.
Class DruidCoordinatorCleanupUnneeded, method run().
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params)
{
  CoordinatorStats stats = new CoordinatorStats();
  Set<DataSegment> availableSegments = params.getAvailableSegments();
  DruidCluster cluster = params.getDruidCluster();

  // Only run cleanup once the available-segment list has been populated. This prevents a race in which
  // the coordinator would drop all segments if it started running cleanup before it finished polling the
  // metadata storage for available segments for the first time.
  if (!availableSegments.isEmpty()) {
    for (MinMaxPriorityQueue<ServerHolder> serverHolders : cluster.getSortedServersByTier()) {
      for (ServerHolder serverHolder : serverHolders) {
        ImmutableDruidServer server = serverHolder.getServer();
        for (ImmutableDruidDataSource dataSource : server.getDataSources()) {
          for (DataSegment segment : dataSource.getSegments()) {
            if (!availableSegments.contains(segment)) {
              LoadQueuePeon queuePeon = params.getLoadManagementPeons().get(server.getName());
              if (!queuePeon.getSegmentsToDrop().contains(segment)) {
                queuePeon.dropSegment(segment, new LoadPeonCallback()
                {
                  @Override
                  public void execute()
                  {
                  }
                });
                stats.addToTieredStat("unneededCount", server.getTier(), 1);
              }
            }
          }
        }
      }
    }
  } else {
    log.info("Found 0 availableSegments, skipping the cleanup of segments from historicals. This is done to prevent a race condition in which the coordinator would drop all segments if it started running cleanup before it finished polling the metadata storage for available segments for the first time.");
  }

  return params.buildFromExisting().withCoordinatorStats(stats).build();
}
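The isEmpty() guard at the top is the load-bearing line in this method: with an empty availableSegments snapshot, every served segment would look unneeded and be queued for drop. A small, self-contained illustration of the guarded set difference (hypothetical demo class; real segments are DataSegment objects, not strings):

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

// Hypothetical mini-demo of the race the isEmpty() guard prevents.
public class CleanupGuardDemo
{
  static Set<String> unneeded(Set<String> served, Set<String> available)
  {
    if (available.isEmpty()) {
      // The guard: before the first metadata-store poll completes, 'available' is empty,
      // and dropping everything served would be catastrophic. Do nothing instead.
      return Collections.emptySet();
    }
    Set<String> result = new HashSet<>(served);
    result.removeAll(available);
    return result;
  }

  public static void main(String[] args)
  {
    Set<String> served = new HashSet<>(Arrays.asList("seg1", "seg2"));
    System.out.println(unneeded(served, Collections.emptySet()));               // [] : nothing dropped mid-startup
    System.out.println(unneeded(served, new HashSet<>(Arrays.asList("seg1")))); // [seg2] : genuinely unneeded
  }
}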
Use of io.druid.server.coordinator.CoordinatorStats in project druid by druid-io.
Class DruidCoordinatorRuleRunner, method run().
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params)
{
  replicatorThrottler.updateParams(coordinator.getDynamicConfigs().getReplicationThrottleLimit(), coordinator.getDynamicConfigs().getReplicantLifetime());

  CoordinatorStats stats = new CoordinatorStats();
  DruidCluster cluster = params.getDruidCluster();
  if (cluster.isEmpty()) {
    log.warn("Uh... I have no servers. Not assigning anything...");
    return params;
  }

  // Find available segments which are not overshadowed by other segments in the DB; only those need to be
  // loaded/dropped. Anything overshadowed by served segments is dropped automatically by
  // DruidCoordinatorCleanupOvershadowed.
  Map<String, VersionedIntervalTimeline<String, DataSegment>> timelines = new HashMap<>();
  for (DataSegment segment : params.getAvailableSegments()) {
    VersionedIntervalTimeline<String, DataSegment> timeline = timelines.get(segment.getDataSource());
    if (timeline == null) {
      timeline = new VersionedIntervalTimeline<>(Comparators.comparable());
      timelines.put(segment.getDataSource(), timeline);
    }
    timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segment));
  }

  Set<DataSegment> overshadowed = new HashSet<>();
  for (VersionedIntervalTimeline<String, DataSegment> timeline : timelines.values()) {
    for (TimelineObjectHolder<String, DataSegment> holder : timeline.findOvershadowed()) {
      for (DataSegment dataSegment : holder.getObject().payloads()) {
        overshadowed.add(dataSegment);
      }
    }
  }

  Set<DataSegment> nonOvershadowed = new HashSet<>();
  for (DataSegment dataSegment : params.getAvailableSegments()) {
    if (!overshadowed.contains(dataSegment)) {
      nonOvershadowed.add(dataSegment);
    }
  }

  for (String tier : cluster.getTierNames()) {
    replicatorThrottler.updateReplicationState(tier);
  }

  DruidCoordinatorRuntimeParams paramsWithReplicationManager = params.buildFromExistingWithoutAvailableSegments().withReplicationManager(replicatorThrottler).withAvailableSegments(nonOvershadowed).build();

  // Run through all matched rules for available segments
  DateTime now = new DateTime();
  MetadataRuleManager databaseRuleManager = paramsWithReplicationManager.getDatabaseRuleManager();

  final List<String> segmentsWithMissingRules = Lists.newArrayListWithCapacity(MAX_MISSING_RULES);
  int missingRules = 0;
  for (DataSegment segment : paramsWithReplicationManager.getAvailableSegments()) {
    List<Rule> rules = databaseRuleManager.getRulesWithDefault(segment.getDataSource());
    boolean foundMatchingRule = false;
    for (Rule rule : rules) {
      if (rule.appliesTo(segment, now)) {
        stats.accumulate(rule.run(coordinator, paramsWithReplicationManager, segment));
        foundMatchingRule = true;
        break;
      }
    }
    if (!foundMatchingRule) {
      if (segmentsWithMissingRules.size() < MAX_MISSING_RULES) {
        segmentsWithMissingRules.add(segment.getIdentifier());
      }
      missingRules++;
    }
  }

  if (!segmentsWithMissingRules.isEmpty()) {
    log.makeAlert("Unable to find matching rules!").addData("segmentsWithMissingRulesCount", missingRules).addData("segmentsWithMissingRules", segmentsWithMissingRules).emit();
  }

  return paramsWithReplicationManager.buildFromExistingWithoutAvailableSegments().withCoordinatorStats(stats).withAvailableSegments(params.getAvailableSegments()).build();
}
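The per-segment loop implements first-match-wins dispatch over a datasource's rule chain: rules are consulted in order, only the first rule whose appliesTo() returns true is run, and segments matching no rule are collected (capped at MAX_MISSING_RULES identifiers) and raised as an alert. A condensed sketch of that dispatch; SimpleRule is a hypothetical reduction of Druid's Rule interface, using a plain millisecond timestamp instead of a DateTime:

import java.util.List;
import java.util.Optional;

// Hypothetical reduction of io.druid.server.coordinator.rules.Rule.
interface SimpleRule
{
  boolean appliesTo(String segmentId, long nowMillis);

  void run(String segmentId); // stand-in for rule.run(coordinator, params, segment)
}

class FirstMatchDispatcher
{
  // Returns the rule that handled the segment, or empty if none matched,
  // which is the case the runner above reports via makeAlert().
  static Optional<SimpleRule> dispatch(String segmentId, List<SimpleRule> rules, long nowMillis)
  {
    for (SimpleRule rule : rules) {
      if (rule.appliesTo(segmentId, nowMillis)) {
        rule.run(segmentId);
        return Optional.of(rule); // first match wins; later rules never see this segment
      }
    }
    return Optional.empty();
  }
}

Because of the first-match semantics, rule order matters: a load rule scoped to a recent period placed ahead of a catch-all drop rule keeps fresh data loaded while older data is dropped.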
Use of io.druid.server.coordinator.CoordinatorStats in project druid by druid-io.
Class DropRule, method run().
@Override
public CoordinatorStats run(DruidCoordinator coordinator, DruidCoordinatorRuntimeParams params, DataSegment segment)
{
  CoordinatorStats stats = new CoordinatorStats();
  coordinator.removeSegment(segment);
  // Removal is cluster-wide rather than tier-specific, so it is counted as a global stat
  stats.addToGlobalStat("deletedCount", 1);
  return stats;
}