Use of io.druid.server.coordinator.ServerHolder in project druid by druid-io.
In class DruidCoordinatorCleanupOvershadowed, method run:
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
  CoordinatorStats stats = new CoordinatorStats();
  // Unservice old partitions if we've had enough time to make sure we aren't flapping with old data
  if (params.hasDeletionWaitTimeElapsed()) {
    DruidCluster cluster = params.getDruidCluster();
    Map<String, VersionedIntervalTimeline<String, DataSegment>> timelines = Maps.newHashMap();
    for (MinMaxPriorityQueue<ServerHolder> serverHolders : cluster.getSortedServersByTier()) {
      for (ServerHolder serverHolder : serverHolders) {
        ImmutableDruidServer server = serverHolder.getServer();
        for (ImmutableDruidDataSource dataSource : server.getDataSources()) {
          VersionedIntervalTimeline<String, DataSegment> timeline = timelines.get(dataSource.getName());
          if (timeline == null) {
            timeline = new VersionedIntervalTimeline<>(Comparators.comparable());
            timelines.put(dataSource.getName(), timeline);
          }
          for (DataSegment segment : dataSource.getSegments()) {
            timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segment));
          }
        }
      }
    }
    // Remove all segments in db that are overshadowed by served segments
    for (DataSegment dataSegment : params.getAvailableSegments()) {
      VersionedIntervalTimeline<String, DataSegment> timeline = timelines.get(dataSegment.getDataSource());
      if (timeline != null && timeline.isOvershadowed(dataSegment.getInterval(), dataSegment.getVersion())) {
        coordinator.removeSegment(dataSegment);
        stats.addToGlobalStat("overShadowedCount", 1);
      }
    }
  }
  return params.buildFromExisting().withCoordinatorStats(stats).build();
}
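The overshadow check above leans entirely on VersionedIntervalTimeline: a segment is overshadowed when a fully covering chunk with a higher version has been added for the same interval. The following standalone sketch is not taken from the Druid sources; the package paths and the DataSegment builder defaults (including its NoneShardSpec) are assumptions about the 0.9.x io.druid tree and may need adjusting for other versions. It shows the same add/isOvershadowed pattern on two hand-built segments.

import io.druid.java.util.common.guava.Comparators; // assumed path; older releases use com.metamx.common.guava.Comparators
import io.druid.timeline.DataSegment;
import io.druid.timeline.VersionedIntervalTimeline;
import org.joda.time.Interval;

public class OvershadowSketch {
  public static void main(String[] args) {
    Interval day = new Interval("2017-01-01/2017-01-02");
    // Two versions over the same datasource and interval; the builder's default shard spec is assumed.
    DataSegment v1 = DataSegment.builder().dataSource("test").interval(day).version("v1").size(1).build();
    DataSegment v2 = DataSegment.builder().dataSource("test").interval(day).version("v2").size(1).build();

    VersionedIntervalTimeline<String, DataSegment> timeline =
        new VersionedIntervalTimeline<>(Comparators.comparable());
    // Register both segments the same way the cleanup duty registers served segments.
    timeline.add(v1.getInterval(), v1.getVersion(), v1.getShardSpec().createChunk(v1));
    timeline.add(v2.getInterval(), v2.getVersion(), v2.getShardSpec().createChunk(v2));

    System.out.println(timeline.isOvershadowed(day, "v1")); // true: hidden by the newer v2
    System.out.println(timeline.isOvershadowed(day, "v2")); // false: v2 is the visible version
  }
}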
Use of io.druid.server.coordinator.ServerHolder in project druid by druid-io.
In class DruidCoordinatorLogger, method run:
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
  DruidCluster cluster = params.getDruidCluster();
  CoordinatorStats stats = params.getCoordinatorStats();
  ServiceEmitter emitter = params.getEmitter();

  Map<String, AtomicLong> assigned = stats.getPerTierStats().get("assignedCount");
  if (assigned != null) {
    for (Map.Entry<String, AtomicLong> entry : assigned.entrySet()) {
      log.info("[%s] : Assigned %s segments among %,d servers", entry.getKey(), entry.getValue().get(), cluster.get(entry.getKey()).size());
    }
  }
  emitTieredStats(emitter, "segment/assigned/count", assigned);

  Map<String, AtomicLong> dropped = stats.getPerTierStats().get("droppedCount");
  if (dropped != null) {
    for (Map.Entry<String, AtomicLong> entry : dropped.entrySet()) {
      log.info("[%s] : Dropped %s segments among %,d servers", entry.getKey(), entry.getValue().get(), cluster.get(entry.getKey()).size());
    }
  }
  emitTieredStats(emitter, "segment/dropped/count", dropped);

  emitTieredStats(emitter, "segment/cost/raw", stats.getPerTierStats().get("initialCost"));
  emitTieredStats(emitter, "segment/cost/normalization", stats.getPerTierStats().get("normalization"));
  emitTieredStats(emitter, "segment/moved/count", stats.getPerTierStats().get("movedCount"));
  emitTieredStats(emitter, "segment/deleted/count", stats.getPerTierStats().get("deletedCount"));

  Map<String, AtomicLong> normalized = stats.getPerTierStats().get("normalizedInitialCostTimesOneThousand");
  if (normalized != null) {
    emitTieredStats(emitter, "segment/cost/normalized", Maps.transformEntries(normalized, new Maps.EntryTransformer<String, AtomicLong, Number>() {
      @Override
      public Number transformEntry(String key, AtomicLong value) {
        return value.doubleValue() / 1000d;
      }
    }));
  }

  Map<String, AtomicLong> unneeded = stats.getPerTierStats().get("unneededCount");
  if (unneeded != null) {
    for (Map.Entry<String, AtomicLong> entry : unneeded.entrySet()) {
      log.info("[%s] : Removed %s unneeded segments among %,d servers", entry.getKey(), entry.getValue().get(), cluster.get(entry.getKey()).size());
    }
  }
  emitTieredStats(emitter, "segment/unneeded/count", stats.getPerTierStats().get("unneededCount"));

  emitter.emit(new ServiceMetricEvent.Builder().build("segment/overShadowed/count", stats.getGlobalStats().get("overShadowedCount")));

  Map<String, AtomicLong> moved = stats.getPerTierStats().get("movedCount");
  if (moved != null) {
    for (Map.Entry<String, AtomicLong> entry : moved.entrySet()) {
      log.info("[%s] : Moved %,d segment(s)", entry.getKey(), entry.getValue().get());
    }
  }
  final Map<String, AtomicLong> unmoved = stats.getPerTierStats().get("unmovedCount");
  if (unmoved != null) {
    for (Map.Entry<String, AtomicLong> entry : unmoved.entrySet()) {
      log.info("[%s] : Let alone %,d segment(s)", entry.getKey(), entry.getValue().get());
    }
  }

  log.info("Load Queues:");
  for (MinMaxPriorityQueue<ServerHolder> serverHolders : cluster.getSortedServersByTier()) {
    for (ServerHolder serverHolder : serverHolders) {
      ImmutableDruidServer server = serverHolder.getServer();
      LoadQueuePeon queuePeon = serverHolder.getPeon();
      log.info(
          "Server[%s, %s, %s] has %,d left to load, %,d left to drop, %,d bytes queued, %,d bytes served.",
          server.getName(),
          server.getType(),
          server.getTier(),
          queuePeon.getSegmentsToLoad().size(),
          queuePeon.getSegmentsToDrop().size(),
          queuePeon.getLoadQueueSize(),
          server.getCurrSize()
      );
      if (log.isDebugEnabled()) {
        for (DataSegment segment : queuePeon.getSegmentsToLoad()) {
          log.debug("Segment to load[%s]", segment);
        }
        for (DataSegment segment : queuePeon.getSegmentsToDrop()) {
          log.debug("Segment to drop[%s]", segment);
        }
      }
    }
  }

  // Emit coordinator metrics
  final Set<Map.Entry<String, LoadQueuePeon>> peonEntries = params.getLoadManagementPeons().entrySet();
  for (Map.Entry<String, LoadQueuePeon> entry : peonEntries) {
    String serverName = entry.getKey();
    LoadQueuePeon queuePeon = entry.getValue();
    emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/size", queuePeon.getLoadQueueSize()));
    emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/failed", queuePeon.getAndResetFailedAssignCount()));
    emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/count", queuePeon.getSegmentsToLoad().size()));
    emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/dropQueue/count", queuePeon.getSegmentsToDrop().size()));
  }
  for (Map.Entry<String, AtomicLong> entry : coordinator.getSegmentAvailability().entrySet()) {
    String datasource = entry.getKey();
    Long count = entry.getValue().get();
    emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, datasource).build("segment/unavailable/count", count));
  }
  for (Map.Entry<String, CountingMap<String>> entry : coordinator.getReplicationStatus().entrySet()) {
    String tier = entry.getKey();
    CountingMap<String> datasourceAvailabilities = entry.getValue();
    for (Map.Entry<String, AtomicLong> datasourceAvailability : datasourceAvailabilities.entrySet()) {
      String datasource = datasourceAvailability.getKey();
      Long count = datasourceAvailability.getValue().get();
      emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.TIER, tier).setDimension(DruidMetrics.DATASOURCE, datasource).build("segment/underReplicated/count", count));
    }
  }

  // Emit segment metrics
  CountingMap<String> segmentSizes = new CountingMap<String>();
  CountingMap<String> segmentCounts = new CountingMap<String>();
  for (DruidDataSource dataSource : params.getDataSources()) {
    for (DataSegment segment : dataSource.getSegments()) {
      segmentSizes.add(dataSource.getName(), segment.getSize());
      segmentCounts.add(dataSource.getName(), 1L);
    }
  }
  for (Map.Entry<String, Long> entry : segmentSizes.snapshot().entrySet()) {
    String dataSource = entry.getKey();
    Long size = entry.getValue();
    emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/size", size));
  }
  for (Map.Entry<String, Long> entry : segmentCounts.snapshot().entrySet()) {
    String dataSource = entry.getKey();
    Long count = entry.getValue();
    emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/count", count));
  }
  return params;
}
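emitTieredStats is a private helper on DruidCoordinatorLogger that is not shown above. Judging from its call sites (it accepts both the raw AtomicLong maps and the transformed Number map, and it must tolerate a null argument such as a missing "assignedCount" stat), it plausibly looks like the sketch below; treat the exact signature and body as an assumption rather than the verbatim Druid source.

private void emitTieredStats(final ServiceEmitter emitter, final String metricName, final Map<String, ? extends Number> statMap) {
  if (statMap == null) {
    return; // a per-tier stat may be absent for this coordinator run
  }
  for (Map.Entry<String, ? extends Number> entry : statMap.entrySet()) {
    String tier = entry.getKey();
    Number value = entry.getValue();
    // One metric event per tier, tagged with the tier dimension.
    emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.TIER, tier).build(metricName, value.doubleValue()));
  }
}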
Use of io.druid.server.coordinator.ServerHolder in project druid by druid-io.
In class LoadRule, method run (the bare assignedCount below is the String constant "assignedCount" defined on LoadRule):
@Override
public CoordinatorStats run(DruidCoordinator coordinator, DruidCoordinatorRuntimeParams params, DataSegment segment) {
  final CoordinatorStats stats = new CoordinatorStats();
  final Set<DataSegment> availableSegments = params.getAvailableSegments();
  final Map<String, Integer> loadStatus = Maps.newHashMap();

  int totalReplicantsInCluster = params.getSegmentReplicantLookup().getTotalReplicants(segment.getIdentifier());
  for (Map.Entry<String, Integer> entry : getTieredReplicants().entrySet()) {
    final String tier = entry.getKey();
    final int expectedReplicantsInTier = entry.getValue();
    final int totalReplicantsInTier = params.getSegmentReplicantLookup().getTotalReplicants(segment.getIdentifier(), tier);
    final int loadedReplicantsInTier = params.getSegmentReplicantLookup().getLoadedReplicants(segment.getIdentifier(), tier);
    final MinMaxPriorityQueue<ServerHolder> serverQueue = params.getDruidCluster().getServersByTier(tier);
    if (serverQueue == null) {
      log.makeAlert("Tier[%s] has no servers! Check your cluster configuration!", tier).emit();
      continue;
    }
    final List<ServerHolder> serverHolderList = Lists.newArrayList(serverQueue);
    final BalancerStrategy strategy = params.getBalancerStrategy();
    if (availableSegments.contains(segment)) {
      CoordinatorStats assignStats = assign(params.getReplicationManager(), tier, totalReplicantsInCluster, expectedReplicantsInTier, totalReplicantsInTier, strategy, serverHolderList, segment);
      stats.accumulate(assignStats);
      totalReplicantsInCluster += assignStats.getPerTierStats().get(assignedCount).get(tier).get();
    }
    loadStatus.put(tier, expectedReplicantsInTier - loadedReplicantsInTier);
  }

  // Remove over-replication
  stats.accumulate(drop(loadStatus, segment, params));
  return stats;
}
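The assign helper called above is not shown. Based on its argument list and on the per-tier assignedCount stat the caller reads back, a simplified version would look roughly like the sketch below: keep loading replicas onto servers chosen by the BalancerStrategy until the tier has its expected count, asking the ReplicationThrottler for permission whenever at least one copy already exists in the cluster. This is a hedged reconstruction that omits the throttler's replicant registration and the load callback bookkeeping; it is not the exact LoadRule implementation.

private CoordinatorStats assign(ReplicationThrottler replicationManager, String tier, int totalReplicantsInCluster, int expectedReplicantsInTier, int totalReplicantsInTier, BalancerStrategy strategy, List<ServerHolder> serverHolderList, DataSegment segment) {
  CoordinatorStats stats = new CoordinatorStats();
  // Initialize the stat so the caller's lookup for this tier never misses.
  stats.addToTieredStat("assignedCount", tier, 0);

  int currReplicantsInTier = totalReplicantsInTier;
  int currTotalReplicantsInCluster = totalReplicantsInCluster;
  while (currReplicantsInTier < expectedReplicantsInTier) {
    // Creating a replica (as opposed to the first copy) is subject to the replication throttle.
    boolean replicate = currTotalReplicantsInCluster > 0;
    if (replicate && !replicationManager.canCreateReplicant(tier)) {
      break;
    }

    ServerHolder holder = strategy.findNewSegmentHomeReplicator(segment, serverHolderList);
    if (holder == null) {
      break; // no server in this tier can take the segment
    }

    holder.getPeon().loadSegment(segment, new LoadPeonCallback() {
      @Override
      public void execute() {
        // the real code unregisters the in-flight replicant here
      }
    });
    stats.addToTieredStat("assignedCount", tier, 1);
    ++currReplicantsInTier;
    ++currTotalReplicantsInCluster;
  }
  return stats;
}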
Use of io.druid.server.coordinator.ServerHolder in project druid by druid-io.
In class DruidCoordinatorCleanupOvershadowedTest, method testRun:
@Test
public void testRun() {
  druidCoordinatorCleanupOvershadowed = new DruidCoordinatorCleanupOvershadowed(coordinator);
  availableSegments = ImmutableList.of(segmentV1, segmentV0, segmentV2);
  druidCluster = new DruidCluster(
      ImmutableMap.of(
          "normal",
          MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse())
                             .create(Arrays.asList(new ServerHolder(druidServer, mockPeon)))
      )
  );

  EasyMock.expect(druidServer.getDataSources()).andReturn(ImmutableList.of(druidDataSource)).anyTimes();
  EasyMock.expect(druidDataSource.getSegments()).andReturn(ImmutableSet.<DataSegment>of(segmentV1, segmentV2)).anyTimes();
  EasyMock.expect(druidDataSource.getName()).andReturn("test").anyTimes();
  coordinator.removeSegment(segmentV1);
  coordinator.removeSegment(segmentV0);
  EasyMock.expectLastCall();
  EasyMock.replay(mockPeon, coordinator, druidServer, druidDataSource);

  DruidCoordinatorRuntimeParams params = DruidCoordinatorRuntimeParams
      .newBuilder()
      .withAvailableSegments(availableSegments)
      .withCoordinatorStats(new CoordinatorStats())
      .withDruidCluster(druidCluster)
      .build();
  druidCoordinatorCleanupOvershadowed.run(params);
  EasyMock.verify(coordinator, druidDataSource, druidServer);
}
Use of io.druid.server.coordinator.ServerHolder in project druid by druid-io.
In class LoadRuleTest, method testDrop:
@Test
public void testDrop() throws Exception {
  mockPeon.dropSegment(EasyMock.<DataSegment>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
  EasyMock.expectLastCall().atLeastOnce();
  EasyMock.expect(mockPeon.getSegmentsToLoad()).andReturn(Sets.<DataSegment>newHashSet()).atLeastOnce();
  EasyMock.expect(mockPeon.getLoadQueueSize()).andReturn(0L).anyTimes();
  EasyMock.replay(mockPeon);

  LoadRule rule = new LoadRule() {

    private final Map<String, Integer> tiers = ImmutableMap.of("hot", 0, DruidServer.DEFAULT_TIER, 0);

    @Override
    public Map<String, Integer> getTieredReplicants() {
      return tiers;
    }

    @Override
    public int getNumReplicants(String tier) {
      return tiers.get(tier);
    }

    @Override
    public String getType() {
      return "test";
    }

    @Override
    public boolean appliesTo(DataSegment segment, DateTime referenceTimestamp) {
      return true;
    }

    @Override
    public boolean appliesTo(Interval interval, DateTime referenceTimestamp) {
      return true;
    }
  };

  DruidServer server1 = new DruidServer("serverHot", "hostHot", 1000, "historical", "hot", 0);
  server1.addDataSegment(segment.getIdentifier(), segment);
  DruidServer server2 = new DruidServer("serverNorm", "hostNorm", 1000, "historical", DruidServer.DEFAULT_TIER, 0);
  server2.addDataSegment(segment.getIdentifier(), segment);
  DruidCluster druidCluster = new DruidCluster(
      ImmutableMap.of(
          "hot",
          MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse())
                             .create(Arrays.asList(new ServerHolder(server1.toImmutableDruidServer(), mockPeon))),
          DruidServer.DEFAULT_TIER,
          MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse())
                             .create(Arrays.asList(new ServerHolder(server2.toImmutableDruidServer(), mockPeon)))
      )
  );

  ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
  BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
  CoordinatorStats stats = rule.run(
      null,
      DruidCoordinatorRuntimeParams.newBuilder()
                                   .withDruidCluster(druidCluster)
                                   .withSegmentReplicantLookup(SegmentReplicantLookup.make(druidCluster))
                                   .withReplicationManager(throttler)
                                   .withBalancerStrategy(balancerStrategy)
                                   .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
                                   .withAvailableSegments(Arrays.asList(segment))
                                   .build(),
      segment
  );

  Assert.assertTrue(stats.getPerTierStats().get("droppedCount").get("hot").get() == 1);
  Assert.assertTrue(stats.getPerTierStats().get("droppedCount").get(DruidServer.DEFAULT_TIER).get() == 1);
  exec.shutdown();
}