Search in sources :

Example 6 with DruidCluster

use of org.apache.druid.server.coordinator.DruidCluster in project druid by druid-io.

The class LoadRuleTest defines the method testLoadPrimaryAssignDoesNotOverAssign.

@Test
public void testLoadPrimaryAssignDoesNotOverAssign() {
    // The throttler never blocks replicant creation in this scenario.
    EasyMock.expect(throttler.canCreateReplicant(EasyMock.anyString())).andReturn(true).anyTimes();

    // A peon that must receive at least one load request for the primary copy.
    final LoadQueuePeon emptyPeon = createEmptyPeon();
    emptyPeon.loadSegment(EasyMock.anyObject(), EasyMock.anyObject());
    EasyMock.expectLastCall().atLeastOnce();

    // One replicant on the "hot" tier.
    final LoadRule rule = createLoadRule(ImmutableMap.of("hot", 1));
    final DataSegment segment = createDataSegment("foo");

    EasyMock.expect(mockBalancerStrategy.findNewSegmentHomeReplicator(EasyMock.anyObject(), EasyMock.anyObject()))
            .andDelegateTo(balancerStrategy)
            .anyTimes();
    EasyMock.replay(throttler, emptyPeon, mockBalancerStrategy);

    final ImmutableDruidServer hotServer1 =
            new DruidServer("serverHot", "hostHot", null, 1000, ServerType.HISTORICAL, "hot", 1).toImmutableDruidServer();
    final ImmutableDruidServer hotServer2 =
            new DruidServer("serverHot2", "hostHot2", null, 1000, ServerType.HISTORICAL, "hot", 1).toImmutableDruidServer();

    final DruidCluster initialCluster = DruidClusterBuilder
            .newBuilder()
            .addTier("hot", new ServerHolder(hotServer1, emptyPeon), new ServerHolder(hotServer2, emptyPeon))
            .build();

    final CoordinatorStats firstRunStats =
            rule.run(null, makeCoordinatorRuntimeParams(initialCluster, segment), segment);
    Assert.assertEquals(1L, firstRunStats.getTieredStat(LoadRule.ASSIGNED_COUNT, "hot"));

    // ensure multiple runs don't assign primary segment again if at replication count
    final LoadQueuePeon loadingPeon = createLoadingPeon(ImmutableList.of(segment), false);
    EasyMock.replay(loadingPeon);

    final DruidCluster afterLoad = DruidClusterBuilder
            .newBuilder()
            .addTier("hot", new ServerHolder(hotServer1, loadingPeon), new ServerHolder(hotServer2, emptyPeon))
            .build();

    final CoordinatorStats statsAfterLoadPrimary =
            rule.run(null, makeCoordinatorRuntimeParams(afterLoad, segment), segment);
    Assert.assertEquals(0, statsAfterLoadPrimary.getTieredStat(LoadRule.ASSIGNED_COUNT, "hot"));

    EasyMock.verify(throttler, emptyPeon, mockBalancerStrategy);
}
Also used : CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) LoadQueuePeon(org.apache.druid.server.coordinator.LoadQueuePeon) DruidServer(org.apache.druid.client.DruidServer) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) DruidCluster(org.apache.druid.server.coordinator.DruidCluster) DataSegment(org.apache.druid.timeline.DataSegment) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) Test(org.junit.Test)

Example 7 with DruidCluster

use of org.apache.druid.server.coordinator.DruidCluster in project druid by druid-io.

The class LoadRuleTest defines the method testOverAssignForTimedOutSegments.

@Test
public void testOverAssignForTimedOutSegments() {
    // Replication throttling never blocks in this scenario.
    EasyMock.expect(throttler.canCreateReplicant(EasyMock.anyString())).andReturn(true).anyTimes();

    // A peon that must receive at least one load request.
    final LoadQueuePeon idlePeon = createEmptyPeon();
    idlePeon.loadSegment(EasyMock.anyObject(), EasyMock.anyObject());
    EasyMock.expectLastCall().atLeastOnce();

    // One replicant on the "hot" tier.
    final LoadRule rule = createLoadRule(ImmutableMap.of("hot", 1));
    final DataSegment segment = createDataSegment("foo");

    EasyMock.expect(mockBalancerStrategy.findNewSegmentHomeReplicator(EasyMock.anyObject(), EasyMock.anyObject()))
            .andDelegateTo(balancerStrategy)
            .anyTimes();
    EasyMock.replay(throttler, idlePeon, mockBalancerStrategy);

    final ImmutableDruidServer hotServer1 =
            new DruidServer("serverHot", "hostHot", null, 1000, ServerType.HISTORICAL, "hot", 1).toImmutableDruidServer();
    final ImmutableDruidServer hotServer2 =
            new DruidServer("serverHot2", "hostHot2", null, 1000, ServerType.HISTORICAL, "hot", 1).toImmutableDruidServer();

    final DruidCluster druidCluster = DruidClusterBuilder
            .newBuilder()
            .addTier("hot", new ServerHolder(hotServer1, idlePeon), new ServerHolder(hotServer2, idlePeon))
            .build();

    // Ensure that the segment is assigned to one of the historicals
    final CoordinatorStats stats =
            rule.run(null, makeCoordinatorRuntimeParamsWithLoadReplicationOnTimeout(druidCluster, segment), segment);
    Assert.assertEquals(1L, stats.getTieredStat(LoadRule.ASSIGNED_COUNT, "hot"));

    // Ensure that the primary segment is assigned again in case the peon timed out on loading the segment
    final LoadQueuePeon slowLoadingPeon = createLoadingPeon(ImmutableList.of(segment), true);
    EasyMock.replay(slowLoadingPeon);

    final DruidCluster withLoadTimeout = DruidClusterBuilder
            .newBuilder()
            .addTier("hot", new ServerHolder(hotServer1, slowLoadingPeon), new ServerHolder(hotServer2, idlePeon))
            .build();

    final CoordinatorStats statsAfterLoadPrimary =
            rule.run(null, makeCoordinatorRuntimeParamsWithLoadReplicationOnTimeout(withLoadTimeout, segment), segment);
    Assert.assertEquals(1L, statsAfterLoadPrimary.getTieredStat(LoadRule.ASSIGNED_COUNT, "hot"));

    EasyMock.verify(throttler, idlePeon, mockBalancerStrategy);
}
Also used : CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) LoadQueuePeon(org.apache.druid.server.coordinator.LoadQueuePeon) DruidServer(org.apache.druid.client.DruidServer) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) DruidCluster(org.apache.druid.server.coordinator.DruidCluster) DataSegment(org.apache.druid.timeline.DataSegment) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) Test(org.junit.Test)

Example 8 with DruidCluster

use of org.apache.druid.server.coordinator.DruidCluster in project druid by druid-io.

The class BroadcastDistributionRuleTest defines the method testBroadcastToMultipleDataSources.

@Test
public void testBroadcastToMultipleDataSources() {
    final ForeverBroadcastDistributionRule rule = new ForeverBroadcastDistributionRule();

    final DruidCoordinatorRuntimeParams params = makeCoordinartorRuntimeParams(
            druidCluster,
            smallSegment,
            largeSegments.get(0),
            largeSegments.get(1),
            largeSegments.get(2),
            largeSegments2.get(0),
            largeSegments2.get(1)
    );
    final CoordinatorStats stats = rule.run(null, params, smallSegment);

    // Broadcast counts globally, not per tier.
    Assert.assertEquals(5L, stats.getGlobalStat(LoadRule.ASSIGNED_COUNT));
    Assert.assertFalse(stats.hasPerTierStats());

    // Every holder of the large segments has the broadcast segment queued for loading.
    Assert.assertTrue(
            holdersOfLargeSegments.stream()
                    .allMatch(holder -> holder.getPeon().getSegmentsToLoad().contains(smallSegment))
    );
    Assert.assertTrue(
            holdersOfLargeSegments2.stream()
                    .allMatch(holder -> holder.getPeon().getSegmentsToLoad().contains(smallSegment))
    );
    // The holder of the small segment does not get it queued again.
    Assert.assertFalse(holderOfSmallSegment.getPeon().getSegmentsToLoad().contains(smallSegment));
}
Also used : DateTimes(org.apache.druid.java.util.common.DateTimes) CoordinatorRuntimeParamsTestHelpers(org.apache.druid.server.coordinator.CoordinatorRuntimeParamsTestHelpers) Intervals(org.apache.druid.java.util.common.Intervals) DruidServer(org.apache.druid.client.DruidServer) DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams) Test(org.junit.Test) HashMap(java.util.HashMap) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) NoneShardSpec(org.apache.druid.timeline.partition.NoneShardSpec) ArrayList(java.util.ArrayList) SegmentReplicantLookup(org.apache.druid.server.coordinator.SegmentReplicantLookup) List(java.util.List) ServerType(org.apache.druid.server.coordination.ServerType) DataSegment(org.apache.druid.timeline.DataSegment) DruidCluster(org.apache.druid.server.coordinator.DruidCluster) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) Assert(org.junit.Assert) DruidClusterBuilder(org.apache.druid.server.coordinator.DruidClusterBuilder) LoadQueuePeonTester(org.apache.druid.server.coordinator.LoadQueuePeonTester) Before(org.junit.Before) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) Test(org.junit.Test)

Example 9 with DruidCluster

use of org.apache.druid.server.coordinator.DruidCluster in project druid by druid-io.

The class BroadcastDistributionRuleTest defines the method testBroadcastToAllServers.

@Test
public void testBroadcastToAllServers() {
    final ForeverBroadcastDistributionRule rule = new ForeverBroadcastDistributionRule();

    final DruidCoordinatorRuntimeParams params = makeCoordinartorRuntimeParams(
            druidCluster,
            smallSegment,
            largeSegments.get(0),
            largeSegments.get(1),
            largeSegments.get(2),
            largeSegments2.get(0),
            largeSegments2.get(1)
    );
    final CoordinatorStats stats = rule.run(null, params, smallSegment);

    // Broadcast counts globally, not per tier.
    Assert.assertEquals(5L, stats.getGlobalStat(LoadRule.ASSIGNED_COUNT));
    Assert.assertFalse(stats.hasPerTierStats());

    // After the run, every server is either loading or already serving the broadcast segment.
    Assert.assertTrue(
            druidCluster.getAllServers().stream()
                    .allMatch(holder -> holder.isLoadingSegment(smallSegment) || holder.isServingSegment(smallSegment))
    );
}
Also used : DateTimes(org.apache.druid.java.util.common.DateTimes) CoordinatorRuntimeParamsTestHelpers(org.apache.druid.server.coordinator.CoordinatorRuntimeParamsTestHelpers) Intervals(org.apache.druid.java.util.common.Intervals) DruidServer(org.apache.druid.client.DruidServer) DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams) Test(org.junit.Test) HashMap(java.util.HashMap) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) NoneShardSpec(org.apache.druid.timeline.partition.NoneShardSpec) ArrayList(java.util.ArrayList) SegmentReplicantLookup(org.apache.druid.server.coordinator.SegmentReplicantLookup) List(java.util.List) ServerType(org.apache.druid.server.coordination.ServerType) DataSegment(org.apache.druid.timeline.DataSegment) DruidCluster(org.apache.druid.server.coordinator.DruidCluster) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) Assert(org.junit.Assert) DruidClusterBuilder(org.apache.druid.server.coordinator.DruidClusterBuilder) LoadQueuePeonTester(org.apache.druid.server.coordinator.LoadQueuePeonTester) Before(org.junit.Before) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) Test(org.junit.Test)

Example 10 with DruidCluster

use of org.apache.druid.server.coordinator.DruidCluster in project druid by druid-io.

The class EmitClusterStatsAndMetrics defines the method run.

// Emits the coordinator's per-run statistics as service metrics and log lines,
// logs the state of every historical's load queue, and returns the params unchanged.
// NOTE: this method also mutates `stats` (TOTAL_CAPACITY, TOTAL_HISTORICAL_COUNT,
// MAX_REPLICATION_FACTOR are accumulated here) before emitting them below, so the
// statement order is significant.
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    DruidCluster cluster = params.getDruidCluster();
    CoordinatorStats stats = params.getCoordinatorStats();
    ServiceEmitter emitter = params.getEmitter();
    // Per-tier assignment and drop counts accumulated by the rules during this run.
    stats.forEachTieredStat("assignedCount", (final String tier, final long count) -> {
        log.info("[%s] : Assigned %s segments among %,d servers", tier, count, cluster.getHistoricalsByTier(tier).size());
        emitTieredStat(emitter, "segment/assigned/count", tier, count);
    });
    stats.forEachTieredStat("droppedCount", (final String tier, final long count) -> {
        log.info("[%s] : Dropped %s segments among %,d servers", tier, count, cluster.getHistoricalsByTier(tier).size());
        emitTieredStat(emitter, "segment/dropped/count", tier, count);
    });
    // Balancer cost statistics.
    emitTieredStats(emitter, "segment/cost/raw", stats, "initialCost");
    emitTieredStats(emitter, "segment/cost/normalization", stats, "normalization");
    emitTieredStats(emitter, "segment/moved/count", stats, "movedCount");
    emitTieredStats(emitter, "segment/deleted/count", stats, "deletedCount");
    // The stat is stored scaled by 1000 (longs only); divide back to a double here.
    stats.forEachTieredStat("normalizedInitialCostTimesOneThousand", (final String tier, final long count) -> {
        emitTieredStat(emitter, "segment/cost/normalized", tier, count / 1000d);
    });
    stats.forEachTieredStat("unneededCount", (final String tier, final long count) -> {
        log.info("[%s] : Removed %s unneeded segments among %,d servers", tier, count, cluster.getHistoricalsByTier(tier).size());
        emitTieredStat(emitter, "segment/unneeded/count", tier, count);
    });
    emitter.emit(new ServiceMetricEvent.Builder().build("segment/overShadowed/count", stats.getGlobalStat("overShadowedCount")));
    // moved/unmoved are logged only; movedCount was already emitted as a metric above.
    stats.forEachTieredStat("movedCount", (final String tier, final long count) -> {
        log.info("[%s] : Moved %,d segment(s)", tier, count);
    });
    stats.forEachTieredStat("unmovedCount", (final String tier, final long count) -> {
        log.info("[%s] : Let alone %,d segment(s)", tier, count);
    });
    log.info("Load Queues:");
    // Log every historical's queue state and, as a side effect, accumulate per-tier
    // capacity and server counts into `stats` for the emitTieredStats calls below.
    for (Iterable<ServerHolder> serverHolders : cluster.getSortedHistoricalsByTier()) {
        for (ServerHolder serverHolder : serverHolders) {
            ImmutableDruidServer server = serverHolder.getServer();
            LoadQueuePeon queuePeon = serverHolder.getPeon();
            log.info("Server[%s, %s, %s] has %,d left to load, %,d left to drop, %,d bytes queued, %,d bytes served.", server.getName(), server.getType().toString(), server.getTier(), queuePeon.getSegmentsToLoad().size(), queuePeon.getSegmentsToDrop().size(), queuePeon.getLoadQueueSize(), server.getCurrSize());
            if (log.isDebugEnabled()) {
                for (DataSegment segment : queuePeon.getSegmentsToLoad()) {
                    log.debug("Segment to load[%s]", segment);
                }
                for (DataSegment segment : queuePeon.getSegmentsToDrop()) {
                    log.debug("Segment to drop[%s]", segment);
                }
            }
            stats.addToTieredStat(TOTAL_CAPACITY, server.getTier(), server.getMaxSize());
            stats.addToTieredStat(TOTAL_HISTORICAL_COUNT, server.getTier(), 1);
        }
    }
    // Accumulate the max replication factor per tier across all configured load rules.
    params.getDatabaseRuleManager().getAllRules().values().forEach(rules -> rules.forEach(rule -> {
        if (rule instanceof LoadRule) {
            ((LoadRule) rule).getTieredReplicants().forEach((tier, replica) -> stats.accumulateMaxTieredStat(MAX_REPLICATION_FACTOR, tier, replica));
        }
    }));
    // Emit the tier-level stats accumulated above.
    emitTieredStats(emitter, "tier/required/capacity", stats, LoadRule.REQUIRED_CAPACITY);
    emitTieredStats(emitter, "tier/total/capacity", stats, TOTAL_CAPACITY);
    emitTieredStats(emitter, "tier/replication/factor", stats, MAX_REPLICATION_FACTOR);
    emitTieredStats(emitter, "tier/historical/count", stats, TOTAL_HISTORICAL_COUNT);
    // Emit coordinator metrics
    params.getLoadManagementPeons().forEach((final String serverName, final LoadQueuePeon queuePeon) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/size", queuePeon.getLoadQueueSize()));
        // getAndResetFailedAssignCount clears the counter, so this metric is a delta per run.
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/failed", queuePeon.getAndResetFailedAssignCount()));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/count", queuePeon.getSegmentsToLoad().size()));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/dropQueue/count", queuePeon.getSegmentsToDrop().size()));
    });
    // Per-datasource availability and replication status.
    coordinator.computeNumsUnavailableUsedSegmentsPerDataSource().object2IntEntrySet().forEach((final Object2IntMap.Entry<String> entry) -> {
        final String dataSource = entry.getKey();
        final int numUnavailableUsedSegmentsInDataSource = entry.getIntValue();
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/unavailable/count", numUnavailableUsedSegmentsInDataSource));
    });
    coordinator.computeUnderReplicationCountsPerDataSourcePerTier().forEach((final String tier, final Object2LongMap<String> underReplicationCountsPerDataSource) -> {
        for (final Object2LongMap.Entry<String> entry : underReplicationCountsPerDataSource.object2LongEntrySet()) {
            final String dataSource = entry.getKey();
            final long underReplicationCount = entry.getLongValue();
            emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.TIER, tier).setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/underReplicated/count", underReplicationCount));
        }
    });
    // Compaction task and per-datasource compaction progress metrics.
    emitter.emit(new ServiceMetricEvent.Builder().build("compact/task/count", stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT)));
    emitter.emit(new ServiceMetricEvent.Builder().build("compactTask/maxSlot/count", stats.getGlobalStat(CompactSegments.MAX_COMPACTION_TASK_SLOT)));
    emitter.emit(new ServiceMetricEvent.Builder().build("compactTask/availableSlot/count", stats.getGlobalStat(CompactSegments.AVAILABLE_COMPACTION_TASK_SLOT)));
    stats.forEachDataSourceStat(CompactSegments.TOTAL_SIZE_OF_SEGMENTS_AWAITING, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/waitCompact/bytes", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_COUNT_OF_SEGMENTS_AWAITING, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/waitCompact/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_INTERVAL_OF_SEGMENTS_AWAITING, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("interval/waitCompact/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_SIZE_OF_SEGMENTS_SKIPPED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/skipCompact/bytes", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_COUNT_OF_SEGMENTS_SKIPPED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/skipCompact/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_INTERVAL_OF_SEGMENTS_SKIPPED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("interval/skipCompact/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_SIZE_OF_SEGMENTS_COMPACTED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/compacted/bytes", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_COUNT_OF_SEGMENTS_COMPACTED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/compacted/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_INTERVAL_OF_SEGMENTS_COMPACTED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("interval/compacted/count", count));
    });
    // Emit segment metrics
    params.getUsedSegmentsTimelinesPerDataSource().forEach((String dataSource, VersionedIntervalTimeline<String, DataSegment> dataSourceWithUsedSegments) -> {
        long totalSizeOfUsedSegments = dataSourceWithUsedSegments.iterateAllObjects().stream().mapToLong(DataSegment::getSize).sum();
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/size", totalSizeOfUsedSegments));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/count", dataSourceWithUsedSegments.getNumObjects()));
    });
    // Emit coordinator runtime stats
    emitDutyStats(emitter, "coordinator/time", stats, "runtime");
    return params;
}
Also used : ServiceEmitter(org.apache.druid.java.util.emitter.service.ServiceEmitter) Logger(org.apache.druid.java.util.common.logger.Logger) DruidMetrics(org.apache.druid.query.DruidMetrics) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) ServiceMetricEvent(org.apache.druid.java.util.emitter.service.ServiceMetricEvent) DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams) DruidCoordinator(org.apache.druid.server.coordinator.DruidCoordinator) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) Object2LongMap(it.unimi.dsi.fastutil.objects.Object2LongMap) Object2IntMap(it.unimi.dsi.fastutil.objects.Object2IntMap) ServiceEmitter(org.apache.druid.java.util.emitter.service.ServiceEmitter) DataSegment(org.apache.druid.timeline.DataSegment) DruidCluster(org.apache.druid.server.coordinator.DruidCluster) LoadQueuePeon(org.apache.druid.server.coordinator.LoadQueuePeon) LoadRule(org.apache.druid.server.coordinator.rules.LoadRule) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) Object2LongMap(it.unimi.dsi.fastutil.objects.Object2LongMap) DruidCluster(org.apache.druid.server.coordinator.DruidCluster) DataSegment(org.apache.druid.timeline.DataSegment) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) LoadQueuePeon(org.apache.druid.server.coordinator.LoadQueuePeon) LoadRule(org.apache.druid.server.coordinator.rules.LoadRule) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) ServiceMetricEvent(org.apache.druid.java.util.emitter.service.ServiceMetricEvent) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer)

Aggregations

DruidCluster (org.apache.druid.server.coordinator.DruidCluster)22 CoordinatorStats (org.apache.druid.server.coordinator.CoordinatorStats)21 DataSegment (org.apache.druid.timeline.DataSegment)21 ServerHolder (org.apache.druid.server.coordinator.ServerHolder)20 Test (org.junit.Test)17 DruidServer (org.apache.druid.client.DruidServer)15 LoadQueuePeon (org.apache.druid.server.coordinator.LoadQueuePeon)14 ImmutableDruidServer (org.apache.druid.client.ImmutableDruidServer)13 DruidCoordinatorRuntimeParams (org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams)7 HashMap (java.util.HashMap)5 LoadQueuePeonTester (org.apache.druid.server.coordinator.LoadQueuePeonTester)5 ArrayList (java.util.ArrayList)3 List (java.util.List)3 DateTimes (org.apache.druid.java.util.common.DateTimes)3 Intervals (org.apache.druid.java.util.common.Intervals)3 ServerType (org.apache.druid.server.coordination.ServerType)3 CoordinatorRuntimeParamsTestHelpers (org.apache.druid.server.coordinator.CoordinatorRuntimeParamsTestHelpers)3 DruidClusterBuilder (org.apache.druid.server.coordinator.DruidClusterBuilder)3 SegmentReplicantLookup (org.apache.druid.server.coordinator.SegmentReplicantLookup)3 NoneShardSpec (org.apache.druid.timeline.partition.NoneShardSpec)3