Example 11 with CoordinatorStats

Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

From the class LoadRuleTest, method testLoadPrimaryAssignDoesNotOverAssign.

@Test
public void testLoadPrimaryAssignDoesNotOverAssign() {
    EasyMock.expect(throttler.canCreateReplicant(EasyMock.anyString())).andReturn(true).anyTimes();
    final LoadQueuePeon mockPeon = createEmptyPeon();
    mockPeon.loadSegment(EasyMock.anyObject(), EasyMock.anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    LoadRule rule = createLoadRule(ImmutableMap.of("hot", 1));
    final DataSegment segment = createDataSegment("foo");
    EasyMock.expect(mockBalancerStrategy.findNewSegmentHomeReplicator(EasyMock.anyObject(), EasyMock.anyObject())).andDelegateTo(balancerStrategy).anyTimes();
    EasyMock.replay(throttler, mockPeon, mockBalancerStrategy);
    ImmutableDruidServer server1 = new DruidServer("serverHot", "hostHot", null, 1000, ServerType.HISTORICAL, "hot", 1).toImmutableDruidServer();
    ImmutableDruidServer server2 = new DruidServer("serverHot2", "hostHot2", null, 1000, ServerType.HISTORICAL, "hot", 1).toImmutableDruidServer();
    DruidCluster druidCluster = DruidClusterBuilder.newBuilder().addTier("hot", new ServerHolder(server1, mockPeon), new ServerHolder(server2, mockPeon)).build();
    // First run: exactly one of the two empty servers should receive the primary assignment
    CoordinatorStats stats = rule.run(null, makeCoordinatorRuntimeParams(druidCluster, segment), segment);
    Assert.assertEquals(1L, stats.getTieredStat(LoadRule.ASSIGNED_COUNT, "hot"));
    // Ensure that subsequent runs do not assign the primary segment again once the replication count is met
    final LoadQueuePeon loadingPeon = createLoadingPeon(ImmutableList.of(segment), false);
    EasyMock.replay(loadingPeon);
    DruidCluster afterLoad = DruidClusterBuilder.newBuilder().addTier("hot", new ServerHolder(server1, loadingPeon), new ServerHolder(server2, mockPeon)).build();
    CoordinatorStats statsAfterLoadPrimary = rule.run(null, makeCoordinatorRuntimeParams(afterLoad, segment), segment);
    Assert.assertEquals(0L, statsAfterLoadPrimary.getTieredStat(LoadRule.ASSIGNED_COUNT, "hot"));
    EasyMock.verify(throttler, mockPeon, mockBalancerStrategy);
}
Also used : CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) LoadQueuePeon(org.apache.druid.server.coordinator.LoadQueuePeon) DruidServer(org.apache.druid.client.DruidServer) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) DruidCluster(org.apache.druid.server.coordinator.DruidCluster) DataSegment(org.apache.druid.timeline.DataSegment) Test(org.junit.Test)
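
The peon helpers referenced in this test are defined elsewhere in LoadRuleTest and are not part of this listing. Below is a minimal sketch of what they plausibly look like, assuming EasyMock, standard java.util.HashSet/Set/List imports, and the LoadQueuePeon accessors that LoadRule consults (getSegmentsToLoad, getLoadQueueSize, getTimedOutSegments); the exact stubbing is illustrative, not copied from the project:

private static LoadQueuePeon createEmptyPeon() {
    // An empty peon reports nothing queued, so the rule sees spare capacity on its server.
    final LoadQueuePeon mockPeon = EasyMock.createMock(LoadQueuePeon.class);
    EasyMock.expect(mockPeon.getSegmentsToLoad()).andReturn(new HashSet<>()).anyTimes();
    EasyMock.expect(mockPeon.getSegmentsMarkedToDrop()).andReturn(new HashSet<>()).anyTimes();
    EasyMock.expect(mockPeon.getLoadQueueSize()).andReturn(0L).anyTimes();
    EasyMock.expect(mockPeon.getNumberOfSegmentsInQueue()).andReturn(0).anyTimes();
    return mockPeon;
}

private static LoadQueuePeon createLoadingPeon(List<DataSegment> segments, boolean slowLoading) {
    final Set<DataSegment> loading = new HashSet<>(segments);
    final LoadQueuePeon mockPeon = EasyMock.createMock(LoadQueuePeon.class);
    // A loading peon already has the segments queued, which is what stops re-assignment above...
    EasyMock.expect(mockPeon.getSegmentsToLoad()).andReturn(loading).anyTimes();
    EasyMock.expect(mockPeon.getSegmentsMarkedToDrop()).andReturn(new HashSet<>()).anyTimes();
    EasyMock.expect(mockPeon.getLoadQueueSize()).andReturn(loading.stream().mapToLong(DataSegment::getSize).sum()).anyTimes();
    EasyMock.expect(mockPeon.getNumberOfSegmentsInQueue()).andReturn(loading.size()).anyTimes();
    // ...and, when slowLoading is true, it reports those segments as timed out, which the
    // next example (testOverAssignForTimedOutSegments) relies on to allow over-assignment.
    EasyMock.expect(mockPeon.getTimedOutSegments()).andReturn(slowLoading ? loading : new HashSet<>()).anyTimes();
    return mockPeon;
}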

Example 12 with CoordinatorStats

Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

From the class LoadRuleTest, method testOverAssignForTimedOutSegments.

@Test
public void testOverAssignForTimedOutSegments() {
    EasyMock.expect(throttler.canCreateReplicant(EasyMock.anyString())).andReturn(true).anyTimes();
    final LoadQueuePeon emptyPeon = createEmptyPeon();
    emptyPeon.loadSegment(EasyMock.anyObject(), EasyMock.anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    LoadRule rule = createLoadRule(ImmutableMap.of("hot", 1));
    final DataSegment segment = createDataSegment("foo");
    EasyMock.expect(mockBalancerStrategy.findNewSegmentHomeReplicator(EasyMock.anyObject(), EasyMock.anyObject())).andDelegateTo(balancerStrategy).anyTimes();
    EasyMock.replay(throttler, emptyPeon, mockBalancerStrategy);
    ImmutableDruidServer server1 = new DruidServer("serverHot", "hostHot", null, 1000, ServerType.HISTORICAL, "hot", 1).toImmutableDruidServer();
    ImmutableDruidServer server2 = new DruidServer("serverHot2", "hostHot2", null, 1000, ServerType.HISTORICAL, "hot", 1).toImmutableDruidServer();
    DruidCluster druidCluster = DruidClusterBuilder.newBuilder().addTier("hot", new ServerHolder(server1, emptyPeon), new ServerHolder(server2, emptyPeon)).build();
    CoordinatorStats stats = rule.run(null, makeCoordinatorRuntimeParamsWithLoadReplicationOnTimeout(druidCluster, segment), segment);
    // Ensure that the segment is assigned to one of the historicals
    Assert.assertEquals(1L, stats.getTieredStat(LoadRule.ASSIGNED_COUNT, "hot"));
    // Ensure that the primary segment is assigned again in case the peon timed out on loading the segment
    final LoadQueuePeon slowLoadingPeon = createLoadingPeon(ImmutableList.of(segment), true);
    EasyMock.replay(slowLoadingPeon);
    DruidCluster withLoadTimeout = DruidClusterBuilder.newBuilder().addTier("hot", new ServerHolder(server1, slowLoadingPeon), new ServerHolder(server2, emptyPeon)).build();
    CoordinatorStats statsAfterLoadPrimary = rule.run(null, makeCoordinatorRuntimeParamsWithLoadReplicationOnTimeout(withLoadTimeout, segment), segment);
    Assert.assertEquals(1L, statsAfterLoadPrimary.getTieredStat(LoadRule.ASSIGNED_COUNT, "hot"));
    EasyMock.verify(throttler, emptyPeon, mockBalancerStrategy);
}
Also used : CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) LoadQueuePeon(org.apache.druid.server.coordinator.LoadQueuePeon) DruidServer(org.apache.druid.client.DruidServer) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) DruidCluster(org.apache.druid.server.coordinator.DruidCluster) DataSegment(org.apache.druid.timeline.DataSegment) Test(org.junit.Test)
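
The only substantive difference from the previous example is the runtime-params helper: the timed-out variant enables the replicateAfterLoadTimeout dynamic config, which lets LoadRule assign a replica over a timed-out load. A hedged sketch of that helper, assuming the CoordinatorRuntimeParamsTestHelpers builder, SegmentReplicantLookup.make's boolean replicateAfterLoadTimeout parameter, and CoordinatorDynamicConfig.builder().withReplicateAfterLoadTimeout; the builder chain is inferred, not copied:

private DruidCoordinatorRuntimeParams makeCoordinatorRuntimeParamsWithLoadReplicationOnTimeout(
    DruidCluster druidCluster,
    DataSegment... usedSegments
) {
    return CoordinatorRuntimeParamsTestHelpers
        .newBuilder()
        .withDruidCluster(druidCluster)
        // The boolean asks the replicant lookup to also account for timed-out segments.
        .withSegmentReplicantLookup(SegmentReplicantLookup.make(druidCluster, true))
        .withReplicationManager(throttler)
        .withBalancerStrategy(mockBalancerStrategy)
        .withUsedSegmentsInTest(usedSegments)
        // This dynamic config is what lets the rule re-assign the primary after a load timeout.
        .withDynamicConfigs(CoordinatorDynamicConfig.builder().withReplicateAfterLoadTimeout(true).build())
        .build();
}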

Example 13 with CoordinatorStats

Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

From the class CompactSegmentsTest, method testRunMultipleCompactionTaskSlotsWithUseAutoScaleSlotsUnderMaxSlot.

@Test
public void testRunMultipleCompactionTaskSlotsWithUseAutoScaleSlotsUnderMaxSlot() {
    int maxCompactionSlot = 100;
    // Sanity check: the configured slot cap is at least the auto-scale capacity,
    // so the auto-scale capacity is the binding limit in the assertions below.
    Assert.assertFalse(maxCompactionSlot < MAXIMUM_CAPACITY_WITH_AUTO_SCALE);
    final TestDruidLeaderClient leaderClient = new TestDruidLeaderClient(JSON_MAPPER);
    leaderClient.start();
    final HttpIndexingServiceClient indexingServiceClient = new HttpIndexingServiceClient(JSON_MAPPER, leaderClient);
    final CompactSegments compactSegments = new CompactSegments(COORDINATOR_CONFIG, JSON_MAPPER, indexingServiceClient);
    final CoordinatorStats stats = doCompactSegments(compactSegments, createCompactionConfigs(), maxCompactionSlot, true);
    Assert.assertEquals(MAXIMUM_CAPACITY_WITH_AUTO_SCALE, stats.getGlobalStat(CompactSegments.AVAILABLE_COMPACTION_TASK_SLOT));
    Assert.assertEquals(MAXIMUM_CAPACITY_WITH_AUTO_SCALE, stats.getGlobalStat(CompactSegments.MAX_COMPACTION_TASK_SLOT));
    Assert.assertEquals(MAXIMUM_CAPACITY_WITH_AUTO_SCALE, stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT));
}
Also used : HttpIndexingServiceClient(org.apache.druid.client.indexing.HttpIndexingServiceClient) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) Test(org.junit.Test)
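
All three assertions above go through the global counters on CoordinatorStats. For reference, a minimal standalone sketch of that counter API (addToGlobalStat and getGlobalStat are the method names on org.apache.druid.server.coordinator.CoordinatorStats; the keys are the public CompactSegments constants asserted above):

// Global stats accumulate under a string key; tiered stats (see the LoadRule
// examples above) additionally key by tier.
CoordinatorStats stats = new CoordinatorStats();
stats.addToGlobalStat(CompactSegments.COMPACTION_TASK_COUNT, 1);
stats.addToGlobalStat(CompactSegments.COMPACTION_TASK_COUNT, 1);
Assert.assertEquals(2L, stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT));
// A key that was never added reads as zero rather than throwing.
Assert.assertEquals(0L, stats.getGlobalStat(CompactSegments.AVAILABLE_COMPACTION_TASK_SLOT));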

Example 14 with CoordinatorStats

Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

From the class CompactSegmentsTest, method testMakeStatsForDataSourceWithSkipped.

@Test
public void testMakeStatsForDataSourceWithSkipped() {
    // Only test and validate one datasource, for simplicity.
    // This dataSource has three intervals skipped by auto compaction: 3 half-day intervals
    // x 4 segments x 100 bytes each = 12 segments and 1,200 bytes skipped.
    // Note that these segments were 10 bytes each in other tests; we increase them to
    // 100 bytes each here so that they will be skipped by the auto compaction.
    String dataSourceName = DATA_SOURCE_PREFIX + 1;
    List<DataSegment> segments = new ArrayList<>();
    for (int j : new int[] { 0, 1, 2, 3, 7, 8 }) {
        for (int k = 0; k < 4; k++) {
            DataSegment beforeNoon = createSegment(dataSourceName, j, true, k);
            DataSegment afterNoon = createSegment(dataSourceName, j, false, k);
            if (j == 3) {
                // Make two intervals on this day skipped (two skipped intervals back-to-back)
                beforeNoon = beforeNoon.withSize(100);
                afterNoon = afterNoon.withSize(100);
            }
            if (j == 1) {
                // Make one interval on this day skipped
                afterNoon = afterNoon.withSize(100);
            }
            segments.add(beforeNoon);
            segments.add(afterNoon);
        }
    }
    dataSources = DataSourcesSnapshot.fromUsedSegments(segments, ImmutableMap.of()).getUsedSegmentsTimelinesPerDataSource();
    final TestDruidLeaderClient leaderClient = new TestDruidLeaderClient(JSON_MAPPER);
    leaderClient.start();
    final HttpIndexingServiceClient indexingServiceClient = new HttpIndexingServiceClient(JSON_MAPPER, leaderClient);
    final CompactSegments compactSegments = new CompactSegments(COORDINATOR_CONFIG, JSON_MAPPER, indexingServiceClient);
    // Before any compaction, we do not have any snapshot of compactions
    Map<String, AutoCompactionSnapshot> autoCompactionSnapshots = compactSegments.getAutoCompactionSnapshot();
    Assert.assertEquals(0, autoCompactionSnapshots.size());
    // 3 intervals, 1,200 bytes (each segment is 100 bytes), and 12 segments will be skipped by auto compaction
    for (int compactionRunCount = 0; compactionRunCount < 8; compactionRunCount++) {
        // Do a cycle of auto compaction which creates one compaction task
        final CoordinatorStats stats = doCompactSegments(compactSegments);
        Assert.assertEquals(1, stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT));
        verifySnapshot(
            compactSegments,
            AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING,
            dataSourceName,
            // Minus 120 bytes, accounting for the skipped segments' original size (12 segments x 10 bytes)
            TOTAL_BYTE_PER_DATASOURCE - 120 - 40 * (compactionRunCount + 1),
            40 * (compactionRunCount + 1),
            1200,
            TOTAL_INTERVAL_PER_DATASOURCE - 3 - (compactionRunCount + 1),
            (compactionRunCount + 1),
            3,
            TOTAL_SEGMENT_PER_DATASOURCE - 12 - 4 * (compactionRunCount + 1),
            4 + 2 * (compactionRunCount),
            12);
    }
    // Verify that the stats do not change (and remain correct) when auto compaction runs after everything is fully compacted
    final CoordinatorStats stats = doCompactSegments(compactSegments);
    Assert.assertEquals(0, stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT));
    verifySnapshot(
        compactSegments,
        AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING,
        dataSourceName,
        0,
        // Minus 120 bytes, accounting for the skipped segments' original size (12 segments x 10 bytes)
        TOTAL_BYTE_PER_DATASOURCE - 120,
        1200,
        0,
        TOTAL_INTERVAL_PER_DATASOURCE - 3,
        3,
        0,
        16,
        12);
}
Also used : HttpIndexingServiceClient(org.apache.druid.client.indexing.HttpIndexingServiceClient) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) ArrayList(java.util.ArrayList) DataSegment(org.apache.druid.timeline.DataSegment) AutoCompactionSnapshot(org.apache.druid.server.coordinator.AutoCompactionSnapshot) Test(org.junit.Test)
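
The verifySnapshot calls above take long positional argument lists; each triple is awaiting/compacted/skipped, first for bytes, then intervals, then segments, as the inline comments suggest. A sketch of what the helper presumably checks against CompactSegments.getAutoCompactionSnapshot, assuming the AutoCompactionSnapshot getters of this Druid version (verifySnapshotSketch, shown abridged, is illustrative and not the project's helper):

private static void verifySnapshotSketch(
    CompactSegments compactSegments,
    String dataSourceName,
    long expectedBytesAwaiting,
    long expectedBytesCompacted,
    long expectedBytesSkipped
) {
    AutoCompactionSnapshot snapshot = compactSegments.getAutoCompactionSnapshot(dataSourceName);
    Assert.assertEquals(dataSourceName, snapshot.getDataSource());
    Assert.assertEquals(AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING, snapshot.getScheduleStatus());
    Assert.assertEquals(expectedBytesAwaiting, snapshot.getBytesAwaitingCompaction());
    Assert.assertEquals(expectedBytesCompacted, snapshot.getBytesCompacted());
    Assert.assertEquals(expectedBytesSkipped, snapshot.getBytesSkipped());
    // The interval and segment counts follow the same awaiting/compacted/skipped pattern.
}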

Example 15 with CoordinatorStats

Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

From the class CompactSegmentsTest, method testRunWithLockedIntervalsNoSkip.

@Test
public void testRunWithLockedIntervalsNoSkip() {
    Mockito.when(COORDINATOR_CONFIG.getCompactionSkipLockedIntervals()).thenReturn(false);
    final TestDruidLeaderClient leaderClient = new TestDruidLeaderClient(JSON_MAPPER);
    leaderClient.start();
    HttpIndexingServiceClient indexingServiceClient = new HttpIndexingServiceClient(JSON_MAPPER, leaderClient);
    // Lock all intervals for all the dataSources
    final String datasource0 = DATA_SOURCE_PREFIX + 0;
    leaderClient.lockedIntervals.computeIfAbsent(datasource0, k -> new ArrayList<>()).add(Intervals.of("2017/2018"));
    final String datasource1 = DATA_SOURCE_PREFIX + 1;
    leaderClient.lockedIntervals.computeIfAbsent(datasource1, k -> new ArrayList<>()).add(Intervals.of("2017/2018"));
    final String datasource2 = DATA_SOURCE_PREFIX + 2;
    leaderClient.lockedIntervals.computeIfAbsent(datasource2, k -> new ArrayList<>()).add(Intervals.of("2017/2018"));
    // Verify that no locked intervals are skipped
    CompactSegments compactSegments = new CompactSegments(COORDINATOR_CONFIG, JSON_MAPPER, indexingServiceClient);
    int maxTaskSlots = partitionsSpec instanceof SingleDimensionPartitionsSpec ? 5 : 3;
    final CoordinatorStats stats = doCompactSegments(compactSegments, createCompactionConfigs(1), maxTaskSlots);
    Assert.assertEquals(3, stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT));
    Assert.assertEquals(3, leaderClient.submittedCompactionTasks.size());
    // Debug output: print each submitted task with its target interval
    leaderClient.submittedCompactionTasks.forEach(task -> System.out.println(
        task.getDataSource() + " : " + task.getIoConfig().getInputSpec().getInterval()
    ));
    // Verify that tasks are submitted for the latest interval of each dataSource
    final Map<String, Interval> datasourceToInterval = new HashMap<>();
    leaderClient.submittedCompactionTasks.forEach(task -> datasourceToInterval.put(task.getDataSource(), task.getIoConfig().getInputSpec().getInterval()));
    Assert.assertEquals(Intervals.of("2017-01-09T00:00:00Z/2017-01-09T12:00:00Z"), datasourceToInterval.get(datasource0));
    Assert.assertEquals(Intervals.of("2017-01-09T00:00:00Z/2017-01-09T12:00:00Z"), datasourceToInterval.get(datasource1));
    Assert.assertEquals(Intervals.of("2017-01-09T00:00:00Z/2017-01-09T12:00:00Z"), datasourceToInterval.get(datasource2));
}
Also used : HttpIndexingServiceClient(org.apache.druid.client.indexing.HttpIndexingServiceClient) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Map(java.util.Map) List(java.util.List) Interval(org.joda.time.Interval) Intervals(org.apache.druid.java.util.common.Intervals) SingleDimensionPartitionsSpec(org.apache.druid.indexer.partitions.SingleDimensionPartitionsSpec) Mockito(org.mockito.Mockito) Test(org.junit.Test)
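
For contrast with the no-skip behavior verified above: when getCompactionSkipLockedIntervals() is left at its default of true, CompactSegments first asks the Overlord which intervals are locked and excludes them from the candidate search. A hedged sketch of that lookup, assuming IndexingServiceClient.getLockedIntervals in this Druid version; the priority value 25 (the usual compaction task priority) is illustrative:

// The map argument is datasource -> minimum task priority to consider when
// collecting locks; intervals returned here would be skipped by compaction.
Map<String, List<Interval>> lockedIntervals =
    indexingServiceClient.getLockedIntervals(ImmutableMap.of(datasource0, 25));
// In this test the leader client stubs that endpoint via leaderClient.lockedIntervals,
// and the config above disables the skip, so the locked 2017/2018 interval is still compacted.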

Aggregations

CoordinatorStats (org.apache.druid.server.coordinator.CoordinatorStats): 45 uses
DataSegment (org.apache.druid.timeline.DataSegment): 31 uses
Test (org.junit.Test): 31 uses
ServerHolder (org.apache.druid.server.coordinator.ServerHolder): 24 uses
DruidCluster (org.apache.druid.server.coordinator.DruidCluster): 22 uses
DruidServer (org.apache.druid.client.DruidServer): 15 uses
DruidCoordinatorRuntimeParams (org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams): 14 uses
LoadQueuePeon (org.apache.druid.server.coordinator.LoadQueuePeon): 14 uses
ImmutableDruidServer (org.apache.druid.client.ImmutableDruidServer): 13 uses
HttpIndexingServiceClient (org.apache.druid.client.indexing.HttpIndexingServiceClient): 11 uses
HashMap (java.util.HashMap): 10 uses
ArrayList (java.util.ArrayList): 9 uses
AutoCompactionSnapshot (org.apache.druid.server.coordinator.AutoCompactionSnapshot): 9 uses
List (java.util.List): 7 uses
Map (java.util.Map): 6 uses
DateTimes (org.apache.druid.java.util.common.DateTimes): 6 uses
Intervals (org.apache.druid.java.util.common.Intervals): 6 uses
CoordinatorRuntimeParamsTestHelpers (org.apache.druid.server.coordinator.CoordinatorRuntimeParamsTestHelpers): 6 uses
Assert (org.junit.Assert): 6 uses
Before (org.junit.Before): 6 uses