
Example 36 with CoordinatorStats

Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

In the class LoadRuleTest, the method testLoadPriority.

@Test
public void testLoadPriority() {
    // The replication throttler denies new replicants, so only the primary copy of the segment can be assigned.
    EasyMock.expect(throttler.canCreateReplicant(EasyMock.anyString())).andReturn(false).anyTimes();
    final LoadQueuePeon mockPeon1 = createEmptyPeon();
    final LoadQueuePeon mockPeon2 = createEmptyPeon();
    mockPeon2.loadSegment(EasyMock.anyObject(), EasyMock.isNull());
    EasyMock.expectLastCall().once();
    EasyMock.expect(mockBalancerStrategy.findNewSegmentHomeReplicator(EasyMock.anyObject(), EasyMock.anyObject()))
            .andDelegateTo(balancerStrategy)
            .times(2);
    EasyMock.replay(throttler, mockPeon1, mockPeon2, mockBalancerStrategy);
    final LoadRule rule = createLoadRule(ImmutableMap.of("tier1", 10, "tier2", 10));
    // The last DruidServer constructor argument is the server priority: 0 for tier1, 1 for tier2,
    // so the higher-priority tier2 should receive the primary assignment
    // (see the tier-priority sketch after this example).
    final DruidCluster druidCluster = DruidClusterBuilder
        .newBuilder()
        .addTier(
            "tier1",
            new ServerHolder(
                new DruidServer("server1", "host1", null, 1000, ServerType.HISTORICAL, "tier1", 0).toImmutableDruidServer(),
                mockPeon1
            )
        )
        .addTier(
            "tier2",
            new ServerHolder(
                new DruidServer("server2", "host2", null, 1000, ServerType.HISTORICAL, "tier2", 1).toImmutableDruidServer(),
                mockPeon2
            ),
            new ServerHolder(
                new DruidServer("server3", "host3", null, 1000, ServerType.HISTORICAL, "tier2", 1).toImmutableDruidServer(),
                mockPeon2
            )
        )
        .build();
    final DataSegment segment = createDataSegment("foo");
    final CoordinatorStats stats = rule.run(null, makeCoordinatorRuntimeParams(druidCluster, segment), segment);
    Assert.assertEquals(0L, stats.getTieredStat(LoadRule.ASSIGNED_COUNT, "tier1"));
    Assert.assertEquals(1L, stats.getTieredStat(LoadRule.ASSIGNED_COUNT, "tier2"));
    EasyMock.verify(throttler, mockPeon1, mockPeon2, mockBalancerStrategy);
}
Also used : CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) LoadQueuePeon(org.apache.druid.server.coordinator.LoadQueuePeon) DruidServer(org.apache.druid.client.DruidServer) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) DruidCluster(org.apache.druid.server.coordinator.DruidCluster) DataSegment(org.apache.druid.timeline.DataSegment) Test(org.junit.Test)
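
The deciding input above is the last DruidServer constructor argument, the server priority (0 for tier1, 1 for tier2): with replication throttled, only the primary copy is assigned, and it lands in the higher-priority tier. The plain-Java sketch below illustrates that selection idea only; it is not LoadRule's actual implementation, and the pickPrimaryTier helper and its tier-to-priority map are hypothetical.

import java.util.Map;

public class PrimaryTierSketch
{
    // Hypothetical helper: given each tier's highest server priority, pick the tier that
    // should receive the primary replica. Mirrors the expectation in testLoadPriority
    // (tier2 with priority 1 beats tier1 with priority 0), but it is only an illustration.
    static String pickPrimaryTier(Map<String, Integer> tierPriorities)
    {
        return tierPriorities.entrySet()
                             .stream()
                             .max(Map.Entry.comparingByValue())
                             .map(Map.Entry::getKey)
                             .orElseThrow(() -> new IllegalArgumentException("no tiers configured"));
    }

    public static void main(String[] args)
    {
        System.out.println(pickPrimaryTier(Map.of("tier1", 0, "tier2", 1)));  // prints "tier2"
    }
}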

Example 37 with CoordinatorStats

Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

In the class LoadRuleTest, the method testDropDuringDecommissioning.

/**
 * 2 servers, each with a segment, one server decommissioning.
 * Should drop a segment from both servers (a drop-order sketch follows this example).
 */
@Test
public void testDropDuringDecommissioning() {
    final LoadQueuePeon mockPeon = createEmptyPeon();
    mockPeon.dropSegment(EasyMock.anyObject(), EasyMock.anyObject());
    EasyMock.expectLastCall().times(2);
    EasyMock.expect(mockBalancerStrategy.pickServersToDrop(EasyMock.anyObject(), EasyMock.anyObject()))
            .andDelegateTo(balancerStrategy)
            .times(4);
    EasyMock.replay(throttler, mockPeon, mockBalancerStrategy);
    LoadRule rule = createLoadRule(ImmutableMap.of("tier1", 0));
    final DataSegment segment1 = createDataSegment("foo1");
    final DataSegment segment2 = createDataSegment("foo2");
    DruidServer server1 = createServer("tier1");
    server1.addDataSegment(segment1);
    DruidServer server2 = createServer("tier1");
    server2.addDataSegment(segment2);
    // The third ServerHolder constructor argument flags server1 as decommissioning; server2 stays active.
    DruidCluster druidCluster = DruidClusterBuilder
        .newBuilder()
        .addTier(
            "tier1",
            new ServerHolder(server1.toImmutableDruidServer(), mockPeon, true),
            new ServerHolder(server2.toImmutableDruidServer(), mockPeon, false)
        )
        .build();
    DruidCoordinatorRuntimeParams params = makeCoordinatorRuntimeParams(druidCluster, segment1, segment2);
    CoordinatorStats stats = rule.run(null, params, segment1);
    Assert.assertEquals(1L, stats.getTieredStat("droppedCount", "tier1"));
    stats = rule.run(null, params, segment2);
    Assert.assertEquals(1L, stats.getTieredStat("droppedCount", "tier1"));
    EasyMock.verify(throttler, mockPeon);
}
Also used : DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) LoadQueuePeon(org.apache.druid.server.coordinator.LoadQueuePeon) DruidServer(org.apache.druid.client.DruidServer) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) DruidCluster(org.apache.druid.server.coordinator.DruidCluster) DataSegment(org.apache.druid.timeline.DataSegment) Test(org.junit.Test)
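
In this test the decommissioning server is dropped from just like the active one, since the tier is configured for zero replicants. As a rough illustration of why a decommissioning server is the natural first candidate when replicas have to be shed, here is a plain-Java sketch; the Holder record is a hypothetical stand-in for ServerHolder, and the ordering shown is not LoadRule's actual drop logic.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class DropOrderSketch
{
    // Hypothetical stand-in for ServerHolder: just a name plus the decommissioning flag that the
    // third ServerHolder constructor argument sets in the test above.
    record Holder(String name, boolean decommissioning) {}

    public static void main(String[] args)
    {
        final List<Holder> holders = new ArrayList<>(List.of(
            new Holder("server1", true),   // decommissioning
            new Holder("server2", false)   // active
        ));

        // Decommissioning servers sort first, making them the first candidates to shed replicas.
        holders.sort(Comparator.comparing((Holder h) -> !h.decommissioning()));

        // With zero replicants configured for the tier, both servers end up dropping their segment,
        // but the decommissioning one is the natural place to start.
        holders.forEach(h -> System.out.println(h.name()));
    }
}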

Example 38 with CoordinatorStats

Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

In the class UnloadUnusedSegmentsTest, the method test_unloadUnusedSegmentsFromAllServers.

@Test
public void test_unloadUnusedSegmentsFromAllServers() {
    mockDruidServer(historicalServer, ServerType.HISTORICAL, "historical", DruidServer.DEFAULT_TIER, 30L, 100L, segments, dataSources);
    mockDruidServer(historicalServerTier2, ServerType.HISTORICAL, "historicalTier2", "tier2", 30L, 100L, segments, dataSources);
    mockDruidServer(brokerServer, ServerType.BROKER, "broker", DruidServer.DEFAULT_TIER, 30L, 100L, segments, dataSources);
    mockDruidServer(indexerServer, ServerType.INDEXER_EXECUTOR, "indexer", DruidServer.DEFAULT_TIER, 30L, 100L, segmentsForRealtime, dataSourcesForRealtime);
    // Mock stuff that the coordinator needs
    mockCoordinator(coordinator);
    mockRuleManager(databaseRuleManager);
    // We keep only datasource2's segments; datasource1 and broadcastDatasource are dropped from all servers.
    // realtimeSegment is intentionally missing from the set, to match how a realtime task's unpublished segments
    // will not appear in the coordinator's view of used segments.
    Set<DataSegment> usedSegments = ImmutableSet.of(segment2);
    DruidCoordinatorRuntimeParams params = CoordinatorRuntimeParamsTestHelpers
        .newBuilder()
        .withDruidCluster(
            DruidClusterBuilder
                .newBuilder()
                .addTier(DruidServer.DEFAULT_TIER, new ServerHolder(historicalServer, historicalPeon, false))
                .addTier("tier2", new ServerHolder(historicalServerTier2, historicalTier2Peon, false))
                .withBrokers(new ServerHolder(brokerServer, brokerPeon, false))
                .withRealtimes(new ServerHolder(indexerServer, indexerPeon, false))
                .build()
        )
        .withLoadManagementPeons(
            ImmutableMap.of(
                "historical", historicalPeon,
                "historicalTier2", historicalTier2Peon,
                "broker", brokerPeon,
                "indexer", indexerPeon
            )
        )
        .withUsedSegmentsInTest(usedSegments)
        .withBroadcastDatasources(broadcastDatasourceNames)
        .withDatabaseRuleManager(databaseRuleManager)
        .build();
    params = new UnloadUnusedSegments().run(params);
    CoordinatorStats stats = params.getCoordinatorStats();
    // We drop segment1 and broadcast1 from all servers, realtimeSegment is not dropped by the indexer
    Assert.assertEquals(5, stats.getTieredStat("unneededCount", DruidServer.DEFAULT_TIER));
    Assert.assertEquals(2, stats.getTieredStat("unneededCount", "tier2"));
}
Also used : DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) DataSegment(org.apache.druid.timeline.DataSegment) Test(org.junit.Test)

Example 39 with CoordinatorStats

Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

In the class BroadcastDistributionRuleTest, the method testBroadcastToSingleDataSource.

@Test
public void testBroadcastToSingleDataSource() {
    final ForeverBroadcastDistributionRule rule = new ForeverBroadcastDistributionRule();
    // The broadcast rule should load the small segment onto every server that does not already
    // serve it, reporting a single global count (see the global-vs-tiered sketch after this example).
    CoordinatorStats stats = rule.run(
        null,
        makeCoordinartorRuntimeParams(
            druidCluster,
            smallSegment,
            largeSegments.get(0),
            largeSegments.get(1),
            largeSegments.get(2),
            largeSegments2.get(0),
            largeSegments2.get(1)
        ),
        smallSegment
    );
    Assert.assertEquals(5L, stats.getGlobalStat(LoadRule.ASSIGNED_COUNT));
    Assert.assertFalse(stats.hasPerTierStats());
    Assert.assertTrue(holdersOfLargeSegments.stream().allMatch(holder -> holder.getPeon().getSegmentsToLoad().contains(smallSegment)));
    Assert.assertTrue(holdersOfLargeSegments2.stream().allMatch(holder -> holder.getPeon().getSegmentsToLoad().contains(smallSegment)));
    Assert.assertTrue(holderOfSmallSegment.isServingSegment(smallSegment));
}
Also used : DateTimes(org.apache.druid.java.util.common.DateTimes) CoordinatorRuntimeParamsTestHelpers(org.apache.druid.server.coordinator.CoordinatorRuntimeParamsTestHelpers) Intervals(org.apache.druid.java.util.common.Intervals) DruidServer(org.apache.druid.client.DruidServer) DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams) Test(org.junit.Test) HashMap(java.util.HashMap) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) NoneShardSpec(org.apache.druid.timeline.partition.NoneShardSpec) ArrayList(java.util.ArrayList) SegmentReplicantLookup(org.apache.druid.server.coordinator.SegmentReplicantLookup) List(java.util.List) ServerType(org.apache.druid.server.coordination.ServerType) DataSegment(org.apache.druid.timeline.DataSegment) DruidCluster(org.apache.druid.server.coordinator.DruidCluster) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) Assert(org.junit.Assert) DruidClusterBuilder(org.apache.druid.server.coordinator.DruidClusterBuilder) LoadQueuePeonTester(org.apache.druid.server.coordinator.LoadQueuePeonTester) Before(org.junit.Before) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) Test(org.junit.Test)
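
The broadcast rule reports its work only through the global counter, which is why hasPerTierStats() is asserted false above, while the LoadRule tests earlier read per-tier counters. Below is a minimal sketch of the two stat families, assuming the CoordinatorStats accessors used in these tests plus addToGlobalStat/addToTieredStat as their write-side counterparts; the stat name is written out as a literal here, whereas the tests reference it via LoadRule.ASSIGNED_COUNT.

import org.apache.druid.server.coordinator.CoordinatorStats;

public class GlobalVsTieredStatsSketch
{
    public static void main(String[] args)
    {
        // Broadcast-style accounting: one global counter, no per-tier breakdown.
        final CoordinatorStats broadcastStats = new CoordinatorStats();
        broadcastStats.addToGlobalStat("assignedCount", 5);
        System.out.println(broadcastStats.getGlobalStat("assignedCount"));  // 5
        System.out.println(broadcastStats.hasPerTierStats());               // false

        // Load-rule-style accounting: counters keyed by tier.
        final CoordinatorStats loadStats = new CoordinatorStats();
        loadStats.addToTieredStat("assignedCount", "tier2", 1);
        System.out.println(loadStats.getTieredStat("assignedCount", "tier2"));  // 1
        System.out.println(loadStats.hasPerTierStats());                        // true
    }
}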

Example 40 with CoordinatorStats

Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

In the class CompactSegmentsTest, the method testRunWithLockedIntervals.

@Test
public void testRunWithLockedIntervals() {
    final TestDruidLeaderClient leaderClient = new TestDruidLeaderClient(JSON_MAPPER);
    leaderClient.start();
    HttpIndexingServiceClient indexingServiceClient = new HttpIndexingServiceClient(JSON_MAPPER, leaderClient);
    // Lock all intervals for dataSource_1 and dataSource_2
    final String datasource1 = DATA_SOURCE_PREFIX + 1;
    leaderClient.lockedIntervals.computeIfAbsent(datasource1, k -> new ArrayList<>()).add(Intervals.of("2017/2018"));
    final String datasource2 = DATA_SOURCE_PREFIX + 2;
    leaderClient.lockedIntervals.computeIfAbsent(datasource2, k -> new ArrayList<>()).add(Intervals.of("2017/2018"));
    // Lock all intervals but one for dataSource_0
    final String datasource0 = DATA_SOURCE_PREFIX + 0;
    leaderClient.lockedIntervals.computeIfAbsent(datasource0, k -> new ArrayList<>()).add(Intervals.of("2017-01-01T13:00:00Z/2017-02-01"));
    // Verify that locked intervals are skipped and only one compaction task
    // is submitted for dataSource_0 (an interval-overlap sketch follows this example).
    CompactSegments compactSegments = new CompactSegments(COORDINATOR_CONFIG, JSON_MAPPER, indexingServiceClient);
    final CoordinatorStats stats = doCompactSegments(compactSegments, createCompactionConfigs(2), 4);
    Assert.assertEquals(1, stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT));
    Assert.assertEquals(1, leaderClient.submittedCompactionTasks.size());
    final ClientCompactionTaskQuery compactionTask = leaderClient.submittedCompactionTasks.get(0);
    Assert.assertEquals(datasource0, compactionTask.getDataSource());
    Assert.assertEquals(Intervals.of("2017-01-01T00:00:00/2017-01-01T12:00:00"), compactionTask.getIoConfig().getInputSpec().getInterval());
}
Also used : HttpIndexingServiceClient(org.apache.druid.client.indexing.HttpIndexingServiceClient) DruidNodeDiscoveryProvider(org.apache.druid.discovery.DruidNodeDiscoveryProvider) ArgumentMatchers(org.mockito.ArgumentMatchers) HttpMethod(org.jboss.netty.handler.codec.http.HttpMethod) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) DataSourceCompactionConfig(org.apache.druid.server.coordinator.DataSourceCompactionConfig) BooleanSupplier(java.util.function.BooleanSupplier) TaskPayloadResponse(org.apache.druid.client.indexing.TaskPayloadResponse) UserCompactionTaskQueryTuningConfig(org.apache.druid.server.coordinator.UserCompactionTaskQueryTuningConfig) Map(java.util.Map) IAE(org.apache.druid.java.util.common.IAE) ClientCompactionIOConfig(org.apache.druid.client.indexing.ClientCompactionIOConfig) IndexingTotalWorkerCapacityInfo(org.apache.druid.client.indexing.IndexingTotalWorkerCapacityInfo) ClientCompactionTaskQueryTuningConfig(org.apache.druid.client.indexing.ClientCompactionTaskQueryTuningConfig) CoordinatorCompactionConfig(org.apache.druid.server.coordinator.CoordinatorCompactionConfig) ClientCompactionIntervalSpec(org.apache.druid.client.indexing.ClientCompactionIntervalSpec) TaskStatusPlus(org.apache.druid.indexer.TaskStatusPlus) UserCompactionTaskTransformConfig(org.apache.druid.server.coordinator.UserCompactionTaskTransformConfig) StandardCharsets(java.nio.charset.StandardCharsets) TaskState(org.apache.druid.indexer.TaskState) ClientTaskQuery(org.apache.druid.client.indexing.ClientTaskQuery) HttpVersion(org.jboss.netty.handler.codec.http.HttpVersion) TransformSpec(org.apache.druid.segment.transform.TransformSpec) Streams(org.apache.druid.utils.Streams) IndexingWorkerInfo(org.apache.druid.client.indexing.IndexingWorkerInfo) InjectableValues(com.fasterxml.jackson.databind.InjectableValues) HashBasedNumberedShardSpec(org.apache.druid.timeline.partition.HashBasedNumberedShardSpec) UserCompactionTaskIOConfig(org.apache.druid.server.coordinator.UserCompactionTaskIOConfig) RunWith(org.junit.runner.RunWith) Supplier(java.util.function.Supplier) ArrayList(java.util.ArrayList) PartitionChunk(org.apache.druid.timeline.partition.PartitionChunk) Interval(org.joda.time.Interval) Lists(com.google.common.collect.Lists) PartitionsSpec(org.apache.druid.indexer.partitions.PartitionsSpec) ClientCompactionTaskTransformSpec(org.apache.druid.client.indexing.ClientCompactionTaskTransformSpec) Before(org.junit.Before) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) TaskLocation(org.apache.druid.indexer.TaskLocation) TimelineObjectHolder(org.apache.druid.timeline.TimelineObjectHolder) DimensionsSpec(org.apache.druid.data.input.impl.DimensionsSpec) Test(org.junit.Test) IOException(java.io.IOException) EasyMock(org.easymock.EasyMock) DefaultObjectMapper(org.apache.druid.jackson.DefaultObjectMapper) RunnerTaskState(org.apache.druid.indexer.RunnerTaskState) HttpResponseStatus(org.jboss.netty.handler.codec.http.HttpResponseStatus) ClientCompactionTaskGranularitySpec(org.apache.druid.client.indexing.ClientCompactionTaskGranularitySpec) DruidNode(org.apache.druid.server.DruidNode) Preconditions(com.google.common.base.Preconditions) Assert(org.junit.Assert) Nullable(junitparams.converters.Nullable) MutableInt(org.apache.commons.lang3.mutable.MutableInt) UserCompactionTaskDimensionsConfig(org.apache.druid.server.coordinator.UserCompactionTaskDimensionsConfig) URL(java.net.URL) BiFunction(java.util.function.BiFunction) 
DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams) IndexingServiceClient(org.apache.druid.client.indexing.IndexingServiceClient) CompactionState(org.apache.druid.timeline.CompactionState) AutoCompactionSnapshot(org.apache.druid.server.coordinator.AutoCompactionSnapshot) DefaultHttpResponse(org.jboss.netty.handler.codec.http.DefaultHttpResponse) SelectorDimFilter(org.apache.druid.query.filter.SelectorDimFilter) DynamicPartitionsSpec(org.apache.druid.indexer.partitions.DynamicPartitionsSpec) TypeReference(com.fasterxml.jackson.core.type.TypeReference) Parameterized(org.junit.runners.Parameterized) DateTimes(org.apache.druid.java.util.common.DateTimes) ShardSpec(org.apache.druid.timeline.partition.ShardSpec) CoordinatorRuntimeParamsTestHelpers(org.apache.druid.server.coordinator.CoordinatorRuntimeParamsTestHelpers) ImmutableMap(com.google.common.collect.ImmutableMap) AggregatorFactory(org.apache.druid.query.aggregation.AggregatorFactory) NumberedShardSpec(org.apache.druid.timeline.partition.NumberedShardSpec) Collection(java.util.Collection) StringUtils(org.apache.druid.java.util.common.StringUtils) HashedPartitionsSpec(org.apache.druid.indexer.partitions.HashedPartitionsSpec) Collectors(java.util.stream.Collectors) List(java.util.List) DataSegment(org.apache.druid.timeline.DataSegment) NodeRole(org.apache.druid.discovery.NodeRole) DruidLeaderClient(org.apache.druid.discovery.DruidLeaderClient) Intervals(org.apache.druid.java.util.common.Intervals) ClientCompactionTaskDimensionsSpec(org.apache.druid.client.indexing.ClientCompactionTaskDimensionsSpec) HttpIndexingServiceClient(org.apache.druid.client.indexing.HttpIndexingServiceClient) HashMap(java.util.HashMap) DataSourcesSnapshot(org.apache.druid.client.DataSourcesSnapshot) IndexingWorker(org.apache.druid.client.indexing.IndexingWorker) DruidNodeDiscovery(org.apache.druid.discovery.DruidNodeDiscovery) ArgumentCaptor(org.mockito.ArgumentCaptor) ImmutableList(com.google.common.collect.ImmutableList) DruidCoordinatorConfig(org.apache.druid.server.coordinator.DruidCoordinatorConfig) Request(org.apache.druid.java.util.http.client.Request) UserCompactionTaskGranularityConfig(org.apache.druid.server.coordinator.UserCompactionTaskGranularityConfig) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) Period(org.joda.time.Period) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) DateTime(org.joda.time.DateTime) JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException) ClientCompactionTaskQuery(org.apache.druid.client.indexing.ClientCompactionTaskQuery) Granularities(org.apache.druid.java.util.common.granularity.Granularities) Mockito(org.mockito.Mockito) SingleDimensionPartitionsSpec(org.apache.druid.indexer.partitions.SingleDimensionPartitionsSpec) NullHandling(org.apache.druid.common.config.NullHandling) SingleDimensionShardSpec(org.apache.druid.timeline.partition.SingleDimensionShardSpec) StringFullResponseHolder(org.apache.druid.java.util.http.client.response.StringFullResponseHolder) HttpResponse(org.jboss.netty.handler.codec.http.HttpResponse) Collections(java.util.Collections) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) ClientCompactionTaskQuery(org.apache.druid.client.indexing.ClientCompactionTaskQuery) ArrayList(java.util.ArrayList) Test(org.junit.Test)
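
The locked-interval bookkeeping above is just a map from datasource to locked intervals; the duty then skips any candidate interval that collides with a lock. The sketch below shows one way such an overlap check could look. It is an illustration only, not CompactSegments' actual code; the isEligible helper is hypothetical, and the only library calls used are Intervals.of (seen in the test above) and Joda-Time's Interval.overlaps.

import java.util.List;
import java.util.Map;
import org.apache.druid.java.util.common.Intervals;
import org.joda.time.Interval;

public class LockedIntervalSketch
{
    // Hypothetical helper: a candidate interval is eligible for compaction only if it overlaps
    // none of the datasource's locked intervals.
    static boolean isEligible(Map<String, List<Interval>> lockedIntervals, String dataSource, Interval candidate)
    {
        return lockedIntervals.getOrDefault(dataSource, List.of())
                              .stream()
                              .noneMatch(candidate::overlaps);
    }

    public static void main(String[] args)
    {
        final Map<String, List<Interval>> locked = Map.of(
            "dataSource_0", List.of(Intervals.of("2017-01-01T13:00:00Z/2017-02-01")),
            "dataSource_1", List.of(Intervals.of("2017/2018"))
        );

        // Only the unlocked first half of 2017-01-01 stays open for dataSource_0, matching the
        // single submitted task asserted in the test above; dataSource_1 is fully locked out.
        System.out.println(isEligible(locked, "dataSource_0", Intervals.of("2017-01-01/2017-01-01T12:00:00")));  // true
        System.out.println(isEligible(locked, "dataSource_1", Intervals.of("2017-06-01/2017-06-02")));           // false
    }
}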

Aggregations

CoordinatorStats (org.apache.druid.server.coordinator.CoordinatorStats): 45 usages
DataSegment (org.apache.druid.timeline.DataSegment): 31 usages
Test (org.junit.Test): 31 usages
ServerHolder (org.apache.druid.server.coordinator.ServerHolder): 24 usages
DruidCluster (org.apache.druid.server.coordinator.DruidCluster): 22 usages
DruidServer (org.apache.druid.client.DruidServer): 15 usages
DruidCoordinatorRuntimeParams (org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams): 14 usages
LoadQueuePeon (org.apache.druid.server.coordinator.LoadQueuePeon): 14 usages
ImmutableDruidServer (org.apache.druid.client.ImmutableDruidServer): 13 usages
HttpIndexingServiceClient (org.apache.druid.client.indexing.HttpIndexingServiceClient): 11 usages
HashMap (java.util.HashMap): 10 usages
ArrayList (java.util.ArrayList): 9 usages
AutoCompactionSnapshot (org.apache.druid.server.coordinator.AutoCompactionSnapshot): 9 usages
List (java.util.List): 7 usages
Map (java.util.Map): 6 usages
DateTimes (org.apache.druid.java.util.common.DateTimes): 6 usages
Intervals (org.apache.druid.java.util.common.Intervals): 6 usages
CoordinatorRuntimeParamsTestHelpers (org.apache.druid.server.coordinator.CoordinatorRuntimeParamsTestHelpers): 6 usages
Assert (org.junit.Assert): 6 usages
Before (org.junit.Before): 6 usages
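
Across these usages the pattern is the same: a rule or coordinator duty tallies what it did into a CoordinatorStats, and the caller (or the test) reads the counters back with getTieredStat/getGlobalStat. Below is a condensed sketch of that round trip, under the assumption that addToTieredStat/addToGlobalStat are the write-side counterparts of those accessors and that accumulate(...) folds one stats object into another; the stat names are illustrative literals, not the constants the production classes use.

import org.apache.druid.server.coordinator.CoordinatorStats;

public class CoordinatorStatsRoundTrip
{
    public static void main(String[] args)
    {
        // What a load rule run might record for its tier.
        final CoordinatorStats fromRuleRun = new CoordinatorStats();
        fromRuleRun.addToTieredStat("assignedCount", "tier2", 1);

        // What a duty such as CompactSegments might record globally.
        final CoordinatorStats fromDuty = new CoordinatorStats();
        fromDuty.addToGlobalStat("compactTaskCount", 1);

        // Merge the per-run objects and read the totals back, the way the tests above do.
        final CoordinatorStats total = new CoordinatorStats();
        total.accumulate(fromRuleRun);
        total.accumulate(fromDuty);

        System.out.println(total.getTieredStat("assignedCount", "tier2"));  // 1
        System.out.println(total.getGlobalStat("compactTaskCount"));        // 1
    }
}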