Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.
From the class LoadRuleTest, method testLoadPriority.
@Test
public void testLoadPriority()
{
  EasyMock.expect(throttler.canCreateReplicant(EasyMock.anyString())).andReturn(false).anyTimes();

  final LoadQueuePeon mockPeon1 = createEmptyPeon();
  final LoadQueuePeon mockPeon2 = createEmptyPeon();
  mockPeon2.loadSegment(EasyMock.anyObject(), EasyMock.isNull());
  EasyMock.expectLastCall().once();

  EasyMock.expect(mockBalancerStrategy.findNewSegmentHomeReplicator(EasyMock.anyObject(), EasyMock.anyObject()))
          .andDelegateTo(balancerStrategy)
          .times(2);

  EasyMock.replay(throttler, mockPeon1, mockPeon2, mockBalancerStrategy);

  final LoadRule rule = createLoadRule(ImmutableMap.of("tier1", 10, "tier2", 10));

  final DruidCluster druidCluster = DruidClusterBuilder
      .newBuilder()
      .addTier(
          "tier1",
          new ServerHolder(
              new DruidServer("server1", "host1", null, 1000, ServerType.HISTORICAL, "tier1", 0).toImmutableDruidServer(),
              mockPeon1
          )
      )
      .addTier(
          "tier2",
          new ServerHolder(
              new DruidServer("server2", "host2", null, 1000, ServerType.HISTORICAL, "tier2", 1).toImmutableDruidServer(),
              mockPeon2
          ),
          new ServerHolder(
              new DruidServer("server3", "host3", null, 1000, ServerType.HISTORICAL, "tier2", 1).toImmutableDruidServer(),
              mockPeon2
          )
      )
      .build();

  final DataSegment segment = createDataSegment("foo");

  final CoordinatorStats stats = rule.run(null, makeCoordinatorRuntimeParams(druidCluster, segment), segment);

  Assert.assertEquals(0L, stats.getTieredStat(LoadRule.ASSIGNED_COUNT, "tier1"));
  Assert.assertEquals(1L, stats.getTieredStat(LoadRule.ASSIGNED_COUNT, "tier2"));

  EasyMock.verify(throttler, mockPeon1, mockPeon2, mockBalancerStrategy);
}
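The test relies on a createEmptyPeon() helper that this page does not show. A plausible sketch, assuming it is an EasyMock mock of LoadQueuePeon whose queue-inspection methods report an empty queue (the stubbed methods below come from the LoadQueuePeon interface; the real fixture may stub more or fewer of them):

private static LoadQueuePeon createEmptyPeon()
{
  final LoadQueuePeon mockPeon = EasyMock.createMock(LoadQueuePeon.class);
  // Report an empty load queue so the rule sees spare capacity on every server.
  EasyMock.expect(mockPeon.getSegmentsToLoad()).andReturn(new HashSet<>()).anyTimes();
  EasyMock.expect(mockPeon.getSegmentsMarkedToDrop()).andReturn(new HashSet<>()).anyTimes();
  EasyMock.expect(mockPeon.getLoadQueueSize()).andReturn(0L).anyTimes();
  return mockPeon;
}

With the replication throttler answering canCreateReplicant(...) with false, only one tier receives the initial assignment; the assertions show it goes to tier2, whose servers carry the higher priority value (1 versus 0 for tier1).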
Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.
From the class LoadRuleTest, method testDropDuringDecommissioning.
/**
 * Two servers, each serving one segment; one of the servers is decommissioning.
 * The rule should drop a segment from both.
 */
@Test
public void testDropDuringDecommissioning()
{
  final LoadQueuePeon mockPeon = createEmptyPeon();
  mockPeon.dropSegment(EasyMock.anyObject(), EasyMock.anyObject());
  EasyMock.expectLastCall().times(2);
  EasyMock.expect(mockBalancerStrategy.pickServersToDrop(EasyMock.anyObject(), EasyMock.anyObject()))
          .andDelegateTo(balancerStrategy)
          .times(4);
  EasyMock.replay(throttler, mockPeon, mockBalancerStrategy);

  LoadRule rule = createLoadRule(ImmutableMap.of("tier1", 0));

  final DataSegment segment1 = createDataSegment("foo1");
  final DataSegment segment2 = createDataSegment("foo2");

  DruidServer server1 = createServer("tier1");
  server1.addDataSegment(segment1);
  DruidServer server2 = createServer("tier1");
  server2.addDataSegment(segment2);

  DruidCluster druidCluster = DruidClusterBuilder
      .newBuilder()
      .addTier(
          "tier1",
          new ServerHolder(server1.toImmutableDruidServer(), mockPeon, true),
          new ServerHolder(server2.toImmutableDruidServer(), mockPeon, false)
      )
      .build();

  DruidCoordinatorRuntimeParams params = makeCoordinatorRuntimeParams(druidCluster, segment1, segment2);
  CoordinatorStats stats = rule.run(null, params, segment1);
  Assert.assertEquals(1L, stats.getTieredStat("droppedCount", "tier1"));
  stats = rule.run(null, params, segment2);
  Assert.assertEquals(1L, stats.getTieredStat("droppedCount", "tier1"));

  EasyMock.verify(throttler, mockPeon);
}
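createServer(...) and createDataSegment(...) are fixtures of the test class that this page omits. Minimal sketches under stated assumptions: the seven-argument DruidServer constructor already used in testLoadPriority above, and DataSegment.builder() with an arbitrary interval and version (all names and values here are illustrative, not the project's exact helpers):

private DruidServer createServer(String tier)
{
  // Same constructor shape as in testLoadPriority: name, host, tlsHost, maxSize, type, tier, priority.
  return new DruidServer("server_" + tier, "host_" + tier, null, 1000, ServerType.HISTORICAL, tier, 0);
}

private DataSegment createDataSegment(String dataSource)
{
  return DataSegment.builder()
                    .dataSource(dataSource)
                    .interval(Intervals.of("2015-01-01/2015-01-02"))
                    .version("1")
                    .size(1)
                    .build();
}

Note the three-argument ServerHolder constructor in the test: the trailing boolean marks the server as decommissioning, which is how server1 is put into decommissioning mode while server2 stays active.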
Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.
From the class UnloadUnusedSegmentsTest, method test_unloadUnusedSegmentsFromAllServers.
@Test
public void test_unloadUnusedSegmentsFromAllServers()
{
  mockDruidServer(historicalServer, ServerType.HISTORICAL, "historical", DruidServer.DEFAULT_TIER, 30L, 100L, segments, dataSources);
  mockDruidServer(historicalServerTier2, ServerType.HISTORICAL, "historicalTier2", "tier2", 30L, 100L, segments, dataSources);
  mockDruidServer(brokerServer, ServerType.BROKER, "broker", DruidServer.DEFAULT_TIER, 30L, 100L, segments, dataSources);
  mockDruidServer(indexerServer, ServerType.INDEXER_EXECUTOR, "indexer", DruidServer.DEFAULT_TIER, 30L, 100L, segmentsForRealtime, dataSourcesForRealtime);

  // Mock stuff that the coordinator needs
  mockCoordinator(coordinator);
  mockRuleManager(databaseRuleManager);

  // We keep only the datasource2 segments and drop datasource1 and broadcastDatasource from all servers.
  // realtimeSegment is intentionally missing from the set, to match how a realtime task's unpublished
  // segments will not appear in the coordinator's view of used segments.
  Set<DataSegment> usedSegments = ImmutableSet.of(segment2);

  DruidCoordinatorRuntimeParams params = CoordinatorRuntimeParamsTestHelpers
      .newBuilder()
      .withDruidCluster(
          DruidClusterBuilder
              .newBuilder()
              .addTier(DruidServer.DEFAULT_TIER, new ServerHolder(historicalServer, historicalPeon, false))
              .addTier("tier2", new ServerHolder(historicalServerTier2, historicalTier2Peon, false))
              .withBrokers(new ServerHolder(brokerServer, brokerPeon, false))
              .withRealtimes(new ServerHolder(indexerServer, indexerPeon, false))
              .build()
      )
      .withLoadManagementPeons(
          ImmutableMap.of(
              "historical", historicalPeon,
              "historicalTier2", historicalTier2Peon,
              "broker", brokerPeon,
              "indexer", indexerPeon
          )
      )
      .withUsedSegmentsInTest(usedSegments)
      .withBroadcastDatasources(broadcastDatasourceNames)
      .withDatabaseRuleManager(databaseRuleManager)
      .build();

  params = new UnloadUnusedSegments().run(params);
  CoordinatorStats stats = params.getCoordinatorStats();

  // We drop segment1 and broadcast1 from all servers; realtimeSegment is not dropped by the indexer.
  Assert.assertEquals(5, stats.getTieredStat("unneededCount", DruidServer.DEFAULT_TIER));
  Assert.assertEquals(2, stats.getTieredStat("unneededCount", "tier2"));
}
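For reference, CoordinatorStats keeps global counters and per-tier counters separately, and repeated additions to the same (stat, tier) pair accumulate. A minimal usage sketch (the method names match org.apache.druid.server.coordinator.CoordinatorStats; the stat values are illustrative):

CoordinatorStats stats = new CoordinatorStats();
// Two drops on the default tier accumulate under the same (stat, tier) key.
stats.addToTieredStat("unneededCount", DruidServer.DEFAULT_TIER, 1);
stats.addToTieredStat("unneededCount", DruidServer.DEFAULT_TIER, 1);
stats.addToTieredStat("unneededCount", "tier2", 1);
Assert.assertEquals(2L, stats.getTieredStat("unneededCount", DruidServer.DEFAULT_TIER));
Assert.assertEquals(1L, stats.getTieredStat("unneededCount", "tier2"));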
Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.
From the class BroadcastDistributionRuleTest, method testBroadcastToSingleDataSource.
@Test
public void testBroadcastToSingleDataSource()
{
  final ForeverBroadcastDistributionRule rule = new ForeverBroadcastDistributionRule();

  CoordinatorStats stats = rule.run(
      null,
      makeCoordinartorRuntimeParams(
          druidCluster,
          smallSegment,
          largeSegments.get(0),
          largeSegments.get(1),
          largeSegments.get(2),
          largeSegments2.get(0),
          largeSegments2.get(1)
      ),
      smallSegment
  );

  Assert.assertEquals(5L, stats.getGlobalStat(LoadRule.ASSIGNED_COUNT));
  Assert.assertFalse(stats.hasPerTierStats());

  Assert.assertTrue(
      holdersOfLargeSegments.stream()
                            .allMatch(holder -> holder.getPeon().getSegmentsToLoad().contains(smallSegment))
  );
  Assert.assertTrue(
      holdersOfLargeSegments2.stream()
                             .allMatch(holder -> holder.getPeon().getSegmentsToLoad().contains(smallSegment))
  );
  Assert.assertTrue(holderOfSmallSegment.isServingSegment(smallSegment));
}
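The five assigned loads correspond to the five holders that do not yet serve smallSegment; the sixth holder already serves it, so the rule only needs isServingSegment to pass. A sketch of how such a holder could be built, reusing the createEmptyPeon() idea from above (illustrative, not the test's exact setup):

DruidServer serverOfSmallSegment =
    new DruidServer("serverSmall", "hostSmall", null, 1000, ServerType.HISTORICAL, "tier1", 0);
// Registering the segment up front makes ServerHolder.isServingSegment(smallSegment) true.
serverOfSmallSegment.addDataSegment(smallSegment);
ServerHolder holderOfSmallSegment =
    new ServerHolder(serverOfSmallSegment.toImmutableDruidServer(), createEmptyPeon());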
Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.
From the class CompactSegmentsTest, method testRunWithLockedIntervals.
@Test
public void testRunWithLockedIntervals()
{
  final TestDruidLeaderClient leaderClient = new TestDruidLeaderClient(JSON_MAPPER);
  leaderClient.start();
  HttpIndexingServiceClient indexingServiceClient = new HttpIndexingServiceClient(JSON_MAPPER, leaderClient);

  // Lock all intervals for dataSource_1 and dataSource_2
  final String datasource1 = DATA_SOURCE_PREFIX + 1;
  leaderClient.lockedIntervals
              .computeIfAbsent(datasource1, k -> new ArrayList<>())
              .add(Intervals.of("2017/2018"));

  final String datasource2 = DATA_SOURCE_PREFIX + 2;
  leaderClient.lockedIntervals
              .computeIfAbsent(datasource2, k -> new ArrayList<>())
              .add(Intervals.of("2017/2018"));

  // Lock all intervals but one for dataSource_0
  final String datasource0 = DATA_SOURCE_PREFIX + 0;
  leaderClient.lockedIntervals
              .computeIfAbsent(datasource0, k -> new ArrayList<>())
              .add(Intervals.of("2017-01-01T13:00:00Z/2017-02-01"));

  // Verify that locked intervals are skipped and only one compaction task
  // is submitted for dataSource_0
  CompactSegments compactSegments = new CompactSegments(COORDINATOR_CONFIG, JSON_MAPPER, indexingServiceClient);
  final CoordinatorStats stats = doCompactSegments(compactSegments, createCompactionConfigs(2), 4);
  Assert.assertEquals(1, stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT));
  Assert.assertEquals(1, leaderClient.submittedCompactionTasks.size());

  final ClientCompactionTaskQuery compactionTask = leaderClient.submittedCompactionTasks.get(0);
  Assert.assertEquals(datasource0, compactionTask.getDataSource());
  Assert.assertEquals(
      Intervals.of("2017-01-01T00:00:00/2017-01-01T12:00:00"),
      compactionTask.getIoConfig().getInputSpec().getInterval()
  );
}
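The skip decision boils down to an interval-overlap test against the datasource's locked intervals, which the coordinator fetches from the overlord. A minimal sketch of that shape (assumed logic for illustration, not the exact CompactSegments implementation):

// A candidate interval is eligible for compaction only if it overlaps
// none of the datasource's locked intervals.
List<Interval> locked = leaderClient.lockedIntervals.getOrDefault(datasource0, Collections.emptyList());
Interval candidate = Intervals.of("2017-01-01/2017-01-01T12:00:00");
boolean skipped = locked.stream().anyMatch(lockedInterval -> lockedInterval.overlaps(candidate));
// The lock on dataSource_0 starts at 13:00, so this candidate is not skipped
// and becomes the single submitted compaction task.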