Use of org.apache.druid.server.coordinator.rules.IntervalLoadRule in project druid by druid-io.
From the class DruidCoordinatorTest, method testCoordinatorTieredRun.
@Test(timeout = 60_000L)
public void testCoordinatorTieredRun() throws Exception {
final String dataSource = "dataSource";
final String hotTierName = "hot";
final String coldTierName = "cold";
final Rule hotTier = new IntervalLoadRule(Intervals.of("2018-01-01/P1M"), ImmutableMap.of(hotTierName, 1));
final Rule coldTier = new ForeverLoadRule(ImmutableMap.of(coldTierName, 1));
final String loadPathCold = "/druid/loadqueue/cold:1234";
final DruidServer hotServer = new DruidServer("hot", "hot", null, 5L, ServerType.HISTORICAL, hotTierName, 0);
final DruidServer coldServer = new DruidServer("cold", "cold", null, 5L, ServerType.HISTORICAL, coldTierName, 0);
final Map<String, DataSegment> dataSegments = ImmutableMap.of(
    "2018-01-02T00:00:00.000Z_2018-01-03T00:00:00.000Z",
    new DataSegment(dataSource, Intervals.of("2018-01-02/P1D"), "v1", null, null, null, null, 0x9, 0),
    "2018-01-03T00:00:00.000Z_2018-01-04T00:00:00.000Z",
    new DataSegment(dataSource, Intervals.of("2018-01-03/P1D"), "v1", null, null, null, null, 0x9, 0),
    "2017-01-01T00:00:00.000Z_2017-01-02T00:00:00.000Z",
    new DataSegment(dataSource, Intervals.of("2017-01-01/P1D"), "v1", null, null, null, null, 0x9, 0)
);
final LoadQueuePeon loadQueuePeonCold = new CuratorLoadQueuePeon(
    curator,
    loadPathCold,
    objectMapper,
    Execs.scheduledSingleThreaded("coordinator_test_load_queue_peon_cold_scheduled-%d"),
    Execs.singleThreaded("coordinator_test_load_queue_peon_cold-%d"),
    druidCoordinatorConfig
);
final PathChildrenCache pathChildrenCacheCold = new PathChildrenCache(
    curator,
    loadPathCold,
    true,
    true,
    Execs.singleThreaded("coordinator_test_path_children_cache_cold-%d")
);
loadManagementPeons.putAll(ImmutableMap.of("hot", loadQueuePeon, "cold", loadQueuePeonCold));
loadQueuePeonCold.start();
pathChildrenCache.start();
pathChildrenCacheCold.start();
DruidDataSource[] druidDataSources = { new DruidDataSource(dataSource, Collections.emptyMap()) };
dataSegments.values().forEach(druidDataSources[0]::addSegment);
setupSegmentsMetadataMock(druidDataSources[0]);
EasyMock.expect(metadataRuleManager.getRulesWithDefault(EasyMock.anyString())).andReturn(ImmutableList.of(hotTier, coldTier)).atLeastOnce();
EasyMock.expect(metadataRuleManager.getAllRules()).andReturn(ImmutableMap.of(dataSource, ImmutableList.of(hotTier, coldTier))).atLeastOnce();
EasyMock.expect(serverInventoryView.getInventory()).andReturn(ImmutableList.of(hotServer, coldServer)).atLeastOnce();
EasyMock.expect(serverInventoryView.isStarted()).andReturn(true).anyTimes();
EasyMock.replay(metadataRuleManager, serverInventoryView);
coordinator.start();
// Wait for this coordinator to become leader
leaderAnnouncerLatch.await();
final CountDownLatch assignSegmentLatchHot =
    createCountDownLatchAndSetPathChildrenCacheListenerWithLatch(2, pathChildrenCache, dataSegments, hotServer);
final CountDownLatch assignSegmentLatchCold =
    createCountDownLatchAndSetPathChildrenCacheListenerWithLatch(1, pathChildrenCacheCold, dataSegments, coldServer);
assignSegmentLatchHot.await();
assignSegmentLatchCold.await();
final CountDownLatch coordinatorRunLatch = new CountDownLatch(2);
serviceEmitter.latch = coordinatorRunLatch;
coordinatorRunLatch.await();
Assert.assertEquals(ImmutableMap.of(dataSource, 100.0), coordinator.getLoadStatus());
Map<String, Object2LongMap<String>> underReplicationCountsPerDataSourcePerTier = coordinator.computeUnderReplicationCountsPerDataSourcePerTier();
Assert.assertEquals(2, underReplicationCountsPerDataSourcePerTier.size());
Assert.assertEquals(0L, underReplicationCountsPerDataSourcePerTier.get(hotTierName).getLong(dataSource));
Assert.assertEquals(0L, underReplicationCountsPerDataSourcePerTier.get(coldTierName).getLong(dataSource));
Map<String, Object2LongMap<String>> underReplicationCountsPerDataSourcePerTierUsingClusterView = coordinator.computeUnderReplicationCountsPerDataSourcePerTierUsingClusterView();
Assert.assertEquals(2, underReplicationCountsPerDataSourcePerTierUsingClusterView.size());
Assert.assertEquals(0L, underReplicationCountsPerDataSourcePerTierUsingClusterView.get(hotTierName).getLong(dataSource));
Assert.assertEquals(0L, underReplicationCountsPerDataSourcePerTierUsingClusterView.get(coldTierName).getLong(dataSource));
coordinator.stop();
leaderUnannouncerLatch.await();
EasyMock.verify(serverInventoryView);
EasyMock.verify(segmentsMetadataManager);
EasyMock.verify(metadataRuleManager);
}
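The two rules above form an ordered chain: segments from January 2018 match the IntervalLoadRule first and go to the hot tier, while everything else falls through to the ForeverLoadRule and lands on cold. Below is a minimal, self-contained sketch of that first-match resolution; the Interval and LoadRule types are simplified stand-ins for illustration, not Druid's real Rule and DataSegment classes.

import java.util.List;
import java.util.Map;

public class RuleChainSketch {
  // Half-open millisecond interval [start, end); a null rule interval means "forever".
  record Interval(long start, long end) {
    boolean contains(Interval other) {
      return start <= other.start && other.end <= end;
    }
  }

  record LoadRule(Interval interval, Map<String, Integer> tieredReplicants) {}

  // Walk the chain in order; the first matching rule decides the tiered replicant counts.
  static Map<String, Integer> resolve(List<LoadRule> chain, Interval segment) {
    for (LoadRule rule : chain) {
      if (rule.interval() == null || rule.interval().contains(segment)) {
        return rule.tieredReplicants();
      }
    }
    return Map.of(); // no rule applies: the segment has no target tier
  }

  public static void main(String[] args) {
    Interval jan2018 = new Interval(1514764800000L, 1517443200000L); // 2018-01-01/P1M
    LoadRule hotTier = new LoadRule(jan2018, Map.of("hot", 1));
    LoadRule coldTier = new LoadRule(null, Map.of("cold", 1));       // "forever" rule

    List<LoadRule> chain = List.of(hotTier, coldTier);
    Interval jan2 = new Interval(1514851200000L, 1514937600000L);    // 2018-01-02/P1D
    Interval y2017 = new Interval(1483228800000L, 1483315200000L);   // 2017-01-01/P1D
    System.out.println(resolve(chain, jan2));  // {hot=1}
    System.out.println(resolve(chain, y2017)); // {cold=1}
  }
}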
Use of org.apache.druid.server.coordinator.rules.IntervalLoadRule in project druid by druid-io.
From the class RunRulesTest, method testRunThreeTiersOneReplicant.
/**
* Nodes:
* hot - 1 replicant
* normal - 1 replicant
* cold - 1 replicant
*/
@Test
public void testRunThreeTiersOneReplicant() {
mockCoordinator();
mockPeon.loadSegment(EasyMock.anyObject(), EasyMock.anyObject());
EasyMock.expectLastCall().atLeastOnce();
mockEmptyPeon();
EasyMock
    .expect(databaseRuleManager.getRulesWithDefault(EasyMock.anyObject()))
    .andReturn(
        Lists.newArrayList(
            new IntervalLoadRule(Intervals.of("2012-01-01T00:00:00.000Z/2012-01-01T06:00:00.000Z"), ImmutableMap.of("hot", 1)),
            new IntervalLoadRule(Intervals.of("2012-01-01T00:00:00.000Z/2012-01-01T12:00:00.000Z"), ImmutableMap.of("normal", 1)),
            new IntervalLoadRule(Intervals.of("2012-01-01T00:00:00.000Z/2012-01-02T00:00:00.000Z"), ImmutableMap.of("cold", 1))
        )
    )
    .atLeastOnce();
EasyMock.replay(databaseRuleManager);
DruidCluster druidCluster = DruidClusterBuilder
    .newBuilder()
    .addTier(
        "hot",
        new ServerHolder(
            new DruidServer("serverHot", "hostHot", null, 1000, ServerType.HISTORICAL, "hot", 0).toImmutableDruidServer(),
            mockPeon
        )
    )
    .addTier(
        "normal",
        new ServerHolder(
            new DruidServer("serverNorm", "hostNorm", null, 1000, ServerType.HISTORICAL, "normal", 0).toImmutableDruidServer(),
            mockPeon
        )
    )
    .addTier(
        "cold",
        new ServerHolder(
            new DruidServer("serverCold", "hostCold", null, 1000, ServerType.HISTORICAL, "cold", 0).toImmutableDruidServer(),
            mockPeon
        )
    )
    .build();
ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
DruidCoordinatorRuntimeParams params = makeCoordinatorRuntimeParams(druidCluster, balancerStrategy)
    .withDynamicConfigs(CoordinatorDynamicConfig.builder().withMaxSegmentsToMove(5).build())
    .build();
DruidCoordinatorRuntimeParams afterParams = ruleRunner.run(params);
CoordinatorStats stats = afterParams.getCoordinatorStats();
Assert.assertEquals(6L, stats.getTieredStat("assignedCount", "hot"));
Assert.assertEquals(6L, stats.getTieredStat("assignedCount", "normal"));
Assert.assertEquals(12L, stats.getTieredStat("assignedCount", "cold"));
Assert.assertTrue(stats.getTiers("unassignedCount").isEmpty());
Assert.assertTrue(stats.getTiers("unassignedSize").isEmpty());
exec.shutdown();
EasyMock.verify(mockPeon);
}
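The assigned counts follow from the same first-match ordering: with the 24 one-hour segments this test suite uses (consistent with 6 + 6 + 12 = 24 in the assertions), the hot rule claims hours [0, 6), the normal rule the next six hours, and the cold rule the remaining twelve, even though the normal and cold intervals also cover the earlier hours. A quick worked check, assuming that segment layout:

import java.util.Map;
import java.util.TreeMap;

public class TierCountSketch {
  public static void main(String[] args) {
    Map<String, Long> assignedCount = new TreeMap<>();
    for (int hour = 0; hour < 24; hour++) {
      // First match wins: the hot rule covers [0,6), normal [0,12), cold [0,24).
      String tier = hour < 6 ? "hot" : (hour < 12 ? "normal" : "cold");
      assignedCount.merge(tier, 1L, Long::sum);
    }
    System.out.println(assignedCount); // {cold=12, hot=6, normal=6}
  }
}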
Use of org.apache.druid.server.coordinator.rules.IntervalLoadRule in project druid by druid-io.
From the class RunRulesTest, method testDropTooManyInDifferentTiers.
@Test
public void testDropTooManyInDifferentTiers() {
mockCoordinator();
mockPeon.loadSegment(EasyMock.anyObject(), EasyMock.anyObject());
EasyMock.expectLastCall().atLeastOnce();
mockPeon.dropSegment(EasyMock.anyObject(), EasyMock.anyObject());
EasyMock.expectLastCall().atLeastOnce();
mockEmptyPeon();
EasyMock
    .expect(databaseRuleManager.getRulesWithDefault(EasyMock.anyObject()))
    .andReturn(
        Lists.newArrayList(
            new IntervalLoadRule(Intervals.of("2012-01-01T00:00:00.000Z/2012-01-01T12:00:00.000Z"), ImmutableMap.of("hot", 1)),
            new IntervalDropRule(Intervals.of("2012-01-01T00:00:00.000Z/2012-01-02T00:00:00.000Z"))
        )
    )
    .atLeastOnce();
EasyMock.replay(databaseRuleManager);
DruidServer server1 = new DruidServer("server1", "host1", null, 1000, ServerType.HISTORICAL, "hot", 0);
server1.addDataSegment(usedSegments.get(0));
DruidServer server2 = new DruidServer("serverNorm2", "hostNorm2", null, 1000, ServerType.HISTORICAL, "normal", 0);
for (DataSegment segment : usedSegments) {
server2.addDataSegment(segment);
}
DruidCluster druidCluster = DruidClusterBuilder
    .newBuilder()
    .addTier("hot", new ServerHolder(server1.toImmutableDruidServer(), mockPeon))
    .addTier("normal", new ServerHolder(server2.toImmutableDruidServer(), mockPeon))
    .build();
SegmentReplicantLookup segmentReplicantLookup = SegmentReplicantLookup.make(druidCluster, false);
ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
DruidCoordinatorRuntimeParams params = createCoordinatorRuntimeParams(druidCluster)
    .withDynamicConfigs(COORDINATOR_CONFIG_WITH_ZERO_LEADING_TIME_BEFORE_CAN_MARK_AS_UNUSED_OVERSHADOWED_SEGMENTS)
    .withSegmentReplicantLookup(segmentReplicantLookup)
    .withBalancerStrategy(balancerStrategy)
    .build();
DruidCoordinatorRuntimeParams afterParams = ruleRunner.run(params);
CoordinatorStats stats = afterParams.getCoordinatorStats();
Assert.assertEquals(1L, stats.getTieredStat("droppedCount", "normal"));
Assert.assertEquals(12L, stats.getGlobalStat("deletedCount"));
exec.shutdown();
EasyMock.verify(mockPeon);
}
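Two different stats are asserted here. Segments in the second half of the day match the IntervalDropRule, because the hot load rule claims the first twelve hours, so twelve segments are marked unused (deletedCount). Within the load interval, only the hour-0 segment already has a copy on both tiers, so exactly one surplus copy is dropped from the normal tier (droppedCount). A worked check, assuming the suite's 24 one-hour segments:

public class DropStatsSketch {
  public static void main(String[] args) {
    long deletedCount = 0; // segments marked unused by the drop rule
    long droppedCount = 0; // surplus replicas removed from "normal"
    for (int hour = 0; hour < 24; hour++) {
      boolean matchesLoadRule = hour < 12; // hot load rule covers [0, 12)
      boolean onHotTier = (hour == 0);     // server1 holds only the first segment
      if (!matchesLoadRule) {
        deletedCount++;                    // falls through to the IntervalDropRule
      } else if (onHotTier) {
        droppedCount++;                    // 1 replicant required; the "normal" copy is surplus
      }
    }
    System.out.println(deletedCount + ", " + droppedCount); // 12, 1
  }
}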
Use of org.apache.druid.server.coordinator.rules.IntervalLoadRule in project druid by druid-io.
From the class RunRulesTest, method testReplicantThrottleAcrossTiers.
/**
* Nodes:
* hot - nothing loaded
* _default_tier - 1 segment loaded
*/
@Test
public void testReplicantThrottleAcrossTiers() {
EasyMock
    .expect(coordinator.getDynamicConfigs())
    .andReturn(
        CoordinatorDynamicConfig.builder()
            .withReplicationThrottleLimit(7)
            .withReplicantLifetime(1)
            .withMaxSegmentsInNodeLoadingQueue(1000)
            .build()
    )
    .atLeastOnce();
coordinator.markSegmentAsUnused(EasyMock.anyObject());
EasyMock.expectLastCall().anyTimes();
EasyMock.replay(coordinator);
mockPeon.loadSegment(EasyMock.anyObject(), EasyMock.anyObject());
EasyMock.expectLastCall().atLeastOnce();
mockEmptyPeon();
EasyMock
    .expect(databaseRuleManager.getRulesWithDefault(EasyMock.anyObject()))
    .andReturn(
        Collections.singletonList(
            new IntervalLoadRule(
                Intervals.of("2012-01-01T00:00:00.000Z/2013-01-01T00:00:00.000Z"),
                ImmutableMap.of("hot", 1, DruidServer.DEFAULT_TIER, 1)
            )
        )
    )
    .atLeastOnce();
EasyMock.replay(databaseRuleManager);
DruidCluster druidCluster = DruidClusterBuilder
    .newBuilder()
    .addTier(
        "hot",
        new ServerHolder(
            new DruidServer("serverHot", "hostHot", null, 1000, ServerType.HISTORICAL, "hot", 1).toImmutableDruidServer(),
            mockPeon
        )
    )
    .addTier(
        DruidServer.DEFAULT_TIER,
        new ServerHolder(
            new DruidServer("serverNorm", "hostNorm", null, 1000, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0).toImmutableDruidServer(),
            mockPeon
        )
    )
    .build();
ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
DruidCoordinatorRuntimeParams params = makeCoordinatorRuntimeParams(druidCluster, balancerStrategy).build();
RunRules runner = new RunRules(new ReplicationThrottler(7, 1, false), coordinator);
DruidCoordinatorRuntimeParams afterParams = runner.run(params);
CoordinatorStats stats = afterParams.getCoordinatorStats();
Assert.assertEquals(24L, stats.getTieredStat("assignedCount", "hot"));
Assert.assertEquals(7L, stats.getTieredStat("assignedCount", DruidServer.DEFAULT_TIER));
Assert.assertTrue(stats.getTiers("unassignedCount").isEmpty());
Assert.assertTrue(stats.getTiers("unassignedSize").isEmpty());
EasyMock.verify(mockPeon);
exec.shutdown();
}
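The asymmetry in the assigned counts is the point of the test: the first copy of each segment (its primary replicant) is never throttled, so all 24 segments are assigned to the hot tier in one run, while the copies destined for _default_tier are replicas and are capped by the replication throttle limit of 7 per run. A mini-model of that decision, as an illustrative stand-in rather than Druid's actual ReplicationThrottler:

public class ThrottleSketch {
  public static void main(String[] args) {
    final int replicationThrottleLimit = 7; // matches the dynamic config above
    int hotAssigned = 0;
    int defaultTierAssigned = 0;
    for (int segment = 0; segment < 24; segment++) {
      hotAssigned++; // primary replicant: assigned unconditionally
      if (defaultTierAssigned < replicationThrottleLimit) {
        defaultTierAssigned++; // non-primary replicant: subject to the throttle
      }
    }
    System.out.println(hotAssigned + ", " + defaultTierAssigned); // 24, 7
  }
}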
Use of org.apache.druid.server.coordinator.rules.IntervalLoadRule in project druid by druid-io.
From the class RunRulesTest, method testDropReplicantThrottle.
@Test
public void testDropReplicantThrottle() {
mockCoordinator();
mockPeon.dropSegment(EasyMock.anyObject(), EasyMock.anyObject());
EasyMock.expectLastCall().atLeastOnce();
mockEmptyPeon();
EasyMock
    .expect(databaseRuleManager.getRulesWithDefault(EasyMock.anyObject()))
    .andReturn(
        Collections.singletonList(
            new IntervalLoadRule(Intervals.of("2012-01-01T00:00:00.000Z/2013-01-02T00:00:00.000Z"), ImmutableMap.of("normal", 1))
        )
    )
    .atLeastOnce();
EasyMock.replay(databaseRuleManager);
DataSegment overFlowSegment = new DataSegment(
    "test",
    Intervals.of("2012-02-01/2012-02-02"),
    DateTimes.nowUtc().toString(),
    new HashMap<>(),
    new ArrayList<>(),
    new ArrayList<>(),
    NoneShardSpec.instance(),
    1,
    0
);
List<DataSegment> longerUsedSegments = Lists.newArrayList(usedSegments);
longerUsedSegments.add(overFlowSegment);
DruidServer server1 = new DruidServer("serverNorm1", "hostNorm1", null, 1000, ServerType.HISTORICAL, "normal", 0);
for (DataSegment segment : longerUsedSegments) {
server1.addDataSegment(segment);
}
DruidServer server2 = new DruidServer("serverNorm2", "hostNorm2", null, 1000, ServerType.HISTORICAL, "normal", 0);
for (DataSegment segment : longerUsedSegments) {
server2.addDataSegment(segment);
}
DruidCluster druidCluster = DruidClusterBuilder
    .newBuilder()
    .addTier(
        "normal",
        new ServerHolder(server1.toImmutableDruidServer(), mockPeon),
        new ServerHolder(server2.toImmutableDruidServer(), mockPeon)
    )
    .build();
SegmentReplicantLookup segmentReplicantLookup = SegmentReplicantLookup.make(druidCluster, false);
ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
DruidCoordinatorRuntimeParams params = CoordinatorRuntimeParamsTestHelpers
    .newBuilder()
    .withDruidCluster(druidCluster)
    .withDynamicConfigs(COORDINATOR_CONFIG_WITH_ZERO_LEADING_TIME_BEFORE_CAN_MARK_AS_UNUSED_OVERSHADOWED_SEGMENTS)
    .withUsedSegmentsInTest(longerUsedSegments)
    .withDatabaseRuleManager(databaseRuleManager)
    .withSegmentReplicantLookup(segmentReplicantLookup)
    .withBalancerStrategy(balancerStrategy)
    .build();
DruidCoordinatorRuntimeParams afterParams = ruleRunner.run(params);
CoordinatorStats stats = afterParams.getCoordinatorStats();
// There is no throttling on drop
Assert.assertEquals(25L, stats.getTieredStat("droppedCount", "normal"));
EasyMock.verify(mockPeon);
exec.shutdown();
}
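The final assertion is straightforward arithmetic once the setup is clear: 25 used segments (the suite's 24 hourly segments plus the overflow segment) each sit on both normal-tier servers while the rule asks for a single replicant, leaving one surplus copy per segment; because drops are not throttled, all 25 surplus copies go in a single run:

public class DropThrottleSketch {
  public static void main(String[] args) {
    int segments = 24 + 1;      // usedSegments plus overFlowSegment
    int loadedCopies = 2;       // each segment is on server1 and server2
    int requiredReplicants = 1; // the IntervalLoadRule asks for one copy on "normal"
    int droppedCount = segments * (loadedCopies - requiredReplicants);
    System.out.println(droppedCount); // 25: there is no throttling on drops
  }
}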