Use of org.apache.druid.server.coordinator.rules.IntervalDropRule in project druid by druid-io.
In the class RunRulesTest, the method testDontDropInDifferentTiers pairs a half-day IntervalLoadRule targeting the "hot" tier with a full-day IntervalDropRule and asserts that no per-tier drops are issued (droppedCount stays empty), while the 12 segments matched only by the drop rule are marked unused (deletedCount).
@Test
public void testDontDropInDifferentTiers() {
  mockCoordinator();
  mockPeon.loadSegment(EasyMock.anyObject(), EasyMock.anyObject());
  EasyMock.expectLastCall().atLeastOnce();
  mockEmptyPeon();
  EasyMock.expect(databaseRuleManager.getRulesWithDefault(EasyMock.anyObject()))
          .andReturn(Lists.newArrayList(
              new IntervalLoadRule(
                  Intervals.of("2012-01-01T00:00:00.000Z/2012-01-01T12:00:00.000Z"),
                  ImmutableMap.of("hot", 1)
              ),
              new IntervalDropRule(Intervals.of("2012-01-01T00:00:00.000Z/2012-01-02T00:00:00.000Z"))
          ))
          .atLeastOnce();
  EasyMock.replay(databaseRuleManager);

  DruidServer server1 = new DruidServer("server1", "host1", null, 1000, ServerType.HISTORICAL, "hot", 0);
  DruidServer server2 = new DruidServer("serverNorm2", "hostNorm2", null, 1000, ServerType.HISTORICAL, "normal", 0);
  for (DataSegment segment : usedSegments) {
    server2.addDataSegment(segment);
  }

  DruidCluster druidCluster = DruidClusterBuilder
      .newBuilder()
      .addTier("hot", new ServerHolder(server1.toImmutableDruidServer(), mockPeon))
      .addTier("normal", new ServerHolder(server2.toImmutableDruidServer(), mockPeon))
      .build();
  SegmentReplicantLookup segmentReplicantLookup = SegmentReplicantLookup.make(druidCluster, false);
  ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
  BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);

  DruidCoordinatorRuntimeParams params = createCoordinatorRuntimeParams(druidCluster)
      .withDynamicConfigs(COORDINATOR_CONFIG_WITH_ZERO_LEADING_TIME_BEFORE_CAN_MARK_AS_UNUSED_OVERSHADOWED_SEGMENTS)
      .withSegmentReplicantLookup(segmentReplicantLookup)
      .withBalancerStrategy(balancerStrategy)
      .build();
  DruidCoordinatorRuntimeParams afterParams = ruleRunner.run(params);
  CoordinatorStats stats = afterParams.getCoordinatorStats();

  Assert.assertTrue(stats.getTiers("droppedCount").isEmpty());
  Assert.assertEquals(12L, stats.getGlobalStat("deletedCount"));
  exec.shutdown();
  EasyMock.verify(mockPeon);
}
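The assertions rest on the coordinator's first-match rule evaluation: each used segment is checked against the rule list in order, so segments inside the load interval are assigned to the "hot" tier and the rest fall through to the IntervalDropRule. A minimal sketch of that matching, reusing the rules from the test (Rule.appliesTo takes the segment interval plus a reference timestamp, which interval-based rules ignore; the sketch assumes the test class's imports):

  Rule load = new IntervalLoadRule(
      Intervals.of("2012-01-01T00:00:00.000Z/2012-01-01T12:00:00.000Z"),
      ImmutableMap.of("hot", 1)
  );
  Rule drop = new IntervalDropRule(Intervals.of("2012-01-01T00:00:00.000Z/2012-01-02T00:00:00.000Z"));
  DateTime referenceTime = DateTimes.nowUtc();

  // A morning segment matches the load rule first, so the drop rule is never consulted for it.
  boolean morningLoads = load.appliesTo(
      Intervals.of("2012-01-01T03:00:00.000Z/2012-01-01T04:00:00.000Z"), referenceTime);   // true
  // An afternoon segment misses the load rule and is handled by the drop rule instead.
  boolean afternoonLoads = load.appliesTo(
      Intervals.of("2012-01-01T15:00:00.000Z/2012-01-01T16:00:00.000Z"), referenceTime);   // false
  boolean afternoonDrops = drop.appliesTo(
      Intervals.of("2012-01-01T15:00:00.000Z/2012-01-01T16:00:00.000Z"), referenceTime);   // true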
Use of org.apache.druid.server.coordinator.rules.IntervalDropRule in project druid by druid-io.
In the class RunRulesTest, the method testDropTooManyInSameTier loads the first half of 2012-01-01 with a single replica in the "normal" tier and drops the full day otherwise, expecting exactly one over-replicated copy to be dropped from "normal" and 12 segments to be marked unused.
@Test
public void testDropTooManyInSameTier() {
  mockCoordinator();
  mockPeon.dropSegment(EasyMock.anyObject(), EasyMock.anyObject());
  EasyMock.expectLastCall().atLeastOnce();
  mockEmptyPeon();
  EasyMock.expect(databaseRuleManager.getRulesWithDefault(EasyMock.anyObject()))
          .andReturn(Lists.newArrayList(
              new IntervalLoadRule(
                  Intervals.of("2012-01-01T00:00:00.000Z/2012-01-01T12:00:00.000Z"),
                  ImmutableMap.of("normal", 1)
              ),
              new IntervalDropRule(Intervals.of("2012-01-01T00:00:00.000Z/2012-01-02T00:00:00.000Z"))
          ))
          .atLeastOnce();
  EasyMock.replay(databaseRuleManager);

  DruidServer server1 = new DruidServer("serverNorm", "hostNorm", null, 1000, ServerType.HISTORICAL, "normal", 0);
  server1.addDataSegment(usedSegments.get(0));
  DruidServer server2 = new DruidServer("serverNorm2", "hostNorm2", null, 1000, ServerType.HISTORICAL, "normal", 0);
  for (DataSegment segment : usedSegments) {
    server2.addDataSegment(segment);
  }

  DruidCluster druidCluster = DruidClusterBuilder
      .newBuilder()
      .addTier("normal",
               new ServerHolder(server1.toImmutableDruidServer(), mockPeon),
               new ServerHolder(server2.toImmutableDruidServer(), mockPeon))
      .build();
  SegmentReplicantLookup segmentReplicantLookup = SegmentReplicantLookup.make(druidCluster, false);
  ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
  BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);

  DruidCoordinatorRuntimeParams params = createCoordinatorRuntimeParams(druidCluster)
      .withDynamicConfigs(CoordinatorDynamicConfig.builder()
                                                  .withLeadingTimeMillisBeforeCanMarkAsUnusedOvershadowedSegments(0L)
                                                  .build())
      .withSegmentReplicantLookup(segmentReplicantLookup)
      .withBalancerStrategy(balancerStrategy)
      .build();
  DruidCoordinatorRuntimeParams afterParams = ruleRunner.run(params);
  CoordinatorStats stats = afterParams.getCoordinatorStats();

  Assert.assertEquals(1L, stats.getTieredStat("droppedCount", "normal"));
  Assert.assertEquals(12L, stats.getGlobalStat("deletedCount"));
  exec.shutdown();
  EasyMock.verify(mockPeon);
}
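The expected counts follow directly from the setup: only usedSegments.get(0) exists on both servers, and the IntervalLoadRule asks for a single replica in "normal", while every segment outside the 12-hour load interval matches the IntervalDropRule. A hedged sketch of that arithmetic (not code from the test):

  int copiesOfFirstSegment = 2;       // usedSegments.get(0) sits on both serverNorm and serverNorm2
  int targetReplicasInNormal = 1;     // ImmutableMap.of("normal", 1) in the IntervalLoadRule
  int expectedDropped = copiesOfFirstSegment - targetReplicasInNormal;   // 1, the tiered "droppedCount"
  // Segments whose intervals fall outside the half-day load rule match the IntervalDropRule
  // instead and are marked unused; the assertions show there are 12 of them ("deletedCount").
  long expectedDeleted = 12L;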
Use of org.apache.druid.server.coordinator.rules.IntervalDropRule in project druid by druid-io.
In the class RunRulesTest, the method testDropRemove uses the same load-then-drop rule chain and additionally verifies that the coordinator itself marks the dropped segments as unused (coordinator.markSegmentAsUnused).
@Test
public void testDropRemove() {
  mockPeon.dropSegment(EasyMock.anyObject(), EasyMock.anyObject());
  EasyMock.expectLastCall().atLeastOnce();
  mockEmptyPeon();
  EasyMock.expect(coordinator.getDynamicConfigs()).andReturn(createCoordinatorDynamicConfig()).anyTimes();
  coordinator.markSegmentAsUnused(EasyMock.anyObject());
  EasyMock.expectLastCall().atLeastOnce();
  EasyMock.replay(coordinator);
  EasyMock.expect(databaseRuleManager.getRulesWithDefault(EasyMock.anyObject()))
          .andReturn(Lists.newArrayList(
              new IntervalLoadRule(
                  Intervals.of("2012-01-01T00:00:00.000Z/2012-01-01T12:00:00.000Z"),
                  ImmutableMap.of("normal", 1)
              ),
              new IntervalDropRule(Intervals.of("2012-01-01T00:00:00.000Z/2012-01-02T00:00:00.000Z"))
          ))
          .atLeastOnce();
  EasyMock.replay(databaseRuleManager);

  DruidServer server = new DruidServer("serverNorm", "hostNorm", null, 1000, ServerType.HISTORICAL, "normal", 0);
  for (DataSegment segment : usedSegments) {
    server.addDataSegment(segment);
  }

  DruidCluster druidCluster = DruidClusterBuilder
      .newBuilder()
      .addTier("normal", new ServerHolder(server.toImmutableDruidServer(), mockPeon))
      .build();
  SegmentReplicantLookup segmentReplicantLookup = SegmentReplicantLookup.make(druidCluster, false);
  ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
  BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);

  DruidCoordinatorRuntimeParams params = createCoordinatorRuntimeParams(druidCluster)
      .withDynamicConfigs(COORDINATOR_CONFIG_WITH_ZERO_LEADING_TIME_BEFORE_CAN_MARK_AS_UNUSED_OVERSHADOWED_SEGMENTS)
      .withSegmentReplicantLookup(segmentReplicantLookup)
      .withBalancerStrategy(balancerStrategy)
      .build();
  DruidCoordinatorRuntimeParams afterParams = ruleRunner.run(params);
  CoordinatorStats stats = afterParams.getCoordinatorStats();

  Assert.assertEquals(12L, stats.getGlobalStat("deletedCount"));
  exec.shutdown();
  EasyMock.verify(coordinator);
}
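The COORDINATOR_CONFIG_WITH_ZERO_LEADING_TIME_BEFORE_CAN_MARK_AS_UNUSED_OVERSHADOWED_SEGMENTS constant used here and in testDontDropInDifferentTiers is defined outside this excerpt; judging by the inline builder in testDropTooManyInSameTier, it is presumably equivalent to something like the following sketch (an assumption, not the actual field):

  // Assumed definition of the constant, mirroring the builder call used inline elsewhere in the class.
  private static final CoordinatorDynamicConfig
      COORDINATOR_CONFIG_WITH_ZERO_LEADING_TIME_BEFORE_CAN_MARK_AS_UNUSED_OVERSHADOWED_SEGMENTS =
          CoordinatorDynamicConfig.builder()
                                  .withLeadingTimeMillisBeforeCanMarkAsUnusedOvershadowedSegments(0L)
                                  .build();

As the property name suggests, a zero leading time means the coordinator does not wait after becoming leader before it may mark overshadowed segments as unused.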
Use of org.apache.druid.server.coordinator.rules.IntervalDropRule in project druid by druid-io.
In the class DataSourcesResourceTest, the method testIsHandOffComplete checks how the rule chain drives the handoff-complete check: an interval covered by the drop rule is reported complete without consulting the timeline, while an interval covered by the load rule is complete only if a served segment for it exists in the timeline.
@Test
public void testIsHandOffComplete() {
  MetadataRuleManager databaseRuleManager = EasyMock.createMock(MetadataRuleManager.class);
  Rule loadRule = new IntervalLoadRule(Intervals.of("2013-01-02T00:00:00Z/2013-01-03T00:00:00Z"), null);
  Rule dropRule = new IntervalDropRule(Intervals.of("2013-01-01T00:00:00Z/2013-01-02T00:00:00Z"));
  DataSourcesResource dataSourcesResource =
      new DataSourcesResource(inventoryView, null, databaseRuleManager, null, null, null);

  // test dropped
  EasyMock.expect(databaseRuleManager.getRulesWithDefault("dataSource1"))
          .andReturn(ImmutableList.of(loadRule, dropRule))
          .once();
  EasyMock.replay(databaseRuleManager);
  String interval1 = "2013-01-01T01:00:00Z/2013-01-01T02:00:00Z";
  Response response1 = dataSourcesResource.isHandOffComplete("dataSource1", interval1, 1, "v1");
  Assert.assertTrue((boolean) response1.getEntity());
  EasyMock.verify(databaseRuleManager);

  // test isn't dropped and no timeline found
  EasyMock.reset(databaseRuleManager);
  EasyMock.expect(databaseRuleManager.getRulesWithDefault("dataSource1"))
          .andReturn(ImmutableList.of(loadRule, dropRule))
          .once();
  EasyMock.expect(inventoryView.getTimeline(new TableDataSource("dataSource1"))).andReturn(null).once();
  EasyMock.replay(inventoryView, databaseRuleManager);
  String interval2 = "2013-01-02T01:00:00Z/2013-01-02T02:00:00Z";
  Response response2 = dataSourcesResource.isHandOffComplete("dataSource1", interval2, 1, "v1");
  Assert.assertFalse((boolean) response2.getEntity());
  EasyMock.verify(inventoryView, databaseRuleManager);

  // test isn't dropped and timeline exists
  String interval3 = "2013-01-02T02:00:00Z/2013-01-02T03:00:00Z";
  SegmentLoadInfo segmentLoadInfo = new SegmentLoadInfo(createSegment(Intervals.of(interval3), "v1", 1));
  segmentLoadInfo.addServer(createHistoricalServerMetadata("test"));
  VersionedIntervalTimeline<String, SegmentLoadInfo> timeline =
      new VersionedIntervalTimeline<String, SegmentLoadInfo>(null) {
        @Override
        public List<TimelineObjectHolder<String, SegmentLoadInfo>> lookupWithIncompletePartitions(Interval interval) {
          PartitionHolder<SegmentLoadInfo> partitionHolder =
              new PartitionHolder<>(new NumberedPartitionChunk<>(1, 1, segmentLoadInfo));
          List<TimelineObjectHolder<String, SegmentLoadInfo>> ret = new ArrayList<>();
          ret.add(new TimelineObjectHolder<>(Intervals.of(interval3), "v1", partitionHolder));
          return ret;
        }
      };
  EasyMock.reset(inventoryView, databaseRuleManager);
  EasyMock.expect(databaseRuleManager.getRulesWithDefault("dataSource1"))
          .andReturn(ImmutableList.of(loadRule, dropRule))
          .once();
  EasyMock.expect(inventoryView.getTimeline(new TableDataSource("dataSource1"))).andReturn(timeline).once();
  EasyMock.replay(inventoryView, databaseRuleManager);
  Response response3 = dataSourcesResource.isHandOffComplete("dataSource1", interval3, 1, "v1");
  Assert.assertTrue((boolean) response3.getEntity());
  EasyMock.verify(inventoryView, databaseRuleManager);
}
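The first assertion passes because an interval wholly covered by a drop rule needs no servers, so handoff can be reported complete without even looking at the timeline. A hedged sketch of that decision, reusing the test's loadRule and dropRule (DropRule is the common base class of IntervalDropRule; this is not the resource's actual implementation):

  Interval theInterval = Intervals.of("2013-01-01T01:00:00Z/2013-01-01T02:00:00Z");   // interval1
  DateTime now = DateTimes.nowUtc();
  boolean handedOff = false;
  for (Rule rule : ImmutableList.of(loadRule, dropRule)) {
    if (rule instanceof DropRule && rule.appliesTo(theInterval, now)) {
      handedOff = true;   // a dropped interval is trivially handed off
      break;
    }
  }
  // handedOff is true here, matching Assert.assertTrue((boolean) response1.getEntity());
  // for interval2 and interval3 no drop rule applies, so the resource must find a served
  // segment in the timeline, which is what the remaining two scenarios exercise.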
Use of org.apache.druid.server.coordinator.rules.IntervalDropRule in project druid by druid-io.
In the class CoordinatorRuleManagerTest, the method testGetRulesWithKnownDatasourceReturningAllRulesWithDefaultRule verifies that getRulesWithDefault returns the datasource's own rules (a ForeverLoadRule followed by an IntervalDropRule) with the cluster's default rules appended.
@Test
public void testGetRulesWithKnownDatasourceReturningAllRulesWithDefaultRule() {
  final CoordinatorRuleManager manager =
      new CoordinatorRuleManager(objectMapper, () -> tieredBrokerConfig, mockClient());
  manager.poll();
  final List<Rule> rules = manager.getRulesWithDefault(DATASOURCE2);
  final List<Rule> expectedRules = new ArrayList<>();
  expectedRules.add(new ForeverLoadRule(null));
  expectedRules.add(new IntervalDropRule(Intervals.of("2020-01-01/2020-01-02")));
  expectedRules.addAll(DEFAULT_RULES);
  Assert.assertEquals(expectedRules, rules);
}
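CoordinatorRuleManager polls these rule sets from the coordinator over HTTP, so the rules compared above also have a JSON form. A small sketch of that round trip, assuming Druid's org.apache.druid.jackson.DefaultObjectMapper, the usual Jackson imports (ObjectMapper, TypeReference), and the registered rule type names "loadForever" and "dropByInterval"; the helper method and the printed JSON shape are illustrative, not taken from the test:

  static void ruleRoundTrip() throws IOException {
    ObjectMapper mapper = new DefaultObjectMapper();
    List<Rule> rules = ImmutableList.of(
        new ForeverLoadRule(null),
        new IntervalDropRule(Intervals.of("2020-01-01/2020-01-02"))
    );
    // Serialized roughly as:
    // [{"type":"loadForever","tieredReplicants":{"_default_tier":2}},
    //  {"type":"dropByInterval","interval":"2020-01-01T00:00:00.000Z/2020-01-02T00:00:00.000Z"}]
    String json = mapper.writeValueAsString(rules);
    // The "type" field restores the concrete Rule subclasses on the way back in;
    // the rules implement equals(), which is what Assert.assertEquals above relies on.
    List<Rule> roundTripped = mapper.readValue(json, new TypeReference<List<Rule>>() {});
  }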