Example 1 with IntervalDropRule

Use of io.druid.server.coordinator.rules.IntervalDropRule in project druid by druid-io.

The class DruidCoordinatorRuleRunnerTest, method testDropTooManyInDifferentTiers.

@Test
public void testDropTooManyInDifferentTiers() throws Exception {
    mockCoordinator();
    mockPeon.loadSegment(EasyMock.<DataSegment>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    mockPeon.dropSegment(EasyMock.<DataSegment>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    EasyMock.expect(mockPeon.getSegmentsToLoad()).andReturn(Sets.<DataSegment>newHashSet()).atLeastOnce();
    EasyMock.expect(mockPeon.getLoadQueueSize()).andReturn(0L).atLeastOnce();
    EasyMock.replay(mockPeon);
    EasyMock.expect(databaseRuleManager.getRulesWithDefault(EasyMock.<String>anyObject())).andReturn(
        Lists.<Rule>newArrayList(
            new IntervalLoadRule(
                new Interval("2012-01-01T00:00:00.000Z/2012-01-01T12:00:00.000Z"),
                ImmutableMap.<String, Integer>of("hot", 1)
            ),
            new IntervalDropRule(new Interval("2012-01-01T00:00:00.000Z/2012-01-02T00:00:00.000Z"))
        )
    ).atLeastOnce();
    EasyMock.replay(databaseRuleManager);
    DruidServer server1 = new DruidServer("server1", "host1", 1000, "historical", "hot", 0);
    server1.addDataSegment(availableSegments.get(0).getIdentifier(), availableSegments.get(0));
    DruidServer server2 = new DruidServer("serverNorm2", "hostNorm2", 1000, "historical", "normal", 0);
    for (DataSegment segment : availableSegments) {
        server2.addDataSegment(segment.getIdentifier(), segment);
    }
    DruidCluster druidCluster = new DruidCluster(
        ImmutableMap.of(
            "hot",
            MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse())
                .create(Arrays.asList(new ServerHolder(server1.toImmutableDruidServer(), mockPeon))),
            "normal",
            MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse())
                .create(Arrays.asList(new ServerHolder(server2.toImmutableDruidServer(), mockPeon)))
        )
    );
    SegmentReplicantLookup segmentReplicantLookup = SegmentReplicantLookup.make(druidCluster);
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
    DruidCoordinatorRuntimeParams params = new DruidCoordinatorRuntimeParams.Builder()
        .withDruidCluster(druidCluster)
        .withDynamicConfigs(new CoordinatorDynamicConfig.Builder().withMillisToWaitBeforeDeleting(0L).build())
        .withAvailableSegments(availableSegments)
        .withDatabaseRuleManager(databaseRuleManager)
        .withSegmentReplicantLookup(segmentReplicantLookup)
        .withBalancerStrategy(balancerStrategy)
        .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
        .build();
    DruidCoordinatorRuntimeParams afterParams = ruleRunner.run(params);
    CoordinatorStats stats = afterParams.getCoordinatorStats();
    Assert.assertTrue(stats.getPerTierStats().get("droppedCount").get("normal").get() == 1);
    Assert.assertTrue(stats.getGlobalStats().get("deletedCount").get() == 12);
    exec.shutdown();
    EasyMock.verify(mockPeon);
}
Also used: IntervalLoadRule(io.druid.server.coordinator.rules.IntervalLoadRule) ServiceEventBuilder(com.metamx.emitter.service.ServiceEventBuilder) DruidServer(io.druid.client.DruidServer) IntervalDropRule(io.druid.server.coordinator.rules.IntervalDropRule) DataSegment(io.druid.timeline.DataSegment) DateTime(org.joda.time.DateTime) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) ForeverLoadRule(io.druid.server.coordinator.rules.ForeverLoadRule) Rule(io.druid.server.coordinator.rules.Rule) Interval(org.joda.time.Interval) Test(org.junit.Test)
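
The rule list in this test is order-sensitive: the IntervalLoadRule only covers the first twelve hours of 2012-01-01, the IntervalDropRule covers the whole day, and the coordinator uses the first rule whose interval matches a segment. Below is a minimal standalone sketch of that matching; it is an illustration, not part of the test above, and it assumes the appliesTo(Interval, DateTime) overload on this Druid version's Rule implementations.

import com.google.common.collect.ImmutableMap;
import io.druid.server.coordinator.rules.IntervalDropRule;
import io.druid.server.coordinator.rules.IntervalLoadRule;
import org.joda.time.DateTime;
import org.joda.time.Interval;

// Hypothetical illustration class; the rule constructors mirror the test above,
// the appliesTo(Interval, DateTime) overload is assumed from the Rule interface.
public class IntervalRuleMatchingSketch {
    public static void main(String[] args) {
        IntervalLoadRule loadRule = new IntervalLoadRule(
            new Interval("2012-01-01T00:00:00.000Z/2012-01-01T12:00:00.000Z"),
            ImmutableMap.<String, Integer>of("hot", 1)
        );
        IntervalDropRule dropRule = new IntervalDropRule(
            new Interval("2012-01-01T00:00:00.000Z/2012-01-02T00:00:00.000Z")
        );
        DateTime referenceTimestamp = new DateTime("2013-01-01");

        // A segment inside the first twelve hours is matched by the load rule first.
        Interval morningSegment = new Interval("2012-01-01T00:00:00.000Z/2012-01-01T01:00:00.000Z");
        System.out.println(loadRule.appliesTo(morningSegment, referenceTimestamp)); // expected: true

        // A segment in the second half of the day falls through to the drop rule.
        Interval eveningSegment = new Interval("2012-01-01T13:00:00.000Z/2012-01-01T14:00:00.000Z");
        System.out.println(loadRule.appliesTo(eveningSegment, referenceTimestamp)); // expected: false
        System.out.println(dropRule.appliesTo(eveningSegment, referenceTimestamp)); // expected: true
    }
}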

Example 2 with IntervalDropRule

Use of io.druid.server.coordinator.rules.IntervalDropRule in project druid by druid-io.

The class DruidCoordinatorRuleRunnerTest, method testDropRemove.

@Test
public void testDropRemove() throws Exception {
    mockPeon.dropSegment(EasyMock.<DataSegment>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    EasyMock.expect(mockPeon.getSegmentsToLoad()).andReturn(Sets.<DataSegment>newHashSet()).atLeastOnce();
    EasyMock.expect(mockPeon.getLoadQueueSize()).andReturn(0L).atLeastOnce();
    EasyMock.replay(mockPeon);
    EasyMock.expect(coordinator.getDynamicConfigs()).andReturn(new CoordinatorDynamicConfig(0, 0, 0, 0, 1, 24, 0, false, null, false)).anyTimes();
    coordinator.removeSegment(EasyMock.<DataSegment>anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    EasyMock.replay(coordinator);
    EasyMock.expect(databaseRuleManager.getRulesWithDefault(EasyMock.<String>anyObject())).andReturn(
        Lists.<Rule>newArrayList(
            new IntervalLoadRule(
                new Interval("2012-01-01T00:00:00.000Z/2012-01-01T12:00:00.000Z"),
                ImmutableMap.<String, Integer>of("normal", 1)
            ),
            new IntervalDropRule(new Interval("2012-01-01T00:00:00.000Z/2012-01-02T00:00:00.000Z"))
        )
    ).atLeastOnce();
    EasyMock.replay(databaseRuleManager);
    DruidServer server = new DruidServer("serverNorm", "hostNorm", 1000, "historical", "normal", 0);
    for (DataSegment segment : availableSegments) {
        server.addDataSegment(segment.getIdentifier(), segment);
    }
    DruidCluster druidCluster = new DruidCluster(
        ImmutableMap.of(
            "normal",
            MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse())
                .create(Arrays.asList(new ServerHolder(server.toImmutableDruidServer(), mockPeon)))
        )
    );
    SegmentReplicantLookup segmentReplicantLookup = SegmentReplicantLookup.make(druidCluster);
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
    DruidCoordinatorRuntimeParams params = new DruidCoordinatorRuntimeParams.Builder()
        .withDruidCluster(druidCluster)
        .withDynamicConfigs(new CoordinatorDynamicConfig.Builder().withMillisToWaitBeforeDeleting(0L).build())
        .withAvailableSegments(availableSegments)
        .withDatabaseRuleManager(databaseRuleManager)
        .withSegmentReplicantLookup(segmentReplicantLookup)
        .withBalancerStrategy(balancerStrategy)
        .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
        .build();
    DruidCoordinatorRuntimeParams afterParams = ruleRunner.run(params);
    CoordinatorStats stats = afterParams.getCoordinatorStats();
    Assert.assertTrue(stats.getGlobalStats().get("deletedCount").get() == 12);
    exec.shutdown();
    EasyMock.verify(coordinator);
}
Also used: IntervalLoadRule(io.druid.server.coordinator.rules.IntervalLoadRule) ServiceEventBuilder(com.metamx.emitter.service.ServiceEventBuilder) DruidServer(io.druid.client.DruidServer) IntervalDropRule(io.druid.server.coordinator.rules.IntervalDropRule) DataSegment(io.druid.timeline.DataSegment) DateTime(org.joda.time.DateTime) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) ForeverLoadRule(io.druid.server.coordinator.rules.ForeverLoadRule) Rule(io.druid.server.coordinator.rules.Rule) Interval(org.joda.time.Interval) Test(org.junit.Test)
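
Compared with Example 1, testDropRemove also expects the coordinator itself to delete the segments from the cluster (removeSegment), not just the load queue peon to drop a replica from one server. For contrast, the two expectations are repeated side by side below, excerpted verbatim from the tests above:

// Replica dropped from a single server's load queue (both tests):
mockPeon.dropSegment(EasyMock.<DataSegment>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
EasyMock.expectLastCall().atLeastOnce();

// Segment removed from the cluster entirely (testDropRemove only):
coordinator.removeSegment(EasyMock.<DataSegment>anyObject());
EasyMock.expectLastCall().atLeastOnce();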

Example 3 with IntervalDropRule

Use of io.druid.server.coordinator.rules.IntervalDropRule in project druid by druid-io.

The class DruidCoordinatorRuleRunnerTest, method testDontDropInDifferentTiers.

@Test
public void testDontDropInDifferentTiers() throws Exception {
    mockCoordinator();
    mockPeon.loadSegment(EasyMock.<DataSegment>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    EasyMock.expect(mockPeon.getSegmentsToLoad()).andReturn(Sets.<DataSegment>newHashSet()).atLeastOnce();
    EasyMock.expect(mockPeon.getLoadQueueSize()).andReturn(0L).atLeastOnce();
    EasyMock.replay(mockPeon);
    EasyMock.expect(databaseRuleManager.getRulesWithDefault(EasyMock.<String>anyObject())).andReturn(
        Lists.<Rule>newArrayList(
            new IntervalLoadRule(
                new Interval("2012-01-01T00:00:00.000Z/2012-01-01T12:00:00.000Z"),
                ImmutableMap.<String, Integer>of("hot", 1)
            ),
            new IntervalDropRule(new Interval("2012-01-01T00:00:00.000Z/2012-01-02T00:00:00.000Z"))
        )
    ).atLeastOnce();
    EasyMock.replay(databaseRuleManager);
    DruidServer server1 = new DruidServer("server1", "host1", 1000, "historical", "hot", 0);
    DruidServer server2 = new DruidServer("serverNorm2", "hostNorm2", 1000, "historical", "normal", 0);
    for (DataSegment segment : availableSegments) {
        server2.addDataSegment(segment.getIdentifier(), segment);
    }
    DruidCluster druidCluster = new DruidCluster(
        ImmutableMap.of(
            "hot",
            MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse())
                .create(Arrays.asList(new ServerHolder(server1.toImmutableDruidServer(), mockPeon))),
            "normal",
            MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse())
                .create(Arrays.asList(new ServerHolder(server2.toImmutableDruidServer(), mockPeon)))
        )
    );
    SegmentReplicantLookup segmentReplicantLookup = SegmentReplicantLookup.make(druidCluster);
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
    DruidCoordinatorRuntimeParams params = new DruidCoordinatorRuntimeParams.Builder()
        .withDruidCluster(druidCluster)
        .withDynamicConfigs(new CoordinatorDynamicConfig.Builder().withMillisToWaitBeforeDeleting(0L).build())
        .withAvailableSegments(availableSegments)
        .withDatabaseRuleManager(databaseRuleManager)
        .withSegmentReplicantLookup(segmentReplicantLookup)
        .withBalancerStrategy(balancerStrategy)
        .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
        .build();
    DruidCoordinatorRuntimeParams afterParams = ruleRunner.run(params);
    CoordinatorStats stats = afterParams.getCoordinatorStats();
    Assert.assertTrue(stats.getPerTierStats().get("droppedCount") == null);
    Assert.assertTrue(stats.getGlobalStats().get("deletedCount").get() == 12);
    exec.shutdown();
    EasyMock.verify(mockPeon);
}
Also used: IntervalLoadRule(io.druid.server.coordinator.rules.IntervalLoadRule) ServiceEventBuilder(com.metamx.emitter.service.ServiceEventBuilder) DruidServer(io.druid.client.DruidServer) IntervalDropRule(io.druid.server.coordinator.rules.IntervalDropRule) DataSegment(io.druid.timeline.DataSegment) DateTime(org.joda.time.DateTime) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) ForeverLoadRule(io.druid.server.coordinator.rules.ForeverLoadRule) Rule(io.druid.server.coordinator.rules.Rule) Interval(org.joda.time.Interval) Test(org.junit.Test)

Example 4 with IntervalDropRule

Use of io.druid.server.coordinator.rules.IntervalDropRule in project druid by druid-io.

The class DruidCoordinatorRuleRunnerTest, method testDropTooManyInSameTier.

@Test
public void testDropTooManyInSameTier() throws Exception {
    mockCoordinator();
    mockPeon.dropSegment(EasyMock.<DataSegment>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    EasyMock.expect(mockPeon.getSegmentsToLoad()).andReturn(Sets.<DataSegment>newHashSet()).atLeastOnce();
    EasyMock.expect(mockPeon.getLoadQueueSize()).andReturn(0L).atLeastOnce();
    EasyMock.replay(mockPeon);
    EasyMock.expect(databaseRuleManager.getRulesWithDefault(EasyMock.<String>anyObject())).andReturn(
        Lists.<Rule>newArrayList(
            new IntervalLoadRule(
                new Interval("2012-01-01T00:00:00.000Z/2012-01-01T12:00:00.000Z"),
                ImmutableMap.<String, Integer>of("normal", 1)
            ),
            new IntervalDropRule(new Interval("2012-01-01T00:00:00.000Z/2012-01-02T00:00:00.000Z"))
        )
    ).atLeastOnce();
    EasyMock.replay(databaseRuleManager);
    DruidServer server1 = new DruidServer("serverNorm", "hostNorm", 1000, "historical", "normal", 0);
    server1.addDataSegment(availableSegments.get(0).getIdentifier(), availableSegments.get(0));
    DruidServer server2 = new DruidServer("serverNorm2", "hostNorm2", 1000, "historical", "normal", 0);
    for (DataSegment segment : availableSegments) {
        server2.addDataSegment(segment.getIdentifier(), segment);
    }
    DruidCluster druidCluster = new DruidCluster(
        ImmutableMap.of(
            "normal",
            MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse()).create(
                Arrays.asList(
                    new ServerHolder(server1.toImmutableDruidServer(), mockPeon),
                    new ServerHolder(server2.toImmutableDruidServer(), mockPeon)
                )
            )
        )
    );
    SegmentReplicantLookup segmentReplicantLookup = SegmentReplicantLookup.make(druidCluster);
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
    DruidCoordinatorRuntimeParams params = new DruidCoordinatorRuntimeParams.Builder()
        .withDruidCluster(druidCluster)
        .withDynamicConfigs(new CoordinatorDynamicConfig.Builder().withMillisToWaitBeforeDeleting(0L).build())
        .withAvailableSegments(availableSegments)
        .withDatabaseRuleManager(databaseRuleManager)
        .withSegmentReplicantLookup(segmentReplicantLookup)
        .withBalancerStrategy(balancerStrategy)
        .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
        .build();
    DruidCoordinatorRuntimeParams afterParams = ruleRunner.run(params);
    CoordinatorStats stats = afterParams.getCoordinatorStats();
    Assert.assertTrue(stats.getPerTierStats().get("droppedCount").get("normal").get() == 1);
    Assert.assertTrue(stats.getGlobalStats().get("deletedCount").get() == 12);
    exec.shutdown();
    EasyMock.verify(mockPeon);
}
Also used: IntervalLoadRule(io.druid.server.coordinator.rules.IntervalLoadRule) ServiceEventBuilder(com.metamx.emitter.service.ServiceEventBuilder) DruidServer(io.druid.client.DruidServer) IntervalDropRule(io.druid.server.coordinator.rules.IntervalDropRule) DataSegment(io.druid.timeline.DataSegment) DateTime(org.joda.time.DateTime) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) ForeverLoadRule(io.druid.server.coordinator.rules.ForeverLoadRule) Rule(io.druid.server.coordinator.rules.Rule) Interval(org.joda.time.Interval) Test(org.junit.Test)
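
All four examples read the result counters the same way, with Assert.assertTrue over boxed comparisons. As a readability note only (the project's tests keep the assertTrue form), the equivalent JUnit assertions for the counts checked above could also be written as:

Assert.assertEquals(1L, stats.getPerTierStats().get("droppedCount").get("normal").get());
Assert.assertEquals(12L, stats.getGlobalStats().get("deletedCount").get());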

Aggregations

ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService): 4
ServiceEventBuilder (com.metamx.emitter.service.ServiceEventBuilder): 4
DruidServer (io.druid.client.DruidServer): 4
ForeverLoadRule (io.druid.server.coordinator.rules.ForeverLoadRule): 4
IntervalDropRule (io.druid.server.coordinator.rules.IntervalDropRule): 4
IntervalLoadRule (io.druid.server.coordinator.rules.IntervalLoadRule): 4
Rule (io.druid.server.coordinator.rules.Rule): 4
DataSegment (io.druid.timeline.DataSegment): 4
DateTime (org.joda.time.DateTime): 4
Interval (org.joda.time.Interval): 4
Test (org.junit.Test): 4