Use of io.druid.server.coordinator.rules.IntervalLoadRule in project druid by druid-io.
From the class SQLMetadataRuleManagerTest, method testRuleInsert.
@Test
public void testRuleInsert() {
    List<Rule> rules = Arrays.<Rule>asList(
        new IntervalLoadRule(
            new Interval("2015-01-01/2015-02-01"),
            ImmutableMap.<String, Integer>of(DruidServer.DEFAULT_TIER, DruidServer.DEFAULT_NUM_REPLICANTS)
        )
    );
    AuditInfo auditInfo = new AuditInfo("test_author", "test_comment", "127.0.0.1");
    ruleManager.overrideRule("test_dataSource", rules, auditInfo);
    // The new rule should be reflected in the in-memory rules map immediately after being set by the user
    Map<String, List<Rule>> allRules = ruleManager.getAllRules();
    Assert.assertEquals(1, allRules.size());
    Assert.assertEquals(1, allRules.get("test_dataSource").size());
    Assert.assertEquals(rules.get(0), allRules.get("test_dataSource").get(0));
}
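For context, IntervalLoadRule pairs a Joda-Time Interval with a per-tier replicant count and serializes through Druid's polymorphic Rule Jackson bindings. A minimal sketch of the JSON round trip, reusing the test's mapper fixture; the expected field names in the comment are hedged from memory (DruidServer.DEFAULT_TIER is "_default_tier" and DEFAULT_NUM_REPLICANTS is 2 in stock Druid) and should be verified against the Rule bindings:

Rule rule = new IntervalLoadRule(
    new Interval("2015-01-01/2015-02-01"),
    ImmutableMap.<String, Integer>of(DruidServer.DEFAULT_TIER, DruidServer.DEFAULT_NUM_REPLICANTS)
);
// Rough expected shape (hedged), with the interval rendered in full ISO-8601 form:
// {"type":"loadByInterval","interval":"2015-01-01T00:00:00.000Z/2015-02-01T00:00:00.000Z",
//  "tieredReplicants":{"_default_tier":2}}
String json = mapper.writeValueAsString(rule);
// The @JsonTypeInfo metadata on Rule lets Jackson pick the concrete subclass back out
Rule roundTripped = mapper.readValue(json, Rule.class);
Assert.assertEquals(rule, roundTripped);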
Use of io.druid.server.coordinator.rules.IntervalLoadRule in project druid by druid-io.
From the class SQLMetadataRuleManagerTest, method testAuditEntryCreated.
@Test
public void testAuditEntryCreated() throws Exception {
    List<Rule> rules = Arrays.<Rule>asList(
        new IntervalLoadRule(
            new Interval("2015-01-01/2015-02-01"),
            ImmutableMap.<String, Integer>of(DruidServer.DEFAULT_TIER, DruidServer.DEFAULT_NUM_REPLICANTS)
        )
    );
    AuditInfo auditInfo = new AuditInfo("test_author", "test_comment", "127.0.0.1");
    ruleManager.overrideRule("test_dataSource", rules, auditInfo);
    // fetch rules from metadata storage
    ruleManager.poll();
    Assert.assertEquals(rules, ruleManager.getRules("test_dataSource"));
    // verify audit entry is created
    List<AuditEntry> auditEntries = auditManager.fetchAuditHistory("test_dataSource", "rules", null);
    Assert.assertEquals(1, auditEntries.size());
    AuditEntry entry = auditEntries.get(0);
    Assert.assertEquals(rules, mapper.readValue(entry.getPayload(), new TypeReference<List<Rule>>() {}));
    Assert.assertEquals(auditInfo, entry.getAuditInfo());
    Assert.assertEquals("test_dataSource", entry.getKey());
}
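Note the contrast with testRuleInsert above: overrideRule updates the manager's in-memory map synchronously, while poll() re-reads the rules from metadata storage. A short sketch exercising both paths inside the same test (same fixtures assumed):

// The in-memory view is updated synchronously by overrideRule(...)
Assert.assertEquals(rules, ruleManager.getAllRules().get("test_dataSource"));
// poll() refreshes the cache from the metadata store, so the same rules
// should come back after a round trip through persistence
ruleManager.poll();
Assert.assertEquals(rules, ruleManager.getRules("test_dataSource"));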
Use of io.druid.server.coordinator.rules.IntervalLoadRule in project druid by druid-io.
From the class SQLMetadataRuleManagerTest, method testFetchAuditEntriesForAllDataSources.
@Test
public void testFetchAuditEntriesForAllDataSources() throws Exception {
    List<Rule> rules = Arrays.<Rule>asList(
        new IntervalLoadRule(
            new Interval("2015-01-01/2015-02-01"),
            ImmutableMap.<String, Integer>of(DruidServer.DEFAULT_TIER, DruidServer.DEFAULT_NUM_REPLICANTS)
        )
    );
    AuditInfo auditInfo = new AuditInfo("test_author", "test_comment", "127.0.0.1");
    ruleManager.overrideRule("test_dataSource", rules, auditInfo);
    ruleManager.overrideRule("test_dataSource2", rules, auditInfo);
    // fetch rules from metadata storage
    ruleManager.poll();
    Assert.assertEquals(rules, ruleManager.getRules("test_dataSource"));
    Assert.assertEquals(rules, ruleManager.getRules("test_dataSource2"));
    // fetch audit entries across all datasources
    List<AuditEntry> auditEntries = auditManager.fetchAuditHistory("rules", null);
    Assert.assertEquals(2, auditEntries.size());
    for (AuditEntry entry : auditEntries) {
        Assert.assertEquals(rules, mapper.readValue(entry.getPayload(), new TypeReference<List<Rule>>() {}));
        Assert.assertEquals(auditInfo, entry.getAuditInfo());
    }
}
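This test uses the type-wide overload of fetchAuditHistory, whereas testAuditEntryCreated used the per-key form. A hedged sketch of the two calls side by side; as far as I can tell from the AuditManager interface, the trailing null is an optional Interval restricting the history window (null meaning all time):

// Per-key history: audit entries for one datasource's "rules" changes
List<AuditEntry> forOne = auditManager.fetchAuditHistory("test_dataSource", "rules", null);
// Type-wide history: entries across all datasources for the "rules" audit type
List<AuditEntry> forAll = auditManager.fetchAuditHistory("rules", null);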
Use of io.druid.server.coordinator.rules.IntervalLoadRule in project druid by druid-io.
From the class DruidCoordinatorRuleRunnerTest, method testRunTwoTiersTwoReplicants.
/**
* Nodes:
* hot - 2 replicants
* cold - 1 replicant
*
* @throws Exception
*/
@Test
public void testRunTwoTiersTwoReplicants() throws Exception {
    mockCoordinator();
    mockPeon.loadSegment(EasyMock.<DataSegment>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    EasyMock.expect(mockPeon.getSegmentsToLoad()).andReturn(Sets.<DataSegment>newHashSet()).atLeastOnce();
    EasyMock.expect(mockPeon.getLoadQueueSize()).andReturn(0L).atLeastOnce();
    EasyMock.replay(mockPeon);
    EasyMock.expect(databaseRuleManager.getRulesWithDefault(EasyMock.<String>anyObject())).andReturn(
        Lists.<Rule>newArrayList(
            new IntervalLoadRule(
                new Interval("2012-01-01T00:00:00.000Z/2012-01-01T06:00:00.000Z"),
                ImmutableMap.<String, Integer>of("hot", 2)
            ),
            new IntervalLoadRule(
                new Interval("2012-01-01T00:00:00.000Z/2012-01-02T00:00:00.000Z"),
                ImmutableMap.<String, Integer>of("cold", 1)
            )
        )
    ).atLeastOnce();
    EasyMock.replay(databaseRuleManager);
    DruidCluster druidCluster = new DruidCluster(
        ImmutableMap.of(
            "hot",
            MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse()).create(
                Arrays.asList(
                    new ServerHolder(new DruidServer("serverHot", "hostHot", 1000, "historical", "hot", 0).toImmutableDruidServer(), mockPeon),
                    new ServerHolder(new DruidServer("serverHot2", "hostHot2", 1000, "historical", "hot", 0).toImmutableDruidServer(), mockPeon)
                )
            ),
            "cold",
            MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse()).create(
                Arrays.asList(
                    new ServerHolder(new DruidServer("serverCold", "hostCold", 1000, "historical", "cold", 0).toImmutableDruidServer(), mockPeon)
                )
            )
        )
    );
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
    DruidCoordinatorRuntimeParams params = new DruidCoordinatorRuntimeParams.Builder()
        .withDruidCluster(druidCluster)
        .withAvailableSegments(availableSegments)
        .withDatabaseRuleManager(databaseRuleManager)
        .withSegmentReplicantLookup(SegmentReplicantLookup.make(new DruidCluster()))
        .withBalancerStrategy(balancerStrategy)
        .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
        .build();
    DruidCoordinatorRuntimeParams afterParams = ruleRunner.run(params);
    CoordinatorStats stats = afterParams.getCoordinatorStats();
    Assert.assertTrue(stats.getPerTierStats().get("assignedCount").get("hot").get() == 12);
    Assert.assertTrue(stats.getPerTierStats().get("assignedCount").get("cold").get() == 18);
    Assert.assertTrue(stats.getPerTierStats().get("unassignedCount") == null);
    Assert.assertTrue(stats.getPerTierStats().get("unassignedSize") == null);
    exec.shutdown();
    EasyMock.verify(mockPeon);
}
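The assigned counts follow from first-match rule evaluation over what the arithmetic suggests are 24 hourly segments in availableSegments: the first six hours match the hot rule (6 x 2 replicants = 12) and the remaining eighteen fall through to the cold rule (18 x 1 = 18). A minimal sketch of that first-match resolution; the helper itself is illustrative, though Rule.appliesTo(DataSegment, DateTime) is, to my understanding, the interface method the coordinator's rule runner uses:

// Hypothetical helper mirroring first-match rule resolution
private static Rule findMatchingRule(List<Rule> rules, DataSegment segment, DateTime referenceTimestamp) {
    for (Rule rule : rules) {
        // The first rule whose interval covers the segment wins; later rules never see it
        if (rule.appliesTo(segment, referenceTimestamp)) {
            return rule;
        }
    }
    return null; // no rule matched this segment
}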
Use of io.druid.server.coordinator.rules.IntervalLoadRule in project druid by druid-io.
From the class DruidCoordinatorRuleRunnerTest, method testReplicantThrottle.
/**
* Nodes:
* hot - 2 replicants
*
* @throws Exception
*/
@Test
public void testReplicantThrottle() throws Exception {
    mockCoordinator();
    mockPeon.loadSegment(EasyMock.<DataSegment>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    EasyMock.expect(mockPeon.getSegmentsToLoad()).andReturn(Sets.<DataSegment>newHashSet()).atLeastOnce();
    EasyMock.expect(mockPeon.getLoadQueueSize()).andReturn(0L).atLeastOnce();
    EasyMock.replay(mockPeon);
    EasyMock.expect(databaseRuleManager.getRulesWithDefault(EasyMock.<String>anyObject())).andReturn(
        Lists.<Rule>newArrayList(
            new IntervalLoadRule(
                new Interval("2012-01-01T00:00:00.000Z/2013-01-01T00:00:00.000Z"),
                ImmutableMap.<String, Integer>of("hot", 2)
            )
        )
    ).atLeastOnce();
    EasyMock.replay(databaseRuleManager);
    DruidCluster druidCluster = new DruidCluster(
        ImmutableMap.of(
            "hot",
            MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse()).create(
                Arrays.asList(
                    new ServerHolder(new DruidServer("serverHot", "hostHot", 1000, "historical", "hot", 0).toImmutableDruidServer(), mockPeon),
                    new ServerHolder(new DruidServer("serverHot2", "hostHot2", 1000, "historical", "hot", 0).toImmutableDruidServer(), mockPeon)
                )
            )
        )
    );
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
    DruidCoordinatorRuntimeParams params = new DruidCoordinatorRuntimeParams.Builder()
        .withDruidCluster(druidCluster)
        .withAvailableSegments(availableSegments)
        .withDatabaseRuleManager(databaseRuleManager)
        .withSegmentReplicantLookup(SegmentReplicantLookup.make(new DruidCluster()))
        .withBalancerStrategy(balancerStrategy)
        .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
        .build();
    DruidCoordinatorRuntimeParams afterParams = ruleRunner.run(params);
    CoordinatorStats stats = afterParams.getCoordinatorStats();
    Assert.assertTrue(stats.getPerTierStats().get("assignedCount").get("hot").get() == 48);
    Assert.assertTrue(stats.getPerTierStats().get("unassignedCount") == null);
    Assert.assertTrue(stats.getPerTierStats().get("unassignedSize") == null);
    DataSegment overFlowSegment = new DataSegment(
        "test",
        new Interval("2012-02-01/2012-02-02"),
        new DateTime().toString(),
        Maps.<String, Object>newHashMap(),
        Lists.<String>newArrayList(),
        Lists.<String>newArrayList(),
        NoneShardSpec.instance(),
        1,
        0
    );
    afterParams = ruleRunner.run(
        new DruidCoordinatorRuntimeParams.Builder()
            .withDruidCluster(druidCluster)
            .withEmitter(emitter)
            .withAvailableSegments(Arrays.asList(overFlowSegment))
            .withDatabaseRuleManager(databaseRuleManager)
            .withBalancerStrategy(balancerStrategy)
            .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
            .withSegmentReplicantLookup(SegmentReplicantLookup.make(new DruidCluster()))
            .build()
    );
    stats = afterParams.getCoordinatorStats();
    Assert.assertTrue(stats.getPerTierStats().get("assignedCount").get("hot").get() == 1);
    Assert.assertTrue(stats.getPerTierStats().get("unassignedCount") == null);
    Assert.assertTrue(stats.getPerTierStats().get("unassignedSize") == null);
    EasyMock.verify(mockPeon);
    exec.shutdown();
}
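The second run's count of 1 reflects the replication throttle: the overflow segment's primary copy is assigned immediately, but creation of additional replicants per tier is capped (the limit surfaces as replicationThrottleLimit in the coordinator dynamic config), so only one assignment is counted here. A simplified, hypothetical sketch of that per-tier budget; the class and method names below are illustrative, not Druid's actual ReplicationThrottler API:

import java.util.HashMap;
import java.util.Map;

class ReplicationBudget {
    private final int maxReplicantsPerRun; // analogous to replicationThrottleLimit
    private final Map<String, Integer> createdByTier = new HashMap<>();

    ReplicationBudget(int maxReplicantsPerRun) {
        this.maxReplicantsPerRun = maxReplicantsPerRun;
    }

    // Primary assignments bypass the budget; only extra replicants count against it
    boolean tryCreateReplicant(String tier) {
        int soFar = createdByTier.getOrDefault(tier, 0);
        if (soFar >= maxReplicantsPerRun) {
            return false; // defer this copy to a later coordinator run
        }
        createdByTier.put(tier, soFar + 1);
        return true;
    }
}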