
Example 11 with Rule

Use of org.apache.druid.server.coordinator.rules.Rule in project druid by druid-io.

The class SQLMetadataRuleManagerTest, method testRemoveRulesOlderThanWithActiveDatasourceShouldNotDelete:

@Test
public void testRemoveRulesOlderThanWithActiveDatasourceShouldNotDelete() throws Exception {
    List<Rule> rules = ImmutableList.of(
        new IntervalLoadRule(
            Intervals.of("2015-01-01/2015-02-01"),
            ImmutableMap.of(DruidServer.DEFAULT_TIER, DruidServer.DEFAULT_NUM_REPLICANTS)
        )
    );
    AuditInfo auditInfo = new AuditInfo("test_author", "test_comment", "127.0.0.1");
    ruleManager.overrideRule("test_dataSource", rules, auditInfo);
    // Verify that rule was added
    ruleManager.poll();
    Map<String, List<Rule>> allRules = ruleManager.getAllRules();
    Assert.assertEquals(1, allRules.size());
    Assert.assertEquals(1, allRules.get("test_dataSource").size());
    // Add segment metadata to segment table so that the datasource is considered active
    DataSegment dataSegment = new DataSegment(
        "test_dataSource",
        Intervals.of("2015-01-01/2015-02-01"),
        "1",
        ImmutableMap.of("type", "s3_zip", "bucket", "test", "key", "test_dataSource/xxx"),
        ImmutableList.of("dim1", "dim2", "dim3"),
        ImmutableList.of("count", "value"),
        NoneShardSpec.instance(),
        1,
        1234L
    );
    publisher.publishSegment(dataSegment);
    // This will not delete the rule because the datasource has a segment in the segment metadata table
    ruleManager.removeRulesForEmptyDatasourcesOlderThan(System.currentTimeMillis());
    // Verify that rule was not deleted
    ruleManager.poll();
    allRules = ruleManager.getAllRules();
    Assert.assertEquals(1, allRules.size());
    Assert.assertEquals(1, allRules.get("test_dataSource").size());
}
Also used : AuditInfo(org.apache.druid.audit.AuditInfo) IntervalLoadRule(org.apache.druid.server.coordinator.rules.IntervalLoadRule) ImmutableList(com.google.common.collect.ImmutableList) List(java.util.List) Rule(org.apache.druid.server.coordinator.rules.Rule) DataSegment(org.apache.druid.timeline.DataSegment) Test(org.junit.Test)
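
For orientation, here is a minimal, hypothetical sketch (not part of the Druid test suite) of how the IntervalLoadRule built above answers appliesTo. It assumes interval rules match intervals they fully contain, which is also the check the TieredBrokerHostSelector example further down relies on.

@Test
public void testIntervalLoadRuleAppliesToSketch() {
    // Hypothetical example exercising Rule.appliesTo(Interval, DateTime) directly.
    Rule rule = new IntervalLoadRule(
        Intervals.of("2015-01-01/2015-02-01"),
        ImmutableMap.of(DruidServer.DEFAULT_TIER, DruidServer.DEFAULT_NUM_REPLICANTS)
    );
    DateTime now = DateTimes.nowUtc();
    // Contained in the rule's interval: the rule applies.
    Assert.assertTrue(rule.appliesTo(Intervals.of("2015-01-10/2015-01-11"), now));
    // Disjoint from the rule's interval: the rule does not apply.
    Assert.assertFalse(rule.appliesTo(Intervals.of("2016-01-01/2016-01-02"), now));
}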

Example 12 with Rule

Use of org.apache.druid.server.coordinator.rules.Rule in project druid by druid-io.

The class DataSourcesResourceTest, method testIsHandOffComplete:

@Test
public void testIsHandOffComplete() {
    MetadataRuleManager databaseRuleManager = EasyMock.createMock(MetadataRuleManager.class);
    Rule loadRule = new IntervalLoadRule(Intervals.of("2013-01-02T00:00:00Z/2013-01-03T00:00:00Z"), null);
    Rule dropRule = new IntervalDropRule(Intervals.of("2013-01-01T00:00:00Z/2013-01-02T00:00:00Z"));
    DataSourcesResource dataSourcesResource = new DataSourcesResource(inventoryView, null, databaseRuleManager, null, null, null);
    // test the case where the segment is dropped
    EasyMock.expect(databaseRuleManager.getRulesWithDefault("dataSource1")).andReturn(ImmutableList.of(loadRule, dropRule)).once();
    EasyMock.replay(databaseRuleManager);
    String interval1 = "2013-01-01T01:00:00Z/2013-01-01T02:00:00Z";
    Response response1 = dataSourcesResource.isHandOffComplete("dataSource1", interval1, 1, "v1");
    Assert.assertTrue((boolean) response1.getEntity());
    EasyMock.verify(databaseRuleManager);
    // test the case where the segment isn't dropped and no timeline is found
    EasyMock.reset(databaseRuleManager);
    EasyMock.expect(databaseRuleManager.getRulesWithDefault("dataSource1")).andReturn(ImmutableList.of(loadRule, dropRule)).once();
    EasyMock.expect(inventoryView.getTimeline(new TableDataSource("dataSource1"))).andReturn(null).once();
    EasyMock.replay(inventoryView, databaseRuleManager);
    String interval2 = "2013-01-02T01:00:00Z/2013-01-02T02:00:00Z";
    Response response2 = dataSourcesResource.isHandOffComplete("dataSource1", interval2, 1, "v1");
    Assert.assertFalse((boolean) response2.getEntity());
    EasyMock.verify(inventoryView, databaseRuleManager);
    // test the case where the segment isn't dropped and a timeline exists
    String interval3 = "2013-01-02T02:00:00Z/2013-01-02T03:00:00Z";
    SegmentLoadInfo segmentLoadInfo = new SegmentLoadInfo(createSegment(Intervals.of(interval3), "v1", 1));
    segmentLoadInfo.addServer(createHistoricalServerMetadata("test"));
    VersionedIntervalTimeline<String, SegmentLoadInfo> timeline = new VersionedIntervalTimeline<String, SegmentLoadInfo>(null) {

        @Override
        public List<TimelineObjectHolder<String, SegmentLoadInfo>> lookupWithIncompletePartitions(Interval interval) {
            PartitionHolder<SegmentLoadInfo> partitionHolder = new PartitionHolder<>(new NumberedPartitionChunk<>(1, 1, segmentLoadInfo));
            List<TimelineObjectHolder<String, SegmentLoadInfo>> ret = new ArrayList<>();
            ret.add(new TimelineObjectHolder<>(Intervals.of(interval3), "v1", partitionHolder));
            return ret;
        }
    };
    EasyMock.reset(inventoryView, databaseRuleManager);
    EasyMock.expect(databaseRuleManager.getRulesWithDefault("dataSource1")).andReturn(ImmutableList.of(loadRule, dropRule)).once();
    EasyMock.expect(inventoryView.getTimeline(new TableDataSource("dataSource1"))).andReturn(timeline).once();
    EasyMock.replay(inventoryView, databaseRuleManager);
    Response response3 = dataSourcesResource.isHandOffComplete("dataSource1", interval3, 1, "v1");
    Assert.assertTrue((boolean) response3.getEntity());
    EasyMock.verify(inventoryView, databaseRuleManager);
}
Also used : PartitionHolder(org.apache.druid.timeline.partition.PartitionHolder) MetadataRuleManager(org.apache.druid.metadata.MetadataRuleManager) IntervalLoadRule(org.apache.druid.server.coordinator.rules.IntervalLoadRule) SegmentLoadInfo(org.apache.druid.client.SegmentLoadInfo) ImmutableSegmentLoadInfo(org.apache.druid.client.ImmutableSegmentLoadInfo) ArrayList(java.util.ArrayList) IntervalDropRule(org.apache.druid.server.coordinator.rules.IntervalDropRule) Response(javax.ws.rs.core.Response) TimelineObjectHolder(org.apache.druid.timeline.TimelineObjectHolder) TableDataSource(org.apache.druid.query.TableDataSource) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) Rule(org.apache.druid.server.coordinator.rules.Rule) Interval(org.joda.time.Interval) Test(org.junit.Test)
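
The handoff check above hinges on first-match rule evaluation. Below is a simplified, hypothetical sketch of that logic (not the actual DataSourcesResource code): the first rule that applies to the interval decides the outcome, and the segment counts as dropped, and therefore handed off, unless that rule is a LoadRule.

// Hypothetical helper summarizing the first-match evaluation the test exercises.
private static boolean isDroppedByRules(List<Rule> rules, Interval interval, DateTime now) {
    for (Rule rule : rules) {
        if (rule.appliesTo(interval, now)) {
            // The first applicable rule wins; only a load rule keeps the segment live.
            return !(rule instanceof LoadRule);
        }
    }
    // No rule applied: treat the segment as dropped.
    return true;
}

Against the rules in the test, interval1 falls only under the drop rule (handed off), while interval2 and interval3 fall under the load rule, which is why the resource must then consult the timeline.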

Example 13 with Rule

Use of org.apache.druid.server.coordinator.rules.Rule in project druid by druid-io.

The class CoordinatorRuleManagerTest, method testGetRulesWithKnownDatasourceReturningAllRulesWithDefaultRule:

@Test
public void testGetRulesWithKnownDatasourceReturningAllRulesWithDefaultRule() {
    final CoordinatorRuleManager manager = new CoordinatorRuleManager(objectMapper, () -> tieredBrokerConfig, mockClient());
    manager.poll();
    final List<Rule> rules = manager.getRulesWithDefault(DATASOURCE2);
    final List<Rule> expectedRules = new ArrayList<>();
    expectedRules.add(new ForeverLoadRule(null));
    expectedRules.add(new IntervalDropRule(Intervals.of("2020-01-01/2020-01-02")));
    expectedRules.addAll(DEFAULT_RULES);
    Assert.assertEquals(expectedRules, rules);
}
Also used : ArrayList(java.util.ArrayList) ForeverLoadRule(org.apache.druid.server.coordinator.rules.ForeverLoadRule) Rule(org.apache.druid.server.coordinator.rules.Rule) PeriodLoadRule(org.apache.druid.server.coordinator.rules.PeriodLoadRule) IntervalDropRule(org.apache.druid.server.coordinator.rules.IntervalDropRule) ForeverDropRule(org.apache.druid.server.coordinator.rules.ForeverDropRule) Test(org.junit.Test)
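
The expected list above encodes the contract of getRulesWithDefault: the datasource's own rules come first, with the cluster-wide default rules appended as a fallback. A minimal sketch of that merge, using hypothetical inputs (a per-datasource rule map and a default list; the real manager polls these from the coordinator):

// Hypothetical sketch of the merge getRulesWithDefault performs.
private static List<Rule> rulesWithDefault(
    Map<String, List<Rule>> rulesByDatasource,
    List<Rule> defaultRules,
    String dataSource
) {
    List<Rule> merged = new ArrayList<>(
        rulesByDatasource.getOrDefault(dataSource, Collections.emptyList())
    );
    // Defaults go last so they only take effect when no datasource rule matches first.
    merged.addAll(defaultRules);
    return merged;
}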

Example 14 with Rule

Use of org.apache.druid.server.coordinator.rules.Rule in project druid by druid-io.

The class TieredBrokerHostSelector, method select:

public <T> Pair<String, Server> select(final Query<T> query) {
    synchronized (lock) {
        if (!ruleManager.isStarted() || !started) {
            return getDefaultLookup();
        }
    }
    String brokerServiceName = null;
    for (TieredBrokerSelectorStrategy strategy : strategies) {
        final Optional<String> optionalName = strategy.getBrokerServiceName(tierConfig, query);
        if (optionalName.isPresent()) {
            brokerServiceName = optionalName.get();
            break;
        }
    }
    if (brokerServiceName == null) {
        // For union queries, the tier is selected based on the rules for the first dataSource.
        List<Rule> rules = ruleManager.getRulesWithDefault(Iterables.getFirst(query.getDataSource().getTableNames(), null));
        // find the rule that can apply to the entire set of intervals
        DateTime now = DateTimes.nowUtc();
        int lastRulePosition = -1;
        LoadRule baseRule = null;
        for (Interval interval : query.getIntervals()) {
            int currRulePosition = 0;
            for (Rule rule : rules) {
                if (rule instanceof LoadRule && currRulePosition > lastRulePosition && rule.appliesTo(interval, now)) {
                    lastRulePosition = currRulePosition;
                    baseRule = (LoadRule) rule;
                    break;
                }
                currRulePosition++;
            }
        }
        if (baseRule == null) {
            return getDefaultLookup();
        }
        // in the baseRule, find the broker of highest priority
        for (Map.Entry<String, String> entry : tierConfig.getTierToBrokerMap().entrySet()) {
            if (baseRule.getTieredReplicants().containsKey(entry.getKey())) {
                brokerServiceName = entry.getValue();
                break;
            }
        }
    }
    if (brokerServiceName == null) {
        log.error("No brokerServiceName found for datasource[%s], intervals[%s]. Using default[%s].", query.getDataSource(), query.getIntervals(), tierConfig.getDefaultBrokerServiceName());
        brokerServiceName = tierConfig.getDefaultBrokerServiceName();
    }
    return getServerPair(brokerServiceName);
}
Also used : LoadRule(org.apache.druid.server.coordinator.rules.LoadRule) Rule(org.apache.druid.server.coordinator.rules.Rule) HashMap(java.util.HashMap) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) DateTime(org.joda.time.DateTime) Interval(org.joda.time.Interval)
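
The scan over query intervals above is easy to misread: the lastRulePosition guard forces each subsequent interval to match a load rule strictly later in the rule list, so the final baseRule is one that can cover every interval. A self-contained sketch with hypothetical data (not from the Druid test suite) showing the effect:

@Test
public void testBaseRuleScanSketch() {
    // Hypothetical rules: a hot interval rule followed by a cold catch-all.
    final List<Rule> rules = ImmutableList.of(
        new IntervalLoadRule(Intervals.of("2018-01-01/P1M"), ImmutableMap.of("hot", 1)),
        new ForeverLoadRule(ImmutableMap.of("cold", 1))
    );
    final List<Interval> intervals = ImmutableList.of(
        // Inside the hot rule's interval.
        Intervals.of("2018-01-10/2018-01-11"),
        // Outside it, so the scan must advance to the forever rule.
        Intervals.of("2017-06-01/2017-06-02")
    );
    final DateTime now = DateTimes.nowUtc();
    int lastRulePosition = -1;
    LoadRule baseRule = null;
    for (Interval interval : intervals) {
        int currRulePosition = 0;
        for (Rule rule : rules) {
            if (rule instanceof LoadRule && currRulePosition > lastRulePosition && rule.appliesTo(interval, now)) {
                lastRulePosition = currRulePosition;
                baseRule = (LoadRule) rule;
                break;
            }
            currRulePosition++;
        }
    }
    // Only the forever rule can cover both intervals, so "cold" wins.
    Assert.assertTrue(baseRule instanceof ForeverLoadRule);
    Assert.assertTrue(baseRule.getTieredReplicants().containsKey("cold"));
}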

Example 15 with Rule

Use of org.apache.druid.server.coordinator.rules.Rule in project druid by druid-io.

The class DruidCoordinatorTest, method testCoordinatorTieredRun:

@Test(timeout = 60_000L)
public void testCoordinatorTieredRun() throws Exception {
    final String dataSource = "dataSource", hotTierName = "hot", coldTierName = "cold";
    final Rule hotTier = new IntervalLoadRule(Intervals.of("2018-01-01/P1M"), ImmutableMap.of(hotTierName, 1));
    final Rule coldTier = new ForeverLoadRule(ImmutableMap.of(coldTierName, 1));
    final String loadPathCold = "/druid/loadqueue/cold:1234";
    final DruidServer hotServer = new DruidServer("hot", "hot", null, 5L, ServerType.HISTORICAL, hotTierName, 0);
    final DruidServer coldServer = new DruidServer("cold", "cold", null, 5L, ServerType.HISTORICAL, coldTierName, 0);
    final Map<String, DataSegment> dataSegments = ImmutableMap.of(
        "2018-01-02T00:00:00.000Z_2018-01-03T00:00:00.000Z",
        new DataSegment(dataSource, Intervals.of("2018-01-02/P1D"), "v1", null, null, null, null, 0x9, 0),
        "2018-01-03T00:00:00.000Z_2018-01-04T00:00:00.000Z",
        new DataSegment(dataSource, Intervals.of("2018-01-03/P1D"), "v1", null, null, null, null, 0x9, 0),
        "2017-01-01T00:00:00.000Z_2017-01-02T00:00:00.000Z",
        new DataSegment(dataSource, Intervals.of("2017-01-01/P1D"), "v1", null, null, null, null, 0x9, 0)
    );
    final LoadQueuePeon loadQueuePeonCold = new CuratorLoadQueuePeon(
        curator,
        loadPathCold,
        objectMapper,
        Execs.scheduledSingleThreaded("coordinator_test_load_queue_peon_cold_scheduled-%d"),
        Execs.singleThreaded("coordinator_test_load_queue_peon_cold-%d"),
        druidCoordinatorConfig
    );
    final PathChildrenCache pathChildrenCacheCold = new PathChildrenCache(
        curator,
        loadPathCold,
        true,
        true,
        Execs.singleThreaded("coordinator_test_path_children_cache_cold-%d")
    );
    loadManagementPeons.putAll(ImmutableMap.of("hot", loadQueuePeon, "cold", loadQueuePeonCold));
    loadQueuePeonCold.start();
    pathChildrenCache.start();
    pathChildrenCacheCold.start();
    DruidDataSource[] druidDataSources = { new DruidDataSource(dataSource, Collections.emptyMap()) };
    dataSegments.values().forEach(druidDataSources[0]::addSegment);
    setupSegmentsMetadataMock(druidDataSources[0]);
    EasyMock.expect(metadataRuleManager.getRulesWithDefault(EasyMock.anyString())).andReturn(ImmutableList.of(hotTier, coldTier)).atLeastOnce();
    EasyMock.expect(metadataRuleManager.getAllRules()).andReturn(ImmutableMap.of(dataSource, ImmutableList.of(hotTier, coldTier))).atLeastOnce();
    EasyMock.expect(serverInventoryView.getInventory()).andReturn(ImmutableList.of(hotServer, coldServer)).atLeastOnce();
    EasyMock.expect(serverInventoryView.isStarted()).andReturn(true).anyTimes();
    EasyMock.replay(metadataRuleManager, serverInventoryView);
    coordinator.start();
    // Wait for this coordinator to become leader
    leaderAnnouncerLatch.await();
    final CountDownLatch assignSegmentLatchHot = createCountDownLatchAndSetPathChildrenCacheListenerWithLatch(2, pathChildrenCache, dataSegments, hotServer);
    final CountDownLatch assignSegmentLatchCold = createCountDownLatchAndSetPathChildrenCacheListenerWithLatch(1, pathChildrenCacheCold, dataSegments, coldServer);
    assignSegmentLatchHot.await();
    assignSegmentLatchCold.await();
    final CountDownLatch coordinatorRunLatch = new CountDownLatch(2);
    serviceEmitter.latch = coordinatorRunLatch;
    coordinatorRunLatch.await();
    Assert.assertEquals(ImmutableMap.of(dataSource, 100.0), coordinator.getLoadStatus());
    Map<String, Object2LongMap<String>> underReplicationCountsPerDataSourcePerTier = coordinator.computeUnderReplicationCountsPerDataSourcePerTier();
    Assert.assertEquals(2, underReplicationCountsPerDataSourcePerTier.size());
    Assert.assertEquals(0L, underReplicationCountsPerDataSourcePerTier.get(hotTierName).getLong(dataSource));
    Assert.assertEquals(0L, underReplicationCountsPerDataSourcePerTier.get(coldTierName).getLong(dataSource));
    Map<String, Object2LongMap<String>> underReplicationCountsPerDataSourcePerTierUsingClusterView = coordinator.computeUnderReplicationCountsPerDataSourcePerTierUsingClusterView();
    Assert.assertEquals(2, underReplicationCountsPerDataSourcePerTierUsingClusterView.size());
    Assert.assertEquals(0L, underReplicationCountsPerDataSourcePerTierUsingClusterView.get(hotTierName).getLong(dataSource));
    Assert.assertEquals(0L, underReplicationCountsPerDataSourcePerTierUsingClusterView.get(coldTierName).getLong(dataSource));
    coordinator.stop();
    leaderUnannouncerLatch.await();
    EasyMock.verify(serverInventoryView);
    EasyMock.verify(segmentsMetadataManager);
    EasyMock.verify(metadataRuleManager);
}
Also used : IntervalLoadRule(org.apache.druid.server.coordinator.rules.IntervalLoadRule) Object2LongMap(it.unimi.dsi.fastutil.objects.Object2LongMap) DruidServer(org.apache.druid.client.DruidServer) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) CountDownLatch(java.util.concurrent.CountDownLatch) DataSegment(org.apache.druid.timeline.DataSegment) DruidDataSource(org.apache.druid.client.DruidDataSource) ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource) PathChildrenCache(org.apache.curator.framework.recipes.cache.PathChildrenCache) ForeverLoadRule(org.apache.druid.server.coordinator.rules.ForeverLoadRule) ForeverBroadcastDistributionRule(org.apache.druid.server.coordinator.rules.ForeverBroadcastDistributionRule) Rule(org.apache.druid.server.coordinator.rules.Rule) Test(org.junit.Test)
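
Why do the latches above expect two hot assignments but only one cold assignment, even though the cold rule is a ForeverLoadRule? Coordinator rules are evaluated first-match per segment: the two January 2018 segments are claimed by the hot interval rule before the cold rule is consulted, and only the 2017 segment falls through. A small hedged sketch (reusing the same rules, not part of the test) that makes the split visible:

// Hypothetical illustration of the first-match split behind the latch counts above.
Rule hotRule = new IntervalLoadRule(Intervals.of("2018-01-01/P1M"), ImmutableMap.of("hot", 1));
Rule coldRule = new ForeverLoadRule(ImmutableMap.of("cold", 1));
DateTime now = DateTimes.nowUtc();
// The two 2018-01 segments match the hot rule first...
Assert.assertTrue(hotRule.appliesTo(Intervals.of("2018-01-02/P1D"), now));
Assert.assertTrue(hotRule.appliesTo(Intervals.of("2018-01-03/P1D"), now));
// ...while the 2017 segment skips it and lands on the cold forever rule.
Assert.assertFalse(hotRule.appliesTo(Intervals.of("2017-01-01/P1D"), now));
Assert.assertTrue(coldRule.appliesTo(Intervals.of("2017-01-01/P1D"), now));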

Aggregations

Usage counts for the types appearing across these examples:

Rule (org.apache.druid.server.coordinator.rules.Rule) 16
Test (org.junit.Test) 11
IntervalLoadRule (org.apache.druid.server.coordinator.rules.IntervalLoadRule) 10
DataSegment (org.apache.druid.timeline.DataSegment) 7
AuditInfo (org.apache.druid.audit.AuditInfo) 6
ImmutableDruidDataSource (org.apache.druid.client.ImmutableDruidDataSource) 5
ImmutableList (com.google.common.collect.ImmutableList) 4
Object2LongMap (it.unimi.dsi.fastutil.objects.Object2LongMap) 4
List (java.util.List) 4
ImmutableDruidServer (org.apache.druid.client.ImmutableDruidServer) 4
ForeverLoadRule (org.apache.druid.server.coordinator.rules.ForeverLoadRule) 4
DateTime (org.joda.time.DateTime) 4
CountDownLatch (java.util.concurrent.CountDownLatch) 3
DruidDataSource (org.apache.druid.client.DruidDataSource) 3
DruidServer (org.apache.druid.client.DruidServer) 3
ForeverBroadcastDistributionRule (org.apache.druid.server.coordinator.rules.ForeverBroadcastDistributionRule) 3
LoadRule (org.apache.druid.server.coordinator.rules.LoadRule) 3
Interval (org.joda.time.Interval) 3
TypeReference (com.fasterxml.jackson.core.type.TypeReference) 2
ArrayList (java.util.ArrayList) 2