Example 6 with Rule

use of org.apache.druid.server.coordinator.rules.Rule in project druid by druid-io.

In class DataSourcesResource, method isHandOffComplete:

/**
 * Used by the realtime tasks to learn whether a segment is handed off or not.
 * It returns true when the segment will never be handed off or is already handed off. Otherwise, it returns false.
 */
@GET
@Path("/{dataSourceName}/handoffComplete")
@Produces(MediaType.APPLICATION_JSON)
@ResourceFilters(DatasourceResourceFilter.class)
public Response isHandOffComplete(
        @PathParam("dataSourceName") String dataSourceName,
        @QueryParam("interval") final String interval,
        @QueryParam("partitionNumber") final int partitionNumber,
        @QueryParam("version") final String version
) {
    try {
        final List<Rule> rules = metadataRuleManager.getRulesWithDefault(dataSourceName);
        final Interval theInterval = Intervals.of(interval);
        final SegmentDescriptor descriptor = new SegmentDescriptor(theInterval, version, partitionNumber);
        final DateTime now = DateTimes.nowUtc();
        // "dropped" means the segment will never be handed off, i.e., handoff is already complete.
        // Initialized to true; reset to false only if this segment can be loaded by rules.
        boolean dropped = true;
        for (Rule rule : rules) {
            if (rule.appliesTo(theInterval, now)) {
                if (rule instanceof LoadRule) {
                    dropped = false;
                }
                break;
            }
        }
        if (dropped) {
            return Response.ok(true).build();
        }
        TimelineLookup<String, SegmentLoadInfo> timeline = serverInventoryView.getTimeline(new TableDataSource(dataSourceName));
        if (timeline == null) {
            log.debug("No timeline found for datasource[%s]", dataSourceName);
            return Response.ok(false).build();
        }
        Iterable<ImmutableSegmentLoadInfo> servedSegmentsInInterval = prepareServedSegmentsInInterval(timeline, theInterval);
        if (isSegmentLoaded(servedSegmentsInInterval, descriptor)) {
            return Response.ok(true).build();
        }
        return Response.ok(false).build();
    } catch (Exception e) {
        log.error(e, "Error while handling hand off check request");
        return Response.serverError().entity(ImmutableMap.of("error", e.toString())).build();
    }
}
Also used: SegmentLoadInfo (org.apache.druid.client.SegmentLoadInfo), ImmutableSegmentLoadInfo (org.apache.druid.client.ImmutableSegmentLoadInfo), DateTime (org.joda.time.DateTime), UnknownSegmentIdsException (org.apache.druid.metadata.UnknownSegmentIdsException), TableDataSource (org.apache.druid.query.TableDataSource), SegmentDescriptor (org.apache.druid.query.SegmentDescriptor), LoadRule (org.apache.druid.server.coordinator.rules.LoadRule), Rule (org.apache.druid.server.coordinator.rules.Rule), Interval (org.joda.time.Interval), Path (javax.ws.rs.Path), ResourceFilters (com.sun.jersey.spi.container.ResourceFilters), Produces (javax.ws.rs.Produces), GET (javax.ws.rs.GET)
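
The "dropped" computation above is simple first-match logic over the rule chain and is easy to test in isolation. A minimal sketch (the helper name willNeverBeLoaded is hypothetical; Rule, LoadRule, and the appliesTo overload are the same APIs the example already uses):

import java.util.List;
import org.apache.druid.server.coordinator.rules.LoadRule;
import org.apache.druid.server.coordinator.rules.Rule;
import org.joda.time.DateTime;
import org.joda.time.Interval;

// Hypothetical helper mirroring the "dropped" flag computed above.
static boolean willNeverBeLoaded(List<Rule> rules, Interval theInterval, DateTime now) {
    for (Rule rule : rules) {
        if (rule.appliesTo(theInterval, now)) {
            // Only the first matching rule counts: if it is not a LoadRule,
            // the segment will never be loaded, so handoff is trivially complete.
            return !(rule instanceof LoadRule);
        }
    }
    // No rule matched at all, so nothing will ever load the segment.
    return true;
}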

Example 7 with Rule

use of org.apache.druid.server.coordinator.rules.Rule in project druid by druid-io.

In class DruidCoordinator, method computeUnderReplicationCountsPerDataSourcePerTierForSegmentsInternal:

private Map<String, Object2LongMap<String>> computeUnderReplicationCountsPerDataSourcePerTierForSegmentsInternal(Iterable<DataSegment> dataSegments, boolean computeUsingClusterView) {
    final Map<String, Object2LongMap<String>> underReplicationCountsPerDataSourcePerTier = new HashMap<>();
    if (segmentReplicantLookup == null) {
        return underReplicationCountsPerDataSourcePerTier;
    }
    if (computeUsingClusterView && cluster == null) {
        throw new ServiceUnavailableException("coordinator hasn't populated information about cluster yet, try again later");
    }
    final DateTime now = DateTimes.nowUtc();
    for (final DataSegment segment : dataSegments) {
        final List<Rule> rules = metadataRuleManager.getRulesWithDefault(segment.getDataSource());
        for (final Rule rule : rules) {
            if (!rule.appliesTo(segment, now)) {
                // Rule did not match. Continue to the next Rule.
                continue;
            }
            if (!rule.canLoadSegments()) {
                // Rule matched, but this type of rule does not load segments.
                // Hence, there is no need to update the underReplicationCountsPerDataSourcePerTier map.
                break;
            }
            if (computeUsingClusterView) {
                rule.updateUnderReplicatedWithClusterView(underReplicationCountsPerDataSourcePerTier, segmentReplicantLookup, cluster, segment);
            } else {
                rule.updateUnderReplicated(underReplicationCountsPerDataSourcePerTier, segmentReplicantLookup, segment);
            }
            // Only the first matching rule applies: the Coordinator cycles through all used segments
            // and matches each segment with the first rule that applies. Each segment may only match a single rule.
            break;
        }
    }
    return underReplicationCountsPerDataSourcePerTier;
}
Also used: Object2LongMap (it.unimi.dsi.fastutil.objects.Object2LongMap), Object2IntOpenHashMap (it.unimi.dsi.fastutil.objects.Object2IntOpenHashMap), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap), ServiceUnavailableException (org.apache.druid.server.initialization.jetty.ServiceUnavailableException), Rule (org.apache.druid.server.coordinator.rules.Rule), LoadRule (org.apache.druid.server.coordinator.rules.LoadRule), DataSegment (org.apache.druid.timeline.DataSegment), DateTime (org.joda.time.DateTime)
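
Callers receive a per-datasource, per-tier map from this method. As a usage sketch, the counts can be collapsed into one total per tier; the helper name is hypothetical, while object2LongEntrySet() and addTo() are standard fastutil APIs:

import it.unimi.dsi.fastutil.objects.Object2LongMap;
import it.unimi.dsi.fastutil.objects.Object2LongOpenHashMap;
import java.util.Map;

// Hypothetical consumer of the map returned above: sum the under-replicated
// counts across datasources to get one total per tier.
static Object2LongMap<String> totalUnderReplicatedPerTier(Map<String, Object2LongMap<String>> counts) {
    final Object2LongOpenHashMap<String> totals = new Object2LongOpenHashMap<>();
    for (Object2LongMap<String> perTier : counts.values()) {
        for (Object2LongMap.Entry<String> entry : perTier.object2LongEntrySet()) {
            // addTo() inserts the key with the given increment if it is absent.
            totals.addTo(entry.getKey(), entry.getLongValue());
        }
    }
    return totals;
}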

Example 8 with Rule

use of org.apache.druid.server.coordinator.rules.Rule in project druid by druid-io.

In class RunRules, method run:

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    replicatorThrottler.updateParams(coordinator.getDynamicConfigs().getReplicationThrottleLimit(), coordinator.getDynamicConfigs().getReplicantLifetime(), false);
    CoordinatorStats stats = new CoordinatorStats();
    DruidCluster cluster = params.getDruidCluster();
    if (cluster.isEmpty()) {
        log.warn("Uh... I have no servers. Not assigning anything...");
        return params;
    }
    // Get used segments which are overshadowed by other used segments. Those would not need to be loaded and
    // eventually will be unloaded from Historical servers. Segments overshadowed by *served* used segments are marked
    // as unused in MarkAsUnusedOvershadowedSegments, and then eventually Coordinator sends commands to Historical nodes
    // to unload such segments in UnloadUnusedSegments.
    Set<SegmentId> overshadowed = params.getDataSourcesSnapshot().getOvershadowedSegments();
    for (String tier : cluster.getTierNames()) {
        replicatorThrottler.updateReplicationState(tier);
    }
    DruidCoordinatorRuntimeParams paramsWithReplicationManager = params.buildFromExistingWithoutSegmentsMetadata().withReplicationManager(replicatorThrottler).build();
    // Run through all matched rules for used segments
    DateTime now = DateTimes.nowUtc();
    MetadataRuleManager databaseRuleManager = paramsWithReplicationManager.getDatabaseRuleManager();
    final List<SegmentId> segmentsWithMissingRules = Lists.newArrayListWithCapacity(MAX_MISSING_RULES);
    int missingRules = 0;
    final Set<String> broadcastDatasources = new HashSet<>();
    for (ImmutableDruidDataSource dataSource : params.getDataSourcesSnapshot().getDataSourcesMap().values()) {
        List<Rule> rules = databaseRuleManager.getRulesWithDefault(dataSource.getName());
        for (Rule rule : rules) {
            // A datasource counts as a broadcast datasource if any of its rules is a broadcast rule.
            // The resulting set is consumed by BalanceSegments, so it matters that RunRules
            // executes before BalanceSegments.
            if (rule instanceof BroadcastDistributionRule) {
                broadcastDatasources.add(dataSource.getName());
                break;
            }
        }
    }
    for (DataSegment segment : params.getUsedSegments()) {
        if (overshadowed.contains(segment.getId())) {
            // Skipping overshadowed segments
            continue;
        }
        List<Rule> rules = databaseRuleManager.getRulesWithDefault(segment.getDataSource());
        boolean foundMatchingRule = false;
        for (Rule rule : rules) {
            if (rule.appliesTo(segment, now)) {
                if (stats.getGlobalStat("totalNonPrimaryReplicantsLoaded") >= paramsWithReplicationManager.getCoordinatorDynamicConfig().getMaxNonPrimaryReplicantsToLoad()
                        && !paramsWithReplicationManager.getReplicationManager().isLoadPrimaryReplicantsOnly()) {
                    log.info(
                        "Maximum number of non-primary replicants [%d] have been loaded for the current RunRules execution. Only loading primary replicants from here on for this coordinator run cycle.",
                        paramsWithReplicationManager.getCoordinatorDynamicConfig().getMaxNonPrimaryReplicantsToLoad()
                    );
                    paramsWithReplicationManager.getReplicationManager().setLoadPrimaryReplicantsOnly(true);
                }
                stats.accumulate(rule.run(coordinator, paramsWithReplicationManager, segment));
                foundMatchingRule = true;
                break;
            }
        }
        if (!foundMatchingRule) {
            if (segmentsWithMissingRules.size() < MAX_MISSING_RULES) {
                segmentsWithMissingRules.add(segment.getId());
            }
            missingRules++;
        }
    }
    if (!segmentsWithMissingRules.isEmpty()) {
        log.makeAlert("Unable to find matching rules!")
           .addData("segmentsWithMissingRulesCount", missingRules)
           .addData("segmentsWithMissingRules", segmentsWithMissingRules)
           .emit();
    }
    return params.buildFromExisting().withCoordinatorStats(stats).withBroadcastDatasources(broadcastDatasources).build();
}
Also used: DruidCoordinatorRuntimeParams (org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams), CoordinatorStats (org.apache.druid.server.coordinator.CoordinatorStats), MetadataRuleManager (org.apache.druid.metadata.MetadataRuleManager), ImmutableDruidDataSource (org.apache.druid.client.ImmutableDruidDataSource), SegmentId (org.apache.druid.timeline.SegmentId), DruidCluster (org.apache.druid.server.coordinator.DruidCluster), DataSegment (org.apache.druid.timeline.DataSegment), DateTime (org.joda.time.DateTime), BroadcastDistributionRule (org.apache.druid.server.coordinator.rules.BroadcastDistributionRule), Rule (org.apache.druid.server.coordinator.rules.Rule), LoadRule (org.apache.druid.server.coordinator.rules.LoadRule), HashSet (java.util.HashSet)
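
The broadcast-datasource scan at the top of run() is plain first-match detection. Assuming the same params and databaseRuleManager as in the method above, a stream-based equivalent is a useful way to read it; anyMatch short-circuits exactly as the break in the original loop does:

import java.util.Set;
import java.util.stream.Collectors;
import org.apache.druid.client.ImmutableDruidDataSource;
import org.apache.druid.server.coordinator.rules.BroadcastDistributionRule;

// Sketch only: a datasource is a broadcast datasource if any of its rules
// is a BroadcastDistributionRule.
Set<String> broadcastDatasources = params.getDataSourcesSnapshot()
    .getDataSourcesMap()
    .values()
    .stream()
    .filter(ds -> databaseRuleManager.getRulesWithDefault(ds.getName())
                                     .stream()
                                     .anyMatch(rule -> rule instanceof BroadcastDistributionRule))
    .map(ImmutableDruidDataSource::getName)
    .collect(Collectors.toSet());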

Example 9 with Rule

use of org.apache.druid.server.coordinator.rules.Rule in project druid by druid-io.

In class SQLMetadataRuleManagerTest, method testRemoveRulesOlderThanWithNonExistenceDatasourceAndNewerThanTimestampShouldNotDelete:

@Test
public void testRemoveRulesOlderThanWithNonExistenceDatasourceAndNewerThanTimestampShouldNotDelete() {
    List<Rule> rules = ImmutableList.of(new IntervalLoadRule(Intervals.of("2015-01-01/2015-02-01"), ImmutableMap.of(DruidServer.DEFAULT_TIER, DruidServer.DEFAULT_NUM_REPLICANTS)));
    AuditInfo auditInfo = new AuditInfo("test_author", "test_comment", "127.0.0.1");
    ruleManager.overrideRule("test_dataSource", rules, auditInfo);
    // Verify that rule was added
    ruleManager.poll();
    Map<String, List<Rule>> allRules = ruleManager.getAllRules();
    Assert.assertEquals(1, allRules.size());
    Assert.assertEquals(1, allRules.get("test_dataSource").size());
    // This will not delete the rule: it was created just now, so its created timestamp is later
    // than the cutoff timestamp 2012-01-01T00:00:00Z.
    ruleManager.removeRulesForEmptyDatasourcesOlderThan(DateTimes.of("2012-01-01T00:00:00Z").getMillis());
    // Verify that rule was not deleted
    ruleManager.poll();
    allRules = ruleManager.getAllRules();
    Assert.assertEquals(1, allRules.size());
    Assert.assertEquals(1, allRules.get("test_dataSource").size());
}
Also used: AuditInfo (org.apache.druid.audit.AuditInfo), IntervalLoadRule (org.apache.druid.server.coordinator.rules.IntervalLoadRule), ImmutableList (com.google.common.collect.ImmutableList), List (java.util.List), Rule (org.apache.druid.server.coordinator.rules.Rule), Test (org.junit.Test)
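
For contrast, a hedged sketch of the opposite branch, assuming the same fixture and that "test_dataSource" has no used segments (which is what makes it "empty" for this method): a cutoff later than the rule's creation time should allow the deletion to happen.

// Sketch only, not copied from the Druid test suite. With a far-future cutoff,
// the just-created rule is older than the cutoff and its datasource is empty,
// so removeRulesForEmptyDatasourcesOlderThan should drop it.
ruleManager.removeRulesForEmptyDatasourcesOlderThan(DateTimes.of("2050-01-01T00:00:00Z").getMillis());
ruleManager.poll();
Assert.assertNull(ruleManager.getAllRules().get("test_dataSource"));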

Example 10 with Rule

use of org.apache.druid.server.coordinator.rules.Rule in project druid by druid-io.

In class SQLMetadataRuleManagerTest, method testAuditEntryCreated:

@Test
public void testAuditEntryCreated() throws Exception {
    List<Rule> rules = Collections.singletonList(new IntervalLoadRule(Intervals.of("2015-01-01/2015-02-01"), ImmutableMap.of(DruidServer.DEFAULT_TIER, DruidServer.DEFAULT_NUM_REPLICANTS)));
    AuditInfo auditInfo = new AuditInfo("test_author", "test_comment", "127.0.0.1");
    ruleManager.overrideRule("test_dataSource", rules, auditInfo);
    // fetch rules from metadata storage
    ruleManager.poll();
    Assert.assertEquals(rules, ruleManager.getRules("test_dataSource"));
    // verify audit entry is created
    List<AuditEntry> auditEntries = auditManager.fetchAuditHistory("test_dataSource", "rules", null);
    Assert.assertEquals(1, auditEntries.size());
    AuditEntry entry = auditEntries.get(0);
    Assert.assertEquals(rules, mapper.readValue(entry.getPayload(), new TypeReference<List<Rule>>() {
    }));
    Assert.assertEquals(auditInfo, entry.getAuditInfo());
    Assert.assertEquals("test_dataSource", entry.getKey());
}
Also used: AuditInfo (org.apache.druid.audit.AuditInfo), IntervalLoadRule (org.apache.druid.server.coordinator.rules.IntervalLoadRule), AuditEntry (org.apache.druid.audit.AuditEntry), Rule (org.apache.druid.server.coordinator.rules.Rule), TypeReference (com.fasterxml.jackson.core.type.TypeReference), Test (org.junit.Test)
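
The TypeReference matters here because Rule is a polymorphic Jackson type: deserializing into a bare List would lose the concrete subtypes such as IntervalLoadRule. A minimal round-trip sketch, assuming mapper is the same Druid-configured ObjectMapper the test uses:

// Serialize the rules to JSON and read them back; equality holds because the
// mapper carries the Rule subtype registrations and the rules implement equals().
String json = mapper.writeValueAsString(rules);
List<Rule> roundTripped = mapper.readValue(json, new TypeReference<List<Rule>>() {});
Assert.assertEquals(rules, roundTripped);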

Aggregations

Rule (org.apache.druid.server.coordinator.rules.Rule): 16
Test (org.junit.Test): 11
IntervalLoadRule (org.apache.druid.server.coordinator.rules.IntervalLoadRule): 10
DataSegment (org.apache.druid.timeline.DataSegment): 7
AuditInfo (org.apache.druid.audit.AuditInfo): 6
ImmutableDruidDataSource (org.apache.druid.client.ImmutableDruidDataSource): 5
ImmutableList (com.google.common.collect.ImmutableList): 4
Object2LongMap (it.unimi.dsi.fastutil.objects.Object2LongMap): 4
List (java.util.List): 4
ImmutableDruidServer (org.apache.druid.client.ImmutableDruidServer): 4
ForeverLoadRule (org.apache.druid.server.coordinator.rules.ForeverLoadRule): 4
DateTime (org.joda.time.DateTime): 4
CountDownLatch (java.util.concurrent.CountDownLatch): 3
DruidDataSource (org.apache.druid.client.DruidDataSource): 3
DruidServer (org.apache.druid.client.DruidServer): 3
ForeverBroadcastDistributionRule (org.apache.druid.server.coordinator.rules.ForeverBroadcastDistributionRule): 3
LoadRule (org.apache.druid.server.coordinator.rules.LoadRule): 3
Interval (org.joda.time.Interval): 3
TypeReference (com.fasterxml.jackson.core.type.TypeReference): 2
ArrayList (java.util.ArrayList): 2