Use of org.apache.druid.server.coordinator.rules.Rule in project druid by druid-io.
The class DataSourcesResource, method isHandOffComplete.
/**
 * Used by realtime tasks to learn whether a segment has been handed off or not.
 * Returns true when the segment will never be handed off or has already been
 * handed off; otherwise returns false.
 */
@GET
@Path("/{dataSourceName}/handoffComplete")
@Produces(MediaType.APPLICATION_JSON)
@ResourceFilters(DatasourceResourceFilter.class)
public Response isHandOffComplete(
    @PathParam("dataSourceName") String dataSourceName,
    @QueryParam("interval") final String interval,
    @QueryParam("partitionNumber") final int partitionNumber,
    @QueryParam("version") final String version
)
{
  try {
    final List<Rule> rules = metadataRuleManager.getRulesWithDefault(dataSourceName);
    final Interval theInterval = Intervals.of(interval);
    final SegmentDescriptor descriptor = new SegmentDescriptor(theInterval, version, partitionNumber);
    final DateTime now = DateTimes.nowUtc();

    // "dropped" means the segment will never be handed off, i.e., handoff is complete.
    // Initialize to true; reset to false only if this segment can be loaded by rules.
    boolean dropped = true;
    for (Rule rule : rules) {
      if (rule.appliesTo(theInterval, now)) {
        if (rule instanceof LoadRule) {
          dropped = false;
        }
        break;
      }
    }
    if (dropped) {
      return Response.ok(true).build();
    }

    TimelineLookup<String, SegmentLoadInfo> timeline =
        serverInventoryView.getTimeline(new TableDataSource(dataSourceName));
    if (timeline == null) {
      log.debug("No timeline found for datasource[%s]", dataSourceName);
      return Response.ok(false).build();
    }

    Iterable<ImmutableSegmentLoadInfo> servedSegmentsInInterval =
        prepareServedSegmentsInInterval(timeline, theInterval);
    if (isSegmentLoaded(servedSegmentsInInterval, descriptor)) {
      return Response.ok(true).build();
    }
    return Response.ok(false).build();
  }
  catch (Exception e) {
    log.error(e, "Error while handling hand off check request");
    return Response.serverError().entity(ImmutableMap.of("error", e.toString())).build();
  }
}
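For context, here is a minimal sketch of how a client might call this endpoint over HTTP using java.net.http from the JDK (Java 11+). The Coordinator address, the /druid/coordinator/v1/datasources mount path, and the datasource name and segment coordinates are assumptions for illustration, not taken from the snippet above.

import java.net.URI;
import java.net.URLEncoder;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.charset.StandardCharsets;

public class HandoffCheckExample
{
  public static void main(String[] args) throws Exception
  {
    // Assumed Coordinator location and resource mount path; adjust for your cluster.
    String base = "http://coordinator:8081/druid/coordinator/v1/datasources";
    String dataSource = "wikipedia";
    String interval = URLEncoder.encode("2015-01-01/2015-02-01", StandardCharsets.UTF_8);
    String version = URLEncoder.encode("2015-01-01T00:00:00.000Z", StandardCharsets.UTF_8);
    int partitionNumber = 0;

    URI uri = URI.create(
        base + "/" + dataSource + "/handoffComplete"
        + "?interval=" + interval
        + "&partitionNumber=" + partitionNumber
        + "&version=" + version
    );

    HttpClient client = HttpClient.newHttpClient();
    HttpResponse<String> response =
        client.send(HttpRequest.newBuilder(uri).GET().build(), HttpResponse.BodyHandlers.ofString());

    // The endpoint returns a JSON boolean: true once handoff is complete (or will never happen).
    System.out.println("handoff complete: " + response.body());
  }
}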
Use of org.apache.druid.server.coordinator.rules.Rule in project druid by druid-io.
The class DruidCoordinator, method computeUnderReplicationCountsPerDataSourcePerTierForSegmentsInternal.
private Map<String, Object2LongMap<String>> computeUnderReplicationCountsPerDataSourcePerTierForSegmentsInternal(
    Iterable<DataSegment> dataSegments,
    boolean computeUsingClusterView
)
{
  final Map<String, Object2LongMap<String>> underReplicationCountsPerDataSourcePerTier = new HashMap<>();
  if (segmentReplicantLookup == null) {
    return underReplicationCountsPerDataSourcePerTier;
  }
  if (computeUsingClusterView && cluster == null) {
    throw new ServiceUnavailableException(
        "coordinator hasn't populated information about cluster yet, try again later"
    );
  }
  final DateTime now = DateTimes.nowUtc();
  for (final DataSegment segment : dataSegments) {
    final List<Rule> rules = metadataRuleManager.getRulesWithDefault(segment.getDataSource());
    for (final Rule rule : rules) {
      if (!rule.appliesTo(segment, now)) {
        // Rule did not match. Continue to the next rule.
        continue;
      }
      if (!rule.canLoadSegments()) {
        // The matching rule does not load segments, so there is nothing to add to
        // the underReplicationCountsPerDataSourcePerTier map.
        break;
      }
      if (computeUsingClusterView) {
        rule.updateUnderReplicatedWithClusterView(
            underReplicationCountsPerDataSourcePerTier,
            segmentReplicantLookup,
            cluster,
            segment
        );
      } else {
        rule.updateUnderReplicated(underReplicationCountsPerDataSourcePerTier, segmentReplicantLookup, segment);
      }
      // Only the first rule that applies is evaluated; each segment may match at most one rule.
      break;
    }
  }
  return underReplicationCountsPerDataSourcePerTier;
}
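Both methods above rely on the same convention: rule lists are ordered, and only the first rule whose appliesTo returns true governs a segment. Below is a minimal, self-contained sketch of that first-match resolution. Rule construction follows the test snippets later in this page; the intervals, tier name, and replica count are illustrative values, not cluster defaults.

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.apache.druid.java.util.common.DateTimes;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.server.coordinator.rules.IntervalDropRule;
import org.apache.druid.server.coordinator.rules.IntervalLoadRule;
import org.apache.druid.server.coordinator.rules.LoadRule;
import org.apache.druid.server.coordinator.rules.Rule;
import org.joda.time.DateTime;
import org.joda.time.Interval;

import java.util.List;

public class FirstMatchingRuleExample
{
  public static void main(String[] args)
  {
    // Illustrative rule chain: load 2015 data at two replicas, drop anything older.
    List<Rule> rules = ImmutableList.of(
        new IntervalLoadRule(Intervals.of("2015-01-01/2016-01-01"), ImmutableMap.of("_default_tier", 2)),
        new IntervalDropRule(Intervals.of("2010-01-01/2015-01-01"))
    );

    Interval segmentInterval = Intervals.of("2015-06-01/2015-06-02");
    DateTime now = DateTimes.nowUtc();

    // First-match-wins: stop at the first rule that applies, exactly as the
    // Coordinator code above does.
    for (Rule rule : rules) {
      if (rule.appliesTo(segmentInterval, now)) {
        System.out.println(
            "matched " + rule.getClass().getSimpleName()
            + "; loadable=" + (rule instanceof LoadRule)
        );
        break;
      }
    }
  }
}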
Use of org.apache.druid.server.coordinator.rules.Rule in project druid by druid-io.
The class RunRules, method run.
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params)
{
  replicatorThrottler.updateParams(
      coordinator.getDynamicConfigs().getReplicationThrottleLimit(),
      coordinator.getDynamicConfigs().getReplicantLifetime(),
      false
  );
  CoordinatorStats stats = new CoordinatorStats();
  DruidCluster cluster = params.getDruidCluster();
  if (cluster.isEmpty()) {
    log.warn("Uh... I have no servers. Not assigning anything...");
    return params;
  }

  // Get used segments which are overshadowed by other used segments. Those do not need to be loaded and
  // will eventually be unloaded from Historical servers. Segments overshadowed by *served* used segments are
  // marked as unused in MarkAsUnusedOvershadowedSegments, and then the Coordinator eventually sends commands
  // to Historical nodes to unload such segments in UnloadUnusedSegments.
  Set<SegmentId> overshadowed = params.getDataSourcesSnapshot().getOvershadowedSegments();

  for (String tier : cluster.getTierNames()) {
    replicatorThrottler.updateReplicationState(tier);
  }

  DruidCoordinatorRuntimeParams paramsWithReplicationManager = params
      .buildFromExistingWithoutSegmentsMetadata()
      .withReplicationManager(replicatorThrottler)
      .build();

  // Run through all matched rules for used segments
  DateTime now = DateTimes.nowUtc();
  MetadataRuleManager databaseRuleManager = paramsWithReplicationManager.getDatabaseRuleManager();
  final List<SegmentId> segmentsWithMissingRules = Lists.newArrayListWithCapacity(MAX_MISSING_RULES);
  int missingRules = 0;

  final Set<String> broadcastDatasources = new HashSet<>();
  for (ImmutableDruidDataSource dataSource : params.getDataSourcesSnapshot().getDataSourcesMap().values()) {
    List<Rule> rules = databaseRuleManager.getRulesWithDefault(dataSource.getName());
    for (Rule rule : rules) {
      // A datasource with any broadcast rule is a broadcast datasource. The set of broadcast
      // datasources is used by BalanceSegments, so RunRules must execute before BalanceSegments.
      if (rule instanceof BroadcastDistributionRule) {
        broadcastDatasources.add(dataSource.getName());
        break;
      }
    }
  }

  for (DataSegment segment : params.getUsedSegments()) {
    if (overshadowed.contains(segment.getId())) {
      // Skip overshadowed segments
      continue;
    }
    List<Rule> rules = databaseRuleManager.getRulesWithDefault(segment.getDataSource());
    boolean foundMatchingRule = false;
    for (Rule rule : rules) {
      if (rule.appliesTo(segment, now)) {
        if (stats.getGlobalStat("totalNonPrimaryReplicantsLoaded")
            >= paramsWithReplicationManager.getCoordinatorDynamicConfig().getMaxNonPrimaryReplicantsToLoad()
            && !paramsWithReplicationManager.getReplicationManager().isLoadPrimaryReplicantsOnly()) {
          log.info(
              "Maximum number of non-primary replicants [%d] have been loaded for the current RunRules"
              + " execution. Only loading primary replicants from here on for this coordinator run cycle.",
              paramsWithReplicationManager.getCoordinatorDynamicConfig().getMaxNonPrimaryReplicantsToLoad()
          );
          paramsWithReplicationManager.getReplicationManager().setLoadPrimaryReplicantsOnly(true);
        }
        stats.accumulate(rule.run(coordinator, paramsWithReplicationManager, segment));
        foundMatchingRule = true;
        break;
      }
    }

    if (!foundMatchingRule) {
      if (segmentsWithMissingRules.size() < MAX_MISSING_RULES) {
        segmentsWithMissingRules.add(segment.getId());
      }
      missingRules++;
    }
  }

  if (!segmentsWithMissingRules.isEmpty()) {
    log.makeAlert("Unable to find matching rules!")
       .addData("segmentsWithMissingRulesCount", missingRules)
       .addData("segmentsWithMissingRules", segmentsWithMissingRules)
       .emit();
  }

  return params.buildFromExisting()
               .withCoordinatorStats(stats)
               .withBroadcastDatasources(broadcastDatasources)
               .build();
}
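The rule lists that getRulesWithDefault returns are typically configured through the Coordinator's rules API as a JSON array, evaluated in order under the same first-match semantics. A hedged sketch of posting such an override follows, assuming a Coordinator at http://coordinator:8081 serving the /druid/coordinator/v1/rules/{dataSource} endpoint; the datasource name, tier, replica count, and interval are made-up example values.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class OverrideRulesExample
{
  public static void main(String[] args) throws Exception
  {
    // Assumed Coordinator address; adjust for your cluster.
    URI uri = URI.create("http://coordinator:8081/druid/coordinator/v1/rules/wikipedia");

    // Keep one month loaded at two replicas in the default tier, drop everything else.
    // Rules are evaluated top to bottom; the first match wins.
    String rules = "["
        + "{\"type\":\"loadByInterval\",\"interval\":\"2015-01-01/2015-02-01\","
        + "\"tieredReplicants\":{\"_default_tier\":2}},"
        + "{\"type\":\"dropForever\"}"
        + "]";

    HttpRequest request = HttpRequest.newBuilder(uri)
        .header("Content-Type", "application/json")
        .POST(HttpRequest.BodyPublishers.ofString(rules))
        .build();

    HttpResponse<String> response = HttpClient.newHttpClient()
        .send(request, HttpResponse.BodyHandlers.ofString());
    System.out.println("status: " + response.statusCode());
  }
}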
Use of org.apache.druid.server.coordinator.rules.Rule in project druid by druid-io.
The class SQLMetadataRuleManagerTest, method testRemoveRulesOlderThanWithNonExistenceDatasourceAndNewerThanTimestampShouldNotDelete.
@Test
public void testRemoveRulesOlderThanWithNonExistenceDatasourceAndNewerThanTimestampShouldNotDelete()
{
  List<Rule> rules = ImmutableList.of(
      new IntervalLoadRule(
          Intervals.of("2015-01-01/2015-02-01"),
          ImmutableMap.of(DruidServer.DEFAULT_TIER, DruidServer.DEFAULT_NUM_REPLICANTS)
      )
  );
  AuditInfo auditInfo = new AuditInfo("test_author", "test_comment", "127.0.0.1");
  ruleManager.overrideRule("test_dataSource", rules, auditInfo);

  // Verify that the rule was added
  ruleManager.poll();
  Map<String, List<Rule>> allRules = ruleManager.getAllRules();
  Assert.assertEquals(1, allRules.size());
  Assert.assertEquals(1, allRules.get("test_dataSource").size());

  // This should not delete the rule: it was created just now, so its created timestamp
  // is later than the cutoff 2012-01-01T00:00:00Z.
  ruleManager.removeRulesForEmptyDatasourcesOlderThan(DateTimes.of("2012-01-01T00:00:00Z").getMillis());

  // Verify that the rule was not deleted
  ruleManager.poll();
  allRules = ruleManager.getAllRules();
  Assert.assertEquals(1, allRules.size());
  Assert.assertEquals(1, allRules.get("test_dataSource").size());
}
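Outside of tests, the cutoff passed to removeRulesForEmptyDatasourcesOlderThan would usually be computed relative to now rather than hard-coded. A small sketch under that assumption, with a hypothetical 90-day retention period:

import org.apache.druid.java.util.common.DateTimes;
import org.apache.druid.metadata.MetadataRuleManager;
import org.joda.time.Period;

public class RuleCleanupExample
{
  // Rules belonging to datasources with no used segments, created before the cutoff,
  // become eligible for deletion. The 90-day period here is an illustrative assumption.
  static void cleanUpStaleRules(MetadataRuleManager ruleManager)
  {
    long cutoffMillis = DateTimes.nowUtc().minus(Period.days(90)).getMillis();
    ruleManager.removeRulesForEmptyDatasourcesOlderThan(cutoffMillis);
  }
}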
Use of org.apache.druid.server.coordinator.rules.Rule in project druid by druid-io.
The class SQLMetadataRuleManagerTest, method testAuditEntryCreated.
@Test
public void testAuditEntryCreated() throws Exception
{
  List<Rule> rules = Collections.singletonList(
      new IntervalLoadRule(
          Intervals.of("2015-01-01/2015-02-01"),
          ImmutableMap.of(DruidServer.DEFAULT_TIER, DruidServer.DEFAULT_NUM_REPLICANTS)
      )
  );
  AuditInfo auditInfo = new AuditInfo("test_author", "test_comment", "127.0.0.1");
  ruleManager.overrideRule("test_dataSource", rules, auditInfo);

  // Fetch rules from metadata storage
  ruleManager.poll();
  Assert.assertEquals(rules, ruleManager.getRules("test_dataSource"));

  // Verify that an audit entry was created
  List<AuditEntry> auditEntries = auditManager.fetchAuditHistory("test_dataSource", "rules", null);
  Assert.assertEquals(1, auditEntries.size());
  AuditEntry entry = auditEntries.get(0);
  Assert.assertEquals(
      rules,
      mapper.readValue(entry.getPayload(), new TypeReference<List<Rule>>() {})
  );
  Assert.assertEquals(auditInfo, entry.getAuditInfo());
  Assert.assertEquals("test_dataSource", entry.getKey());
}