Example 6 with DruidCoordinatorRuntimeParams

Use of org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams in project druid by druid-io.

From class KillCompactionConfig, method run().

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    long currentTimeMillis = System.currentTimeMillis();
    if ((lastKillTime + period) < currentTimeMillis) {
        lastKillTime = currentTimeMillis;
        try {
            RetryUtils.retry(() -> {
                final byte[] currentBytes = CoordinatorCompactionConfig.getConfigInByteFromDb(connector, connectorConfig);
                final CoordinatorCompactionConfig current = CoordinatorCompactionConfig.convertByteToConfig(jacksonConfigManager, currentBytes);
                // If current compaction config is empty then there is nothing to do
                if (CoordinatorCompactionConfig.empty().equals(current)) {
                    log.info("Finished running KillCompactionConfig duty. Nothing to do as compaction config is already empty.");
                    emitMetric(params.getEmitter(), 0);
                    return ConfigManager.SetResult.ok();
                }
                // Get all active datasources
                // Note that we get all active datasources after getting compaction config to prevent race condition if new
                // datasource and config are added.
                Set<String> activeDatasources = sqlSegmentsMetadataManager.retrieveAllDataSourceNames();
                final Map<String, DataSourceCompactionConfig> updated = current
                    .getCompactionConfigs()
                    .stream()
                    .filter(dataSourceCompactionConfig -> activeDatasources.contains(dataSourceCompactionConfig.getDataSource()))
                    .collect(Collectors.toMap(DataSourceCompactionConfig::getDataSource, Function.identity()));
                // Calculate number of compaction configs to remove for logging
                int compactionConfigRemoved = current.getCompactionConfigs().size() - updated.size();
                ConfigManager.SetResult result = jacksonConfigManager.set(
                    CoordinatorCompactionConfig.CONFIG_KEY,
                    currentBytes,
                    CoordinatorCompactionConfig.from(current, ImmutableList.copyOf(updated.values())),
                    new AuditInfo("KillCompactionConfig", "CoordinatorDuty for automatic deletion of compaction config", "")
                );
                if (result.isOk()) {
                    log.info("Finished running KillCompactionConfig duty. Removed %,d compaction configs", compactionConfigRemoved);
                    emitMetric(params.getEmitter(), compactionConfigRemoved);
                } else if (result.isRetryable()) {
                    // Failed but is retryable
                    log.debug("Retrying KillCompactionConfig duty");
                    throw new RetryableException(result.getException());
                } else {
                    // Failed and not retryable
                    log.error(result.getException(), "Failed to kill compaction configurations");
                    emitMetric(params.getEmitter(), 0);
                }
                return result;
            }, e -> e instanceof RetryableException, UPDATE_NUM_RETRY);
        } catch (Exception e) {
            log.error(e, "Failed to kill compaction configurations");
            emitMetric(params.getEmitter(), 0);
        }
    }
    return params;
}
Also used: Logger(org.apache.druid.java.util.common.logger.Logger), AuditInfo(org.apache.druid.audit.AuditInfo), MetadataStorageTablesConfig(org.apache.druid.metadata.MetadataStorageTablesConfig), ServiceMetricEvent(org.apache.druid.java.util.emitter.service.ServiceMetricEvent), Inject(com.google.inject.Inject), CoordinatorCompactionConfig(org.apache.druid.server.coordinator.CoordinatorCompactionConfig), DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams), Set(java.util.Set), JacksonConfigManager(org.apache.druid.common.config.JacksonConfigManager), RetryableException(org.apache.druid.java.util.RetryableException), Function(java.util.function.Function), Collectors(java.util.stream.Collectors), DataSourceCompactionConfig(org.apache.druid.server.coordinator.DataSourceCompactionConfig), MetadataStorageConnector(org.apache.druid.metadata.MetadataStorageConnector), ImmutableList(com.google.common.collect.ImmutableList), ConfigManager(org.apache.druid.common.config.ConfigManager), ServiceEmitter(org.apache.druid.java.util.emitter.service.ServiceEmitter), DruidCoordinatorConfig(org.apache.druid.server.coordinator.DruidCoordinatorConfig), Map(java.util.Map), Preconditions(com.google.common.base.Preconditions), SqlSegmentsMetadataManager(org.apache.druid.metadata.SqlSegmentsMetadataManager), RetryUtils(org.apache.druid.java.util.common.RetryUtils)
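
The duty wraps the whole read-modify-write in RetryUtils.retry so that a concurrent config update, surfaced as a RetryableException, triggers a re-read rather than a hard failure. Below is a minimal standalone sketch of that retry pattern using the same three-argument RetryUtils.retry overload the duty calls; the flakyUpdate() helper is hypothetical and merely stands in for the config-manager compare-and-set.

import org.apache.druid.java.util.common.RetryUtils;

public class RetrySketch {
    private static int attempts = 0;

    // Hypothetical operation that fails transiently on its first two calls,
    // standing in for jacksonConfigManager.set(...) returning a retryable result.
    private static String flakyUpdate() {
        if (++attempts < 3) {
            throw new IllegalStateException("transient conflict, attempt " + attempts);
        }
        return "ok after " + attempts + " attempts";
    }

    public static void main(String[] args) throws Exception {
        // Retry while the failure looks transient, up to 5 tries, mirroring
        // the e -> e instanceof RetryableException predicate used by the duty.
        // RetryUtils sleeps between attempts.
        String result = RetryUtils.retry(
            RetrySketch::flakyUpdate,
            e -> e instanceof IllegalStateException,
            5
        );
        System.out.println(result);
    }
}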

Example 7 with DruidCoordinatorRuntimeParams

Use of org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams in project druid by druid-io.

From class RunRules, method run().

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    replicatorThrottler.updateParams(
        coordinator.getDynamicConfigs().getReplicationThrottleLimit(),
        coordinator.getDynamicConfigs().getReplicantLifetime(),
        false
    );
    CoordinatorStats stats = new CoordinatorStats();
    DruidCluster cluster = params.getDruidCluster();
    if (cluster.isEmpty()) {
        log.warn("Uh... I have no servers. Not assigning anything...");
        return params;
    }
    // Get used segments which are overshadowed by other used segments. Those would not need to be loaded and
    // eventually will be unloaded from Historical servers. Segments overshadowed by *served* used segments are marked
    // as unused in MarkAsUnusedOvershadowedSegments, and then eventually Coordinator sends commands to Historical nodes
    // to unload such segments in UnloadUnusedSegments.
    Set<SegmentId> overshadowed = params.getDataSourcesSnapshot().getOvershadowedSegments();
    for (String tier : cluster.getTierNames()) {
        replicatorThrottler.updateReplicationState(tier);
    }
    DruidCoordinatorRuntimeParams paramsWithReplicationManager = params
        .buildFromExistingWithoutSegmentsMetadata()
        .withReplicationManager(replicatorThrottler)
        .build();
    // Run through all matched rules for used segments
    DateTime now = DateTimes.nowUtc();
    MetadataRuleManager databaseRuleManager = paramsWithReplicationManager.getDatabaseRuleManager();
    final List<SegmentId> segmentsWithMissingRules = Lists.newArrayListWithCapacity(MAX_MISSING_RULES);
    int missingRules = 0;
    final Set<String> broadcastDatasources = new HashSet<>();
    for (ImmutableDruidDataSource dataSource : params.getDataSourcesSnapshot().getDataSourcesMap().values()) {
        List<Rule> rules = databaseRuleManager.getRulesWithDefault(dataSource.getName());
        for (Rule rule : rules) {
            // A datasource is considered a broadcast datasource if it has any broadcast rules.
            // The set of broadcast datasources is used by BalanceSegments, so it is important that
            // RunRules executes before BalanceSegments.
            if (rule instanceof BroadcastDistributionRule) {
                broadcastDatasources.add(dataSource.getName());
                break;
            }
        }
    }
    for (DataSegment segment : params.getUsedSegments()) {
        if (overshadowed.contains(segment.getId())) {
            // Skipping overshadowed segments
            continue;
        }
        List<Rule> rules = databaseRuleManager.getRulesWithDefault(segment.getDataSource());
        boolean foundMatchingRule = false;
        for (Rule rule : rules) {
            if (rule.appliesTo(segment, now)) {
                if (stats.getGlobalStat("totalNonPrimaryReplicantsLoaded") >= paramsWithReplicationManager.getCoordinatorDynamicConfig().getMaxNonPrimaryReplicantsToLoad()
                        && !paramsWithReplicationManager.getReplicationManager().isLoadPrimaryReplicantsOnly()) {
                    log.info(
                        "Maximum number of non-primary replicants [%d] have been loaded for the current RunRules execution. "
                            + "Only loading primary replicants from here on for this coordinator run cycle.",
                        paramsWithReplicationManager.getCoordinatorDynamicConfig().getMaxNonPrimaryReplicantsToLoad()
                    );
                    paramsWithReplicationManager.getReplicationManager().setLoadPrimaryReplicantsOnly(true);
                }
                stats.accumulate(rule.run(coordinator, paramsWithReplicationManager, segment));
                foundMatchingRule = true;
                break;
            }
        }
        if (!foundMatchingRule) {
            if (segmentsWithMissingRules.size() < MAX_MISSING_RULES) {
                segmentsWithMissingRules.add(segment.getId());
            }
            missingRules++;
        }
    }
    if (!segmentsWithMissingRules.isEmpty()) {
        log.makeAlert("Unable to find matching rules!")
           .addData("segmentsWithMissingRulesCount", missingRules)
           .addData("segmentsWithMissingRules", segmentsWithMissingRules)
           .emit();
    }
    return params.buildFromExisting().withCoordinatorStats(stats).withBroadcastDatasources(broadcastDatasources).build();
}
Also used: DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams), CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats), MetadataRuleManager(org.apache.druid.metadata.MetadataRuleManager), ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource), SegmentId(org.apache.druid.timeline.SegmentId), DruidCluster(org.apache.druid.server.coordinator.DruidCluster), DataSegment(org.apache.druid.timeline.DataSegment), DateTime(org.joda.time.DateTime), BroadcastDistributionRule(org.apache.druid.server.coordinator.rules.BroadcastDistributionRule), Rule(org.apache.druid.server.coordinator.rules.Rule), HashSet(java.util.HashSet)
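
The heart of RunRules is first-match dispatch: for each non-overshadowed used segment, the rules for its datasource are scanned in order, the first rule whose appliesTo returns true is run, and segments matching no rule are collected for an alert. Here is a minimal, self-contained sketch of that control flow; the SimpleRule interface is a hypothetical stand-in for Druid's Rule and is not part of the Druid API.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class FirstMatchSketch {
    // Hypothetical stand-in for org.apache.druid.server.coordinator.rules.Rule.
    interface SimpleRule {
        boolean appliesTo(String segmentId);
        void run(String segmentId);
    }

    // Mirrors the RunRules loop: the first matching rule wins, at most one
    // rule runs per segment, and unmatched segments are collected for an alert.
    static List<String> dispatch(List<String> segments, List<SimpleRule> rules) {
        List<String> unmatched = new ArrayList<>();
        for (String segment : segments) {
            boolean foundMatchingRule = false;
            for (SimpleRule rule : rules) {
                if (rule.appliesTo(segment)) {
                    rule.run(segment);
                    foundMatchingRule = true;
                    break;
                }
            }
            if (!foundMatchingRule) {
                unmatched.add(segment);
            }
        }
        return unmatched;
    }

    public static void main(String[] args) {
        SimpleRule hotRule = new SimpleRule() {
            @Override
            public boolean appliesTo(String segmentId) {
                return segmentId.startsWith("hot");
            }

            @Override
            public void run(String segmentId) {
                System.out.println("loading " + segmentId + " to the hot tier");
            }
        };
        List<String> unmatched = dispatch(Arrays.asList("hot-1", "cold-1"), Arrays.asList(hotRule));
        System.out.println("segments with missing rules: " + unmatched);
    }
}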

Example 8 with DruidCoordinatorRuntimeParams

Use of org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams in project druid by druid-io.

From class KillDatasourceMetadata, method run().

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    long currentTimeMillis = System.currentTimeMillis();
    if ((lastKillTime + period) < currentTimeMillis) {
        lastKillTime = currentTimeMillis;
        long timestamp = currentTimeMillis - retainDuration;
        try {
            // Datasource metadata only exists for datasources with a supervisor.
            // To determine whether datasource metadata is still active, we check whether the supervisor
            // for that particular datasource is still active.
            Map<String, SupervisorSpec> allActiveSupervisor = metadataSupervisorManager.getLatestActiveOnly();
            Set<String> allDatasourceWithActiveSupervisor = allActiveSupervisor.values()
                .stream()
                .map(supervisorSpec -> supervisorSpec.getDataSources())
                .flatMap(Collection::stream)
                .filter(datasource -> !Strings.isNullOrEmpty(datasource))
                .collect(Collectors.toSet());
            // We exclude removing datasource metadata with active supervisor
            int datasourceMetadataRemovedCount = indexerMetadataStorageCoordinator.removeDataSourceMetadataOlderThan(timestamp, allDatasourceWithActiveSupervisor);
            ServiceEmitter emitter = params.getEmitter();
            emitter.emit(new ServiceMetricEvent.Builder().build("metadata/kill/datasource/count", datasourceMetadataRemovedCount));
            log.info("Finished running KillDatasourceMetadata duty. Removed %,d datasource metadata", datasourceMetadataRemovedCount);
        } catch (Exception e) {
            log.error(e, "Failed to kill datasource metadata");
        }
    }
    return params;
}
Also used: Logger(org.apache.druid.java.util.common.logger.Logger), ServiceMetricEvent(org.apache.druid.java.util.emitter.service.ServiceMetricEvent), Inject(com.google.inject.Inject), Collection(java.util.Collection), DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams), Set(java.util.Set), Collectors(java.util.stream.Collectors), SupervisorSpec(org.apache.druid.indexing.overlord.supervisor.SupervisorSpec), Strings(com.google.common.base.Strings), MetadataSupervisorManager(org.apache.druid.metadata.MetadataSupervisorManager), ServiceEmitter(org.apache.druid.java.util.emitter.service.ServiceEmitter), DruidCoordinatorConfig(org.apache.druid.server.coordinator.DruidCoordinatorConfig), Map(java.util.Map), IndexerMetadataStorageCoordinator(org.apache.druid.indexing.overlord.IndexerMetadataStorageCoordinator), Preconditions(com.google.common.base.Preconditions)
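
KillDatasourceMetadata and KillCompactionConfig share the same throttling gate: the duty runs at most once per period, tracked in lastKillTime, and the retention cutoff is simply the current time minus retainDuration, in epoch millis. A minimal sketch of that pattern follows; the field names match the ones used above but everything here is local to the sketch.

public class PeriodGateSketch {
    private final long period;          // minimum millis between runs
    private final long retainDuration;  // keep records newer than this many millis
    private long lastKillTime = 0;

    PeriodGateSketch(long period, long retainDuration) {
        this.period = period;
        this.retainDuration = retainDuration;
    }

    // Returns the deletion cutoff timestamp if the duty should run now, or -1 to skip,
    // mirroring the (lastKillTime + period) < currentTimeMillis check above.
    long maybeRun(long nowMillis) {
        if (lastKillTime + period >= nowMillis) {
            return -1; // ran too recently; skip this coordinator cycle
        }
        lastKillTime = nowMillis;
        return nowMillis - retainDuration; // delete anything older than this
    }

    public static void main(String[] args) {
        // 1 minute period, 1 day retention.
        PeriodGateSketch gate = new PeriodGateSketch(60_000, 86_400_000);
        long t0 = System.currentTimeMillis();
        System.out.println(gate.maybeRun(t0));          // cutoff: t0 minus one day
        System.out.println(gate.maybeRun(t0 + 1_000));  // -1: still within the period
    }
}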

Example 9 with DruidCoordinatorRuntimeParams

Use of org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams in project druid by druid-io.

From class LoadRuleTest, method testMaxLoadingQueueSize().

@Test
public void testMaxLoadingQueueSize() {
    EasyMock.expect(mockBalancerStrategy.findNewSegmentHomeReplicator(EasyMock.anyObject(), EasyMock.anyObject()))
            .andDelegateTo(balancerStrategy)
            .times(2);
    EasyMock.replay(throttler, mockBalancerStrategy);
    final LoadQueuePeonTester peon = new LoadQueuePeonTester();
    LoadRule rule = createLoadRule(ImmutableMap.of("hot", 1));
    DruidCluster druidCluster = DruidClusterBuilder
        .newBuilder()
        .addTier(
            "hot",
            new ServerHolder(
                new DruidServer("serverHot", "hostHot", null, 1000, ServerType.HISTORICAL, "hot", 0).toImmutableDruidServer(),
                peon
            )
        )
        .build();
    DataSegment dataSegment1 = createDataSegment("ds1");
    DataSegment dataSegment2 = createDataSegment("ds2");
    DataSegment dataSegment3 = createDataSegment("ds3");
    DruidCoordinatorRuntimeParams params = CoordinatorRuntimeParamsTestHelpers
        .newBuilder()
        .withDruidCluster(druidCluster)
        .withSegmentReplicantLookup(SegmentReplicantLookup.make(druidCluster, false))
        .withReplicationManager(throttler)
        .withBalancerStrategy(mockBalancerStrategy)
        .withUsedSegmentsInTest(dataSegment1, dataSegment2, dataSegment3)
        .withDynamicConfigs(CoordinatorDynamicConfig.builder().withMaxSegmentsInNodeLoadingQueue(2).build())
        .build();
    CoordinatorStats stats1 = rule.run(null, params, dataSegment1);
    CoordinatorStats stats2 = rule.run(null, params, dataSegment2);
    CoordinatorStats stats3 = rule.run(null, params, dataSegment3);
    Assert.assertEquals(1L, stats1.getTieredStat(LoadRule.ASSIGNED_COUNT, "hot"));
    Assert.assertEquals(1L, stats2.getTieredStat(LoadRule.ASSIGNED_COUNT, "hot"));
    Assert.assertFalse(stats3.getTiers(LoadRule.ASSIGNED_COUNT).contains("hot"));
    EasyMock.verify(throttler, mockBalancerStrategy);
}
Also used: DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams), CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats), ServerHolder(org.apache.druid.server.coordinator.ServerHolder), DruidServer(org.apache.druid.client.DruidServer), ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer), DruidCluster(org.apache.druid.server.coordinator.DruidCluster), LoadQueuePeonTester(org.apache.druid.server.coordinator.LoadQueuePeonTester), DataSegment(org.apache.druid.timeline.DataSegment), Test(org.junit.Test)
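
The three assertions hinge on the load queue cap: with maxSegmentsInNodeLoadingQueue set to 2, the first two runs each enqueue a load on the single peon, and the third run finds no server with remaining queue capacity, so nothing is assigned to the hot tier. The following is a hedged, conceptual sketch of that eligibility check; the real logic lives inside LoadRule and ServerHolder and differs in detail.

public class QueueCapSketch {
    // Conceptual version of the per-server eligibility test the rule applies.
    // Treating a cap of 0 as "no limit" is an assumption of this sketch;
    // check the CoordinatorDynamicConfig docs for your Druid version.
    static boolean canEnqueue(int segmentsAlreadyQueued, int maxSegmentsInNodeLoadingQueue) {
        return maxSegmentsInNodeLoadingQueue <= 0
            || segmentsAlreadyQueued < maxSegmentsInNodeLoadingQueue;
    }

    public static void main(String[] args) {
        int cap = 2;
        System.out.println(canEnqueue(0, cap)); // true:  dataSegment1 is assigned
        System.out.println(canEnqueue(1, cap)); // true:  dataSegment2 is assigned
        System.out.println(canEnqueue(2, cap)); // false: dataSegment3 is rejected
    }
}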

Example 10 with DruidCoordinatorRuntimeParams

Use of org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams in project druid by druid-io.

From class LoadRuleTest, method testDropDuringDecommissioning().

/**
 * Two servers each hold a segment; one of the servers is decommissioning.
 * The rule should drop a segment from both servers.
 */
@Test
public void testDropDuringDecommissioning() {
    final LoadQueuePeon mockPeon = createEmptyPeon();
    mockPeon.dropSegment(EasyMock.anyObject(), EasyMock.anyObject());
    EasyMock.expectLastCall().times(2);
    EasyMock.expect(mockBalancerStrategy.pickServersToDrop(EasyMock.anyObject(), EasyMock.anyObject()))
            .andDelegateTo(balancerStrategy)
            .times(4);
    EasyMock.replay(throttler, mockPeon, mockBalancerStrategy);
    LoadRule rule = createLoadRule(ImmutableMap.of("tier1", 0));
    final DataSegment segment1 = createDataSegment("foo1");
    final DataSegment segment2 = createDataSegment("foo2");
    DruidServer server1 = createServer("tier1");
    server1.addDataSegment(segment1);
    DruidServer server2 = createServer("tier1");
    server2.addDataSegment(segment2);
    DruidCluster druidCluster = DruidClusterBuilder
        .newBuilder()
        .addTier(
            "tier1",
            new ServerHolder(server1.toImmutableDruidServer(), mockPeon, true),
            new ServerHolder(server2.toImmutableDruidServer(), mockPeon, false)
        )
        .build();
    DruidCoordinatorRuntimeParams params = makeCoordinatorRuntimeParams(druidCluster, segment1, segment2);
    CoordinatorStats stats = rule.run(null, params, segment1);
    Assert.assertEquals(1L, stats.getTieredStat("droppedCount", "tier1"));
    stats = rule.run(null, params, segment2);
    Assert.assertEquals(1L, stats.getTieredStat("droppedCount", "tier1"));
    EasyMock.verify(throttler, mockPeon);
}
Also used: DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams), CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats), ServerHolder(org.apache.druid.server.coordinator.ServerHolder), LoadQueuePeon(org.apache.druid.server.coordinator.LoadQueuePeon), DruidServer(org.apache.druid.client.DruidServer), ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer), DruidCluster(org.apache.druid.server.coordinator.DruidCluster), DataSegment(org.apache.druid.timeline.DataSegment), Test(org.junit.Test)
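
Both LoadRuleTest examples follow the standard EasyMock lifecycle: record expectations, replay, exercise, verify. A minimal standalone example of that lifecycle is shown below; the Notifier interface is hypothetical and exists only for this sketch.

import static org.easymock.EasyMock.anyString;
import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.expectLastCall;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;

public class EasyMockLifecycleSketch {
    // Hypothetical collaborator used only for this sketch.
    interface Notifier {
        void send(String segmentId);
    }

    public static void main(String[] args) {
        Notifier mock = createMock(Notifier.class);

        // Record phase: expect exactly two calls, just as the test above
        // records mockPeon.dropSegment(...) with times(2).
        mock.send(anyString());
        expectLastCall().times(2);

        // Replay phase: switch the mock from recording to checking.
        replay(mock);

        // Exercise phase.
        mock.send("seg-1");
        mock.send("seg-2");

        // Verify phase: throws if the recorded expectations were not met.
        verify(mock);
        System.out.println("expectations verified");
    }
}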

Aggregations

DruidCoordinatorRuntimeParams (org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams): 11
CoordinatorStats (org.apache.druid.server.coordinator.CoordinatorStats): 9
DataSegment (org.apache.druid.timeline.DataSegment): 8
ServerHolder (org.apache.druid.server.coordinator.ServerHolder): 7
Map (java.util.Map): 5
Collectors (java.util.stream.Collectors): 5
DruidCluster (org.apache.druid.server.coordinator.DruidCluster): 5
ImmutableDruidServer (org.apache.druid.client.ImmutableDruidServer): 4
Logger (org.apache.druid.java.util.common.logger.Logger): 4
Inject (com.google.inject.Inject): 3
Set (java.util.Set): 3
ServiceEmitter (org.apache.druid.java.util.emitter.service.ServiceEmitter): 3
ServiceMetricEvent (org.apache.druid.java.util.emitter.service.ServiceMetricEvent): 3
DruidCoordinator (org.apache.druid.server.coordinator.DruidCoordinator): 3
DruidCoordinatorConfig (org.apache.druid.server.coordinator.DruidCoordinatorConfig): 3
LoadQueuePeon (org.apache.druid.server.coordinator.LoadQueuePeon): 3
Test (org.junit.Test): 3
Preconditions (com.google.common.base.Preconditions): 2
Object2LongMap (it.unimi.dsi.fastutil.objects.Object2LongMap): 2
HashMap (java.util.HashMap): 2