
Example 26 with CoordinatorStats

Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

The class RunRules, method run().

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    replicatorThrottler.updateParams(
        coordinator.getDynamicConfigs().getReplicationThrottleLimit(),
        coordinator.getDynamicConfigs().getReplicantLifetime(),
        false
    );
    CoordinatorStats stats = new CoordinatorStats();
    DruidCluster cluster = params.getDruidCluster();
    if (cluster.isEmpty()) {
        log.warn("Uh... I have no servers. Not assigning anything...");
        return params;
    }
    // Get used segments which are overshadowed by other used segments. Those would not need to be loaded and
    // eventually will be unloaded from Historical servers. Segments overshadowed by *served* used segments are marked
    // as unused in MarkAsUnusedOvershadowedSegments, and then eventually Coordinator sends commands to Historical nodes
    // to unload such segments in UnloadUnusedSegments.
    Set<SegmentId> overshadowed = params.getDataSourcesSnapshot().getOvershadowedSegments();
    for (String tier : cluster.getTierNames()) {
        replicatorThrottler.updateReplicationState(tier);
    }
    DruidCoordinatorRuntimeParams paramsWithReplicationManager = params
        .buildFromExistingWithoutSegmentsMetadata()
        .withReplicationManager(replicatorThrottler)
        .build();
    // Run through all matched rules for used segments
    DateTime now = DateTimes.nowUtc();
    MetadataRuleManager databaseRuleManager = paramsWithReplicationManager.getDatabaseRuleManager();
    final List<SegmentId> segmentsWithMissingRules = Lists.newArrayListWithCapacity(MAX_MISSING_RULES);
    int missingRules = 0;
    final Set<String> broadcastDatasources = new HashSet<>();
    for (ImmutableDruidDataSource dataSource : params.getDataSourcesSnapshot().getDataSourcesMap().values()) {
        List<Rule> rules = databaseRuleManager.getRulesWithDefault(dataSource.getName());
        for (Rule rule : rules) {
            // A datasource is considered a broadcast datasource if it has any broadcast rules.
            // The set of broadcast datasources is used by BalanceSegments, so it is important that RunRules
            // executes before BalanceSegments.
            if (rule instanceof BroadcastDistributionRule) {
                broadcastDatasources.add(dataSource.getName());
                break;
            }
        }
    }
    for (DataSegment segment : params.getUsedSegments()) {
        if (overshadowed.contains(segment.getId())) {
            // Skipping overshadowed segments
            continue;
        }
        List<Rule> rules = databaseRuleManager.getRulesWithDefault(segment.getDataSource());
        boolean foundMatchingRule = false;
        for (Rule rule : rules) {
            if (rule.appliesTo(segment, now)) {
                if (stats.getGlobalStat("totalNonPrimaryReplicantsLoaded") >= paramsWithReplicationManager.getCoordinatorDynamicConfig().getMaxNonPrimaryReplicantsToLoad()
                        && !paramsWithReplicationManager.getReplicationManager().isLoadPrimaryReplicantsOnly()) {
                    log.info(
                        "Maximum number of non-primary replicants [%d] have been loaded for the current RunRules execution."
                            + " Only loading primary replicants from here on for this coordinator run cycle.",
                        paramsWithReplicationManager.getCoordinatorDynamicConfig().getMaxNonPrimaryReplicantsToLoad()
                    );
                    paramsWithReplicationManager.getReplicationManager().setLoadPrimaryReplicantsOnly(true);
                }
                stats.accumulate(rule.run(coordinator, paramsWithReplicationManager, segment));
                foundMatchingRule = true;
                break;
            }
        }
        if (!foundMatchingRule) {
            if (segmentsWithMissingRules.size() < MAX_MISSING_RULES) {
                segmentsWithMissingRules.add(segment.getId());
            }
            missingRules++;
        }
    }
    if (!segmentsWithMissingRules.isEmpty()) {
        log.makeAlert("Unable to find matching rules!")
           .addData("segmentsWithMissingRulesCount", missingRules)
           .addData("segmentsWithMissingRules", segmentsWithMissingRules)
           .emit();
    }
    return params.buildFromExisting()
                 .withCoordinatorStats(stats)
                 .withBroadcastDatasources(broadcastDatasources)
                 .build();
}
Also used: DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) MetadataRuleManager(org.apache.druid.metadata.MetadataRuleManager) ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource) SegmentId(org.apache.druid.timeline.SegmentId) DruidCluster(org.apache.druid.server.coordinator.DruidCluster) DataSegment(org.apache.druid.timeline.DataSegment) DateTime(org.joda.time.DateTime) BroadcastDistributionRule(org.apache.druid.server.coordinator.rules.BroadcastDistributionRule) Rule(org.apache.druid.server.coordinator.rules.Rule) HashSet(java.util.HashSet)
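
The key behavior in the segment loop above is first-match semantics: rules are evaluated in order, only the first rule whose appliesTo() returns true runs for a given segment, and segments with no matching rule are collected (capped at MAX_MISSING_RULES) for an alert. Below is a minimal, self-contained sketch of that loop shape; SimpleRule and the segment IDs are hypothetical stand-ins for illustration, not Druid's real classes.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;

class FirstMatchSketch {
    // Hypothetical stand-in for a coordinator rule: a name plus an appliesTo predicate.
    record SimpleRule(String name, Predicate<String> appliesTo) {}

    static final int MAX_MISSING_RULES = 10;

    public static void main(String[] args) {
        List<SimpleRule> rules = List.of(
            new SimpleRule("loadHot", id -> id.startsWith("hot")),
            new SimpleRule("loadDefault", id -> id.startsWith("norm"))
        );
        List<String> segments = List.of("hot_2021", "norm_2021", "cold_2020");
        List<String> segmentsWithMissingRules = new ArrayList<>();
        int missingRules = 0;
        for (String segmentId : segments) {
            boolean foundMatchingRule = false;
            for (SimpleRule rule : rules) {
                if (rule.appliesTo().test(segmentId)) {
                    System.out.println(rule.name() + " handles " + segmentId);
                    foundMatchingRule = true;
                    // First matching rule wins, as in RunRules above.
                    break;
                }
            }
            if (!foundMatchingRule) {
                // Cap the detail list but keep counting, mirroring the alert logic above.
                if (segmentsWithMissingRules.size() < MAX_MISSING_RULES) {
                    segmentsWithMissingRules.add(segmentId);
                }
                missingRules++;
            }
        }
        if (!segmentsWithMissingRules.isEmpty()) {
            System.out.println("Unable to find matching rules for " + missingRules + " segment(s): " + segmentsWithMissingRules);
        }
    }
}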

Example 27 with CoordinatorStats

Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

The class CompactSegments, method makeStats().

private CoordinatorStats makeStats(
    Map<String, AutoCompactionSnapshot.Builder> currentRunAutoCompactionSnapshotBuilders,
    int numCompactionTasks,
    CompactionSegmentIterator iterator
) {
    final Map<String, AutoCompactionSnapshot> currentAutoCompactionSnapshotPerDataSource = new HashMap<>();
    final CoordinatorStats stats = new CoordinatorStats();
    stats.addToGlobalStat(COMPACTION_TASK_COUNT, numCompactionTasks);
    // Iterate through the remaining segments in the iterator. These segments could have been compacted
    // but were not, due to a lack of task slots, so aggregate their numbers into the AwaitingCompaction statistics.
    while (iterator.hasNext()) {
        final List<DataSegment> segmentsToCompact = iterator.next();
        if (!segmentsToCompact.isEmpty()) {
            final String dataSourceName = segmentsToCompact.get(0).getDataSource();
            AutoCompactionSnapshot.Builder snapshotBuilder = currentRunAutoCompactionSnapshotBuilders.computeIfAbsent(
                dataSourceName,
                k -> new AutoCompactionSnapshot.Builder(k, AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING)
            );
            snapshotBuilder.incrementBytesAwaitingCompaction(segmentsToCompact.stream().mapToLong(DataSegment::getSize).sum());
            snapshotBuilder.incrementIntervalCountAwaitingCompaction(segmentsToCompact.stream().map(DataSegment::getInterval).distinct().count());
            snapshotBuilder.incrementSegmentCountAwaitingCompaction(segmentsToCompact.size());
        }
    }
    // Statistics of all segments considered compacted after this run
    Map<String, CompactionStatistics> allCompactedStatistics = iterator.totalCompactedStatistics();
    for (Map.Entry<String, CompactionStatistics> compactionStatisticsEntry : allCompactedStatistics.entrySet()) {
        final String dataSource = compactionStatisticsEntry.getKey();
        final CompactionStatistics dataSourceCompactedStatistics = compactionStatisticsEntry.getValue();
        AutoCompactionSnapshot.Builder builder = currentRunAutoCompactionSnapshotBuilders.computeIfAbsent(
            dataSource,
            k -> new AutoCompactionSnapshot.Builder(k, AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING)
        );
        builder.incrementBytesCompacted(dataSourceCompactedStatistics.getByteSum());
        builder.incrementSegmentCountCompacted(dataSourceCompactedStatistics.getSegmentNumberCountSum());
        builder.incrementIntervalCountCompacted(dataSourceCompactedStatistics.getSegmentIntervalCountSum());
    }
    // Statistics of all segments considered skipped after this run
    Map<String, CompactionStatistics> allSkippedStatistics = iterator.totalSkippedStatistics();
    for (Map.Entry<String, CompactionStatistics> compactionStatisticsEntry : allSkippedStatistics.entrySet()) {
        final String dataSource = compactionStatisticsEntry.getKey();
        final CompactionStatistics dataSourceSkippedStatistics = compactionStatisticsEntry.getValue();
        AutoCompactionSnapshot.Builder builder = currentRunAutoCompactionSnapshotBuilders.computeIfAbsent(
            dataSource,
            k -> new AutoCompactionSnapshot.Builder(k, AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING)
        );
        builder.incrementBytesSkipped(dataSourceSkippedStatistics.getByteSum());
        builder.incrementSegmentCountSkipped(dataSourceSkippedStatistics.getSegmentNumberCountSum());
        builder.incrementIntervalCountSkipped(dataSourceSkippedStatistics.getSegmentIntervalCountSum());
    }
    for (Map.Entry<String, AutoCompactionSnapshot.Builder> autoCompactionSnapshotBuilderEntry : currentRunAutoCompactionSnapshotBuilders.entrySet()) {
        final String dataSource = autoCompactionSnapshotBuilderEntry.getKey();
        final AutoCompactionSnapshot.Builder builder = autoCompactionSnapshotBuilderEntry.getValue();
        // Build the complete snapshot for the datasource
        AutoCompactionSnapshot autoCompactionSnapshot = builder.build();
        currentAutoCompactionSnapshotPerDataSource.put(dataSource, autoCompactionSnapshot);
        // Use the complete snapshot to emit metrics
        stats.addToDataSourceStat(TOTAL_SIZE_OF_SEGMENTS_AWAITING, dataSource, autoCompactionSnapshot.getBytesAwaitingCompaction());
        stats.addToDataSourceStat(TOTAL_COUNT_OF_SEGMENTS_AWAITING, dataSource, autoCompactionSnapshot.getSegmentCountAwaitingCompaction());
        stats.addToDataSourceStat(TOTAL_INTERVAL_OF_SEGMENTS_AWAITING, dataSource, autoCompactionSnapshot.getIntervalCountAwaitingCompaction());
        stats.addToDataSourceStat(TOTAL_SIZE_OF_SEGMENTS_COMPACTED, dataSource, autoCompactionSnapshot.getBytesCompacted());
        stats.addToDataSourceStat(TOTAL_COUNT_OF_SEGMENTS_COMPACTED, dataSource, autoCompactionSnapshot.getSegmentCountCompacted());
        stats.addToDataSourceStat(TOTAL_INTERVAL_OF_SEGMENTS_COMPACTED, dataSource, autoCompactionSnapshot.getIntervalCountCompacted());
        stats.addToDataSourceStat(TOTAL_SIZE_OF_SEGMENTS_SKIPPED, dataSource, autoCompactionSnapshot.getBytesSkipped());
        stats.addToDataSourceStat(TOTAL_COUNT_OF_SEGMENTS_SKIPPED, dataSource, autoCompactionSnapshot.getSegmentCountSkipped());
        stats.addToDataSourceStat(TOTAL_INTERVAL_OF_SEGMENTS_SKIPPED, dataSource, autoCompactionSnapshot.getIntervalCountSkipped());
    }
    // Atomic update of autoCompactionSnapshotPerDataSource with the latest from this coordinator run
    autoCompactionSnapshotPerDataSource.set(currentAutoCompactionSnapshotPerDataSource);
    return stats;
}
Also used: CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) HashMap(java.util.HashMap) CompactionStatistics(org.apache.druid.server.coordinator.CompactionStatistics) DataSegment(org.apache.druid.timeline.DataSegment) AutoCompactionSnapshot(org.apache.druid.server.coordinator.AutoCompactionSnapshot) Map(java.util.Map)
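
makeStats above leans on Map.computeIfAbsent to lazily create one snapshot builder per datasource and then fold several statistic streams into it. Here is a self-contained sketch of that per-key aggregation pattern; StatsBuilder is a hypothetical stand-in for AutoCompactionSnapshot.Builder, used only for illustration.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

class PerKeyAggregationSketch {
    // Hypothetical stand-in for AutoCompactionSnapshot.Builder: accumulates per-datasource totals.
    static class StatsBuilder {
        long bytes;
        long segments;
        StatsBuilder add(long byteCount) { bytes += byteCount; segments++; return this; }
    }

    public static void main(String[] args) {
        // (datasource, segmentSizeBytes) pairs, as an iterator over segments might yield them.
        List<Map.Entry<String, Long>> segments = List.of(
            Map.entry("wikipedia", 100L),
            Map.entry("wikipedia", 250L),
            Map.entry("logs", 400L)
        );
        Map<String, StatsBuilder> builders = new HashMap<>();
        for (Map.Entry<String, Long> segment : segments) {
            // Lazily create the builder for this datasource, then accumulate into it.
            builders.computeIfAbsent(segment.getKey(), k -> new StatsBuilder()).add(segment.getValue());
        }
        builders.forEach((ds, b) -> System.out.println(ds + ": " + b.segments + " segments, " + b.bytes + " bytes"));
    }
}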

Example 28 with CoordinatorStats

Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

The class UnloadUnusedSegments, method run().

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    CoordinatorStats stats = new CoordinatorStats();
    Set<DataSegment> usedSegments = params.getUsedSegments();
    DruidCluster cluster = params.getDruidCluster();
    Map<String, Boolean> broadcastStatusByDatasource = new HashMap<>();
    for (String broadcastDatasource : params.getBroadcastDatasources()) {
        broadcastStatusByDatasource.put(broadcastDatasource, true);
    }
    for (SortedSet<ServerHolder> serverHolders : cluster.getSortedHistoricalsByTier()) {
        for (ServerHolder serverHolder : serverHolders) {
            handleUnusedSegmentsForServer(serverHolder, usedSegments, params, stats, false, broadcastStatusByDatasource);
        }
    }
    for (ServerHolder serverHolder : cluster.getBrokers()) {
        handleUnusedSegmentsForServer(serverHolder, usedSegments, params, stats, false, broadcastStatusByDatasource);
    }
    for (ServerHolder serverHolder : cluster.getRealtimes()) {
        handleUnusedSegmentsForServer(serverHolder, usedSegments, params, stats, true, broadcastStatusByDatasource);
    }
    return params.buildFromExisting().withCoordinatorStats(stats).build();
}
Also used: CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) HashMap(java.util.HashMap) DruidCluster(org.apache.druid.server.coordinator.DruidCluster) DataSegment(org.apache.druid.timeline.DataSegment)
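
Note the shape of the traversal above: the same handler runs for every server, grouped by role, with a boolean flag that differs only for realtime servers. A minimal sketch of that traversal pattern follows; the group names and the flag's meaning here are illustrative stand-ins, not Druid's actual semantics.

import java.util.List;
import java.util.Map;

class ServerTraversalSketch {
    // Stand-in for handleUnusedSegmentsForServer: one handler shared by all server groups.
    static void handle(String server, boolean realtimeFlag) {
        System.out.println("handling " + server + " (flag=" + realtimeFlag + ")");
    }

    public static void main(String[] args) {
        Map<String, List<String>> cluster = Map.of(
            "historicals", List.of("hist-1", "hist-2"),
            "brokers", List.of("broker-1"),
            "realtimes", List.of("rt-1")
        );
        // Same handler everywhere; only the realtime group gets the flag set,
        // mirroring the three loops in UnloadUnusedSegments.run above.
        for (Map.Entry<String, List<String>> group : cluster.entrySet()) {
            boolean isRealtime = "realtimes".equals(group.getKey());
            for (String server : group.getValue()) {
                handle(server, isRealtime);
            }
        }
    }
}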

Example 29 with CoordinatorStats

Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

The class DropRule, method run().

@Override
public CoordinatorStats run(DruidCoordinator coordinator, DruidCoordinatorRuntimeParams params, DataSegment segment) {
    CoordinatorStats stats = new CoordinatorStats();
    coordinator.markSegmentAsUnused(segment);
    stats.addToGlobalStat("deletedCount", 1);
    return stats;
}
Also used: CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats)
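
DropRule is the smallest rule: one global counter bumped per dropped segment. Downstream, RunRules folds each rule's per-segment stats into a run-wide total via stats.accumulate(...) (see Example 26). A toy sketch of that counter-merge behavior follows; MiniStats is a hypothetical stand-in for CoordinatorStats' global counters, not the real class.

import java.util.HashMap;
import java.util.Map;

class MiniStatsSketch {
    // Hypothetical stand-in for CoordinatorStats' global counters.
    static class MiniStats {
        final Map<String, Long> globalStats = new HashMap<>();
        void addToGlobalStat(String key, long delta) { globalStats.merge(key, delta, Long::sum); }
        void accumulate(MiniStats other) { other.globalStats.forEach(this::addToGlobalStat); }
    }

    public static void main(String[] args) {
        MiniStats runTotals = new MiniStats();
        for (int segment = 0; segment < 3; segment++) {
            // What a single DropRule.run invocation would return.
            MiniStats perSegment = new MiniStats();
            perSegment.addToGlobalStat("deletedCount", 1);
            runTotals.accumulate(perSegment);
        }
        System.out.println(runTotals.globalStats);  // prints {deletedCount=3}
    }
}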

Example 30 with CoordinatorStats

Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

The class LoadRuleTest, method testLoad().

@Test
public void testLoad() {
    EasyMock.expect(throttler.canCreateReplicant(EasyMock.anyString())).andReturn(true).anyTimes();
    final LoadQueuePeon mockPeon = createEmptyPeon();
    mockPeon.loadSegment(EasyMock.anyObject(), EasyMock.anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    LoadRule rule = createLoadRule(ImmutableMap.of("hot", 1, DruidServer.DEFAULT_TIER, 2));
    final DataSegment segment = createDataSegment("foo");
    throttler.registerReplicantCreation(DruidServer.DEFAULT_TIER, segment.getId(), "hostNorm");
    EasyMock.expectLastCall().once();
    EasyMock.expect(mockBalancerStrategy.findNewSegmentHomeReplicator(EasyMock.anyObject(), EasyMock.anyObject()))
            .andDelegateTo(balancerStrategy)
            .times(3);
    EasyMock.replay(throttler, mockPeon, mockBalancerStrategy);
    DruidCluster druidCluster = DruidClusterBuilder
        .newBuilder()
        .addTier(
            "hot",
            new ServerHolder(
                new DruidServer("serverHot", "hostHot", null, 1000, ServerType.HISTORICAL, "hot", 1).toImmutableDruidServer(),
                mockPeon
            )
        )
        .addTier(
            DruidServer.DEFAULT_TIER,
            new ServerHolder(
                new DruidServer("serverNorm", "hostNorm", null, 1000, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0).toImmutableDruidServer(),
                mockPeon
            )
        )
        .build();
    CoordinatorStats stats = rule.run(null, makeCoordinatorRuntimeParams(druidCluster, segment), segment);
    Assert.assertEquals(1L, stats.getTieredStat(LoadRule.ASSIGNED_COUNT, "hot"));
    Assert.assertEquals(1L, stats.getTieredStat(LoadRule.ASSIGNED_COUNT, DruidServer.DEFAULT_TIER));
    EasyMock.verify(throttler, mockPeon, mockBalancerStrategy);
}
Also used: CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) LoadQueuePeon(org.apache.druid.server.coordinator.LoadQueuePeon) DruidServer(org.apache.druid.client.DruidServer) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) DruidCluster(org.apache.druid.server.coordinator.DruidCluster) DataSegment(org.apache.druid.timeline.DataSegment) Test(org.junit.Test)
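
The test above follows EasyMock's record-replay-verify lifecycle: expectations are recorded (expect / expectLastCall), replay(...) switches the mocks into replay mode, the code under test runs, and verify(...) asserts that every recorded expectation actually fired. A minimal self-contained example of that lifecycle; the Clock interface is made up for illustration.

import org.easymock.EasyMock;
import org.junit.Assert;
import org.junit.Test;

public class ReplayVerifySketch {
    // Hypothetical collaborator to be mocked.
    interface Clock { long millis(); }

    @Test
    public void testLifecycle() {
        Clock clock = EasyMock.createMock(Clock.class);
        // Record: the mock should be asked for the time exactly once.
        EasyMock.expect(clock.millis()).andReturn(42L).once();
        // Replay: from here on, calls are checked against the recorded script.
        EasyMock.replay(clock);
        Assert.assertEquals(42L, clock.millis());
        // Verify: fails the test if any recorded expectation was never satisfied.
        EasyMock.verify(clock);
    }
}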

Aggregations

CoordinatorStats (org.apache.druid.server.coordinator.CoordinatorStats): 45 usages
DataSegment (org.apache.druid.timeline.DataSegment): 31 usages
Test (org.junit.Test): 31 usages
ServerHolder (org.apache.druid.server.coordinator.ServerHolder): 24 usages
DruidCluster (org.apache.druid.server.coordinator.DruidCluster): 22 usages
DruidServer (org.apache.druid.client.DruidServer): 15 usages
DruidCoordinatorRuntimeParams (org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams): 14 usages
LoadQueuePeon (org.apache.druid.server.coordinator.LoadQueuePeon): 14 usages
ImmutableDruidServer (org.apache.druid.client.ImmutableDruidServer): 13 usages
HttpIndexingServiceClient (org.apache.druid.client.indexing.HttpIndexingServiceClient): 11 usages
HashMap (java.util.HashMap): 10 usages
ArrayList (java.util.ArrayList): 9 usages
AutoCompactionSnapshot (org.apache.druid.server.coordinator.AutoCompactionSnapshot): 9 usages
List (java.util.List): 7 usages
Map (java.util.Map): 6 usages
DateTimes (org.apache.druid.java.util.common.DateTimes): 6 usages
Intervals (org.apache.druid.java.util.common.Intervals): 6 usages
CoordinatorRuntimeParamsTestHelpers (org.apache.druid.server.coordinator.CoordinatorRuntimeParamsTestHelpers): 6 usages
Assert (org.junit.Assert): 6 usages
Before (org.junit.Before): 6 usages