Search in sources:

Example 1 with CoordinatorStats

Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

From the class CompactSegments, method run:

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    LOG.info("Compact segments");
    final CoordinatorCompactionConfig dynamicConfig = params.getCoordinatorCompactionConfig();
    final CoordinatorStats stats = new CoordinatorStats();
    List<DataSourceCompactionConfig> compactionConfigList = dynamicConfig.getCompactionConfigs();
    if (dynamicConfig.getMaxCompactionTaskSlots() > 0) {
        Map<String, VersionedIntervalTimeline<String, DataSegment>> dataSources = params.getUsedSegmentsTimelinesPerDataSource();
        if (compactionConfigList != null && !compactionConfigList.isEmpty()) {
            Map<String, DataSourceCompactionConfig> compactionConfigs = compactionConfigList.stream().collect(Collectors.toMap(DataSourceCompactionConfig::getDataSource, Function.identity()));
            final List<TaskStatusPlus> compactionTasks = filterNonCompactionTasks(indexingServiceClient.getActiveTasks());
            // dataSource -> list of intervals for which compaction will be skipped in this run
            final Map<String, List<Interval>> intervalsToSkipCompaction = new HashMap<>();
            int numEstimatedNonCompleteCompactionTasks = 0;
            for (TaskStatusPlus status : compactionTasks) {
                final TaskPayloadResponse response = indexingServiceClient.getTaskPayload(status.getId());
                if (response == null) {
                    throw new ISE("Got a null paylord from overlord for task[%s]", status.getId());
                }
                if (COMPACTION_TASK_TYPE.equals(response.getPayload().getType())) {
                    final ClientCompactionTaskQuery compactionTaskQuery = (ClientCompactionTaskQuery) response.getPayload();
                    DataSourceCompactionConfig dataSourceCompactionConfig = compactionConfigs.get(status.getDataSource());
                    if (dataSourceCompactionConfig != null && dataSourceCompactionConfig.getGranularitySpec() != null) {
                        Granularity configuredSegmentGranularity = dataSourceCompactionConfig.getGranularitySpec().getSegmentGranularity();
                        if (configuredSegmentGranularity != null && compactionTaskQuery.getGranularitySpec() != null && !configuredSegmentGranularity.equals(compactionTaskQuery.getGranularitySpec().getSegmentGranularity())) {
                            // Cancel the active compaction task if its segmentGranularity differs from
                            // the configured one; the interval will need to be re-compacted
                            LOG.info("Canceled task[%s] as task segmentGranularity is [%s] but compaction config segmentGranularity is [%s]", status.getId(), compactionTaskQuery.getGranularitySpec().getSegmentGranularity(), configuredSegmentGranularity);
                            indexingServiceClient.cancelTask(status.getId());
                            continue;
                        }
                    }
                    // Skip interval as the current active compaction task is good
                    final Interval interval = compactionTaskQuery.getIoConfig().getInputSpec().getInterval();
                    intervalsToSkipCompaction.computeIfAbsent(status.getDataSource(), k -> new ArrayList<>()).add(interval);
                    // Since we keep the current active compaction task running, we count the active task slots
                    numEstimatedNonCompleteCompactionTasks += findMaxNumTaskSlotsUsedByOneCompactionTask(compactionTaskQuery.getTuningConfig());
                } else {
                    throw new ISE("task[%s] is not a compactionTask", status.getId());
                }
            }
            // Skip all the intervals locked by higher priority tasks for each datasource
            // This must be done after the invalid compaction tasks are cancelled
            // in the loop above so that their intervals are not considered locked
            getLockedIntervalsToSkip(compactionConfigList).forEach((dataSource, intervals) -> intervalsToSkipCompaction.computeIfAbsent(dataSource, ds -> new ArrayList<>()).addAll(intervals));
            final CompactionSegmentIterator iterator = policy.reset(compactionConfigs, dataSources, intervalsToSkipCompaction);
            int totalCapacity;
            if (dynamicConfig.isUseAutoScaleSlots()) {
                try {
                    totalCapacity = indexingServiceClient.getTotalWorkerCapacityWithAutoScale();
                } catch (Exception e) {
                    LOG.warn("Failed to get total worker capacity with auto scale slots. Falling back to current capacity count");
                    totalCapacity = indexingServiceClient.getTotalWorkerCapacity();
                }
            } else {
                totalCapacity = indexingServiceClient.getTotalWorkerCapacity();
            }
            final int compactionTaskCapacity = (int) Math.min(totalCapacity * dynamicConfig.getCompactionTaskSlotRatio(), dynamicConfig.getMaxCompactionTaskSlots());
            final int numAvailableCompactionTaskSlots;
            if (numEstimatedNonCompleteCompactionTasks > 0) {
                numAvailableCompactionTaskSlots = Math.max(0, compactionTaskCapacity - numEstimatedNonCompleteCompactionTasks);
            } else {
                // compactionTaskCapacity might be 0 if totalWorkerCapacity is low.
                // This guarantees that at least one slot is available if
                // compaction is enabled and numEstimatedNonCompleteCompactionTasks is 0.
                numAvailableCompactionTaskSlots = Math.max(1, compactionTaskCapacity);
            }
            LOG.info("Found [%d] available task slots for compaction out of [%d] max compaction task capacity", numAvailableCompactionTaskSlots, compactionTaskCapacity);
            stats.addToGlobalStat(AVAILABLE_COMPACTION_TASK_SLOT, numAvailableCompactionTaskSlots);
            stats.addToGlobalStat(MAX_COMPACTION_TASK_SLOT, compactionTaskCapacity);
            final Map<String, AutoCompactionSnapshot.Builder> currentRunAutoCompactionSnapshotBuilders = new HashMap<>();
            if (numAvailableCompactionTaskSlots > 0) {
                stats.accumulate(doRun(compactionConfigs, currentRunAutoCompactionSnapshotBuilders, numAvailableCompactionTaskSlots, iterator));
            } else {
                stats.accumulate(makeStats(currentRunAutoCompactionSnapshotBuilders, 0, iterator));
            }
        } else {
            LOG.info("compactionConfig is empty. Skip.");
            autoCompactionSnapshotPerDataSource.set(new HashMap<>());
        }
    } else {
        LOG.info("maxCompactionTaskSlots was set to 0. Skip compaction");
        autoCompactionSnapshotPerDataSource.set(new HashMap<>());
    }
    return params.buildFromExisting().withCoordinatorStats(stats).build();
}
Also used: Logger(org.apache.druid.java.util.common.logger.Logger) Granularity(org.apache.druid.java.util.common.granularity.Granularity) Inject(com.google.inject.Inject) ClientCompactionTaskDimensionsSpec(org.apache.druid.client.indexing.ClientCompactionTaskDimensionsSpec) DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams) IndexingServiceClient(org.apache.druid.client.indexing.IndexingServiceClient) HashMap(java.util.HashMap) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) AtomicReference(java.util.concurrent.atomic.AtomicReference) Function(java.util.function.Function) AutoCompactionSnapshot(org.apache.druid.server.coordinator.AutoCompactionSnapshot) DataSourceCompactionConfig(org.apache.druid.server.coordinator.DataSourceCompactionConfig) ArrayList(java.util.ArrayList) TaskPayloadResponse(org.apache.druid.client.indexing.TaskPayloadResponse) Interval(org.joda.time.Interval) DruidCoordinatorConfig(org.apache.druid.server.coordinator.DruidCoordinatorConfig) Map(java.util.Map) IAE(org.apache.druid.java.util.common.IAE) DimensionRangePartitionsSpec(org.apache.druid.indexer.partitions.DimensionRangePartitionsSpec) Nullable(javax.annotation.Nullable) ClientCompactionTaskTransformSpec(org.apache.druid.client.indexing.ClientCompactionTaskTransformSpec) JacksonInject(com.fasterxml.jackson.annotation.JacksonInject) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) ClientCompactionTaskQueryTuningConfig(org.apache.druid.client.indexing.ClientCompactionTaskQueryTuningConfig) CompactionStatistics(org.apache.druid.server.coordinator.CompactionStatistics) CoordinatorCompactionConfig(org.apache.druid.server.coordinator.CoordinatorCompactionConfig) ISE(org.apache.druid.java.util.common.ISE) Collectors(java.util.stream.Collectors) ClientCompactionTaskQuery(org.apache.druid.client.indexing.ClientCompactionTaskQuery) TaskStatusPlus(org.apache.druid.indexer.TaskStatusPlus) List(java.util.List) ClientCompactionTaskGranularitySpec(org.apache.druid.client.indexing.ClientCompactionTaskGranularitySpec) GranularityType(org.apache.druid.java.util.common.granularity.GranularityType) JsonCreator(com.fasterxml.jackson.annotation.JsonCreator) DataSegment(org.apache.druid.timeline.DataSegment) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
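
The task-slot arithmetic is easy to lose in the surrounding control flow of run. Below is a minimal standalone sketch of just that calculation; the class and method names are illustrative, not part of Druid, and only java.lang is required:

public class CompactionSlotMath {

    // Mirrors the capacity line above: min(totalCapacity * slotRatio, maxSlots), floored to an int.
    static int compactionTaskCapacity(int totalWorkerCapacity, double slotRatio, int maxSlots) {
        return (int) Math.min(totalWorkerCapacity * slotRatio, maxSlots);
    }

    // Mirrors the availability rule: if compaction tasks are already running, subtract their
    // estimated slot usage (never going below 0); otherwise grant at least one slot so compaction
    // can make progress even when compactionTaskCapacity computes to 0 on a small cluster.
    static int availableCompactionTaskSlots(int capacity, int estimatedBusySlots) {
        return estimatedBusySlots > 0 ? Math.max(0, capacity - estimatedBusySlots) : Math.max(1, capacity);
    }

    public static void main(String[] args) {
        int capacity = compactionTaskCapacity(10, 0.1, 100); // min(10 * 0.1, 100) = 1
        System.out.println(availableCompactionTaskSlots(capacity, 0)); // 1: the guaranteed floor
        System.out.println(availableCompactionTaskSlots(capacity, 3)); // 0: running tasks exceed capacity
    }
}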

Example 2 with CoordinatorStats

Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

From the class MarkAsUnusedOvershadowedSegments, method run:

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    // Mark as unused overshadowed segments only if we've had enough time to make sure we aren't flapping with old data.
    if (!params.coordinatorIsLeadingEnoughTimeToMarkAsUnusedOvershadowedSegements()) {
        return params;
    }
    CoordinatorStats stats = new CoordinatorStats();
    DruidCluster cluster = params.getDruidCluster();
    Map<String, VersionedIntervalTimeline<String, DataSegment>> timelines = new HashMap<>();
    for (SortedSet<ServerHolder> serverHolders : cluster.getSortedHistoricalsByTier()) {
        for (ServerHolder serverHolder : serverHolders) {
            addSegmentsFromServer(serverHolder, timelines);
        }
    }
    for (ServerHolder serverHolder : cluster.getBrokers()) {
        addSegmentsFromServer(serverHolder, timelines);
    }
    // Mark all segments as unused in db that are overshadowed by served segments
    for (DataSegment dataSegment : params.getUsedSegments()) {
        VersionedIntervalTimeline<String, DataSegment> timeline = timelines.get(dataSegment.getDataSource());
        if (timeline != null && timeline.isOvershadowed(dataSegment.getInterval(), dataSegment.getVersion(), dataSegment)) {
            coordinator.markSegmentAsUnused(dataSegment);
            stats.addToGlobalStat("overShadowedCount", 1);
        }
    }
    return params.buildFromExisting().withCoordinatorStats(stats).build();
}
Also used: CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) HashMap(java.util.HashMap) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) DruidCluster(org.apache.druid.server.coordinator.DruidCluster) DataSegment(org.apache.druid.timeline.DataSegment)
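
Both examples so far follow the same pattern: the duty builds a fresh CoordinatorStats, increments named global counters, and hands the stats back through the runtime params. A minimal sketch of that write-then-read round trip is below; addToGlobalStat and accumulate appear in the examples above, while getGlobalStat is an assumption about the same class:

import org.apache.druid.server.coordinator.CoordinatorStats;

public class OvershadowedCountSketch {
    public static void main(String[] args) {
        final CoordinatorStats stats = new CoordinatorStats();
        // The duty increments the counter once per segment it marks unused.
        for (int i = 0; i < 3; i++) {
            stats.addToGlobalStat("overShadowedCount", 1);
        }
        // Stats built elsewhere can be folded into the same object.
        final CoordinatorStats other = new CoordinatorStats();
        other.addToGlobalStat("overShadowedCount", 2);
        stats.accumulate(other);
        // Downstream consumers read the counter back by name (assumed accessor).
        System.out.println(stats.getGlobalStat("overShadowedCount")); // 5
    }
}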

Example 3 with CoordinatorStats

Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

From the class BroadcastDistributionRule, method assign:

private CoordinatorStats assign(final Set<ServerHolder> serverHolders, final DataSegment segment) {
    final CoordinatorStats stats = new CoordinatorStats();
    stats.addToGlobalStat(LoadRule.ASSIGNED_COUNT, 0);
    for (ServerHolder holder : serverHolders) {
        if (segment.getSize() > holder.getAvailableSize()) {
            log.makeAlert("Failed to broadcast segment for [%s]", segment.getDataSource()).addData("segmentId", segment.getId()).addData("segmentSize", segment.getSize()).addData("hostName", holder.getServer().getHost()).addData("availableSize", holder.getAvailableSize()).emit();
        } else {
            if (!holder.isLoadingSegment(segment)) {
                holder.getPeon().loadSegment(segment, null);
                stats.addToGlobalStat(LoadRule.ASSIGNED_COUNT, 1);
            }
        }
    }
    return stats;
}
Also used: CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) ServerHolder(org.apache.druid.server.coordinator.ServerHolder)
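
Note the first line of assign: ASSIGNED_COUNT is seeded with 0 before the loop, so the key is present in the returned stats even when no server accepts the segment, and downstream reporting presumably sees an explicit zero rather than a missing entry. A short sketch of that effect, assuming LoadRule.ASSIGNED_COUNT is publicly accessible (as its use above suggests) and that CoordinatorStats exposes getGlobalStat:

import org.apache.druid.server.coordinator.CoordinatorStats;
import org.apache.druid.server.coordinator.rules.LoadRule;

public class AssignedCountSeedSketch {
    public static void main(String[] args) {
        final CoordinatorStats stats = new CoordinatorStats();
        // Register the key up front, exactly as assign() does.
        stats.addToGlobalStat(LoadRule.ASSIGNED_COUNT, 0);
        // ... the server loop would add 1 per successful loadSegment call ...
        System.out.println(stats.getGlobalStat(LoadRule.ASSIGNED_COUNT)); // 0 when nothing was assigned
    }
}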

Example 4 with CoordinatorStats

Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

From the class BroadcastDistributionRule, method drop:

private CoordinatorStats drop(final Set<ServerHolder> serverHolders, final DataSegment segment) {
    CoordinatorStats stats = new CoordinatorStats();
    for (ServerHolder holder : serverHolders) {
        holder.getPeon().dropSegment(segment, null);
        stats.addToGlobalStat(LoadRule.DROPPED_COUNT, 1);
    }
    return stats;
}
Also used: CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) ServerHolder(org.apache.druid.server.coordinator.ServerHolder)

Example 5 with CoordinatorStats

Use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

From the class BroadcastDistributionRule, method run:

@Override
public CoordinatorStats run(DruidCoordinator coordinator, DruidCoordinatorRuntimeParams params, DataSegment segment) {
    final Set<ServerHolder> dropServerHolders = new HashSet<>();
    // Find servers where we need to load the broadcast segments
    final Set<ServerHolder> loadServerHolders = params.getDruidCluster().getAllServers().stream().filter((serverHolder) -> {
        ServerType serverType = serverHolder.getServer().getType();
        if (!serverType.isSegmentBroadcastTarget()) {
            return false;
        }
        final boolean isServingSegment = serverHolder.isServingSegment(segment);
        if (serverHolder.isDecommissioning()) {
            if (isServingSegment && !serverHolder.isDroppingSegment(segment)) {
                dropServerHolders.add(serverHolder);
            }
            return false;
        }
        return !isServingSegment && !serverHolder.isLoadingSegment(segment);
    }).collect(Collectors.toSet());
    final CoordinatorStats stats = new CoordinatorStats();
    return stats.accumulate(assign(loadServerHolders, segment)).accumulate(drop(dropServerHolders, segment));
}
Also used: EmittingLogger(org.apache.druid.java.util.emitter.EmittingLogger) DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams) Set(java.util.Set) DruidCoordinator(org.apache.druid.server.coordinator.DruidCoordinator) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) Collectors(java.util.stream.Collectors) HashSet(java.util.HashSet) SegmentReplicantLookup(org.apache.druid.server.coordinator.SegmentReplicantLookup) Object2LongMap(it.unimi.dsi.fastutil.objects.Object2LongMap) Map(java.util.Map) ServerType(org.apache.druid.server.coordination.ServerType) DataSegment(org.apache.druid.timeline.DataSegment) DruidCluster(org.apache.druid.server.coordinator.DruidCluster) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) Object2LongOpenHashMap(it.unimi.dsi.fastutil.objects.Object2LongOpenHashMap)
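
Example 5 ties the two helpers together: assign and drop each return their own CoordinatorStats, and run merges them by chaining accumulate, which returns the receiving object (the chaining in the source above confirms this). A minimal sketch of that composition; the helper names and the getGlobalStat accessor are illustrative assumptions:

import org.apache.druid.server.coordinator.CoordinatorStats;

public class AccumulateChainingSketch {
    static CoordinatorStats assignStats() {
        final CoordinatorStats s = new CoordinatorStats();
        s.addToGlobalStat("assignedCount", 2);
        return s;
    }

    static CoordinatorStats dropStats() {
        final CoordinatorStats s = new CoordinatorStats();
        s.addToGlobalStat("droppedCount", 1);
        return s;
    }

    public static void main(String[] args) {
        // Merge both partial results into one stats object, as run() does.
        final CoordinatorStats combined = new CoordinatorStats().accumulate(assignStats()).accumulate(dropStats());
        System.out.println(combined.getGlobalStat("assignedCount")); // 2
        System.out.println(combined.getGlobalStat("droppedCount")); // 1
    }
}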

Aggregations

CoordinatorStats (org.apache.druid.server.coordinator.CoordinatorStats): 45 usages
DataSegment (org.apache.druid.timeline.DataSegment): 31 usages
Test (org.junit.Test): 31 usages
ServerHolder (org.apache.druid.server.coordinator.ServerHolder): 24 usages
DruidCluster (org.apache.druid.server.coordinator.DruidCluster): 22 usages
DruidServer (org.apache.druid.client.DruidServer): 15 usages
DruidCoordinatorRuntimeParams (org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams): 14 usages
LoadQueuePeon (org.apache.druid.server.coordinator.LoadQueuePeon): 14 usages
ImmutableDruidServer (org.apache.druid.client.ImmutableDruidServer): 13 usages
HttpIndexingServiceClient (org.apache.druid.client.indexing.HttpIndexingServiceClient): 11 usages
HashMap (java.util.HashMap): 10 usages
ArrayList (java.util.ArrayList): 9 usages
AutoCompactionSnapshot (org.apache.druid.server.coordinator.AutoCompactionSnapshot): 9 usages
List (java.util.List): 7 usages
Map (java.util.Map): 6 usages
DateTimes (org.apache.druid.java.util.common.DateTimes): 6 usages
Intervals (org.apache.druid.java.util.common.Intervals): 6 usages
CoordinatorRuntimeParamsTestHelpers (org.apache.druid.server.coordinator.CoordinatorRuntimeParamsTestHelpers): 6 usages
Assert (org.junit.Assert): 6 usages
Before (org.junit.Before): 6 usages