Example 1 with DataSegment

Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.

From the class NewestSegmentFirstIterator, the method findSegmentsToCompact:

/**
 * Finds segments to compact together for the given intervalToSearch. It progressively searches the given
 * intervalToSearch in time order (latest first). The timeline lookup duration is one day: the timeline is
 * looked up for the last day of the given intervalToSearch, and the preceding day is searched next if the
 * segments found so far are not enough to compact. This is repeated until enough segments are found.
 *
 * @return segments to compact
 */
private SegmentsToCompact findSegmentsToCompact(
    final String dataSourceName,
    final CompactibleTimelineObjectHolderCursor compactibleTimelineObjectHolderCursor,
    final DataSourceCompactionConfig config
) {
    final long inputSegmentSize = config.getInputSegmentSizeBytes();
    while (compactibleTimelineObjectHolderCursor.hasNext()) {
        List<DataSegment> segments = compactibleTimelineObjectHolderCursor.next();
        final SegmentsToCompact candidates = new SegmentsToCompact(segments);
        if (!candidates.isEmpty()) {
            final boolean isCompactibleSize = candidates.getTotalSize() <= inputSegmentSize;
            final boolean needsCompaction = needsCompaction(config, candidates);
            if (isCompactibleSize && needsCompaction) {
                if (config.getGranularitySpec() != null && config.getGranularitySpec().getSegmentGranularity() != null) {
                    Interval interval = candidates.getUmbrellaInterval();
                    Set<Interval> intervalsCompacted = intervalCompactedForDatasource.computeIfAbsent(dataSourceName, k -> new HashSet<>());
                    // Skip these candidates if we have already compacted this interval
                    if (intervalsCompacted.contains(interval)) {
                        continue;
                    }
                    intervalsCompacted.add(interval);
                }
                return candidates;
            } else {
                if (!needsCompaction) {
                    // Collect statistics for segments that are already compacted
                    collectSegmentStatistics(compactedSegments, dataSourceName, candidates);
                } else {
                    // Collect statistics for segments that are skipped
                    // Note that segments that do not need compaction are not double-counted here
                    collectSegmentStatistics(skippedSegments, dataSourceName, candidates);
                    log.warn(
                        "total segment size[%d] for datasource[%s] and interval[%s] is larger than inputSegmentSize[%d]."
                        + " Continue to the next interval.",
                        candidates.getTotalSize(),
                        candidates.segments.get(0).getDataSource(),
                        candidates.segments.get(0).getInterval(),
                        inputSegmentSize
                    );
                }
            }
        } else {
            throw new ISE("No segment is found?");
        }
    }
    log.info("All segments look good! Nothing to compact");
    return new SegmentsToCompact();
}
Also used : ISE(org.apache.druid.java.util.common.ISE) DataSegment(org.apache.druid.timeline.DataSegment) Interval(org.joda.time.Interval)
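
The selection loop above is, at its core, a latest-first scan that returns the first candidate group fitting a size budget. The following minimal, self-contained sketch shows just that control flow; the Candidate record and findFirstCompactible method are hypothetical stand-ins for Druid's SegmentsToCompact and cursor types, not Druid's actual API.

import java.util.Iterator;
import java.util.List;

// Hypothetical stand-ins illustrating the latest-first, size-bounded selection
// loop from findSegmentsToCompact above; not Druid's real types.
final class CompactionScan {
    record Candidate(List<Long> segmentSizes) {
        long totalSize() {
            return segmentSizes.stream().mapToLong(Long::longValue).sum();
        }
        boolean isEmpty() {
            return segmentSizes.isEmpty();
        }
    }

    /**
     * Walks candidate groups in time order (latest first) and returns the first
     * group whose total size fits within the input-size budget. Groups that are
     * too large are skipped, mirroring the "continue to the next interval" branch.
     */
    static Candidate findFirstCompactible(Iterator<Candidate> latestFirst, long inputSegmentSizeBytes) {
        while (latestFirst.hasNext()) {
            Candidate candidate = latestFirst.next();
            if (candidate.isEmpty()) {
                throw new IllegalStateException("No segment is found?");
            }
            if (candidate.totalSize() <= inputSegmentSizeBytes) {
                return candidate;
            }
            // Too large to compact in one task; move on to the next (earlier) interval.
        }
        return new Candidate(List.of()); // nothing left to compact
    }

    public static void main(String[] args) {
        Iterator<Candidate> cursor = List.of(
            new Candidate(List.of(900_000_000L, 400_000_000L)), // 1.3 GB: skipped
            new Candidate(List.of(200_000_000L, 300_000_000L))  // 0.5 GB: selected
        ).iterator();
        Candidate picked = findFirstCompactible(cursor, 1_000_000_000L);
        System.out.println("picked total size = " + picked.totalSize());
    }
}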

Example 2 with DataSegment

Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.

From the class CompactSegments, the method run:

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    LOG.info("Compact segments");
    final CoordinatorCompactionConfig dynamicConfig = params.getCoordinatorCompactionConfig();
    final CoordinatorStats stats = new CoordinatorStats();
    List<DataSourceCompactionConfig> compactionConfigList = dynamicConfig.getCompactionConfigs();
    if (dynamicConfig.getMaxCompactionTaskSlots() > 0) {
        Map<String, VersionedIntervalTimeline<String, DataSegment>> dataSources = params.getUsedSegmentsTimelinesPerDataSource();
        if (compactionConfigList != null && !compactionConfigList.isEmpty()) {
            Map<String, DataSourceCompactionConfig> compactionConfigs = compactionConfigList
                .stream()
                .collect(Collectors.toMap(DataSourceCompactionConfig::getDataSource, Function.identity()));
            final List<TaskStatusPlus> compactionTasks = filterNonCompactionTasks(indexingServiceClient.getActiveTasks());
            // dataSource -> list of intervals for which compaction will be skipped in this run
            final Map<String, List<Interval>> intervalsToSkipCompaction = new HashMap<>();
            int numEstimatedNonCompleteCompactionTasks = 0;
            for (TaskStatusPlus status : compactionTasks) {
                final TaskPayloadResponse response = indexingServiceClient.getTaskPayload(status.getId());
                if (response == null) {
                    throw new ISE("Got a null paylord from overlord for task[%s]", status.getId());
                }
                if (COMPACTION_TASK_TYPE.equals(response.getPayload().getType())) {
                    final ClientCompactionTaskQuery compactionTaskQuery = (ClientCompactionTaskQuery) response.getPayload();
                    DataSourceCompactionConfig dataSourceCompactionConfig = compactionConfigs.get(status.getDataSource());
                    if (dataSourceCompactionConfig != null && dataSourceCompactionConfig.getGranularitySpec() != null) {
                        Granularity configuredSegmentGranularity = dataSourceCompactionConfig.getGranularitySpec().getSegmentGranularity();
                        if (configuredSegmentGranularity != null
                            && compactionTaskQuery.getGranularitySpec() != null
                            && !configuredSegmentGranularity.equals(compactionTaskQuery.getGranularitySpec().getSegmentGranularity())) {
                            // We will cancel active compaction task if segmentGranularity changes and we will need to
                            // re-compact the interval
                            LOG.info("Canceled task[%s] as task segmentGranularity is [%s] but compaction config " + "segmentGranularity is [%s]", status.getId(), compactionTaskQuery.getGranularitySpec().getSegmentGranularity(), configuredSegmentGranularity);
                            indexingServiceClient.cancelTask(status.getId());
                            continue;
                        }
                    }
                    // Skip interval as the current active compaction task is good
                    final Interval interval = compactionTaskQuery.getIoConfig().getInputSpec().getInterval();
                    intervalsToSkipCompaction.computeIfAbsent(status.getDataSource(), k -> new ArrayList<>()).add(interval);
                    // Since we keep the current active compaction task running, we count the active task slots
                    numEstimatedNonCompleteCompactionTasks += findMaxNumTaskSlotsUsedByOneCompactionTask(compactionTaskQuery.getTuningConfig());
                } else {
                    throw new ISE("task[%s] is not a compactionTask", status.getId());
                }
            }
            // Skip all the intervals locked by higher priority tasks for each datasource
            // This must be done after the invalid compaction tasks are cancelled
            // in the loop above so that their intervals are not considered locked
            getLockedIntervalsToSkip(compactionConfigList).forEach(
                (dataSource, intervals) ->
                    intervalsToSkipCompaction.computeIfAbsent(dataSource, ds -> new ArrayList<>()).addAll(intervals)
            );
            final CompactionSegmentIterator iterator = policy.reset(compactionConfigs, dataSources, intervalsToSkipCompaction);
            int totalCapacity;
            if (dynamicConfig.isUseAutoScaleSlots()) {
                try {
                    totalCapacity = indexingServiceClient.getTotalWorkerCapacityWithAutoScale();
                } catch (Exception e) {
                    LOG.warn("Failed to get total worker capacity with auto scale slots. Falling back to current capacity count");
                    totalCapacity = indexingServiceClient.getTotalWorkerCapacity();
                }
            } else {
                totalCapacity = indexingServiceClient.getTotalWorkerCapacity();
            }
            final int compactionTaskCapacity = (int) Math.min(
                totalCapacity * dynamicConfig.getCompactionTaskSlotRatio(),
                dynamicConfig.getMaxCompactionTaskSlots()
            );
            final int numAvailableCompactionTaskSlots;
            if (numEstimatedNonCompleteCompactionTasks > 0) {
                numAvailableCompactionTaskSlots = Math.max(0, compactionTaskCapacity - numEstimatedNonCompleteCompactionTasks);
            } else {
                // compactionTaskCapacity might be 0 if totalWorkerCapacity is low.
                // This guarantees that at least one slot is available if
                // compaction is enabled and numEstimatedNonCompleteCompactionTasks is 0.
                numAvailableCompactionTaskSlots = Math.max(1, compactionTaskCapacity);
            }
            LOG.info("Found [%d] available task slots for compaction out of [%d] max compaction task capacity", numAvailableCompactionTaskSlots, compactionTaskCapacity);
            stats.addToGlobalStat(AVAILABLE_COMPACTION_TASK_SLOT, numAvailableCompactionTaskSlots);
            stats.addToGlobalStat(MAX_COMPACTION_TASK_SLOT, compactionTaskCapacity);
            final Map<String, AutoCompactionSnapshot.Builder> currentRunAutoCompactionSnapshotBuilders = new HashMap<>();
            if (numAvailableCompactionTaskSlots > 0) {
                stats.accumulate(doRun(compactionConfigs, currentRunAutoCompactionSnapshotBuilders, numAvailableCompactionTaskSlots, iterator));
            } else {
                stats.accumulate(makeStats(currentRunAutoCompactionSnapshotBuilders, 0, iterator));
            }
        } else {
            LOG.info("compactionConfig is empty. Skip.");
            autoCompactionSnapshotPerDataSource.set(new HashMap<>());
        }
    } else {
        LOG.info("maxCompactionTaskSlots was set to 0. Skip compaction");
        autoCompactionSnapshotPerDataSource.set(new HashMap<>());
    }
    return params.buildFromExisting().withCoordinatorStats(stats).build();
}
Also used : Logger(org.apache.druid.java.util.common.logger.Logger) Granularity(org.apache.druid.java.util.common.granularity.Granularity) Inject(com.google.inject.Inject) ClientCompactionTaskDimensionsSpec(org.apache.druid.client.indexing.ClientCompactionTaskDimensionsSpec) DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams) IndexingServiceClient(org.apache.druid.client.indexing.IndexingServiceClient) HashMap(java.util.HashMap) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) AtomicReference(java.util.concurrent.atomic.AtomicReference) Function(java.util.function.Function) AutoCompactionSnapshot(org.apache.druid.server.coordinator.AutoCompactionSnapshot) DataSourceCompactionConfig(org.apache.druid.server.coordinator.DataSourceCompactionConfig) ArrayList(java.util.ArrayList) TaskPayloadResponse(org.apache.druid.client.indexing.TaskPayloadResponse) Interval(org.joda.time.Interval) DruidCoordinatorConfig(org.apache.druid.server.coordinator.DruidCoordinatorConfig) Map(java.util.Map) IAE(org.apache.druid.java.util.common.IAE) DimensionRangePartitionsSpec(org.apache.druid.indexer.partitions.DimensionRangePartitionsSpec) Nullable(javax.annotation.Nullable) ClientCompactionTaskTransformSpec(org.apache.druid.client.indexing.ClientCompactionTaskTransformSpec) JacksonInject(com.fasterxml.jackson.annotation.JacksonInject) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) ClientCompactionTaskQueryTuningConfig(org.apache.druid.client.indexing.ClientCompactionTaskQueryTuningConfig) CompactionStatistics(org.apache.druid.server.coordinator.CompactionStatistics) CoordinatorCompactionConfig(org.apache.druid.server.coordinator.CoordinatorCompactionConfig) ISE(org.apache.druid.java.util.common.ISE) Collectors(java.util.stream.Collectors) ClientCompactionTaskQuery(org.apache.druid.client.indexing.ClientCompactionTaskQuery) TaskStatusPlus(org.apache.druid.indexer.TaskStatusPlus) List(java.util.List) ClientCompactionTaskGranularitySpec(org.apache.druid.client.indexing.ClientCompactionTaskGranularitySpec) GranularityType(org.apache.druid.java.util.common.granularity.GranularityType) JsonCreator(com.fasterxml.jackson.annotation.JsonCreator) DataSegment(org.apache.druid.timeline.DataSegment) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
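
The slot accounting in the middle of run() is worth isolating: capacity is the worker total scaled by a ratio and capped by a maximum, and at least one slot is guaranteed when no compaction task is in flight. The sketch below reproduces just that arithmetic as plain Java; the method and parameter names are illustrative, not Druid's.

// A minimal sketch of the compaction task-slot arithmetic shown above.
// Names are hypothetical; only the arithmetic mirrors the Druid code.
final class CompactionSlots {
    /**
     * Computes how many task slots a compaction run may use.
     *
     * @param totalWorkerCapacity           total task slots in the cluster
     * @param compactionTaskSlotRatio       fraction of capacity reserved for compaction
     * @param maxCompactionTaskSlots        hard cap from the coordinator config
     * @param numNonCompleteCompactionTasks slots already held by running compaction tasks
     */
    static int availableCompactionTaskSlots(
        int totalWorkerCapacity,
        double compactionTaskSlotRatio,
        int maxCompactionTaskSlots,
        int numNonCompleteCompactionTasks
    ) {
        final int capacity = (int) Math.min(
            totalWorkerCapacity * compactionTaskSlotRatio,
            maxCompactionTaskSlots
        );
        if (numNonCompleteCompactionTasks > 0) {
            // Subtract slots held by in-flight tasks, never going negative.
            return Math.max(0, capacity - numNonCompleteCompactionTasks);
        }
        // capacity may round down to 0 on small clusters; guarantee one slot
        // when compaction is enabled and nothing is currently running.
        return Math.max(1, capacity);
    }

    public static void main(String[] args) {
        // 10 workers at a 10% ratio give capacity 1; one running task leaves 0 slots.
        System.out.println(availableCompactionTaskSlots(10, 0.1, 5, 1)); // 0
        // 5 workers at a 10% ratio round down to capacity 0, but the floor of 1 applies when idle.
        System.out.println(availableCompactionTaskSlots(5, 0.1, 5, 0));  // 1
    }
}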

Example 3 with DataSegment

Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.

From the class DataSourcesResource, the method computeSegmentLoadStatistics:

private SegmentsLoadStatistics computeSegmentLoadStatistics(Iterable<DataSegment> segments) {
    Map<SegmentId, SegmentLoadInfo> segmentLoadInfos = serverInventoryView.getSegmentLoadInfos();
    int numPublishedSegments = 0;
    int numUnavailableSegments = 0;
    int numLoadedSegments = 0;
    for (DataSegment segment : segments) {
        numPublishedSegments++;
        if (!segmentLoadInfos.containsKey(segment.getId())) {
            numUnavailableSegments++;
        } else {
            numLoadedSegments++;
        }
    }
    return new SegmentsLoadStatistics(numPublishedSegments, numUnavailableSegments, numLoadedSegments);
}
Also used : SegmentId(org.apache.druid.timeline.SegmentId) SegmentLoadInfo(org.apache.druid.client.SegmentLoadInfo) ImmutableSegmentLoadInfo(org.apache.druid.client.ImmutableSegmentLoadInfo) DataSegment(org.apache.druid.timeline.DataSegment)
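
Note that the three counters partition the input exactly: every published segment is counted as either loaded or unavailable, so numPublishedSegments always equals numLoadedSegments + numUnavailableSegments.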

Example 4 with DataSegment

Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.

From the class DataSourcesResource, the method getDataSource:

@Nullable
private ImmutableDruidDataSource getDataSource(final String dataSourceName) {
    List<DruidDataSource> dataSources = serverInventoryView
        .getInventory()
        .stream()
        .map(server -> server.getDataSource(dataSourceName))
        .filter(Objects::nonNull)
        .collect(Collectors.toList());
    if (dataSources.isEmpty()) {
        return null;
    }
    // Note: this logic doesn't guarantee that the result is a snapshot that ever existed in the cluster because all
    // DruidDataSource objects (belonging to different servers) are independently, concurrently mutable objects.
    // But this is OK because a "snapshot" hardly even makes sense in a distributed system anyway.
    final SortedMap<SegmentId, DataSegment> segmentMap = new TreeMap<>();
    for (DruidDataSource dataSource : dataSources) {
        Iterable<DataSegment> segments = dataSource.getSegments();
        for (DataSegment segment : segments) {
            segmentMap.put(segment.getId(), segment);
        }
    }
    return new ImmutableDruidDataSource(dataSourceName, Collections.emptyMap(), segmentMap);
}
Also used : ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource) SegmentId(org.apache.druid.timeline.SegmentId) TreeMap(java.util.TreeMap) DruidDataSource(org.apache.druid.client.DruidDataSource) ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource) DataSegment(org.apache.druid.timeline.DataSegment) Nullable(javax.annotation.Nullable)
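
The TreeMap keyed by SegmentId is what turns several independently mutating per-server views into one deterministic, duplicate-free listing. A minimal sketch of that merge pattern, using plain String ids in place of Druid's Comparable SegmentId, might look like this:

import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;

// Sketch of the merge pattern above: several per-server segment listings,
// possibly overlapping, collapse into one sorted, duplicate-free snapshot.
final class SegmentMerge {
    static SortedMap<String, String> mergeServerViews(List<Map<String, String>> perServerSegments) {
        final SortedMap<String, String> merged = new TreeMap<>();
        for (Map<String, String> serverView : perServerSegments) {
            // putAll deduplicates by id; iteration order of the result is the
            // natural ordering of the keys, independent of server order.
            merged.putAll(serverView);
        }
        return merged;
    }

    public static void main(String[] args) {
        SortedMap<String, String> snapshot = mergeServerViews(List.of(
            Map.of("ds_2024-01-01_v1_0", "server-a", "ds_2024-01-02_v1_0", "server-a"),
            Map.of("ds_2024-01-02_v1_0", "server-b") // duplicate id collapses
        ));
        System.out.println(snapshot.keySet()); // [ds_2024-01-01_v1_0, ds_2024-01-02_v1_0]
    }
}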

Example 5 with DataSegment

Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.

From the class MarkAsUnusedOvershadowedSegments, the method run:

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    // Mark as unused overshadowed segments only if we've had enough time to make sure we aren't flapping with old data.
    if (!params.coordinatorIsLeadingEnoughTimeToMarkAsUnusedOvershadowedSegements()) {
        return params;
    }
    CoordinatorStats stats = new CoordinatorStats();
    DruidCluster cluster = params.getDruidCluster();
    Map<String, VersionedIntervalTimeline<String, DataSegment>> timelines = new HashMap<>();
    for (SortedSet<ServerHolder> serverHolders : cluster.getSortedHistoricalsByTier()) {
        for (ServerHolder serverHolder : serverHolders) {
            addSegmentsFromServer(serverHolder, timelines);
        }
    }
    for (ServerHolder serverHolder : cluster.getBrokers()) {
        addSegmentsFromServer(serverHolder, timelines);
    }
    // Mark as unused in the DB all used segments that are overshadowed by served segments
    for (DataSegment dataSegment : params.getUsedSegments()) {
        VersionedIntervalTimeline<String, DataSegment> timeline = timelines.get(dataSegment.getDataSource());
        if (timeline != null && timeline.isOvershadowed(dataSegment.getInterval(), dataSegment.getVersion(), dataSegment)) {
            coordinator.markSegmentAsUnused(dataSegment);
            stats.addToGlobalStat("overShadowedCount", 1);
        }
    }
    return params.buildFromExisting().withCoordinatorStats(stats).build();
}
Also used : CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) HashMap(java.util.HashMap) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) DruidCluster(org.apache.druid.server.coordinator.DruidCluster) DataSegment(org.apache.druid.timeline.DataSegment)
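
Overshadowing itself reduces to an interval-and-version comparison: a segment is overshadowed when served segments of the same datasource fully cover its interval with a strictly higher version. The sketch below models that rule directly with a hypothetical SimpleSegment record rather than Druid's VersionedIntervalTimeline, whose real implementation also handles partitioning and partially overlapping chunks.

import java.util.List;

// Simplified model of the overshadow check above. Druid's
// VersionedIntervalTimeline also accounts for partitions and partially
// overlapping chunks; this sketch keeps only the interval/version rule.
final class OvershadowCheck {
    record SimpleSegment(String dataSource, long startMillis, long endMillis, String version) {}

    /** True if some served segment on the same datasource fully covers
     *  the candidate's interval with a strictly newer version. */
    static boolean isOvershadowed(SimpleSegment candidate, List<SimpleSegment> served) {
        for (SimpleSegment s : served) {
            boolean sameSource = s.dataSource().equals(candidate.dataSource());
            boolean covers = s.startMillis() <= candidate.startMillis()
                && s.endMillis() >= candidate.endMillis();
            boolean newer = s.version().compareTo(candidate.version()) > 0;
            if (sameSource && covers && newer) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        SimpleSegment old = new SimpleSegment("wiki", 0L, 100L, "2024-01-01");
        List<SimpleSegment> served = List.of(new SimpleSegment("wiki", 0L, 100L, "2024-02-01"));
        System.out.println(isOvershadowed(old, served)); // true -> would be marked unused
    }
}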

Aggregations

DataSegment (org.apache.druid.timeline.DataSegment): 612
Test (org.junit.Test): 386
ArrayList (java.util.ArrayList): 161
Interval (org.joda.time.Interval): 158
File (java.io.File): 138
Map (java.util.Map): 110
List (java.util.List): 108
ImmutableList (com.google.common.collect.ImmutableList): 77
IOException (java.io.IOException): 77
HashMap (java.util.HashMap): 74
ImmutableMap (com.google.common.collect.ImmutableMap): 72
NumberedShardSpec (org.apache.druid.timeline.partition.NumberedShardSpec): 68
HashSet (java.util.HashSet): 58
TaskStatus (org.apache.druid.indexer.TaskStatus): 53
Collectors (java.util.stream.Collectors): 52
Set (java.util.Set): 50
CountDownLatch (java.util.concurrent.CountDownLatch): 50
ISE (org.apache.druid.java.util.common.ISE): 50
SegmentId (org.apache.druid.timeline.SegmentId): 47
LinearShardSpec (org.apache.druid.timeline.partition.LinearShardSpec): 45