Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
Class NewestSegmentFirstIterator, method findSegmentsToCompact.
/**
 * Finds segments to compact together for the given intervalToSearch. It progressively searches the given
 * intervalToSearch in time order (latest first). The timeline lookup duration is one day: the timeline is
 * looked up for the last day of the given intervalToSearch, and the preceding day is searched next if the
 * segments found so far are not large enough to compact. This is repeated until enough segments are found.
 *
 * @return segments to compact
 */
private SegmentsToCompact findSegmentsToCompact(
    final String dataSourceName,
    final CompactibleTimelineObjectHolderCursor compactibleTimelineObjectHolderCursor,
    final DataSourceCompactionConfig config
) {
  final long inputSegmentSize = config.getInputSegmentSizeBytes();
  while (compactibleTimelineObjectHolderCursor.hasNext()) {
    List<DataSegment> segments = compactibleTimelineObjectHolderCursor.next();
    final SegmentsToCompact candidates = new SegmentsToCompact(segments);
    if (!candidates.isEmpty()) {
      final boolean isCompactibleSize = candidates.getTotalSize() <= inputSegmentSize;
      final boolean needsCompaction = needsCompaction(config, candidates);
      if (isCompactibleSize && needsCompaction) {
        if (config.getGranularitySpec() != null && config.getGranularitySpec().getSegmentGranularity() != null) {
          Interval interval = candidates.getUmbrellaInterval();
          Set<Interval> intervalsCompacted =
              intervalCompactedForDatasource.computeIfAbsent(dataSourceName, k -> new HashSet<>());
          // Skip these candidates if we have already compacted the interval
          if (intervalsCompacted.contains(interval)) {
            continue;
          }
          intervalsCompacted.add(interval);
        }
        return candidates;
      } else {
        if (!needsCompaction) {
          // Collect statistics for segments that are already compacted
          collectSegmentStatistics(compactedSegments, dataSourceName, candidates);
        } else {
          // Collect statistics for segments that are skipped.
          // Note that if the segments do not need compaction, we do not double count here.
          collectSegmentStatistics(skippedSegments, dataSourceName, candidates);
          log.warn(
              "total segment size[%d] for datasource[%s] and interval[%s] is larger than inputSegmentSize[%d]."
              + " Continue to the next interval.",
              candidates.getTotalSize(),
              candidates.segments.get(0).getDataSource(),
              candidates.segments.get(0).getInterval(),
              inputSegmentSize
          );
        }
      }
    } else {
      throw new ISE("No segment is found?");
    }
  }
  log.info("All segments look good! Nothing to compact");
  return new SegmentsToCompact();
}
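The javadoc above describes a newest-first search that keeps walking back in time until it finds a candidate set small enough to compact. The following is a minimal, self-contained sketch of that selection idea only; the SizedChunk type, the findCandidate helper, and the maxBytes limit are hypothetical stand-ins for Druid's timeline holders and inputSegmentSizeBytes, not the project's actual API.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Optional;

// Hypothetical sketch of "newest segment first" candidate selection: walk time
// chunks from latest to earliest and return the first chunk whose total byte
// size fits under the configured input-segment-size limit, skipping the rest.
public class NewestFirstSelectionSketch {
  // Stand-in for a timeline holder: an end-time ordering key plus a total size.
  record SizedChunk(long endMillis, long totalSizeBytes) {}

  static Optional<SizedChunk> findCandidate(List<SizedChunk> chunks, long maxBytes) {
    List<SizedChunk> ordered = new ArrayList<>(chunks);
    // Latest interval first, mirroring the iterator's search order.
    ordered.sort(Comparator.comparingLong(SizedChunk::endMillis).reversed());

    for (SizedChunk chunk : ordered) {
      if (chunk.totalSizeBytes() <= maxBytes) {
        // The first chunk that fits the size limit becomes the compaction candidate.
        return Optional.of(chunk);
      }
      // Oversized chunks are skipped; the real iterator logs a warning and moves on.
    }
    return Optional.empty();
  }

  public static void main(String[] args) {
    List<SizedChunk> chunks = List.of(
        new SizedChunk(3_000L, 5_000L),  // newest, but too large: skipped
        new SizedChunk(2_000L, 400L),    // selected
        new SizedChunk(1_000L, 300L)
    );
    System.out.println(findCandidate(chunks, 1_000L));
  }
}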
Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
Class CompactSegments, method run.
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
  LOG.info("Compact segments");
  final CoordinatorCompactionConfig dynamicConfig = params.getCoordinatorCompactionConfig();
  final CoordinatorStats stats = new CoordinatorStats();
  List<DataSourceCompactionConfig> compactionConfigList = dynamicConfig.getCompactionConfigs();
  if (dynamicConfig.getMaxCompactionTaskSlots() > 0) {
    Map<String, VersionedIntervalTimeline<String, DataSegment>> dataSources =
        params.getUsedSegmentsTimelinesPerDataSource();
    if (compactionConfigList != null && !compactionConfigList.isEmpty()) {
      Map<String, DataSourceCompactionConfig> compactionConfigs = compactionConfigList
          .stream()
          .collect(Collectors.toMap(DataSourceCompactionConfig::getDataSource, Function.identity()));
      final List<TaskStatusPlus> compactionTasks =
          filterNonCompactionTasks(indexingServiceClient.getActiveTasks());
      // dataSource -> list of intervals for which compaction will be skipped in this run
      final Map<String, List<Interval>> intervalsToSkipCompaction = new HashMap<>();
      int numEstimatedNonCompleteCompactionTasks = 0;
      for (TaskStatusPlus status : compactionTasks) {
        final TaskPayloadResponse response = indexingServiceClient.getTaskPayload(status.getId());
        if (response == null) {
          throw new ISE("Got a null payload from overlord for task[%s]", status.getId());
        }
        if (COMPACTION_TASK_TYPE.equals(response.getPayload().getType())) {
          final ClientCompactionTaskQuery compactionTaskQuery = (ClientCompactionTaskQuery) response.getPayload();
          DataSourceCompactionConfig dataSourceCompactionConfig = compactionConfigs.get(status.getDataSource());
          if (dataSourceCompactionConfig != null && dataSourceCompactionConfig.getGranularitySpec() != null) {
            Granularity configuredSegmentGranularity =
                dataSourceCompactionConfig.getGranularitySpec().getSegmentGranularity();
            if (configuredSegmentGranularity != null
                && compactionTaskQuery.getGranularitySpec() != null
                && !configuredSegmentGranularity.equals(compactionTaskQuery.getGranularitySpec().getSegmentGranularity())) {
              // Cancel the active compaction task if segmentGranularity has changed,
              // since the interval will need to be re-compacted.
              LOG.info(
                  "Canceled task[%s] as task segmentGranularity is [%s] but compaction config "
                  + "segmentGranularity is [%s]",
                  status.getId(),
                  compactionTaskQuery.getGranularitySpec().getSegmentGranularity(),
                  configuredSegmentGranularity
              );
              indexingServiceClient.cancelTask(status.getId());
              continue;
            }
          }
          // Skip this interval since the current active compaction task is still valid
          final Interval interval = compactionTaskQuery.getIoConfig().getInputSpec().getInterval();
          intervalsToSkipCompaction.computeIfAbsent(status.getDataSource(), k -> new ArrayList<>()).add(interval);
          // Since we keep the current active compaction task running, count the task slots it uses
          numEstimatedNonCompleteCompactionTasks +=
              findMaxNumTaskSlotsUsedByOneCompactionTask(compactionTaskQuery.getTuningConfig());
        } else {
          throw new ISE("task[%s] is not a compactionTask", status.getId());
        }
      }
      // Skip all the intervals locked by higher-priority tasks for each datasource.
      // This must be done after the invalid compaction tasks are cancelled
      // in the loop above so that their intervals are not considered locked.
      getLockedIntervalsToSkip(compactionConfigList).forEach(
          (dataSource, intervals) ->
              intervalsToSkipCompaction.computeIfAbsent(dataSource, ds -> new ArrayList<>()).addAll(intervals)
      );
      final CompactionSegmentIterator iterator =
          policy.reset(compactionConfigs, dataSources, intervalsToSkipCompaction);
      int totalCapacity;
      if (dynamicConfig.isUseAutoScaleSlots()) {
        try {
          totalCapacity = indexingServiceClient.getTotalWorkerCapacityWithAutoScale();
        } catch (Exception e) {
          LOG.warn("Failed to get total worker capacity with auto scale slots. Falling back to current capacity count");
          totalCapacity = indexingServiceClient.getTotalWorkerCapacity();
        }
      } else {
        totalCapacity = indexingServiceClient.getTotalWorkerCapacity();
      }
      final int compactionTaskCapacity = (int) Math.min(
          totalCapacity * dynamicConfig.getCompactionTaskSlotRatio(),
          dynamicConfig.getMaxCompactionTaskSlots()
      );
      final int numAvailableCompactionTaskSlots;
      if (numEstimatedNonCompleteCompactionTasks > 0) {
        numAvailableCompactionTaskSlots = Math.max(0, compactionTaskCapacity - numEstimatedNonCompleteCompactionTasks);
      } else {
        // compactionTaskCapacity might be 0 if totalWorkerCapacity is low.
        // This guarantees that at least one slot is available if
        // compaction is enabled and numEstimatedNonCompleteCompactionTasks is 0.
        numAvailableCompactionTaskSlots = Math.max(1, compactionTaskCapacity);
      }
      LOG.info(
          "Found [%d] available task slots for compaction out of [%d] max compaction task capacity",
          numAvailableCompactionTaskSlots,
          compactionTaskCapacity
      );
      stats.addToGlobalStat(AVAILABLE_COMPACTION_TASK_SLOT, numAvailableCompactionTaskSlots);
      stats.addToGlobalStat(MAX_COMPACTION_TASK_SLOT, compactionTaskCapacity);
      final Map<String, AutoCompactionSnapshot.Builder> currentRunAutoCompactionSnapshotBuilders = new HashMap<>();
      if (numAvailableCompactionTaskSlots > 0) {
        stats.accumulate(
            doRun(compactionConfigs, currentRunAutoCompactionSnapshotBuilders, numAvailableCompactionTaskSlots, iterator)
        );
      } else {
        stats.accumulate(makeStats(currentRunAutoCompactionSnapshotBuilders, 0, iterator));
      }
    } else {
      LOG.info("compactionConfig is empty. Skip.");
      autoCompactionSnapshotPerDataSource.set(new HashMap<>());
    }
  } else {
    LOG.info("maxCompactionTaskSlots was set to 0. Skip compaction");
    autoCompactionSnapshotPerDataSource.set(new HashMap<>());
  }
  return params.buildFromExisting().withCoordinatorStats(stats).build();
}
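The run method above derives the compaction task budget from overall worker capacity: the capacity is capped by both the slot ratio and maxCompactionTaskSlots, and at least one slot is guaranteed when no compaction tasks are already running. Below is a minimal sketch of that arithmetic; the class and method names are illustrative, not Druid's API, but the formulas mirror the code above.

// Hypothetical sketch of the compaction slot arithmetic used above.
public final class CompactionSlotMath {
  private CompactionSlotMath() {}

  // capacity = min(totalWorkerCapacity * slotRatio, maxCompactionTaskSlots)
  static int compactionTaskCapacity(int totalWorkerCapacity, double slotRatio, int maxCompactionTaskSlots) {
    return (int) Math.min(totalWorkerCapacity * slotRatio, maxCompactionTaskSlots);
  }

  // If compaction tasks are already running, subtract their estimated slots;
  // otherwise guarantee at least one slot so a low capacity cannot starve compaction.
  static int availableCompactionTaskSlots(int capacity, int estimatedRunningCompactionTaskSlots) {
    return estimatedRunningCompactionTaskSlots > 0
           ? Math.max(0, capacity - estimatedRunningCompactionTaskSlots)
           : Math.max(1, capacity);
  }

  public static void main(String[] args) {
    int capacity = compactionTaskCapacity(10, 0.1, Integer.MAX_VALUE);  // -> 1
    System.out.println(availableCompactionTaskSlots(capacity, 0));      // -> 1 (guaranteed minimum)
    System.out.println(availableCompactionTaskSlots(capacity, 3));      // -> 0 (budget already consumed)
  }
}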
Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
Class DataSourcesResource, method computeSegmentLoadStatistics.
private SegmentsLoadStatistics computeSegmentLoadStatistics(Iterable<DataSegment> segments) {
  Map<SegmentId, SegmentLoadInfo> segmentLoadInfos = serverInventoryView.getSegmentLoadInfos();
  int numPublishedSegments = 0;
  int numUnavailableSegments = 0;
  int numLoadedSegments = 0;
  for (DataSegment segment : segments) {
    numPublishedSegments++;
    if (!segmentLoadInfos.containsKey(segment.getId())) {
      // A published segment with no load info is not served by any server yet.
      numUnavailableSegments++;
    } else {
      numLoadedSegments++;
    }
  }
  return new SegmentsLoadStatistics(numPublishedSegments, numUnavailableSegments, numLoadedSegments);
}
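As a small follow-up, the three counters can be turned into the familiar "percent available" figure. The helper below is illustrative only and not part of DataSourcesResource; its parameter names simply mirror the local variables above.

// Hypothetical helper: derive a load percentage from the counters computed above.
static double percentSegmentsLoaded(int numPublishedSegments, int numLoadedSegments) {
  if (numPublishedSegments == 0) {
    // No published segments means there is nothing left to load; report fully loaded.
    return 100.0;
  }
  return 100.0 * numLoadedSegments / numPublishedSegments;
}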
Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
Class DataSourcesResource, method getDataSource.
@Nullable
private ImmutableDruidDataSource getDataSource(final String dataSourceName) {
  List<DruidDataSource> dataSources = serverInventoryView
      .getInventory()
      .stream()
      .map(server -> server.getDataSource(dataSourceName))
      .filter(Objects::nonNull)
      .collect(Collectors.toList());
  if (dataSources.isEmpty()) {
    return null;
  }
  // Note: this logic doesn't guarantee that the result is a snapshot that ever existed in the cluster, because all
  // DruidDataSource objects (belonging to different servers) are independently, concurrently mutable objects.
  // But this is OK because a "snapshot" hardly even makes sense in a distributed system anyway.
  final SortedMap<SegmentId, DataSegment> segmentMap = new TreeMap<>();
  for (DruidDataSource dataSource : dataSources) {
    Iterable<DataSegment> segments = dataSource.getSegments();
    for (DataSegment segment : segments) {
      segmentMap.put(segment.getId(), segment);
    }
  }
  return new ImmutableDruidDataSource(dataSourceName, Collections.emptyMap(), segmentMap);
}
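The method above merges per-server views of a datasource into one map keyed by segment id, which is why a segment served by several servers appears only once in the result. The sketch below illustrates that merge pattern in isolation; the Segment record and mergeByServer helper are hypothetical simplifications, not Druid types.

import java.util.List;
import java.util.Map;
import java.util.TreeMap;

// Hypothetical sketch of the merge pattern above: per-server segment lists are
// collapsed into one map keyed by segment id, so a segment replicated across
// servers appears exactly once in the merged datasource view.
public class MergedSegmentViewSketch {
  record Segment(String id, long sizeBytes) {}

  static Map<String, Segment> mergeByServer(List<List<Segment>> perServerSegments) {
    TreeMap<String, Segment> merged = new TreeMap<>();
    for (List<Segment> serverSegments : perServerSegments) {
      for (Segment segment : serverSegments) {
        // Later entries with the same id overwrite earlier ones; they describe the same segment.
        merged.put(segment.id(), segment);
      }
    }
    return merged;
  }

  public static void main(String[] args) {
    Segment s1 = new Segment("ds_2024-01-01_v1_0", 100L);
    Segment s2 = new Segment("ds_2024-01-02_v1_0", 200L);
    // s1 is served by two servers but counted once in the merged view.
    System.out.println(mergeByServer(List.of(List.of(s1, s2), List.of(s1))).size());  // -> 2
  }
}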
Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
Class MarkAsUnusedOvershadowedSegments, method run.
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
  // Mark overshadowed segments as unused only if the coordinator has been leading long enough
  // to be sure we aren't flapping with old data.
  if (!params.coordinatorIsLeadingEnoughTimeToMarkAsUnusedOvershadowedSegements()) {
    return params;
  }
  CoordinatorStats stats = new CoordinatorStats();
  DruidCluster cluster = params.getDruidCluster();
  Map<String, VersionedIntervalTimeline<String, DataSegment>> timelines = new HashMap<>();
  for (SortedSet<ServerHolder> serverHolders : cluster.getSortedHistoricalsByTier()) {
    for (ServerHolder serverHolder : serverHolders) {
      addSegmentsFromServer(serverHolder, timelines);
    }
  }
  for (ServerHolder serverHolder : cluster.getBrokers()) {
    addSegmentsFromServer(serverHolder, timelines);
  }
  // Mark as unused in the metadata store all used segments that are overshadowed by served segments
  for (DataSegment dataSegment : params.getUsedSegments()) {
    VersionedIntervalTimeline<String, DataSegment> timeline = timelines.get(dataSegment.getDataSource());
    if (timeline != null && timeline.isOvershadowed(dataSegment.getInterval(), dataSegment.getVersion(), dataSegment)) {
      coordinator.markSegmentAsUnused(dataSegment);
      stats.addToGlobalStat("overShadowedCount", 1);
    }
  }
  return params.buildFromExisting().withCoordinatorStats(stats).build();
}
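To make the overshadow check above concrete, here is a small, self-contained sketch of the basic rule the timeline applies: a segment is overshadowed when served segments with a higher version fully cover its interval. The SimpleInterval and VersionedSegment types are hypothetical simplifications (the real VersionedIntervalTimeline also accounts for partition completeness), not Druid's API.

import java.util.List;

// Hypothetical, simplified overshadow rule: a segment is overshadowed when a
// higher-versioned served segment fully covers its interval.
public class OvershadowSketch {
  record SimpleInterval(long startMillis, long endMillis) {
    boolean covers(SimpleInterval other) {
      return startMillis <= other.startMillis() && endMillis >= other.endMillis();
    }
  }

  record VersionedSegment(SimpleInterval interval, String version) {}

  static boolean isOvershadowed(VersionedSegment candidate, List<VersionedSegment> servedSegments) {
    for (VersionedSegment served : servedSegments) {
      // Versions are compared lexicographically, like Druid's timestamp-based version strings.
      if (served.version().compareTo(candidate.version()) > 0
          && served.interval().covers(candidate.interval())) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    VersionedSegment old = new VersionedSegment(new SimpleInterval(0L, 100L), "2024-01-01T00:00:00Z");
    VersionedSegment reindexed = new VersionedSegment(new SimpleInterval(0L, 100L), "2024-02-01T00:00:00Z");
    System.out.println(isOvershadowed(old, List.of(reindexed)));  // -> true: the old version would be marked unused
  }
}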