Use of io.druid.server.coordinator.DatasourceWhitelist in project druid by druid-io: the run method of the DruidCoordinatorVersionConverter class.
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    DatasourceWhitelist whitelist = whitelistRef.get();
    for (DataSegment dataSegment : params.getAvailableSegments()) {
        // A null whitelist means none is configured, so every datasource is eligible.
        if (whitelist == null || whitelist.contains(dataSegment.getDataSource())) {
            final Integer binaryVersion = dataSegment.getBinaryVersion();
            // Upgrade segments whose binary version is missing or older than the current one.
            if (binaryVersion == null || binaryVersion < IndexIO.CURRENT_VERSION_ID) {
                log.info("Upgrading version on segment[%s]", dataSegment.getIdentifier());
                indexingServiceClient.upgradeSegment(dataSegment);
            }
        }
    }
    return params;
}
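The whitelist reference above is read once per run and treated as optional. A minimal, self-contained sketch of that null-means-allow-all idiom follows; SimpleWhitelist and WhitelistDemo are hypothetical stand-ins for DatasourceWhitelist (which, in Druid, is populated from JSON configuration) and the coordinator helper:

import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;

// Hypothetical stand-in for DatasourceWhitelist: a thin wrapper around a set
// of datasource names.
class SimpleWhitelist {
    private final Set<String> datasources;

    SimpleWhitelist(Set<String> datasources) {
        this.datasources = datasources;
    }

    boolean contains(String dataSource) {
        return datasources.contains(dataSource);
    }
}

class WhitelistDemo {
    // A null reference means "no whitelist configured": all datasources are eligible.
    private static final AtomicReference<SimpleWhitelist> whitelistRef =
        new AtomicReference<>(null);

    static boolean isEligible(String dataSource) {
        SimpleWhitelist whitelist = whitelistRef.get();
        return whitelist == null || whitelist.contains(dataSource);
    }

    public static void main(String[] args) {
        System.out.println(isEligible("wikipedia")); // true: no whitelist set yet
        whitelistRef.set(new SimpleWhitelist(Set.of("wikipedia")));
        System.out.println(isEligible("twitter"));   // false: not on the whitelist
    }
}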
Use of io.druid.server.coordinator.DatasourceWhitelist in project druid by druid-io: the run method of the DruidCoordinatorSegmentMerger class.
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    DatasourceWhitelist whitelist = whiteListRef.get();
    CoordinatorStats stats = new CoordinatorStats();
    Map<String, VersionedIntervalTimeline<String, DataSegment>> dataSources = Maps.newHashMap();
    // Find serviced segments by using a timeline, one per whitelisted datasource
    for (DataSegment dataSegment : params.getAvailableSegments()) {
        if (whitelist == null || whitelist.contains(dataSegment.getDataSource())) {
            VersionedIntervalTimeline<String, DataSegment> timeline = dataSources.get(dataSegment.getDataSource());
            if (timeline == null) {
                timeline = new VersionedIntervalTimeline<String, DataSegment>(Ordering.<String>natural());
                dataSources.put(dataSegment.getDataSource(), timeline);
            }
            timeline.add(dataSegment.getInterval(), dataSegment.getVersion(), dataSegment.getShardSpec().createChunk(dataSegment));
        }
    }
    // Find segments to merge
    for (final Map.Entry<String, VersionedIntervalTimeline<String, DataSegment>> entry : dataSources.entrySet()) {
        // Get serviced segments from the timeline
        VersionedIntervalTimeline<String, DataSegment> timeline = entry.getValue();
        List<TimelineObjectHolder<String, DataSegment>> timelineObjects =
            timeline.lookup(new Interval(new DateTime(0), new DateTime("3000-01-01")));
        // Accumulate timelineObjects greedily until we reach our limits, then backtrack to the maximum complete set
        SegmentsToMerge segmentsToMerge = new SegmentsToMerge();
        for (int i = 0; i < timelineObjects.size(); i++) {
            if (!segmentsToMerge.add(timelineObjects.get(i))
                || segmentsToMerge.getByteCount() > params.getCoordinatorDynamicConfig().getMergeBytesLimit()
                || segmentsToMerge.getSegmentCount() >= params.getCoordinatorDynamicConfig().getMergeSegmentsLimit()) {
                // Rewind i by however many objects were dropped so they are reconsidered next pass.
                i -= segmentsToMerge.backtrack(params.getCoordinatorDynamicConfig().getMergeBytesLimit());
                if (segmentsToMerge.getSegmentCount() > 1) {
                    stats.addToGlobalStat("mergedCount", mergeSegments(segmentsToMerge, entry.getKey()));
                }
                if (segmentsToMerge.getSegmentCount() == 0) {
                    // Backtracked all the way to zero. Increment by one so we continue to make progress.
                    i++;
                }
                segmentsToMerge = new SegmentsToMerge();
            }
        }
        // Finish any accumulated timelineObjects that never hit a limit
        segmentsToMerge.backtrack(params.getCoordinatorDynamicConfig().getMergeBytesLimit());
        if (segmentsToMerge.getSegmentCount() > 1) {
            stats.addToGlobalStat("mergedCount", mergeSegments(segmentsToMerge, entry.getKey()));
        }
    }
    log.info("Issued merge requests for %s segments", stats.getGlobalStats().get("mergedCount").get());
    params.getEmitter().emit(new ServiceMetricEvent.Builder().build("coordinator/merge/count", stats.getGlobalStats().get("mergedCount")));
    return params.buildFromExisting().withCoordinatorStats(stats).build();
}
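The core of this method is the greedy accumulate-then-backtrack loop. A minimal, self-contained sketch of that pattern in isolation follows; GreedyBatcher, Batch, the item sizes, and the byte limit are hypothetical stand-ins (the real SegmentsToMerge.add can also reject incompatible segments, and a segment-count limit applies as well):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;

class GreedyBatcher {
    // Simplified model of SegmentsToMerge: tracks item sizes and a running byte count.
    static class Batch {
        private final Deque<Long> sizes = new ArrayDeque<>();
        private long byteCount = 0;

        void add(long size) {
            sizes.addLast(size);
            byteCount += size;
        }

        // Remove items from the tail until the batch fits the limit again;
        // return how many were removed, mirroring backtrack()'s contract.
        int backtrack(long byteLimit) {
            int removed = 0;
            while (byteCount > byteLimit && !sizes.isEmpty()) {
                byteCount -= sizes.removeLast();
                removed++;
            }
            return removed;
        }

        int count() { return sizes.size(); }
        long bytes() { return byteCount; }
    }

    public static void main(String[] args) {
        List<Long> segmentSizes = List.of(40L, 30L, 50L, 10L, 90L, 5L);
        long byteLimit = 100;
        Batch batch = new Batch();
        for (int i = 0; i < segmentSizes.size(); i++) {
            batch.add(segmentSizes.get(i));
            if (batch.bytes() > byteLimit) {
                // Over the limit: back off to the largest complete set, emit it,
                // and rewind i so the removed items are reconsidered.
                i -= batch.backtrack(byteLimit);
                if (batch.count() > 1) {
                    System.out.println("merge batch: " + batch.count()
                        + " items, " + batch.bytes() + " bytes");
                }
                if (batch.count() == 0) {
                    i++; // single oversized item: skip it so the loop makes progress
                }
                batch = new Batch();
            }
        }
        // Flush the trailing batch that never hit the limit.
        if (batch.count() > 1) {
            System.out.println("final batch: " + batch.count()
                + " items, " + batch.bytes() + " bytes");
        }
    }
}

Running this prints three batches (70, 60, and 95 bytes), showing how the rewind after backtrack lets dropped items seed the next batch instead of being lost.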