Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.
The class ServerManager, method buildAndDecorateQueryRunner:
private <T> QueryRunner<T> buildAndDecorateQueryRunner(
    final QueryRunnerFactory<T, Query<T>> factory,
    final QueryToolChest<T, Query<T>> toolChest,
    final SegmentReference segment,
    final Optional<byte[]> cacheKeyPrefix,
    final SegmentDescriptor segmentDescriptor,
    final AtomicLong cpuTimeAccumulator
)
{
  final SpecificSegmentSpec segmentSpec = new SpecificSegmentSpec(segmentDescriptor);
  final SegmentId segmentId = segment.getId();
  final Interval segmentInterval = segment.getDataInterval();
  // A closed segment can report a null ID or interval. If the segment is closed after this check,
  // ReferenceCountingSegmentQueryRunner will handle it and do the right thing.
  if (segmentId == null || segmentInterval == null) {
    return new ReportTimelineMissingSegmentQueryRunner<>(segmentDescriptor);
  }

  String segmentIdString = segmentId.toString();

  MetricsEmittingQueryRunner<T> metricsEmittingQueryRunnerInner = new MetricsEmittingQueryRunner<>(
      emitter,
      toolChest,
      new ReferenceCountingSegmentQueryRunner<>(factory, segment, segmentDescriptor),
      QueryMetrics::reportSegmentTime,
      queryMetrics -> queryMetrics.segment(segmentIdString)
  );

  StorageAdapter storageAdapter = segment.asStorageAdapter();
  long segmentMaxTime = storageAdapter.getMaxTime().getMillis();
  long segmentMinTime = storageAdapter.getMinTime().getMillis();
  Interval actualDataInterval = Intervals.utc(segmentMinTime, segmentMaxTime + 1);

  CachingQueryRunner<T> cachingQueryRunner = new CachingQueryRunner<>(
      segmentIdString,
      cacheKeyPrefix,
      segmentDescriptor,
      actualDataInterval,
      objectMapper,
      cache,
      toolChest,
      metricsEmittingQueryRunnerInner,
      cachePopulator,
      cacheConfig
  );

  BySegmentQueryRunner<T> bySegmentQueryRunner =
      new BySegmentQueryRunner<>(segmentId, segmentInterval.getStart(), cachingQueryRunner);

  MetricsEmittingQueryRunner<T> metricsEmittingQueryRunnerOuter = new MetricsEmittingQueryRunner<>(
      emitter,
      toolChest,
      bySegmentQueryRunner,
      QueryMetrics::reportSegmentAndCacheTime,
      queryMetrics -> queryMetrics.segment(segmentIdString)
  ).withWaitMeasuredFromNow();

  SpecificSegmentQueryRunner<T> specificSegmentQueryRunner =
      new SpecificSegmentQueryRunner<>(metricsEmittingQueryRunnerOuter, segmentSpec);

  PerSegmentOptimizingQueryRunner<T> perSegmentOptimizingQueryRunner = new PerSegmentOptimizingQueryRunner<>(
      specificSegmentQueryRunner,
      new PerSegmentQueryOptimizationContext(segmentDescriptor)
  );

  return new SetAndVerifyContextQueryRunner<>(
      serverConfig,
      CPUTimeMetricQueryRunner.safeBuild(perSegmentOptimizingQueryRunner, toolChest, emitter, cpuTimeAccumulator, false)
  );
}
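
The method above is essentially a stack of QueryRunner decorators: each wrapper adds one concern (reference counting, metrics, caching, by-segment results, CPU accounting) around the runner it receives, and the layering reads inside-out. A minimal self-contained sketch of that layering pattern, using simplified stand-in types rather than Druid's real interfaces (this QueryRunner, SegmentScanRunner, and TimingRunner are hypothetical reductions for illustration only):

import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

public class DecoratorChainSketch
{
  // Reduced stand-in for Druid's QueryRunner: just produces rows for a query string.
  interface QueryRunner
  {
    List<String> run(String query);
  }

  // Innermost runner: pretends to scan a segment.
  static class SegmentScanRunner implements QueryRunner
  {
    @Override
    public List<String> run(String query)
    {
      return List.of("row-1", "row-2");
    }
  }

  // One decorator layer: accumulates elapsed time around its delegate,
  // mirroring the role MetricsEmittingQueryRunner plays in the chain.
  static class TimingRunner implements QueryRunner
  {
    private final QueryRunner delegate;
    private final AtomicLong nanosAccumulator;

    TimingRunner(QueryRunner delegate, AtomicLong nanosAccumulator)
    {
      this.delegate = delegate;
      this.nanosAccumulator = nanosAccumulator;
    }

    @Override
    public List<String> run(String query)
    {
      long start = System.nanoTime();
      try {
        return delegate.run(query);
      }
      finally {
        nanosAccumulator.addAndGet(System.nanoTime() - start);
      }
    }
  }

  public static void main(String[] args)
  {
    AtomicLong timeAccumulator = new AtomicLong();
    // The innermost runner does the work; each wrapper adds exactly one concern.
    QueryRunner runner = new TimingRunner(new SegmentScanRunner(), timeAccumulator);
    System.out.println(runner.run("SELECT *"));
    System.out.println("accumulated nanos: " + timeAccumulator.get());
  }
}

The payoff of this design is that each layer can be tested, reordered, or skipped independently, which is exactly what buildAndDecorateQueryRunner exploits.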
Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.
The class SegmentReplicantLookup, method make:
public static SegmentReplicantLookup make(DruidCluster cluster, boolean replicateAfterLoadTimeout)
{
  final Table<SegmentId, String, Integer> segmentsInCluster = HashBasedTable.create();

  /**
   * For each tier, this stores the number of replicants for all the segments presently queued to load in {@link cluster}.
   * Segments that have failed to load due to the load timeout may not be present in this table if
   * {@link replicateAfterLoadTimeout} is true. This is to enable additional replication of the timed-out
   * segments for improved availability.
   */
  final Table<SegmentId, String, Integer> loadingSegments = HashBasedTable.create();

  for (SortedSet<ServerHolder> serversByType : cluster.getSortedHistoricalsByTier()) {
    for (ServerHolder serverHolder : serversByType) {
      ImmutableDruidServer server = serverHolder.getServer();

      for (DataSegment segment : server.iterateAllSegments()) {
        Integer numReplicants = segmentsInCluster.get(segment.getId(), server.getTier());
        if (numReplicants == null) {
          numReplicants = 0;
        }
        segmentsInCluster.put(segment.getId(), server.getTier(), numReplicants + 1);
      }

      // Also account for queued segments
      for (DataSegment segment : serverHolder.getPeon().getSegmentsToLoad()) {
        Integer numReplicants = loadingSegments.get(segment.getId(), server.getTier());
        if (numReplicants == null) {
          numReplicants = 0;
        }
        // Timed-out segments may need to be replicated on another server for faster availability.
        // Therefore we skip incrementing numReplicants for timed-out segments if replicateAfterLoadTimeout is enabled.
        if (!replicateAfterLoadTimeout || !serverHolder.getPeon().getTimedOutSegments().contains(segment)) {
          loadingSegments.put(segment.getId(), server.getTier(), numReplicants + 1);
        }
      }
    }
  }

  return new SegmentReplicantLookup(segmentsInCluster, loadingSegments, cluster);
}
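
The replicant bookkeeping above is a (segment, tier) -> count mapping held in a Guava Table. A minimal runnable illustration of the same counting idiom with HashBasedTable, with SegmentId reduced to a plain String and made-up segment/tier names (requires com.google.guava:guava on the classpath):

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;

public class ReplicantTableSketch
{
  public static void main(String[] args)
  {
    // Rows are segment IDs, columns are tier names, values are replicant counts.
    final Table<String, String, Integer> segmentsInCluster = HashBasedTable.create();

    String[][] observed = {
        {"segment-1", "hot"},
        {"segment-1", "hot"},
        {"segment-1", "cold"},
        {"segment-2", "hot"},
    };

    for (String[] entry : observed) {
      // Same null-to-zero idiom as SegmentReplicantLookup.make:
      // a missing cell means zero replicants seen so far.
      Integer numReplicants = segmentsInCluster.get(entry[0], entry[1]);
      if (numReplicants == null) {
        numReplicants = 0;
      }
      segmentsInCluster.put(entry[0], entry[1], numReplicants + 1);
    }

    // Prints {segment-1={hot=2, cold=1}, segment-2={hot=1}}
    System.out.println(segmentsInCluster.rowMap());
  }
}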
Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.
The class DruidCoordinator, method moveSegment:
public void moveSegment(
    DruidCoordinatorRuntimeParams params,
    ImmutableDruidServer fromServer,
    ImmutableDruidServer toServer,
    DataSegment segment,
    final LoadPeonCallback callback
)
{
  if (segment == null) {
    log.makeAlert(new IAE("Cannot move null DataSegment"), "Exception moving null segment").emit();
    if (callback != null) {
      callback.execute();
    }
    throw new ISE("Cannot move null DataSegment");
  }

  SegmentId segmentId = segment.getId();
  try {
    if (fromServer.getMetadata().equals(toServer.getMetadata())) {
      throw new IAE("Cannot move [%s] to and from the same server [%s]", segmentId, fromServer.getName());
    }

    ImmutableDruidDataSource dataSource = params.getDataSourcesSnapshot().getDataSource(segment.getDataSource());
    if (dataSource == null) {
      throw new IAE("Unable to find dataSource for segment [%s] in metadata", segmentId);
    }

    // Get the segment information from SegmentsMetadataManager instead of from fromServer.
    // This is useful when the SegmentsMetadataManager and fromServer copies of a DataSegment differ for the
    // same identifier (say, the loadSpec differs because of deep storage migration).
    final DataSegment segmentToLoad = dataSource.getSegment(segment.getId());
    if (segmentToLoad == null) {
      throw new IAE("No segment metadata found for segment Id [%s]", segment.getId());
    }

    final LoadQueuePeon loadPeon = loadManagementPeons.get(toServer.getName());
    if (loadPeon == null) {
      throw new IAE("LoadQueuePeon hasn't been created yet for path [%s]", toServer.getName());
    }

    final LoadQueuePeon dropPeon = loadManagementPeons.get(fromServer.getName());
    if (dropPeon == null) {
      throw new IAE("LoadQueuePeon hasn't been created yet for path [%s]", fromServer.getName());
    }

    final ServerHolder toHolder = new ServerHolder(toServer, loadPeon);
    if (toHolder.getAvailableSize() < segmentToLoad.getSize()) {
      throw new IAE(
          "Not enough capacity on server [%s] for segment [%s]. Required: %,d, available: %,d.",
          toServer.getName(), segmentToLoad, segmentToLoad.getSize(), toHolder.getAvailableSize()
      );
    }

    final String toLoadQueueSegPath =
        ZKPaths.makePath(zkPaths.getLoadQueuePath(), toServer.getName(), segmentId.toString());

    final LoadPeonCallback loadPeonCallback = () -> {
      dropPeon.unmarkSegmentToDrop(segmentToLoad);
      if (callback != null) {
        callback.execute();
      }
    };

    // Mark the segment to drop before it is actually loaded on the target server,
    // so that this information is accounted for in DruidBalancerStrategy immediately.
    dropPeon.markSegmentToDrop(segmentToLoad);
    try {
      loadPeon.loadSegment(segmentToLoad, () -> {
        try {
          if (serverInventoryView.isSegmentLoadedByServer(toServer.getName(), segment)
              && (curator == null || curator.checkExists().forPath(toLoadQueueSegPath) == null)
              && !dropPeon.getSegmentsToDrop().contains(segment)) {
            dropPeon.dropSegment(segment, loadPeonCallback);
          } else {
            loadPeonCallback.execute();
          }
        } catch (Exception e) {
          throw new RuntimeException(e);
        }
      });
    } catch (Exception e) {
      dropPeon.unmarkSegmentToDrop(segmentToLoad);
      throw new RuntimeException(e);
    }
  } catch (Exception e) {
    log.makeAlert(e, "Exception moving segment %s", segmentId).emit();
    if (callback != null) {
      callback.execute();
    }
  }
}
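
A notable detail here is the callback discipline: the segment is marked to drop up front, and every exit path (load completed, load failed, exception) must unmark it exactly once, which is why the caller's callback is wrapped rather than invoked directly. A small self-contained sketch of that wrap-the-callback pattern, with LoadPeonCallback reduced to Runnable and the mark/unmark bookkeeping replaced by a hypothetical in-memory set (all names here are illustrative, not Druid's API):

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class MarkUnmarkSketch
{
  // Hypothetical stand-in for the drop peon's mark/unmark bookkeeping.
  private final Set<String> segmentsMarkedToDrop = ConcurrentHashMap.newKeySet();

  // Wraps a caller-supplied callback so the unmark always happens first,
  // mirroring the loadPeonCallback lambda in DruidCoordinator.moveSegment.
  Runnable wrapCallback(String segmentId, Runnable callback)
  {
    return () -> {
      segmentsMarkedToDrop.remove(segmentId);
      if (callback != null) {
        callback.run();
      }
    };
  }

  void move(String segmentId, Runnable callback, Runnable loadAction)
  {
    Runnable wrapped = wrapCallback(segmentId, callback);
    segmentsMarkedToDrop.add(segmentId); // mark before the load is attempted
    try {
      loadAction.run();
      wrapped.run(); // simulate the load-completion path invoking the wrapped callback
    }
    catch (Exception e) {
      segmentsMarkedToDrop.remove(segmentId); // the failure path must also unmark
      throw new RuntimeException(e);
    }
  }

  public static void main(String[] args)
  {
    MarkUnmarkSketch sketch = new MarkUnmarkSketch();
    sketch.move("segment-1", () -> System.out.println("caller callback ran"), () -> System.out.println("loading..."));
    System.out.println("still marked: " + sketch.segmentsMarkedToDrop); // prints []
  }
}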
Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.
The class BalanceSegments, method moveSegment:
protected boolean moveSegment(
    final BalancerSegmentHolder segment,
    final ImmutableDruidServer toServer,
    final DruidCoordinatorRuntimeParams params
)
{
  final LoadQueuePeon toPeon = params.getLoadManagementPeons().get(toServer.getName());
  final ImmutableDruidServer fromServer = segment.getFromServer();
  final DataSegment segmentToMove = segment.getSegment();
  final SegmentId segmentId = segmentToMove.getId();

  if (!toPeon.getSegmentsToLoad().contains(segmentToMove)
      && (toServer.getSegment(segmentId) == null)
      && new ServerHolder(toServer, toPeon).getAvailableSize() > segmentToMove.getSize()) {
    log.debug("Moving [%s] from [%s] to [%s]", segmentId, fromServer.getName(), toServer.getName());

    LoadPeonCallback callback = null;
    try {
      ConcurrentMap<SegmentId, BalancerSegmentHolder> movingSegments =
          currentlyMovingSegments.get(toServer.getTier());
      movingSegments.put(segmentId, segment);
      callback = () -> movingSegments.remove(segmentId);
      coordinator.moveSegment(params, fromServer, toServer, segmentToMove, callback);
      return true;
    } catch (Exception e) {
      log.makeAlert(e, StringUtils.format("[%s] : Moving exception", segmentId)).emit();
      if (callback != null) {
        callback.execute();
      }
    }
  }
  return false;
}
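
The in-flight bookkeeping here is a ConcurrentMap keyed by SegmentId, with the cleanup expressed as a callback that removes the entry once the move settles. A self-contained sketch of the same register-then-clean-up idiom, with SegmentId replaced by String and the move itself stubbed out (names are illustrative):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class MovingSegmentsSketch
{
  // segmentId -> destination server for moves that are still in flight.
  private final ConcurrentMap<String, String> currentlyMoving = new ConcurrentHashMap<>();

  boolean tryMove(String segmentId, String toServer, Runnable moveAction)
  {
    // Register the in-flight move; the callback is the single cleanup point.
    currentlyMoving.put(segmentId, toServer);
    Runnable callback = () -> currentlyMoving.remove(segmentId);
    try {
      moveAction.run(); // BalanceSegments hands the callback to coordinator.moveSegment instead
      callback.run();   // simulate the move settling
      return true;
    }
    catch (Exception e) {
      callback.run();   // on failure, clean up immediately so the segment is eligible to move again
      return false;
    }
  }

  public static void main(String[] args)
  {
    MovingSegmentsSketch sketch = new MovingSegmentsSketch();
    sketch.tryMove("segment-1", "historical-2", () -> System.out.println("moving..."));
    System.out.println("in flight after settle: " + sketch.currentlyMoving); // prints {}
  }
}

Registering the move before invoking the coordinator (rather than after) ensures the balancer never schedules the same segment twice in one pass, even if the coordinator call is slow.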
Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.
The class CoordinatorServerView, method serverAddedSegment:
private void serverAddedSegment(final DruidServerMetadata server, final DataSegment segment)
{
  SegmentId segmentId = segment.getId();
  synchronized (lock) {
    log.debug("Adding segment[%s] for server[%s]", segment, server);

    SegmentLoadInfo segmentLoadInfo = segmentLoadInfos.get(segmentId);
    if (segmentLoadInfo == null) {
      // servers escape the scope of this object so use ConcurrentSet
      segmentLoadInfo = new SegmentLoadInfo(segment);

      VersionedIntervalTimeline<String, SegmentLoadInfo> timeline = timelines.get(segment.getDataSource());
      if (timeline == null) {
        timeline = new VersionedIntervalTimeline<>(Ordering.natural());
        timelines.put(segment.getDataSource(), timeline);
      }

      timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segmentLoadInfo));
      segmentLoadInfos.put(segmentId, segmentLoadInfo);
    }
    segmentLoadInfo.addServer(server);
  }
}
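
The get-then-create-then-put dance for the per-datasource timeline (safe here because it all runs under the lock) is the classic lazy-initialization idiom; where no external lock is needed, the same thing is usually written with Map.computeIfAbsent. A minimal sketch under that assumption, with the timeline reduced to a plain list of segment IDs:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class LazyTimelineSketch
{
  // dataSource -> "timeline", reduced here to a list of segment IDs.
  private final Map<String, List<String>> timelines = new HashMap<>();

  void addSegment(String dataSource, String segmentId)
  {
    // One-liner equivalent of: get, and if null, create and put.
    List<String> timeline = timelines.computeIfAbsent(dataSource, ds -> new ArrayList<>());
    timeline.add(segmentId);
  }

  public static void main(String[] args)
  {
    LazyTimelineSketch view = new LazyTimelineSketch();
    view.addSegment("wikipedia", "wikipedia_2024-01-01_v1");
    view.addSegment("wikipedia", "wikipedia_2024-01-02_v1");
    System.out.println(view.timelines);
  }
}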