Use of io.druid.timeline.TimelineObjectHolder in project druid by druid-io.
From the class NumberedShardSpecTest, method testVersionedIntervalTimelineBehaviorForNumberedShardSpec.
private void testVersionedIntervalTimelineBehaviorForNumberedShardSpec(
    List<PartitionChunk<String>> chunks,
    Set<String> expectedObjects
) {
  VersionedIntervalTimeline<String, String> timeline = new VersionedIntervalTimeline<>(Ordering.natural());
  Interval interval = new Interval("2000/3000");
  String version = "v1";
  for (PartitionChunk<String> chunk : chunks) {
    timeline.add(interval, version, chunk);
  }
  Set<String> actualObjects = new HashSet<>();
  List<TimelineObjectHolder<String, String>> entries = timeline.lookup(interval);
  for (TimelineObjectHolder<String, String> entry : entries) {
    for (PartitionChunk<String> chunk : entry.getObject()) {
      actualObjects.add(chunk.getObject());
    }
  }
  Assert.assertEquals(expectedObjects, actualObjects);
}
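The helper above is exercised by test cases that build the chunks list from NumberedShardSpec instances. A minimal, hypothetical invocation (values chosen for illustration, not copied from the test) might look like:

testVersionedIntervalTimelineBehaviorForNumberedShardSpec(
    ImmutableList.<PartitionChunk<String>>of(
        new NumberedShardSpec(0, 2).createChunk("0"),
        new NumberedShardSpec(1, 2).createChunk("1")
    ),
    ImmutableSet.of("0", "1")
);

With both chunks of the two-partition spec added, the single lookup over 2000/3000 is expected to surface both payloads.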
Use of io.druid.timeline.TimelineObjectHolder in project druid by druid-io.
From the class ServerManager, method getQueryRunnerForIntervals.
@Override
public <T> QueryRunner<T> getQueryRunnerForIntervals(Query<T> query, Iterable<Interval> intervals) {
  final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
  if (factory == null) {
    throw new ISE("Unknown query type[%s].", query.getClass());
  }
  final QueryToolChest<T, Query<T>> toolChest = factory.getToolchest();
  final Function<Query<T>, ServiceMetricEvent.Builder> builderFn = getBuilderFn(toolChest);
  final AtomicLong cpuTimeAccumulator = new AtomicLong(0L);
  DataSource dataSource = query.getDataSource();
  if (!(dataSource instanceof TableDataSource)) {
    throw new UnsupportedOperationException("data source type '" + dataSource.getClass().getName() + "' unsupported");
  }
  String dataSourceName = getDataSourceName(dataSource);
  final VersionedIntervalTimeline<String, ReferenceCountingSegment> timeline = dataSources.get(dataSourceName);
  if (timeline == null) {
    return new NoopQueryRunner<T>();
  }
  FunctionalIterable<QueryRunner<T>> queryRunners = FunctionalIterable
      .create(intervals)
      .transformCat(
          new Function<Interval, Iterable<TimelineObjectHolder<String, ReferenceCountingSegment>>>() {
            @Override
            public Iterable<TimelineObjectHolder<String, ReferenceCountingSegment>> apply(Interval input) {
              return timeline.lookup(input);
            }
          }
      )
      .transformCat(
          new Function<TimelineObjectHolder<String, ReferenceCountingSegment>, Iterable<QueryRunner<T>>>() {
            @Override
            public Iterable<QueryRunner<T>> apply(@Nullable final TimelineObjectHolder<String, ReferenceCountingSegment> holder) {
              if (holder == null) {
                return null;
              }
              return FunctionalIterable
                  .create(holder.getObject())
                  .transform(
                      new Function<PartitionChunk<ReferenceCountingSegment>, QueryRunner<T>>() {
                        @Override
                        public QueryRunner<T> apply(PartitionChunk<ReferenceCountingSegment> input) {
                          return buildAndDecorateQueryRunner(
                              factory,
                              toolChest,
                              input.getObject(),
                              new SegmentDescriptor(holder.getInterval(), holder.getVersion(), input.getChunkNumber()),
                              builderFn,
                              cpuTimeAccumulator
                          );
                        }
                      }
                  );
            }
          }
      );
  return CPUTimeMetricQueryRunner.safeBuild(
      new FinalizeResultsQueryRunner<T>(toolChest.mergeResults(factory.mergeRunners(exec, queryRunners)), toolChest),
      builderFn,
      emitter,
      cpuTimeAccumulator,
      true
  );
}
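The chained transformCat/transform calls amount to a three-level expansion: each requested interval is looked up in the timeline, each resulting TimelineObjectHolder is unpacked into its partition chunks, and each chunk becomes one per-segment runner. A plain-loop sketch of that expansion, reusing the names from the method above (illustrative only, the original builds the equivalent result as a lazy iterable):

for (Interval interval : intervals) {
  for (TimelineObjectHolder<String, ReferenceCountingSegment> holder : timeline.lookup(interval)) {
    for (PartitionChunk<ReferenceCountingSegment> chunk : holder.getObject()) {
      // one runner per segment chunk, identified by (interval, version, chunk number)
      SegmentDescriptor descriptor =
          new SegmentDescriptor(holder.getInterval(), holder.getVersion(), chunk.getChunkNumber());
      QueryRunner<T> runner = buildAndDecorateQueryRunner(
          factory, toolChest, chunk.getObject(), descriptor, builderFn, cpuTimeAccumulator
      );
    }
  }
}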
Use of io.druid.timeline.TimelineObjectHolder in project druid by druid-io.
From the class ClientInfoResource, method getDatasource.
@GET
@Path("/{dataSourceName}")
@Produces(MediaType.APPLICATION_JSON)
@ResourceFilters(DatasourceResourceFilter.class)
public Map<String, Object> getDatasource(
    @PathParam("dataSourceName") String dataSourceName,
    @QueryParam("interval") String interval,
    @QueryParam("full") String full
) {
  if (full == null) {
    return ImmutableMap.<String, Object>of(
        KEY_DIMENSIONS, getDatasourceDimensions(dataSourceName, interval),
        KEY_METRICS, getDatasourceMetrics(dataSourceName, interval)
    );
  }
  Interval theInterval;
  if (interval == null || interval.isEmpty()) {
    DateTime now = getCurrentTime();
    theInterval = new Interval(segmentMetadataQueryConfig.getDefaultHistory(), now);
  } else {
    theInterval = new Interval(interval);
  }
  TimelineLookup<String, ServerSelector> timeline = timelineServerView.getTimeline(new TableDataSource(dataSourceName));
  Iterable<TimelineObjectHolder<String, ServerSelector>> serversLookup = timeline != null ? timeline.lookup(theInterval) : null;
  if (serversLookup == null || Iterables.isEmpty(serversLookup)) {
    return Collections.EMPTY_MAP;
  }
  Map<Interval, Object> servedIntervals = new TreeMap<>(new Comparator<Interval>() {
    @Override
    public int compare(Interval o1, Interval o2) {
      if (o1.equals(o2) || o1.overlaps(o2)) {
        return 0;
      } else {
        return o1.isBefore(o2) ? -1 : 1;
      }
    }
  });
  for (TimelineObjectHolder<String, ServerSelector> holder : serversLookup) {
    final Set<Object> dimensions = Sets.newHashSet();
    final Set<Object> metrics = Sets.newHashSet();
    final PartitionHolder<ServerSelector> partitionHolder = holder.getObject();
    if (partitionHolder.isComplete()) {
      for (ServerSelector server : partitionHolder.payloads()) {
        final DataSegment segment = server.getSegment();
        dimensions.addAll(segment.getDimensions());
        metrics.addAll(segment.getMetrics());
      }
    }
    servedIntervals.put(holder.getInterval(), ImmutableMap.of(KEY_DIMENSIONS, dimensions, KEY_METRICS, metrics));
  }
  // collapse intervals if they abut and have same set of columns
  Map<String, Object> result = Maps.newLinkedHashMap();
  Interval curr = null;
  Map<String, Set<String>> cols = null;
  for (Map.Entry<Interval, Object> e : servedIntervals.entrySet()) {
    Interval ival = e.getKey();
    if (curr != null && curr.abuts(ival) && cols.equals(e.getValue())) {
      curr = curr.withEnd(ival.getEnd());
    } else {
      if (curr != null) {
        result.put(curr.toString(), cols);
      }
      curr = ival;
      cols = (Map<String, Set<String>>) e.getValue();
    }
  }
  // add the last one in
  if (curr != null) {
    result.put(curr.toString(), cols);
  }
  return result;
}
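The final loop collapses abutting intervals that expose the same dimensions and metrics, so the returned map is keyed by the widest such interval strings. A self-contained sketch of just the collapse test on Joda-Time intervals (illustrative dates, not taken from the resource):

Interval a = new Interval("2014-01-01/2014-02-01");
Interval b = new Interval("2014-02-01/2014-03-01");
if (a.abuts(b)) {
  // if both sides also expose the same column sets, they fold into 2014-01-01/2014-03-01
  Interval merged = a.withEnd(b.getEnd());
}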
Use of io.druid.timeline.TimelineObjectHolder in project druid by druid-io.
From the class DruidCoordinatorSegmentMerger, method run.
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
  DatasourceWhitelist whitelist = whiteListRef.get();
  CoordinatorStats stats = new CoordinatorStats();
  Map<String, VersionedIntervalTimeline<String, DataSegment>> dataSources = Maps.newHashMap();
  // Find serviced segments by using a timeline
  for (DataSegment dataSegment : params.getAvailableSegments()) {
    if (whitelist == null || whitelist.contains(dataSegment.getDataSource())) {
      VersionedIntervalTimeline<String, DataSegment> timeline = dataSources.get(dataSegment.getDataSource());
      if (timeline == null) {
        timeline = new VersionedIntervalTimeline<String, DataSegment>(Ordering.<String>natural());
        dataSources.put(dataSegment.getDataSource(), timeline);
      }
      timeline.add(dataSegment.getInterval(), dataSegment.getVersion(), dataSegment.getShardSpec().createChunk(dataSegment));
    }
  }
  // Find segments to merge
  for (final Map.Entry<String, VersionedIntervalTimeline<String, DataSegment>> entry : dataSources.entrySet()) {
    // Get serviced segments from the timeline
    VersionedIntervalTimeline<String, DataSegment> timeline = entry.getValue();
    List<TimelineObjectHolder<String, DataSegment>> timelineObjects =
        timeline.lookup(new Interval(new DateTime(0), new DateTime("3000-01-01")));
    // Accumulate timelineObjects greedily until we reach our limits, then backtrack to the maximum complete set
    SegmentsToMerge segmentsToMerge = new SegmentsToMerge();
    for (int i = 0; i < timelineObjects.size(); i++) {
      if (!segmentsToMerge.add(timelineObjects.get(i))
          || segmentsToMerge.getByteCount() > params.getCoordinatorDynamicConfig().getMergeBytesLimit()
          || segmentsToMerge.getSegmentCount() >= params.getCoordinatorDynamicConfig().getMergeSegmentsLimit()) {
        i -= segmentsToMerge.backtrack(params.getCoordinatorDynamicConfig().getMergeBytesLimit());
        if (segmentsToMerge.getSegmentCount() > 1) {
          stats.addToGlobalStat("mergedCount", mergeSegments(segmentsToMerge, entry.getKey()));
        }
        if (segmentsToMerge.getSegmentCount() == 0) {
          // Backtracked all the way to zero. Increment by one so we continue to make progress.
          i++;
        }
        segmentsToMerge = new SegmentsToMerge();
      }
    }
    // Finish any timelineObjects to merge that may have not hit threshold
    segmentsToMerge.backtrack(params.getCoordinatorDynamicConfig().getMergeBytesLimit());
    if (segmentsToMerge.getSegmentCount() > 1) {
      stats.addToGlobalStat("mergedCount", mergeSegments(segmentsToMerge, entry.getKey()));
    }
  }
  log.info("Issued merge requests for %s segments", stats.getGlobalStats().get("mergedCount").get());
  params.getEmitter().emit(
      new ServiceMetricEvent.Builder().build("coordinator/merge/count", stats.getGlobalStats().get("mergedCount"))
  );
  return params.buildFromExisting().withCoordinatorStats(stats).build();
}
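The get-then-put block that populates dataSources is a standard lazy-initialization pattern; on Java 8+ the same thing could be written more compactly with Map.computeIfAbsent. A sketch using the same variables as above (an equivalent alternative, not the code as committed):

dataSources
    .computeIfAbsent(
        dataSegment.getDataSource(),
        ds -> new VersionedIntervalTimeline<String, DataSegment>(Ordering.<String>natural())
    )
    .add(dataSegment.getInterval(), dataSegment.getVersion(), dataSegment.getShardSpec().createChunk(dataSegment));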
Use of io.druid.timeline.TimelineObjectHolder in project druid by druid-io.
From the class BrokerServerViewTest, method testMultipleServerAddedRemovedSegment.
@Test
public void testMultipleServerAddedRemovedSegment() throws Exception {
  segmentViewInitLatch = new CountDownLatch(1);
  segmentAddedLatch = new CountDownLatch(5);
  // temporarily set latch count to 1
  segmentRemovedLatch = new CountDownLatch(1);
  setupViews();
  final List<DruidServer> druidServers = Lists.transform(
      ImmutableList.<String>of("locahost:0", "localhost:1", "localhost:2", "localhost:3", "localhost:4"),
      new Function<String, DruidServer>() {
        @Override
        public DruidServer apply(String input) {
          return new DruidServer(input, input, 10000000L, "historical", "default_tier", 0);
        }
      }
  );
  for (DruidServer druidServer : druidServers) {
    setupZNodeForServer(druidServer, zkPathsConfig, jsonMapper);
  }
  final List<DataSegment> segments = Lists.transform(
      ImmutableList.<Pair<String, String>>of(
          Pair.of("2011-04-01/2011-04-03", "v1"),
          Pair.of("2011-04-03/2011-04-06", "v1"),
          Pair.of("2011-04-01/2011-04-09", "v2"),
          Pair.of("2011-04-06/2011-04-09", "v3"),
          Pair.of("2011-04-01/2011-04-02", "v3")
      ),
      new Function<Pair<String, String>, DataSegment>() {
        @Override
        public DataSegment apply(Pair<String, String> input) {
          return dataSegmentWithIntervalAndVersion(input.lhs, input.rhs);
        }
      }
  );
  for (int i = 0; i < 5; ++i) {
    announceSegmentForServer(druidServers.get(i), segments.get(i), zkPathsConfig, jsonMapper);
  }
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentViewInitLatch));
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentAddedLatch));
  TimelineLookup timeline = brokerServerView.getTimeline(new TableDataSource("test_broker_server_view"));
  assertValues(
      Arrays.asList(
          createExpected("2011-04-01/2011-04-02", "v3", druidServers.get(4), segments.get(4)),
          createExpected("2011-04-02/2011-04-06", "v2", druidServers.get(2), segments.get(2)),
          createExpected("2011-04-06/2011-04-09", "v3", druidServers.get(3), segments.get(3))
      ),
      (List<TimelineObjectHolder>) timeline.lookup(new Interval("2011-04-01/2011-04-09"))
  );
  // unannounce the segment created by dataSegmentWithIntervalAndVersion("2011-04-01/2011-04-09", "v2")
  unannounceSegmentForServer(druidServers.get(2), segments.get(2), zkPathsConfig);
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentRemovedLatch));
  // renew segmentRemovedLatch since we still have 4 segments to unannounce
  segmentRemovedLatch = new CountDownLatch(4);
  timeline = brokerServerView.getTimeline(new TableDataSource("test_broker_server_view"));
  assertValues(
      Arrays.asList(
          createExpected("2011-04-01/2011-04-02", "v3", druidServers.get(4), segments.get(4)),
          createExpected("2011-04-02/2011-04-03", "v1", druidServers.get(0), segments.get(0)),
          createExpected("2011-04-03/2011-04-06", "v1", druidServers.get(1), segments.get(1)),
          createExpected("2011-04-06/2011-04-09", "v3", druidServers.get(3), segments.get(3))
      ),
      (List<TimelineObjectHolder>) timeline.lookup(new Interval("2011-04-01/2011-04-09"))
  );
  // unannounce all the segments
  for (int i = 0; i < 5; ++i) {
    // skip the one that was previously unannounced
    if (i != 2) {
      unannounceSegmentForServer(druidServers.get(i), segments.get(i), zkPathsConfig);
    }
  }
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentRemovedLatch));
  Assert.assertEquals(0, ((List<TimelineObjectHolder>) timeline.lookup(new Interval("2011-04-01/2011-04-09"))).size());
}
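The two assertValues calls illustrate version overshadowing: while the v2 segment spanning 2011-04-01/2011-04-09 is announced, the v1 segments underneath it are hidden from the lookup, and they reappear once that segment is unannounced. A minimal sketch for inspecting the holders directly (same timeline variable, with an unchecked cast similar to the one the test uses):

for (TimelineObjectHolder<String, ServerSelector> holder :
    (List<TimelineObjectHolder<String, ServerSelector>>) timeline.lookup(new Interval("2011-04-01/2011-04-09"))) {
  System.out.println(holder.getInterval() + " -> " + holder.getVersion());
}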