Example usage of io.druid.client.ImmutableSegmentLoadInfo in the druid-io/druid project:
the getSegmentDataSourceSpecificInterval method of the DatasourcesResource class.
/**
 * Returns the server view for the given datasource over the requested interval: which
 * servers are hosting which segments. Realtime tasks call this endpoint to fetch a view
 * of the interval they are interested in.
 *
 * <p>The interval path parameter encodes '/' as '_' (e.g. "2011-04-01_2011-04-02").
 */
@GET
@Path("/{dataSourceName}/intervals/{interval}/serverview")
@Produces(MediaType.APPLICATION_JSON)
@ResourceFilters(DatasourceResourceFilter.class)
public Response getSegmentDataSourceSpecificInterval(@PathParam("dataSourceName") String dataSourceName, @PathParam("interval") String interval, @QueryParam("partial") final boolean partial) {
    final TimelineLookup<String, SegmentLoadInfo> timeline =
        serverInventoryView.getTimeline(new TableDataSource(dataSourceName));
    // URL path segments cannot contain '/', so callers encode intervals with '_'.
    final Interval requestedInterval = new Interval(interval.replace("_", "/"));
    if (timeline == null) {
        // Unknown datasource: answer with an empty list rather than an error.
        log.debug("No timeline found for datasource[%s]", dataSourceName);
        return Response.ok(Lists.<ImmutableSegmentLoadInfo>newArrayList()).build();
    }
    // NOTE(review): the 'partial' query parameter is accepted but never consulted in this
    // body; incomplete partitions are always included — confirm this is intentional.
    final Function<PartitionChunk<SegmentLoadInfo>, ImmutableSegmentLoadInfo> toImmutable =
        new Function<PartitionChunk<SegmentLoadInfo>, ImmutableSegmentLoadInfo>() {
            @Override
            public ImmutableSegmentLoadInfo apply(PartitionChunk<SegmentLoadInfo> chunk) {
                return chunk.getObject().toImmutableSegmentLoadInfo();
            }
        };
    final FunctionalIterable<ImmutableSegmentLoadInfo> segmentLoadInfos =
        FunctionalIterable
            .create(timeline.lookupWithIncompletePartitions(requestedInterval))
            .transformCat(new Function<TimelineObjectHolder<String, SegmentLoadInfo>, Iterable<ImmutableSegmentLoadInfo>>() {
                @Override
                public Iterable<ImmutableSegmentLoadInfo> apply(TimelineObjectHolder<String, SegmentLoadInfo> holder) {
                    // Flatten each timeline holder's partition chunks into load infos.
                    return Iterables.transform(holder.getObject(), toImmutable);
                }
            });
    return Response.ok(segmentLoadInfos).build();
}
Example usage of io.druid.client.ImmutableSegmentLoadInfo in the druid-io/druid project:
the testSerde method of the ImmutableSegmentLoadInfoTest class.
@Test
public void testSerde() throws IOException {
    // Build a minimal segment with no dimensions, metrics, or load spec.
    final DataSegment segment = new DataSegment(
        "test_ds",
        new Interval("2011-04-01/2011-04-02"),
        "v1",
        null,
        null,
        null,
        NoneShardSpec.instance(),
        0,
        0
    );
    final ImmutableSegmentLoadInfo expected = new ImmutableSegmentLoadInfo(
        segment,
        Sets.newHashSet(new DruidServerMetadata("a", "host", 10, "type", "tier", 1))
    );
    // Round-trip through Jackson and verify the result is equal to the original.
    final byte[] json = mapper.writeValueAsBytes(expected);
    final ImmutableSegmentLoadInfo actual = mapper.readValue(json, ImmutableSegmentLoadInfo.class);
    Assert.assertEquals(expected, actual);
}
Example usage of io.druid.client.ImmutableSegmentLoadInfo in the druid-io/druid project:
the testHandoffChecksForAssignableServer method of the CoordinatorBasedSegmentHandoffNotifierTest class.
@Test
public void testHandoffChecksForAssignableServer() {
    final Interval interval = new Interval("2011-04-01/2011-04-02");
    final SegmentDescriptor descriptor = new SegmentDescriptor(interval, "v1", 2);
    // Segment served by a historical: handoff counts as complete...
    Assert.assertTrue(
        CoordinatorBasedSegmentHandoffNotifier.isHandOffComplete(
            Lists.newArrayList(
                new ImmutableSegmentLoadInfo(
                    createSegment(interval, "v1", 2),
                    Sets.newHashSet(createHistoricalServerMetadata("a"))
                )
            ),
            descriptor
        )
    );
    // ...whereas a segment served only by a realtime node does not.
    Assert.assertFalse(
        CoordinatorBasedSegmentHandoffNotifier.isHandOffComplete(
            Lists.newArrayList(
                new ImmutableSegmentLoadInfo(
                    createSegment(interval, "v1", 2),
                    Sets.newHashSet(createRealtimeServerMetadata("a"))
                )
            ),
            descriptor
        )
    );
}
Example usage of io.druid.client.ImmutableSegmentLoadInfo in the druid-io/druid project:
the checkForSegmentHandoffs method of the CoordinatorBasedSegmentHandoffNotifier class.
/**
 * Polls the coordinator for the current server view of every segment that still has a
 * pending handoff callback. When a segment's handoff is complete, its registered callback
 * is executed on its associated executor and the entry is removed from the pending map.
 *
 * <p>A failure while checking a single segment is logged and retried on the next poll;
 * an unexpected failure of the whole sweep is caught so the polling loop keeps running.
 */
void checkForSegmentHandoffs() {
  try {
    Iterator<Map.Entry<SegmentDescriptor, Pair<Executor, Runnable>>> itr = handOffCallbacks.entrySet().iterator();
    while (itr.hasNext()) {
      Map.Entry<SegmentDescriptor, Pair<Executor, Runnable>> entry = itr.next();
      SegmentDescriptor descriptor = entry.getKey();
      try {
        // partial=true so the view includes incomplete partition sets; completeness is
        // decided by isHandOffComplete below.
        List<ImmutableSegmentLoadInfo> loadedSegments = coordinatorClient.fetchServerView(dataSource, descriptor.getInterval(), true);
        if (isHandOffComplete(loadedSegments, entry.getKey())) {
          log.info("Segment Handoff complete for dataSource[%s] Segment[%s]", dataSource, descriptor);
          // Run the callback on its own executor, then stop tracking this segment.
          entry.getValue().lhs.execute(entry.getValue().rhs);
          itr.remove();
        }
      } catch (Exception e) {
        log.error(e, "Exception while checking handoff for dataSource[%s] Segment[%s], Will try again after [%d]secs", dataSource, descriptor, pollDurationMillis);
      }
    }
    if (!handOffCallbacks.isEmpty()) {
      log.info("Still waiting for Handoff for Segments : [%s]", handOffCallbacks.keySet());
    }
  } catch (Throwable t) {
    // Fixed: this format string previously had three placeholders ("Segment[%s]" and
    // "[%d]") but only two arguments, so pollDurationMillis rendered as the segment and
    // %d had no argument. No single segment is in scope here, so log without one.
    // NOTE(review): the value is named pollDurationMillis but rendered with a "secs"
    // suffix — confirm the units are actually seconds.
    log.error(t, "Exception while checking handoff for dataSource[%s], Will try again after [%d]secs", dataSource, pollDurationMillis);
  }
}
Aggregations