Use of org.apache.druid.client.ImmutableDruidDataSource in project druid by druid-io.
The class MetadataResource, method getUsedSegmentsInDataSource.
@GET
@Path("/datasources/{dataSourceName}/segments")
@Produces(MediaType.APPLICATION_JSON)
@ResourceFilters(DatasourceResourceFilter.class)
public Response getUsedSegmentsInDataSource(
    @PathParam("dataSourceName") String dataSourceName,
    @QueryParam("full") @Nullable String full
)
{
  ImmutableDruidDataSource dataSource = segmentsMetadataManager.getImmutableDataSourceWithUsedSegments(dataSourceName);
  if (dataSource == null) {
    return Response.status(Response.Status.NOT_FOUND).build();
  }
  Response.ResponseBuilder builder = Response.status(Response.Status.OK);
  if (full != null) {
    // "full" requested: return the complete DataSegment objects.
    return builder.entity(dataSource.getSegments()).build();
  }
  // Default: return only the segment IDs.
  return builder.entity(Collections2.transform(dataSource.getSegments(), DataSegment::getId)).build();
}
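For reference, a minimal client-side sketch of calling this endpoint with the JDK's built-in HTTP client, assuming the resource is mounted under the Coordinator's /druid/coordinator/v1/metadata root as in Druid's MetadataResource; the host, port, and datasource name are assumptions, not part of the example above.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class UsedSegmentsClient
{
  public static void main(String[] args) throws Exception
  {
    // Assumed Coordinator location; substitute your own.
    String coordinator = "http://localhost:8081";
    // Append "?full" to get whole DataSegment objects; omit it to get only segment IDs.
    HttpRequest request = HttpRequest.newBuilder(
        URI.create(coordinator + "/druid/coordinator/v1/metadata/datasources/wikipedia/segments?full")
    ).GET().build();
    HttpResponse<String> response = HttpClient.newHttpClient().send(request, HttpResponse.BodyHandlers.ofString());
    System.out.println(response.statusCode());
    System.out.println(response.body());
  }
}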
Use of org.apache.druid.client.ImmutableDruidDataSource in project druid by druid-io.
The class MetadataResource, method getAllUsedSegmentsWithOvershadowedStatus.
private Response getAllUsedSegmentsWithOvershadowedStatus(HttpServletRequest req, @Nullable Set<String> dataSources)
{
  DataSourcesSnapshot dataSourcesSnapshot = segmentsMetadataManager.getSnapshotOfDataSourcesWithAllUsedSegments();
  Collection<ImmutableDruidDataSource> dataSourcesWithUsedSegments = dataSourcesSnapshot.getDataSourcesWithAllUsedSegments();
  // If the caller requested specific datasources, keep only those.
  if (dataSources != null && !dataSources.isEmpty()) {
    dataSourcesWithUsedSegments = dataSourcesWithUsedSegments
        .stream()
        .filter(dataSourceWithUsedSegments -> dataSources.contains(dataSourceWithUsedSegments.getName()))
        .collect(Collectors.toList());
  }
  final Stream<DataSegment> usedSegments = dataSourcesWithUsedSegments.stream().flatMap(t -> t.getSegments().stream());
  // Tag each used segment with whether the snapshot considers it overshadowed.
  final Set<SegmentId> overshadowedSegments = dataSourcesSnapshot.getOvershadowedSegments();
  final Stream<SegmentWithOvershadowedStatus> usedSegmentsWithOvershadowedStatus = usedSegments.map(
      segment -> new SegmentWithOvershadowedStatus(segment, overshadowedSegments.contains(segment.getId()))
  );
  // Filter the results down to segments whose datasource the caller is authorized to read.
  final Function<SegmentWithOvershadowedStatus, Iterable<ResourceAction>> raGenerator = segment ->
      Collections.singletonList(AuthorizationUtils.DATASOURCE_READ_RA_GENERATOR.apply(segment.getDataSegment().getDataSource()));
  final Iterable<SegmentWithOvershadowedStatus> authorizedSegments =
      AuthorizationUtils.filterAuthorizedResources(req, usedSegmentsWithOvershadowedStatus::iterator, raGenerator, authorizerMapper);
  return Response.status(Response.Status.OK).entity(authorizedSegments).build();
}
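The core pattern here, tagging each element with a membership flag from a precomputed set, can be sketched independently of Druid's types. SegmentInfo and TaggedSegment below are hypothetical stand-ins for DataSegment and SegmentWithOvershadowedStatus, not Druid classes:

import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class OvershadowTagging
{
  // Hypothetical stand-ins for DataSegment and SegmentWithOvershadowedStatus.
  record SegmentInfo(String id) {}
  record TaggedSegment(SegmentInfo segment, boolean overshadowed) {}

  public static void main(String[] args)
  {
    List<SegmentInfo> used = List.of(new SegmentInfo("s1"), new SegmentInfo("s2"), new SegmentInfo("s3"));
    Set<String> overshadowed = Set.of("s2");
    // Tag each used segment by checking membership in the overshadowed set, as the handler above does.
    List<TaggedSegment> tagged = used.stream()
        .map(s -> new TaggedSegment(s, overshadowed.contains(s.id())))
        .collect(Collectors.toList());
    tagged.forEach(t -> System.out.println(t.segment().id() + " overshadowed=" + t.overshadowed()));
  }
}

Computing the overshadowed set once and doing O(1) lookups per segment keeps the whole pass linear in the number of used segments.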
Use of org.apache.druid.client.ImmutableDruidDataSource in project druid by druid-io.
The class MarkAsUnusedOvershadowedSegments, method addSegmentsFromServer.
private void addSegmentsFromServer(ServerHolder serverHolder, Map<String, VersionedIntervalTimeline<String, DataSegment>> timelines)
{
  ImmutableDruidServer server = serverHolder.getServer();
  for (ImmutableDruidDataSource dataSource : server.getDataSources()) {
    // Lazily create one timeline per datasource, then register all of the server's segments for it.
    VersionedIntervalTimeline<String, DataSegment> timeline = timelines.computeIfAbsent(
        dataSource.getName(),
        dsName -> new VersionedIntervalTimeline<>(Comparator.naturalOrder())
    );
    VersionedIntervalTimeline.addSegments(timeline, dataSource.getSegments().iterator());
  }
}
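The computeIfAbsent call is what keeps this loop simple: the first segment seen for a datasource creates its timeline, and later segments reuse it. A minimal sketch of the same grouping pattern with plain JDK collections, where a List stands in for the timeline:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GroupByDataSource
{
  // Hypothetical stand-in for DataSegment.
  record Segment(String dataSource, String id) {}

  public static void main(String[] args)
  {
    List<Segment> loaded = List.of(
        new Segment("datasource1", "a"), new Segment("datasource2", "b"), new Segment("datasource1", "c"));
    Map<String, List<Segment>> timelines = new HashMap<>();
    for (Segment segment : loaded) {
      // Create the per-datasource container on first sight, then append.
      timelines.computeIfAbsent(segment.dataSource(), dsName -> new ArrayList<>()).add(segment);
    }
    System.out.println(timelines);
  }
}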
Use of org.apache.druid.client.ImmutableDruidDataSource in project druid by druid-io.
The class SqlSegmentsMetadataManagerTest, method testPollOnDemand.
@Test
public void testPollOnDemand()
{
  DataSourcesSnapshot dataSourcesSnapshot = sqlSegmentsMetadataManager.getDataSourcesSnapshot();
  Assert.assertNull(dataSourcesSnapshot);
  // This should return false and not wait/poll anything, as we did not schedule a periodic poll.
  Assert.assertFalse(sqlSegmentsMetadataManager.useLatestSnapshotIfWithinDelay());
  Assert.assertNull(dataSourcesSnapshot);
  // This call will force an on-demand poll.
  sqlSegmentsMetadataManager.forceOrWaitOngoingDatabasePoll();
  Assert.assertFalse(sqlSegmentsMetadataManager.isPollingDatabasePeriodically());
  Assert.assertTrue(sqlSegmentsMetadataManager.getLatestDatabasePoll() instanceof SqlSegmentsMetadataManager.OnDemandDatabasePoll);
  dataSourcesSnapshot = sqlSegmentsMetadataManager.getDataSourcesSnapshot();
  Assert.assertEquals(ImmutableSet.of("wikipedia"), sqlSegmentsMetadataManager.retrieveAllDataSourceNames());
  Assert.assertEquals(
      ImmutableList.of("wikipedia"),
      dataSourcesSnapshot.getDataSourcesWithAllUsedSegments()
                         .stream()
                         .map(ImmutableDruidDataSource::getName)
                         .collect(Collectors.toList())
  );
  Assert.assertEquals(ImmutableSet.of(segment1, segment2), ImmutableSet.copyOf(dataSourcesSnapshot.getDataSource("wikipedia").getSegments()));
  Assert.assertEquals(ImmutableSet.of(segment1, segment2), ImmutableSet.copyOf(dataSourcesSnapshot.iterateAllUsedSegmentsInSnapshot()));
}
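The behavior this test pins down, hand out a cached snapshot only if it is fresh and otherwise force a poll, can be sketched as a small cache. The class and method names below are assumptions chosen to mirror the test, not Druid's implementation:

import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

public class SnapshotCache<T>
{
  private final Supplier<T> poller;   // e.g. a database poll
  private final long maxAgeNanos;
  private T snapshot;                 // null until the first poll
  private long lastPollNanos;

  public SnapshotCache(Supplier<T> poller, long maxAge, TimeUnit unit)
  {
    this.poller = poller;
    this.maxAgeNanos = unit.toNanos(maxAge);
  }

  // Analogous to useLatestSnapshotIfWithinDelay(): true only if a fresh snapshot already exists;
  // never polls, so it returns false before the first poll.
  public synchronized boolean useLatestSnapshotIfWithinDelay()
  {
    return snapshot != null && System.nanoTime() - lastPollNanos <= maxAgeNanos;
  }

  // Analogous to forceOrWaitOngoingDatabasePoll(): always leaves a current snapshot in place.
  public synchronized void forcePoll()
  {
    snapshot = poller.get();
    lastPollNanos = System.nanoTime();
  }

  public synchronized T getSnapshot()
  {
    return snapshot;
  }
}

Under this policy the test's sequence holds: the snapshot is null at first, useLatestSnapshotIfWithinDelay() returns false without polling, and only the forced poll populates the snapshot.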
Use of org.apache.druid.client.ImmutableDruidDataSource in project druid by druid-io.
The class UnloadUnusedSegmentsTest, method setUp.
@Before
public void setUp()
{
  coordinator = EasyMock.createMock(DruidCoordinator.class);
  historicalServer = EasyMock.createMock(ImmutableDruidServer.class);
  historicalServerTier2 = EasyMock.createMock(ImmutableDruidServer.class);
  brokerServer = EasyMock.createMock(ImmutableDruidServer.class);
  indexerServer = EasyMock.createMock(ImmutableDruidServer.class);
  segment1 = EasyMock.createMock(DataSegment.class);
  segment2 = EasyMock.createMock(DataSegment.class);
  databaseRuleManager = EasyMock.createMock(MetadataRuleManager.class);

  DateTime start1 = DateTimes.of("2012-01-01");
  DateTime start2 = DateTimes.of("2012-02-01");
  DateTime version = DateTimes.of("2012-05-01");

  // Real segments replace the mocks assigned to segment1 and segment2 above.
  segment1 = new DataSegment("datasource1", new Interval(start1, start1.plusHours(1)), version.toString(), new HashMap<>(), new ArrayList<>(), new ArrayList<>(), NoneShardSpec.instance(), 0, 11L);
  segment2 = new DataSegment("datasource2", new Interval(start1, start1.plusHours(1)), version.toString(), new HashMap<>(), new ArrayList<>(), new ArrayList<>(), NoneShardSpec.instance(), 0, 7L);
  realtimeOnlySegment = new DataSegment("datasource2", new Interval(start2, start2.plusHours(1)), version.toString(), new HashMap<>(), new ArrayList<>(), new ArrayList<>(), NoneShardSpec.instance(), 0, 7L);
  broadcastSegment = new DataSegment("broadcastDatasource", new Interval(start1, start1.plusHours(1)), version.toString(), new HashMap<>(), new ArrayList<>(), new ArrayList<>(), NoneShardSpec.instance(), 0, 7L);

  segments = new ArrayList<>();
  segments.add(segment1);
  segments.add(segment2);
  segments.add(broadcastSegment);
  segmentsForRealtime = new ArrayList<>();
  segmentsForRealtime.add(realtimeOnlySegment);
  segmentsForRealtime.add(broadcastSegment);

  historicalPeon = new LoadQueuePeonTester();
  historicalTier2Peon = new LoadQueuePeonTester();
  brokerPeon = new LoadQueuePeonTester();
  indexerPeon = new LoadQueuePeonTester();

  dataSource1 = new ImmutableDruidDataSource("datasource1", Collections.emptyMap(), Collections.singleton(segment1));
  dataSource2 = new ImmutableDruidDataSource("datasource2", Collections.emptyMap(), Collections.singleton(segment2));
  broadcastDatasourceNames = Collections.singleton("broadcastDatasource");
  broadcastDatasource = new ImmutableDruidDataSource("broadcastDatasource", Collections.emptyMap(), Collections.singleton(broadcastSegment));
  dataSources = ImmutableList.of(dataSource1, dataSource2, broadcastDatasource);

  // This simulates a task that is ingesting to an existing non-broadcast datasource, with unpublished segments,
  // while also having a broadcast segment loaded.
  dataSource2ForRealtime = new ImmutableDruidDataSource("datasource2", Collections.emptyMap(), Collections.singleton(realtimeOnlySegment));
  dataSourcesForRealtime = ImmutableList.of(dataSource2ForRealtime, broadcastDatasource);
}
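What this fixture appears set up to exercise can be condensed into one predicate: a loaded segment is a candidate for unloading only if it is not among its datasource's used segments and its datasource is not a broadcast datasource. The sketch below is a hypothetical condensation for illustration, with made-up names, not the actual UnloadUnusedSegments logic:

import java.util.Set;

public class UnloadDecision
{
  // Hypothetical stand-in for DataSegment.
  record Segment(String dataSource, String id) {}

  // Assumed rule: keep used segments and broadcast segments; everything else may be unloaded.
  static boolean shouldUnload(Segment segment, Set<String> usedSegmentIds, Set<String> broadcastDataSources)
  {
    return !usedSegmentIds.contains(segment.id()) && !broadcastDataSources.contains(segment.dataSource());
  }

  public static void main(String[] args)
  {
    Set<String> used = Set.of("datasource1_seg");
    Set<String> broadcast = Set.of("broadcastDatasource");
    System.out.println(shouldUnload(new Segment("datasource1", "stale_seg"), used, broadcast));      // true: unused, not broadcast
    System.out.println(shouldUnload(new Segment("broadcastDatasource", "b_seg"), used, broadcast));  // false: broadcast is kept
  }
}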