Use of org.apache.druid.timeline.SegmentWithOvershadowedStatus in project druid by druid-io.
The class MetadataSegmentView, method poll().
private void poll()
{
  log.info("polling published segments from coordinator");
  final JsonParserIterator<SegmentWithOvershadowedStatus> metadataSegments = getMetadataSegments(
      coordinatorDruidLeaderClient,
      jsonMapper,
      segmentWatcherConfig.getWatchedDataSources()
  );
  final ImmutableSortedSet.Builder<SegmentWithOvershadowedStatus> builder = ImmutableSortedSet.naturalOrder();
  while (metadataSegments.hasNext()) {
    final SegmentWithOvershadowedStatus segment = metadataSegments.next();
    final DataSegment interned = DataSegmentInterner.intern(segment.getDataSegment());
    final SegmentWithOvershadowedStatus segmentWithOvershadowedStatus =
        new SegmentWithOvershadowedStatus(interned, segment.isOvershadowed());
    builder.add(segmentWithOvershadowedStatus);
  }
  publishedSegments = builder.build();
  cachePopulated.countDown();
}
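The cache that poll() fills is read back elsewhere in MetadataSegmentView; the test further down mocks that reader as getPublishedSegments(). Below is a minimal sketch of what such an accessor could look like, assuming cachePopulated is a CountDownLatch released by the first successful poll; the body is illustrative, not copied from the project.

public Iterator<SegmentWithOvershadowedStatus> getPublishedSegments()
{
  // Block until poll() has populated the cache at least once. Guava's Uninterruptibles
  // ignores interrupts while waiting on the latch. Sketch of the idea only.
  Uninterruptibles.awaitUninterruptibly(cachePopulated);
  return publishedSegments.iterator();
}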
Use of org.apache.druid.timeline.SegmentWithOvershadowedStatus in project druid by druid-io.
The class SystemSchemaTest, method testSegmentsTable().
@Test
public void testSegmentsTable() throws Exception
{
  final SegmentsTable segmentsTable = new SegmentsTable(druidSchema, metadataView, new ObjectMapper(), authMapper);
  final Set<SegmentWithOvershadowedStatus> publishedSegments = new HashSet<>(Arrays.asList(
      new SegmentWithOvershadowedStatus(publishedCompactedSegment1, true),
      new SegmentWithOvershadowedStatus(publishedCompactedSegment2, false),
      new SegmentWithOvershadowedStatus(publishedUncompactedSegment3, false),
      new SegmentWithOvershadowedStatus(segment1, true),
      new SegmentWithOvershadowedStatus(segment2, false)
  ));
  EasyMock.expect(metadataView.getPublishedSegments()).andReturn(publishedSegments.iterator()).once();
  EasyMock.replay(client, request, responseHolder, responseHandler, metadataView);
  DataContext dataContext = createDataContext(Users.SUPER);
  final List<Object[]> rows = segmentsTable.scan(dataContext).toList();
  rows.sort((Object[] row1, Object[] row2) -> ((Comparable) row1[0]).compareTo(row2[0]));
  // total segments = 8
  // segments test1, test2 are published and available
  // segment test3 is served by historical but unpublished or unused
  // segments test4, test5 are not published but available (realtime segments)
  // segment test2 is both published and served by a realtime server.
  Assert.assertEquals(8, rows.size());
  verifyRow(
      rows.get(0),
      "test1_2010-01-01T00:00:00.000Z_2011-01-01T00:00:00.000Z_version1",
      100L, // size
      0L, // partition_num
      1L, // num_replicas
      3L, // numRows
      1L, // is_published
      1L, // is_available
      0L, // is_realtime
      1L, // is_overshadowed
      null // is_compacted
  );
  verifyRow(
      rows.get(1),
      "test2_2011-01-01T00:00:00.000Z_2012-01-01T00:00:00.000Z_version2",
      100L, // size
      0L, // partition_num
      2L, // num_replicas (segment test2 is served by historical and realtime servers)
      3L, // numRows
      1L, // is_published
      1L, // is_available
      0L, // is_realtime
      0L, // is_overshadowed
      null // is_compacted
  );
  // segment test3 is unpublished and has a NumberedShardSpec with partitionNum = 2
  verifyRow(
      rows.get(2),
      "test3_2012-01-01T00:00:00.000Z_2013-01-01T00:00:00.000Z_version3_2",
      100L, // size
      2L, // partition_num
      1L, // num_replicas
      2L, // numRows
      0L, // is_published
      1L, // is_available
      0L, // is_realtime
      0L, // is_overshadowed
      null // is_compacted
  );
  verifyRow(
      rows.get(3),
      "test4_2014-01-01T00:00:00.000Z_2015-01-01T00:00:00.000Z_version4",
      100L, // size
      0L, // partition_num
      1L, // num_replicas
      0L, // numRows
      0L, // is_published
      1L, // is_available
      1L, // is_realtime
      0L, // is_overshadowed
      null // is_compacted
  );
  verifyRow(
      rows.get(4),
      "test5_2015-01-01T00:00:00.000Z_2016-01-01T00:00:00.000Z_version5",
      100L, // size
      0L, // partition_num
      1L, // num_replicas
      0L, // numRows
      0L, // is_published
      1L, // is_available
      1L, // is_realtime
      0L, // is_overshadowed
      null // is_compacted
  );
  // wikipedia segments are published and unavailable, num_replicas is 0
  // wikipedia segments 1 and 2 are compacted while segment 3 is not compacted
  verifyRow(
      rows.get(5),
      "wikipedia1_2007-01-01T00:00:00.000Z_2008-01-01T00:00:00.000Z_version1",
      53000L, // size
      0L, // partition_num
      0L, // num_replicas
      0L, // numRows
      1L, // is_published
      0L, // is_available
      0L, // is_realtime
      1L, // is_overshadowed
      expectedCompactionState // is_compacted
  );
  verifyRow(
      rows.get(6),
      "wikipedia2_2008-01-01T00:00:00.000Z_2009-01-01T00:00:00.000Z_version2",
      83000L, // size
      0L, // partition_num
      0L, // num_replicas
      0L, // numRows
      1L, // is_published
      0L, // is_available
      0L, // is_realtime
      0L, // is_overshadowed
      expectedCompactionState // is_compacted
  );
  verifyRow(
      rows.get(7),
      "wikipedia3_2009-01-01T00:00:00.000Z_2010-01-01T00:00:00.000Z_version3",
      47000L, // size
      0L, // partition_num
      0L, // num_replicas
      0L, // numRows
      1L, // is_published
      0L, // is_available
      0L, // is_realtime
      0L, // is_overshadowed
      null // is_compacted
  );
  // Verify value types.
  verifyTypes(rows, SystemSchema.SEGMENTS_SIGNATURE);
}
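The verifyRow and verifyTypes helpers are not shown on this page. The following is a sketch of the shape verifyRow would need in order to match the calls above; the column indices are assumptions about the order of SystemSchema.SEGMENTS_SIGNATURE, not taken from the project's source.

private void verifyRow(
    Object[] row,
    String segmentId,
    long size,
    long partitionNum,
    long numReplicas,
    long numRows,
    long isPublished,
    long isAvailable,
    long isRealtime,
    long isOvershadowed,
    CompactionState expectedCompactionState
)
{
  Assert.assertEquals(segmentId, row[0].toString());
  // Hypothetical column positions; the real test would derive them from SEGMENTS_SIGNATURE.
  final long[] expected = {size, partitionNum, numReplicas, numRows, isPublished, isAvailable, isRealtime, isOvershadowed};
  final int[] assumedColumns = {4, 6, 7, 8, 9, 10, 11, 12};
  for (int i = 0; i < expected.length; i++) {
    Assert.assertEquals(expected[i], ((Number) row[assumedColumns[i]]).longValue());
  }
  Assert.assertEquals(expectedCompactionState, row[16]); // assumed position of last_compaction_state
}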
Use of org.apache.druid.timeline.SegmentWithOvershadowedStatus in project druid by druid-io.
The class MetadataResource, method getAllUsedSegmentsWithOvershadowedStatus().
private Response getAllUsedSegmentsWithOvershadowedStatus(HttpServletRequest req, @Nullable Set<String> dataSources)
{
  DataSourcesSnapshot dataSourcesSnapshot = segmentsMetadataManager.getSnapshotOfDataSourcesWithAllUsedSegments();
  Collection<ImmutableDruidDataSource> dataSourcesWithUsedSegments = dataSourcesSnapshot.getDataSourcesWithAllUsedSegments();
  if (dataSources != null && !dataSources.isEmpty()) {
    dataSourcesWithUsedSegments = dataSourcesWithUsedSegments
        .stream()
        .filter(dataSourceWithUsedSegments -> dataSources.contains(dataSourceWithUsedSegments.getName()))
        .collect(Collectors.toList());
  }
  final Stream<DataSegment> usedSegments = dataSourcesWithUsedSegments.stream().flatMap(t -> t.getSegments().stream());
  final Set<SegmentId> overshadowedSegments = dataSourcesSnapshot.getOvershadowedSegments();
  final Stream<SegmentWithOvershadowedStatus> usedSegmentsWithOvershadowedStatus = usedSegments.map(
      segment -> new SegmentWithOvershadowedStatus(segment, overshadowedSegments.contains(segment.getId()))
  );
  final Function<SegmentWithOvershadowedStatus, Iterable<ResourceAction>> raGenerator = segment ->
      Collections.singletonList(AuthorizationUtils.DATASOURCE_READ_RA_GENERATOR.apply(segment.getDataSegment().getDataSource()));
  final Iterable<SegmentWithOvershadowedStatus> authorizedSegments = AuthorizationUtils.filterAuthorizedResources(
      req,
      usedSegmentsWithOvershadowedStatus::iterator,
      raGenerator,
      authorizerMapper
  );
  Response.ResponseBuilder builder = Response.status(Response.Status.OK);
  return builder.entity(authorizedSegments).build();
}
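getAllUsedSegmentsWithOvershadowedStatus is a private helper; in MetadataResource it sits behind a public JAX-RS endpoint. Below is a hedged sketch of what that wrapper could look like, assuming a /segments sub-path and an includeOvershadowedStatus query parameter; the names and the fallback branch are assumptions, and authorization filtering is omitted from the fallback for brevity.

@GET
@Path("/segments")
@Produces(MediaType.APPLICATION_JSON)
public Response getAllUsedSegments(
    @Context final HttpServletRequest req,
    @QueryParam("datasources") @Nullable final Set<String> dataSources,
    @QueryParam("includeOvershadowedStatus") @Nullable final String includeOvershadowedStatus
)
{
  if (includeOvershadowedStatus != null) {
    // Presence of the flag switches to the SegmentWithOvershadowedStatus view shown above.
    return getAllUsedSegmentsWithOvershadowedStatus(req, dataSources);
  }
  // Plain DataSegment listing (sketch only; the real resource also applies authorization filtering here).
  final List<DataSegment> segments = segmentsMetadataManager
      .getSnapshotOfDataSourcesWithAllUsedSegments()
      .getDataSourcesWithAllUsedSegments()
      .stream()
      .flatMap(dataSource -> dataSource.getSegments().stream())
      .collect(Collectors.toList());
  return Response.status(Response.Status.OK).entity(segments).build();
}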