Use of org.apache.druid.timeline.TimelineObjectHolder in the project druid by druid-io.
From the class ServerManagerTest, method testGetQueryRunnerForSegmentsWhenSegmentIsClosedReportingMissingSegments:
@Test
public void testGetQueryRunnerForSegmentsWhenSegmentIsClosedReportingMissingSegments() {
  // Build a query over an interval that the segment manager has a timeline for.
  final Interval queryInterval = Intervals.of("P1d/2011-04-01");
  final SearchQuery query = searchQuery("test", queryInterval, Granularities.ALL);
  final Optional<VersionedIntervalTimeline<String, ReferenceCountingSegment>> timeline =
      segmentManager.getTimeline(DataSourceAnalysis.forDataSource(query.getDataSource()));
  Assert.assertTrue(timeline.isPresent());

  // Close every segment found in the timeline, recording each one's descriptor
  // so we can later assert that all of them are reported back as missing.
  final List<SegmentDescriptor> closedSegments = new ArrayList<>();
  for (TimelineObjectHolder<String, ReferenceCountingSegment> holder : timeline.get().lookup(queryInterval)) {
    for (PartitionChunk<ReferenceCountingSegment> chunk : holder.getObject()) {
      final ReferenceCountingSegment segment = chunk.getObject();
      Assert.assertNotNull(segment.getId());
      closedSegments.add(
          new SegmentDescriptor(segment.getDataInterval(), segment.getVersion(), segment.getId().getPartitionNum())
      );
      segment.close();
    }
  }

  // Running the query against the now-closed segments must produce no results,
  // and the response context must list every closed segment as missing.
  final QueryRunner<Result<SearchResultValue>> runner =
      serverManager.getQueryRunnerForSegments(query, closedSegments);
  final ResponseContext context = DefaultResponseContext.createEmpty();
  final List<Result<SearchResultValue>> results = runner.run(QueryPlus.wrap(query), context).toList();
  Assert.assertTrue(results.isEmpty());
  Assert.assertNotNull(context.getMissingSegments());
  Assert.assertEquals(closedSegments, context.getMissingSegments());
}
Use of org.apache.druid.timeline.TimelineObjectHolder in the project druid by druid-io.
From the class DataSourcesResourceTest, method testIsHandOffComplete:
@Test
public void testIsHandOffComplete() {
  final MetadataRuleManager ruleManager = EasyMock.createMock(MetadataRuleManager.class);
  final Rule loadRule = new IntervalLoadRule(Intervals.of("2013-01-02T00:00:00Z/2013-01-03T00:00:00Z"), null);
  final Rule dropRule = new IntervalDropRule(Intervals.of("2013-01-01T00:00:00Z/2013-01-02T00:00:00Z"));
  final DataSourcesResource resource =
      new DataSourcesResource(inventoryView, null, ruleManager, null, null, null);

  // Case 1: the interval matches the drop rule, so hand-off is trivially complete.
  EasyMock.expect(ruleManager.getRulesWithDefault("dataSource1"))
          .andReturn(ImmutableList.of(loadRule, dropRule))
          .once();
  EasyMock.replay(ruleManager);
  final String droppedInterval = "2013-01-01T01:00:00Z/2013-01-01T02:00:00Z";
  final Response droppedResponse = resource.isHandOffComplete("dataSource1", droppedInterval, 1, "v1");
  Assert.assertTrue((boolean) droppedResponse.getEntity());
  EasyMock.verify(ruleManager);

  // Case 2: the interval is not dropped and no timeline exists -> hand-off incomplete.
  EasyMock.reset(ruleManager);
  EasyMock.expect(ruleManager.getRulesWithDefault("dataSource1"))
          .andReturn(ImmutableList.of(loadRule, dropRule))
          .once();
  EasyMock.expect(inventoryView.getTimeline(new TableDataSource("dataSource1"))).andReturn(null).once();
  EasyMock.replay(inventoryView, ruleManager);
  final String noTimelineInterval = "2013-01-02T01:00:00Z/2013-01-02T02:00:00Z";
  final Response noTimelineResponse = resource.isHandOffComplete("dataSource1", noTimelineInterval, 1, "v1");
  Assert.assertFalse((boolean) noTimelineResponse.getEntity());
  EasyMock.verify(inventoryView, ruleManager);

  // Case 3: the interval is not dropped and a timeline holds a served segment -> complete.
  final String servedInterval = "2013-01-02T02:00:00Z/2013-01-02T03:00:00Z";
  final SegmentLoadInfo loadInfo = new SegmentLoadInfo(createSegment(Intervals.of(servedInterval), "v1", 1));
  loadInfo.addServer(createHistoricalServerMetadata("test"));
  final VersionedIntervalTimeline<String, SegmentLoadInfo> timeline =
      new VersionedIntervalTimeline<String, SegmentLoadInfo>(null) {
        @Override
        public List<TimelineObjectHolder<String, SegmentLoadInfo>> lookupWithIncompletePartitions(Interval interval) {
          final PartitionHolder<SegmentLoadInfo> partitions =
              new PartitionHolder<>(new NumberedPartitionChunk<>(1, 1, loadInfo));
          final List<TimelineObjectHolder<String, SegmentLoadInfo>> holders = new ArrayList<>();
          holders.add(new TimelineObjectHolder<>(Intervals.of(servedInterval), "v1", partitions));
          return holders;
        }
      };
  EasyMock.reset(inventoryView, ruleManager);
  EasyMock.expect(ruleManager.getRulesWithDefault("dataSource1"))
          .andReturn(ImmutableList.of(loadRule, dropRule))
          .once();
  EasyMock.expect(inventoryView.getTimeline(new TableDataSource("dataSource1"))).andReturn(timeline).once();
  EasyMock.replay(inventoryView, ruleManager);
  final Response servedResponse = resource.isHandOffComplete("dataSource1", servedInterval, 1, "v1");
  Assert.assertTrue((boolean) servedResponse.getEntity());
  EasyMock.verify(inventoryView, ruleManager);
}
Use of org.apache.druid.timeline.TimelineObjectHolder in the project druid by druid-io.
From the class CompactSegmentsTest, method assertCompactSegments:
private void assertCompactSegments(CompactSegments compactSegments, Interval expectedInterval, int expectedRemainingSegments, int expectedCompactTaskCount, Supplier<String> expectedVersionSupplier) {
  // Run three compaction rounds, checking the coordinator stats after each.
  for (int round = 0; round < 3; round++) {
    final CoordinatorStats stats = doCompactSegments(compactSegments);
    Assert.assertEquals(expectedCompactTaskCount, stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT));
    // One of the dataSources is compacted per round.
    if (expectedRemainingSegments > 0) {
      // A positive expectedRemainingSegments means we count how many dataSources
      // still report exactly that many bytes of segments awaiting compaction.
      long matchingDataSources = 0;
      for (String ds : stats.getDataSources(CompactSegments.TOTAL_SIZE_OF_SEGMENTS_AWAITING)) {
        final long awaiting = stats.getDataSourceStat(CompactSegments.TOTAL_SIZE_OF_SEGMENTS_AWAITING, ds);
        if (awaiting == expectedRemainingSegments) {
          matchingDataSources++;
        }
      }
      Assert.assertEquals(round + 1, matchingDataSources);
    } else {
      // Otherwise the number of dataSources still present in the stats shrinks each round.
      Assert.assertEquals(2 - round, stats.getDataSources(CompactSegments.TOTAL_SIZE_OF_SEGMENTS_AWAITING).size());
    }
  }

  // Every dataSource should end up with exactly one holder containing two chunks,
  // each chunk carrying the expected interval and version.
  for (int i = 0; i < 3; i++) {
    final String dataSource = DATA_SOURCE_PREFIX + i;
    final List<TimelineObjectHolder<String, DataSegment>> holders = dataSources.get(dataSource).lookup(expectedInterval);
    Assert.assertEquals(1, holders.size());
    final List<PartitionChunk<DataSegment>> chunks = Lists.newArrayList(holders.get(0).getObject());
    Assert.assertEquals(2, chunks.size());
    final String expectedVersion = expectedVersionSupplier.get();
    for (PartitionChunk<DataSegment> chunk : chunks) {
      final DataSegment segment = chunk.getObject();
      Assert.assertEquals(expectedInterval, segment.getInterval());
      Assert.assertEquals(expectedVersion, segment.getVersion());
    }
  }
}
Aggregations