Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
From the class ImmutableDruidDataSourceTest, method testHashCodeMethodThrowsUnsupportedOperationException.
@Test
public void testHashCodeMethodThrowsUnsupportedOperationException() {
  final DataSegment segment = getTestSegment();
  final ImmutableDruidDataSource dataSource = getImmutableDruidDataSource(segment);
  Assert.assertThrows(
      "ImmutableDruidDataSource shouldn't be used as the key in containers",
      UnsupportedOperationException.class,
      dataSource::hashCode
  );
}
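Because hashCode (and, as the next test shows, equals) throws by design, an ImmutableDruidDataSource cannot live in a HashSet or serve as a HashMap key; code that needs a lookup table keys on the data source name instead. A hypothetical illustration, not part of the test class:

Map<String, ImmutableDruidDataSource> byName = new HashMap<>();
byName.put(dataSource.getName(), dataSource); // key on the name, never on the object itself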
Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
From the class ImmutableDruidDataSourceTest, method testEqualsMethodThrowsUnsupportedOperationException.
@Test
public void testEqualsMethodThrowsUnsupportedOperationException() {
  final DataSegment segment1 = getTestSegment();
  final ImmutableDruidDataSource dataSource1 = getImmutableDruidDataSource(segment1);
  final DataSegment segment2 = getTestSegment();
  final ImmutableDruidDataSource dataSource2 = getImmutableDruidDataSource(segment2);
  Assert.assertThrows(
      "ImmutableDruidDataSource shouldn't be used as the key in containers",
      UnsupportedOperationException.class,
      () -> dataSource1.equals(dataSource2)
  );
}
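Both tests above call two private helpers that this page does not show. A minimal sketch of plausible implementations, assuming DataSegment's nine-argument constructor and ImmutableDruidDataSource's (name, properties, segments) constructor; every concrete value below is an illustrative placeholder:

private static DataSegment getTestSegment() {
  // Placeholder values; the real helper may use different ones.
  return new DataSegment(
      "test",
      Intervals.of("2017/2018"),
      "version",
      ImmutableMap.of("type", "local", "path", "/tmp/test"),
      ImmutableList.of("dim1", "dim2"),
      ImmutableList.of("met1"),
      NoneShardSpec.instance(),
      9,
      100
  );
}

private static ImmutableDruidDataSource getImmutableDruidDataSource(DataSegment segment) {
  return new ImmutableDruidDataSource("test", ImmutableMap.of(), ImmutableList.of(segment));
}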
Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
From the class CachingClusteredClientPerfTest, method testGetQueryRunnerForSegments_singleIntervalLargeSegments.
@Test(timeout = 10_000)
public void testGetQueryRunnerForSegments_singleIntervalLargeSegments() {
  final int segmentCount = 30_000;
  final Interval interval = Intervals.of("2021-02-13/2021-02-14");
  final List<SegmentDescriptor> segmentDescriptors = new ArrayList<>(segmentCount);
  final List<DataSegment> dataSegments = new ArrayList<>(segmentCount);
  final VersionedIntervalTimeline<String, ServerSelector> timeline =
      new VersionedIntervalTimeline<>(Ordering.natural());
  final DruidServer server = new DruidServer(
      "server",
      "localhost:9000",
      null,
      Long.MAX_VALUE,
      ServerType.HISTORICAL,
      DruidServer.DEFAULT_TIER,
      DruidServer.DEFAULT_PRIORITY
  );
  for (int ii = 0; ii < segmentCount; ii++) {
    segmentDescriptors.add(new SegmentDescriptor(interval, "1", ii));
    DataSegment segment = makeDataSegment("test", interval, "1", ii);
    dataSegments.add(segment);
  }
  timeline.addAll(Iterators.transform(dataSegments.iterator(), segment -> {
    ServerSelector ss = new ServerSelector(
        segment,
        new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy())
    );
    ss.addServerAndUpdateSegment(new QueryableDruidServer(server, new MockQueryRunner()), segment);
    return new VersionedIntervalTimeline.PartitionChunkEntry<>(
        segment.getInterval(),
        segment.getVersion(),
        segment.getShardSpec().createChunk(ss)
    );
  }));
  TimelineServerView serverView = Mockito.mock(TimelineServerView.class);
  QueryScheduler queryScheduler = Mockito.mock(QueryScheduler.class);
  // Mock the scheduler to return the same sequence as the argument.
  Mockito.when(queryScheduler.run(any(), any())).thenAnswer(i -> i.getArgument(1));
  Mockito.when(queryScheduler.prioritizeAndLaneQuery(any(), any()))
         .thenAnswer(i -> ((QueryPlus) i.getArgument(0)).getQuery());
  Mockito.doReturn(Optional.of(timeline)).when(serverView).getTimeline(any());
  Mockito.doReturn(new MockQueryRunner()).when(serverView).getQueryRunner(any());
  CachingClusteredClient cachingClusteredClient = new CachingClusteredClient(
      new MockQueryToolChestWareHouse(),
      serverView,
      MapCache.create(1024),
      TestHelper.makeJsonMapper(),
      Mockito.mock(CachePopulator.class),
      new CacheConfig(),
      Mockito.mock(DruidHttpClientConfig.class),
      Mockito.mock(DruidProcessingConfig.class),
      ForkJoinPool.commonPool(),
      queryScheduler,
      NoopJoinableFactory.INSTANCE,
      new NoopServiceEmitter()
  );
  Query<SegmentDescriptor> fakeQuery = makeFakeQuery(interval);
  QueryRunner<SegmentDescriptor> queryRunner =
      cachingClusteredClient.getQueryRunnerForSegments(fakeQuery, segmentDescriptors);
  Sequence<SegmentDescriptor> sequence = queryRunner.run(QueryPlus.wrap(fakeQuery));
  Assert.assertEquals(segmentDescriptors, sequence.toList());
}
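The perf test relies on a makeDataSegment helper that the excerpt omits. A plausible sketch, assuming DataSegment.builder() and a LinearShardSpec keyed by partition number so each of the 30,000 descriptors resolves to its own chunk; the size is a placeholder:

private static DataSegment makeDataSegment(String dataSource, Interval interval, String version, int partition) {
  return DataSegment.builder()
                    .dataSource(dataSource)
                    .interval(interval)
                    .version(version)
                    .shardSpec(new LinearShardSpec(partition)) // one numbered chunk per partition
                    .size(1) // placeholder size
                    .build();
}

With a LinearShardSpec, the segment.getShardSpec().createChunk(ss) call in the transform above yields the numbered partition chunk the timeline expects.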
Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
From the class CoordinatorServerViewTest, method testMultipleServerAddedRemovedSegment.
@Test
public void testMultipleServerAddedRemovedSegment() throws Exception {
  segmentViewInitLatch = new CountDownLatch(1);
  segmentAddedLatch = new CountDownLatch(5);
  // Expect a single removal first; the latch is renewed below for the remaining four.
  segmentRemovedLatch = new CountDownLatch(1);
  setupViews();
  final List<DruidServer> druidServers = Lists.transform(
      ImmutableList.of("localhost:0", "localhost:1", "localhost:2", "localhost:3", "localhost:4"),
      input -> new DruidServer(input, input, null, 10000000L, ServerType.HISTORICAL, "default_tier", 0)
  );
  for (DruidServer druidServer : druidServers) {
    setupZNodeForServer(druidServer, zkPathsConfig, jsonMapper);
  }
  final List<DataSegment> segments = Lists.transform(
      ImmutableList.of(
          Pair.of("2011-04-01/2011-04-03", "v1"),
          Pair.of("2011-04-03/2011-04-06", "v1"),
          Pair.of("2011-04-01/2011-04-09", "v2"),
          Pair.of("2011-04-06/2011-04-09", "v3"),
          Pair.of("2011-04-01/2011-04-02", "v3")
      ),
      input -> dataSegmentWithIntervalAndVersion(input.lhs, input.rhs)
  );
  for (int i = 0; i < 5; ++i) {
    announceSegmentForServer(druidServers.get(i), segments.get(i), zkPathsConfig, jsonMapper);
  }
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentViewInitLatch));
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentAddedLatch));
  TimelineLookup timeline = overlordServerView.getTimeline(new TableDataSource("test_overlord_server_view"));
  assertValues(
      Arrays.asList(
          createExpected("2011-04-01/2011-04-02", "v3", druidServers.get(4), segments.get(4)),
          createExpected("2011-04-02/2011-04-06", "v2", druidServers.get(2), segments.get(2)),
          createExpected("2011-04-06/2011-04-09", "v3", druidServers.get(3), segments.get(3))
      ),
      (List<TimelineObjectHolder>) timeline.lookup(Intervals.of("2011-04-01/2011-04-09"))
  );
  // Unannounce the segment created by dataSegmentWithIntervalAndVersion("2011-04-01/2011-04-09", "v2").
  unannounceSegmentForServer(druidServers.get(2), segments.get(2));
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentRemovedLatch));
  // Renew segmentRemovedLatch since we still have 4 segments to unannounce.
  segmentRemovedLatch = new CountDownLatch(4);
  timeline = overlordServerView.getTimeline(new TableDataSource("test_overlord_server_view"));
  assertValues(
      Arrays.asList(
          createExpected("2011-04-01/2011-04-02", "v3", druidServers.get(4), segments.get(4)),
          createExpected("2011-04-02/2011-04-03", "v1", druidServers.get(0), segments.get(0)),
          createExpected("2011-04-03/2011-04-06", "v1", druidServers.get(1), segments.get(1)),
          createExpected("2011-04-06/2011-04-09", "v3", druidServers.get(3), segments.get(3))
      ),
      (List<TimelineObjectHolder>) timeline.lookup(Intervals.of("2011-04-01/2011-04-09"))
  );
  // Unannounce all remaining segments, skipping the one already unannounced above.
  for (int i = 0; i < 5; ++i) {
    if (i != 2) {
      unannounceSegmentForServer(druidServers.get(i), segments.get(i));
    }
  }
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentRemovedLatch));
  Assert.assertEquals(0, ((List<TimelineObjectHolder>) timeline.lookup(Intervals.of("2011-04-01/2011-04-09"))).size());
}
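dataSegmentWithIntervalAndVersion is likewise omitted from this page. A sketch under the assumption that every segment belongs to the test_overlord_server_view data source queried above, with placeholder load spec and size:

private DataSegment dataSegmentWithIntervalAndVersion(String intervalStr, String version) {
  return DataSegment.builder()
                    .dataSource("test_overlord_server_view")
                    .interval(Intervals.of(intervalStr))
                    .version(version)
                    .loadSpec(ImmutableMap.of("type", "local", "path", "/tmp")) // placeholder
                    .shardSpec(NoneShardSpec.instance())
                    .size(0) // placeholder
                    .build();
}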
Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
From the class CachingClusteredClientTest, method testIfNoneMatch.
@Test
public void testIfNoneMatch() {
  Interval interval = Intervals.of("2016/2017");
  final DataSegment dataSegment = new DataSegment(
      "dataSource",
      interval,
      "ver",
      ImmutableMap.of("type", "hdfs", "path", "/tmp"),
      ImmutableList.of("product"),
      ImmutableList.of("visited_sum"),
      NoneShardSpec.instance(),
      9,
      12334
  );
  final ServerSelector selector = new ServerSelector(
      dataSegment,
      new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy())
  );
  selector.addServerAndUpdateSegment(new QueryableDruidServer(servers[0], null), dataSegment);
  timeline.add(interval, "ver", new SingleElementPartitionChunk<>(selector));
  TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder()
                                  .dataSource(DATA_SOURCE)
                                  .intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(interval)))
                                  .context(ImmutableMap.of("If-None-Match", "aVJV29CJY93rszVW/QBy0arWZo0="))
                                  .randomQueryId()
                                  .build();
  final ResponseContext responseContext = initializeResponseContext();
  getDefaultQueryRunner().run(QueryPlus.wrap(query), responseContext);
  Assert.assertEquals("MDs2yIUvYLVzaG6zmwTH1plqaYE=", responseContext.getEntityTag());
}
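initializeResponseContext is a small private helper not shown on this page. A minimal sketch, assuming ResponseContext.createEmpty() (the usual way Druid test code obtains a fresh context); the broker computes an entity tag for the segments it would scan and stores it in this context, where the assertion reads it back via getEntityTag():

private static ResponseContext initializeResponseContext() {
  // Fresh, empty context; the query run populates the entity tag.
  return ResponseContext.createEmpty();
}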