Use of org.apache.druid.client.DataSourcesSnapshot in project druid by apache.
From the class SqlSegmentsMetadataManagerTest, method testPollPeriodicallyAndOnDemandInterleave:
@Test(timeout = 60_000)
public void testPollPeriodicallyAndOnDemandInterleave() throws Exception {
  DataSourcesSnapshot dataSourcesSnapshot = sqlSegmentsMetadataManager.getDataSourcesSnapshot();
  Assert.assertNull(dataSourcesSnapshot);

  sqlSegmentsMetadataManager.startPollingDatabasePeriodically();
  Assert.assertTrue(sqlSegmentsMetadataManager.isPollingDatabasePeriodically());

  // This call makes sure that the first poll has completed.
  sqlSegmentsMetadataManager.useLatestSnapshotIfWithinDelay();
  Assert.assertTrue(sqlSegmentsMetadataManager.getLatestDatabasePoll() instanceof SqlSegmentsMetadataManager.PeriodicDatabasePoll);
  dataSourcesSnapshot = sqlSegmentsMetadataManager.getDataSourcesSnapshot();
  Assert.assertEquals(
      ImmutableList.of("wikipedia"),
      dataSourcesSnapshot.getDataSourcesWithAllUsedSegments().stream()
          .map(ImmutableDruidDataSource::getName)
          .collect(Collectors.toList())
  );

  final String newDataSource2 = "wikipedia2";
  final DataSegment newSegment2 = createNewSegment1(newDataSource2);
  publisher.publishSegment(newSegment2);

  // This call forces an on-demand poll.
  sqlSegmentsMetadataManager.forceOrWaitOngoingDatabasePoll();
  Assert.assertTrue(sqlSegmentsMetadataManager.isPollingDatabasePeriodically());
  Assert.assertTrue(sqlSegmentsMetadataManager.getLatestDatabasePoll() instanceof SqlSegmentsMetadataManager.OnDemandDatabasePoll);

  // The new datasource should now be in the snapshot, since we just forced an on-demand poll.
  dataSourcesSnapshot = sqlSegmentsMetadataManager.getDataSourcesSnapshot();
  Assert.assertEquals(
      ImmutableList.of("wikipedia2", "wikipedia"),
      dataSourcesSnapshot.getDataSourcesWithAllUsedSegments().stream()
          .map(ImmutableDruidDataSource::getName)
          .collect(Collectors.toList())
  );

  final String newDataSource3 = "wikipedia3";
  final DataSegment newSegment3 = createNewSegment1(newDataSource3);
  publisher.publishSegment(newSegment3);

  // This time, wait for the periodic poll (we are not forcing an on-demand poll, so we have to wait a bit).
  while (sqlSegmentsMetadataManager.getDataSourcesSnapshot().getDataSource(newDataSource3) == null) {
    Thread.sleep(1000);
  }
  Assert.assertTrue(sqlSegmentsMetadataManager.isPollingDatabasePeriodically());
  Assert.assertTrue(sqlSegmentsMetadataManager.getLatestDatabasePoll() instanceof SqlSegmentsMetadataManager.PeriodicDatabasePoll);
  dataSourcesSnapshot = sqlSegmentsMetadataManager.getDataSourcesSnapshot();
  Assert.assertEquals(
      ImmutableList.of("wikipedia2", "wikipedia3", "wikipedia"),
      dataSourcesSnapshot.getDataSourcesWithAllUsedSegments().stream()
          .map(ImmutableDruidDataSource::getName)
          .collect(Collectors.toList())
  );
}
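The final wait above loops on Thread.sleep(1000) with no bound of its own; only the test's 60-second timeout stops it if the datasource never appears. A minimal sketch of the same wait with an explicit deadline; the helper awaitDataSource and its timeout handling are hypothetical, not part of Druid, and rely only on the getDataSourcesSnapshot() and getDataSource() calls used above:

// Hypothetical helper: poll the manager's snapshot until the datasource appears
// or the deadline passes, instead of sleeping unboundedly.
private static void awaitDataSource(SqlSegmentsMetadataManager manager, String dataSource, long timeoutMillis)
    throws InterruptedException {
  final long deadline = System.currentTimeMillis() + timeoutMillis;
  while (manager.getDataSourcesSnapshot().getDataSource(dataSource) == null) {
    if (System.currentTimeMillis() > deadline) {
      throw new IllegalStateException("Timed out waiting for datasource: " + dataSource);
    }
    Thread.sleep(100);
  }
}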
Use of org.apache.druid.client.DataSourcesSnapshot in project druid by apache.
From the class CuratorDruidCoordinatorTest, method setUp:
@Before
public void setUp() throws Exception {
  segmentsMetadataManager = EasyMock.createNiceMock(SegmentsMetadataManager.class);
  dataSourcesSnapshot = EasyMock.createNiceMock(DataSourcesSnapshot.class);
  coordinatorRuntimeParams = EasyMock.createNiceMock(DruidCoordinatorRuntimeParams.class);
  metadataRuleManager = EasyMock.createNiceMock(MetadataRuleManager.class);
  configManager = EasyMock.createNiceMock(JacksonConfigManager.class);
  EasyMock
      .expect(configManager.watch(EasyMock.eq(CoordinatorDynamicConfig.CONFIG_KEY), EasyMock.anyObject(Class.class), EasyMock.anyObject()))
      .andReturn(new AtomicReference<>(CoordinatorDynamicConfig.builder().build()))
      .anyTimes();
  EasyMock
      .expect(configManager.watch(EasyMock.eq(CoordinatorCompactionConfig.CONFIG_KEY), EasyMock.anyObject(Class.class), EasyMock.anyObject()))
      .andReturn(new AtomicReference<>(CoordinatorCompactionConfig.empty()))
      .anyTimes();
  EasyMock.replay(configManager);

  setupServerAndCurator();
  curator.start();
  curator.blockUntilConnected();
  curator.create().creatingParentsIfNeeded().forPath(SEGPATH);
  curator.create().creatingParentsIfNeeded().forPath(SOURCE_LOAD_PATH);
  curator.create().creatingParentsIfNeeded().forPath(DESTINATION_LOAD_PATH);

  objectMapper = new DefaultObjectMapper();
  druidCoordinatorConfig = new TestDruidCoordinatorConfig(
      new Duration(COORDINATOR_START_DELAY), new Duration(COORDINATOR_PERIOD), null, null, null,
      new Duration(COORDINATOR_PERIOD), null, null, null, null, null, null, null, null, null, null,
      10, new Duration("PT0s")
  );
  sourceLoadQueueChildrenCache = new PathChildrenCache(
      curator, SOURCE_LOAD_PATH, true, true, Execs.singleThreaded("coordinator_test_path_children_cache_src-%d")
  );
  destinationLoadQueueChildrenCache = new PathChildrenCache(
      curator, DESTINATION_LOAD_PATH, true, true, Execs.singleThreaded("coordinator_test_path_children_cache_dest-%d")
  );
  sourceLoadQueuePeon = new CuratorLoadQueuePeon(curator, SOURCE_LOAD_PATH, objectMapper, peonExec, callbackExec, druidCoordinatorConfig);
  destinationLoadQueuePeon = new CuratorLoadQueuePeon(curator, DESTINATION_LOAD_PATH, objectMapper, peonExec, callbackExec, druidCoordinatorConfig);
  druidNode = new DruidNode("hey", "what", false, 1234, null, true, false);
  loadManagementPeons = new ConcurrentHashMap<>();
  scheduledExecutorFactory = (corePoolSize, nameFormat) -> Executors.newSingleThreadScheduledExecutor();
  leaderAnnouncerLatch = new CountDownLatch(1);
  leaderUnannouncerLatch = new CountDownLatch(1);

  coordinator = new DruidCoordinator(
      druidCoordinatorConfig,
      new ZkPathsConfig() {
        @Override
        public String getBase() {
          return "druid";
        }
      },
      configManager, segmentsMetadataManager, baseView, metadataRuleManager,
      () -> curator, new NoopServiceEmitter(), scheduledExecutorFactory, null, null,
      new NoopServiceAnnouncer() {
        @Override
        public void announce(DruidNode node) {
          // Count down when this coordinator becomes the leader.
          leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node) {
          leaderUnannouncerLatch.countDown();
        }
      },
      druidNode, loadManagementPeons, null, null,
      new CoordinatorCustomDutyGroups(ImmutableSet.of()),
      new CostBalancerStrategyFactory(),
      EasyMock.createNiceMock(LookupCoordinatorManager.class),
      new TestDruidLeaderSelector(),
      null,
      ZkEnablementConfig.ENABLED
  );
}
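Note that dataSourcesSnapshot is created as a nice mock, so every call on it returns a default value (null, zero, or an empty collection). A test that needs the coordinator to observe real datasources would record expectations before replaying; a minimal sketch under that assumption, where the "test-ds" datasource is purely illustrative:

// Hypothetical stubbing sketch: make the mocked snapshot return a fixed datasource
// instead of nice-mock defaults. Uses only EasyMock calls already used in this setUp.
ImmutableDruidDataSource testDataSource =
    new ImmutableDruidDataSource("test-ds", Collections.emptyMap(), Collections.emptyList());
EasyMock.expect(dataSourcesSnapshot.getDataSourcesWithAllUsedSegments())
        .andReturn(ImmutableList.of(testDataSource))
        .anyTimes();
EasyMock.replay(dataSourcesSnapshot);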
Use of org.apache.druid.client.DataSourcesSnapshot in project druid by druid-io.
From the class DataSourcesSnapshotBenchmark, method setUp:
@Setup
public void setUp() {
  long start = System.currentTimeMillis();
  Map<String, ImmutableDruidDataSource> dataSources = new HashMap<>();
  for (int i = 0; i < numDataSources; i++) {
    String dataSource = StringUtils.format("ds-%d", i);
    List<DataSegment> segments = new ArrayList<>();
    for (int j = 0; j < numSegmentPerDataSource; j++) {
      segments.add(
          new DataSegment(
              dataSource, TEST_SEGMENT_INTERVAL, String.valueOf(j),
              Collections.emptyMap(), Collections.emptyList(), Collections.emptyList(),
              NoneShardSpec.instance(), 0, 10L
          )
      );
    }
    dataSources.put(dataSource, new ImmutableDruidDataSource(dataSource, Collections.emptyMap(), segments));
  }
  snapshot = new DataSourcesSnapshot(dataSources);
  System.out.println("Setup Time " + (System.currentTimeMillis() - start) + " ms");
}
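A benchmark method would then exercise the snapshot built in this setUp. A minimal JMH sketch; the method name is illustrative (not necessarily a benchmark that exists in this class), and it assumes the standard org.openjdk.jmh.annotations.Benchmark and org.openjdk.jmh.infra.Blackhole imports:

@Benchmark
public void iterateAllUsedSegments(Blackhole blackhole) {
  // Consume each segment so JMH cannot dead-code-eliminate the iteration.
  for (DataSegment segment : snapshot.iterateAllUsedSegmentsInSnapshot()) {
    blackhole.consume(segment);
  }
}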
Use of org.apache.druid.client.DataSourcesSnapshot in project druid by druid-io.
From the class MetadataResource, method getAllUsedSegmentsWithOvershadowedStatus:
private Response getAllUsedSegmentsWithOvershadowedStatus(HttpServletRequest req, @Nullable Set<String> dataSources) {
  DataSourcesSnapshot dataSourcesSnapshot = segmentsMetadataManager.getSnapshotOfDataSourcesWithAllUsedSegments();
  Collection<ImmutableDruidDataSource> dataSourcesWithUsedSegments = dataSourcesSnapshot.getDataSourcesWithAllUsedSegments();
  if (dataSources != null && !dataSources.isEmpty()) {
    dataSourcesWithUsedSegments = dataSourcesWithUsedSegments
        .stream()
        .filter(dataSourceWithUsedSegments -> dataSources.contains(dataSourceWithUsedSegments.getName()))
        .collect(Collectors.toList());
  }
  final Stream<DataSegment> usedSegments = dataSourcesWithUsedSegments.stream().flatMap(t -> t.getSegments().stream());
  final Set<SegmentId> overshadowedSegments = dataSourcesSnapshot.getOvershadowedSegments();
  final Stream<SegmentWithOvershadowedStatus> usedSegmentsWithOvershadowedStatus = usedSegments.map(
      segment -> new SegmentWithOvershadowedStatus(segment, overshadowedSegments.contains(segment.getId()))
  );
  final Function<SegmentWithOvershadowedStatus, Iterable<ResourceAction>> raGenerator = segment ->
      Collections.singletonList(AuthorizationUtils.DATASOURCE_READ_RA_GENERATOR.apply(segment.getDataSegment().getDataSource()));
  final Iterable<SegmentWithOvershadowedStatus> authorizedSegments = AuthorizationUtils.filterAuthorizedResources(
      req, usedSegmentsWithOvershadowedStatus::iterator, raGenerator, authorizerMapper
  );
  Response.ResponseBuilder builder = Response.status(Response.Status.OK);
  return builder.entity(authorizedSegments).build();
}
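Stripped of the servlet and authorization plumbing, the core of this method is pairing each used segment with a flag saying whether it appears in the snapshot's overshadowed set. A standalone sketch of just that step; the annotate helper is hypothetical and uses only the snapshot methods called above:

// Hypothetical helper: compute the overshadowed flag for every used segment in a snapshot.
static List<SegmentWithOvershadowedStatus> annotate(DataSourcesSnapshot snapshot) {
  final Set<SegmentId> overshadowed = snapshot.getOvershadowedSegments();
  return snapshot.getDataSourcesWithAllUsedSegments()
                 .stream()
                 .flatMap(dataSource -> dataSource.getSegments().stream())
                 .map(segment -> new SegmentWithOvershadowedStatus(segment, overshadowed.contains(segment.getId())))
                 .collect(Collectors.toList());
}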
Use of org.apache.druid.client.DataSourcesSnapshot in project druid by druid-io.
From the class SqlSegmentsMetadataManagerTest, method testPollOnDemand:
@Test
public void testPollOnDemand() {
  DataSourcesSnapshot dataSourcesSnapshot = sqlSegmentsMetadataManager.getDataSourcesSnapshot();
  Assert.assertNull(dataSourcesSnapshot);

  // This should return false and not wait/poll anything, as we did not schedule a periodic poll.
  Assert.assertFalse(sqlSegmentsMetadataManager.useLatestSnapshotIfWithinDelay());
  Assert.assertNull(dataSourcesSnapshot);

  // This call forces an on-demand poll.
  sqlSegmentsMetadataManager.forceOrWaitOngoingDatabasePoll();
  Assert.assertFalse(sqlSegmentsMetadataManager.isPollingDatabasePeriodically());
  Assert.assertTrue(sqlSegmentsMetadataManager.getLatestDatabasePoll() instanceof SqlSegmentsMetadataManager.OnDemandDatabasePoll);

  dataSourcesSnapshot = sqlSegmentsMetadataManager.getDataSourcesSnapshot();
  Assert.assertEquals(ImmutableSet.of("wikipedia"), sqlSegmentsMetadataManager.retrieveAllDataSourceNames());
  Assert.assertEquals(
      ImmutableList.of("wikipedia"),
      dataSourcesSnapshot.getDataSourcesWithAllUsedSegments().stream()
          .map(ImmutableDruidDataSource::getName)
          .collect(Collectors.toList())
  );
  Assert.assertEquals(
      ImmutableSet.of(segment1, segment2),
      ImmutableSet.copyOf(dataSourcesSnapshot.getDataSource("wikipedia").getSegments())
  );
  Assert.assertEquals(
      ImmutableSet.of(segment1, segment2),
      ImmutableSet.copyOf(dataSourcesSnapshot.iterateAllUsedSegmentsInSnapshot())
  );
}
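Both SqlSegmentsMetadataManagerTest methods above exercise the same caller-side contract: getDataSourcesSnapshot() returns null until some poll has completed, and forceOrWaitOngoingDatabasePoll() performs (or waits for) a poll that populates it. A minimal sketch of guarded snapshot access based on that behavior:

// Sketch of guarded snapshot access, inferred from the behavior the tests assert:
// the snapshot is null before the first poll, so trigger an on-demand poll if needed.
DataSourcesSnapshot snapshot = sqlSegmentsMetadataManager.getDataSourcesSnapshot();
if (snapshot == null) {
  sqlSegmentsMetadataManager.forceOrWaitOngoingDatabasePoll();
  snapshot = sqlSegmentsMetadataManager.getDataSourcesSnapshot();
}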