Example 11 with DataSourcesSnapshot

Use of org.apache.druid.client.DataSourcesSnapshot in project druid by druid-io.

The class DruidCoordinatorTest, method setUp:

@Before
public void setUp() throws Exception {
    druidServer = EasyMock.createMock(DruidServer.class);
    serverInventoryView = EasyMock.createMock(BatchServerInventoryView.class);
    segmentsMetadataManager = EasyMock.createNiceMock(SegmentsMetadataManager.class);
    dataSourcesSnapshot = EasyMock.createNiceMock(DataSourcesSnapshot.class);
    coordinatorRuntimeParams = EasyMock.createNiceMock(DruidCoordinatorRuntimeParams.class);
    metadataRuleManager = EasyMock.createNiceMock(MetadataRuleManager.class);
    JacksonConfigManager configManager = EasyMock.createNiceMock(JacksonConfigManager.class);
    EasyMock.expect(configManager.watch(EasyMock.eq(CoordinatorDynamicConfig.CONFIG_KEY), EasyMock.anyObject(Class.class), EasyMock.anyObject())).andReturn(new AtomicReference<>(CoordinatorDynamicConfig.builder().build())).anyTimes();
    EasyMock.expect(configManager.watch(EasyMock.eq(CoordinatorCompactionConfig.CONFIG_KEY), EasyMock.anyObject(Class.class), EasyMock.anyObject())).andReturn(new AtomicReference<>(CoordinatorCompactionConfig.empty())).anyTimes();
    EasyMock.replay(configManager);
    setupServerAndCurator();
    curator.start();
    curator.blockUntilConnected();
    curator.create().creatingParentsIfNeeded().forPath(LOADPATH);
    objectMapper = new DefaultObjectMapper();
    druidCoordinatorConfig = new TestDruidCoordinatorConfig(new Duration(COORDINATOR_START_DELAY), new Duration(COORDINATOR_PERIOD), null, null, null, new Duration(COORDINATOR_PERIOD), null, null, null, null, null, null, null, null, null, null, 10, new Duration("PT0s"));
    pathChildrenCache = new PathChildrenCache(curator, LOADPATH, true, true, Execs.singleThreaded("coordinator_test_path_children_cache-%d"));
    loadQueuePeon = new CuratorLoadQueuePeon(curator, LOADPATH, objectMapper, Execs.scheduledSingleThreaded("coordinator_test_load_queue_peon_scheduled-%d"), Execs.singleThreaded("coordinator_test_load_queue_peon-%d"), druidCoordinatorConfig);
    loadQueuePeon.start();
    druidNode = new DruidNode("hey", "what", false, 1234, null, true, false);
    loadManagementPeons = new ConcurrentHashMap<>();
    // Both parameters are intentionally ignored; tests only need a single-threaded scheduler.
    scheduledExecutorFactory = (corePoolSize, nameFormat) -> Executors.newSingleThreadScheduledExecutor();
    leaderAnnouncerLatch = new CountDownLatch(1);
    leaderUnannouncerLatch = new CountDownLatch(1);
    coordinator = new DruidCoordinator(druidCoordinatorConfig, new ZkPathsConfig() {

        @Override
        public String getBase() {
            return "druid";
        }
    }, configManager, segmentsMetadataManager, serverInventoryView, metadataRuleManager, () -> curator, serviceEmitter, scheduledExecutorFactory, null, null, new NoopServiceAnnouncer() {

        @Override
        public void announce(DruidNode node) {
            // count down when this coordinator becomes the leader
            leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node) {
            leaderUnannouncerLatch.countDown();
        }
    }, druidNode, loadManagementPeons, null, new HashSet<>(), new CoordinatorCustomDutyGroups(ImmutableSet.of()), new CostBalancerStrategyFactory(), EasyMock.createNiceMock(LookupCoordinatorManager.class), new TestDruidLeaderSelector(), null, ZkEnablementConfig.ENABLED);
}
Also used : SegmentsMetadataManager(org.apache.druid.metadata.SegmentsMetadataManager) BatchServerInventoryView(org.apache.druid.client.BatchServerInventoryView) MetadataRuleManager(org.apache.druid.metadata.MetadataRuleManager) ZkPathsConfig(org.apache.druid.server.initialization.ZkPathsConfig) CoordinatorCustomDutyGroups(org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroups) HashSet(java.util.HashSet) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) JacksonConfigManager(org.apache.druid.common.config.JacksonConfigManager) DruidServer(org.apache.druid.client.DruidServer) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) AtomicReference(java.util.concurrent.atomic.AtomicReference) Duration(org.joda.time.Duration) CountDownLatch(java.util.concurrent.CountDownLatch) ScheduledExecutorFactory(org.apache.druid.java.util.common.concurrent.ScheduledExecutorFactory) PathChildrenCache(org.apache.curator.framework.recipes.cache.PathChildrenCache) DefaultObjectMapper(org.apache.druid.jackson.DefaultObjectMapper) DruidNode(org.apache.druid.server.DruidNode) DataSourcesSnapshot(org.apache.druid.client.DataSourcesSnapshot) NoopServiceAnnouncer(org.apache.druid.curator.discovery.NoopServiceAnnouncer) Before(org.junit.Before)
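
The two latches wired into the NoopServiceAnnouncer above give tests a deterministic way to observe leader election. A minimal sketch of such a check (the test name and timeout are hypothetical; it assumes DruidCoordinator's lifecycle start()/stop() methods, which the real test class also drives):

@Test(timeout = 60_000L)
public void testLeadershipAnnouncements() throws Exception {
    coordinator.start();
    // The overridden announce() above counts down once this coordinator is elected leader.
    leaderAnnouncerLatch.await();
    coordinator.stop();
    // Stopping relinquishes leadership, which triggers unannounce().
    leaderUnannouncerLatch.await();
}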

Example 12 with DataSourcesSnapshot

Use of org.apache.druid.client.DataSourcesSnapshot in project druid by druid-io.

The class DruidCoordinatorTest, method setupSegmentsMetadataMock:

private void setupSegmentsMetadataMock(DruidDataSource dataSource) {
    EasyMock.expect(segmentsMetadataManager.isPollingDatabasePeriodically()).andReturn(true).anyTimes();
    EasyMock.expect(segmentsMetadataManager.iterateAllUsedSegments()).andReturn(dataSource.getSegments()).anyTimes();
    EasyMock.expect(segmentsMetadataManager.getImmutableDataSourcesWithAllUsedSegments()).andReturn(Collections.singleton(dataSource.toImmutableDruidDataSource())).anyTimes();
    DataSourcesSnapshot dataSourcesSnapshot = new DataSourcesSnapshot(ImmutableMap.of(dataSource.getName(), dataSource.toImmutableDruidDataSource()));
    EasyMock.expect(segmentsMetadataManager.getSnapshotOfDataSourcesWithAllUsedSegments()).andReturn(dataSourcesSnapshot).anyTimes();
    EasyMock.expect(segmentsMetadataManager.retrieveAllDataSourceNames()).andReturn(Collections.singleton(dataSource.getName())).anyTimes();
    EasyMock.replay(segmentsMetadataManager);
    EasyMock.expect(this.dataSourcesSnapshot.iterateAllUsedSegmentsInSnapshot()).andReturn(dataSource.getSegments()).anyTimes();
    EasyMock.expect(this.dataSourcesSnapshot.getDataSourcesWithAllUsedSegments()).andReturn(Collections.singleton(dataSource.toImmutableDruidDataSource())).anyTimes();
    EasyMock.replay(this.dataSourcesSnapshot);
}
Also used : DataSourcesSnapshot(org.apache.druid.client.DataSourcesSnapshot)
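
A hypothetical caller of this helper, sketched under the assumption of the DataSegment.builder() and DruidDataSource APIs that Druid's tests use elsewhere (the segment values are illustrative):

DataSegment segment = DataSegment.builder()
        .dataSource("test")
        .interval(Intervals.of("2015-01-01/2015-01-02"))
        .version("1")
        .size(0)
        .build();
DruidDataSource dataSource = new DruidDataSource("test", Collections.emptyMap());
dataSource.addSegment(segment);
setupSegmentsMetadataMock(dataSource);
// Both the manager mock and the snapshot mock now answer consistently for "test".
Assert.assertEquals(Collections.singleton("test"), segmentsMetadataManager.retrieveAllDataSourceNames());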

Example 13 with DataSourcesSnapshot

Use of org.apache.druid.client.DataSourcesSnapshot in project druid by druid-io.

The class CuratorDruidCoordinatorTest, method setUp:

@Before
public void setUp() throws Exception {
    segmentsMetadataManager = EasyMock.createNiceMock(SegmentsMetadataManager.class);
    dataSourcesSnapshot = EasyMock.createNiceMock(DataSourcesSnapshot.class);
    coordinatorRuntimeParams = EasyMock.createNiceMock(DruidCoordinatorRuntimeParams.class);
    metadataRuleManager = EasyMock.createNiceMock(MetadataRuleManager.class);
    configManager = EasyMock.createNiceMock(JacksonConfigManager.class);
    EasyMock.expect(configManager.watch(EasyMock.eq(CoordinatorDynamicConfig.CONFIG_KEY), EasyMock.anyObject(Class.class), EasyMock.anyObject())).andReturn(new AtomicReference<>(CoordinatorDynamicConfig.builder().build())).anyTimes();
    EasyMock.expect(configManager.watch(EasyMock.eq(CoordinatorCompactionConfig.CONFIG_KEY), EasyMock.anyObject(Class.class), EasyMock.anyObject())).andReturn(new AtomicReference<>(CoordinatorCompactionConfig.empty())).anyTimes();
    EasyMock.replay(configManager);
    setupServerAndCurator();
    curator.start();
    curator.blockUntilConnected();
    curator.create().creatingParentsIfNeeded().forPath(SEGPATH);
    curator.create().creatingParentsIfNeeded().forPath(SOURCE_LOAD_PATH);
    curator.create().creatingParentsIfNeeded().forPath(DESTINATION_LOAD_PATH);
    objectMapper = new DefaultObjectMapper();
    druidCoordinatorConfig = new TestDruidCoordinatorConfig(new Duration(COORDINATOR_START_DELAY), new Duration(COORDINATOR_PERIOD), null, null, null, new Duration(COORDINATOR_PERIOD), null, null, null, null, null, null, null, null, null, null, 10, new Duration("PT0s"));
    sourceLoadQueueChildrenCache = new PathChildrenCache(curator, SOURCE_LOAD_PATH, true, true, Execs.singleThreaded("coordinator_test_path_children_cache_src-%d"));
    destinationLoadQueueChildrenCache = new PathChildrenCache(curator, DESTINATION_LOAD_PATH, true, true, Execs.singleThreaded("coordinator_test_path_children_cache_dest-%d"));
    sourceLoadQueuePeon = new CuratorLoadQueuePeon(curator, SOURCE_LOAD_PATH, objectMapper, peonExec, callbackExec, druidCoordinatorConfig);
    destinationLoadQueuePeon = new CuratorLoadQueuePeon(curator, DESTINATION_LOAD_PATH, objectMapper, peonExec, callbackExec, druidCoordinatorConfig);
    druidNode = new DruidNode("hey", "what", false, 1234, null, true, false);
    loadManagementPeons = new ConcurrentHashMap<>();
    scheduledExecutorFactory = (corePoolSize, nameFormat) -> Executors.newSingleThreadScheduledExecutor();
    leaderAnnouncerLatch = new CountDownLatch(1);
    leaderUnannouncerLatch = new CountDownLatch(1);
    coordinator = new DruidCoordinator(druidCoordinatorConfig, new ZkPathsConfig() {

        @Override
        public String getBase() {
            return "druid";
        }
    }, configManager, segmentsMetadataManager, baseView, metadataRuleManager, () -> curator, new NoopServiceEmitter(), scheduledExecutorFactory, null, null, new NoopServiceAnnouncer() {

        @Override
        public void announce(DruidNode node) {
            // count down when this coordinator becomes the leader
            leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node) {
            leaderUnannouncerLatch.countDown();
        }
    }, druidNode, loadManagementPeons, null, null, new CoordinatorCustomDutyGroups(ImmutableSet.of()), new CostBalancerStrategyFactory(), EasyMock.createNiceMock(LookupCoordinatorManager.class), new TestDruidLeaderSelector(), null, ZkEnablementConfig.ENABLED);
}
Also used : SegmentsMetadataManager(org.apache.druid.metadata.SegmentsMetadataManager) MetadataRuleManager(org.apache.druid.metadata.MetadataRuleManager) JacksonConfigManager(org.apache.druid.common.config.JacksonConfigManager) AtomicReference(java.util.concurrent.atomic.AtomicReference) Duration(org.joda.time.Duration) NoopServiceEmitter(org.apache.druid.server.metrics.NoopServiceEmitter) CountDownLatch(java.util.concurrent.CountDownLatch) PathChildrenCache(org.apache.curator.framework.recipes.cache.PathChildrenCache) ZkPathsConfig(org.apache.druid.server.initialization.ZkPathsConfig) DefaultObjectMapper(org.apache.druid.jackson.DefaultObjectMapper) DruidNode(org.apache.druid.server.DruidNode) DataSourcesSnapshot(org.apache.druid.client.DataSourcesSnapshot) NoopServiceAnnouncer(org.apache.druid.curator.discovery.NoopServiceAnnouncer) CoordinatorCustomDutyGroups(org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroups) Before(org.junit.Before)
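
With separate source and destination load paths, this fixture models a segment move between two servers. A rough sketch of driving the destination peon (assuming the loadSegment(DataSegment, LoadPeonCallback) API on LoadQueuePeon; in the real tests a stubbed server acknowledges the request by removing the znode, which is what fires the callback):

DataSegment segment = DataSegment.builder()
        .dataSource("test")
        .interval(Intervals.of("2015-01-01/2015-01-02"))
        .version("1")
        .size(0)
        .build();
CountDownLatch loadRequestSignal = new CountDownLatch(1);
destinationLoadQueuePeon.start();
// Enqueue a load request under DESTINATION_LOAD_PATH; the callback fires once the request is processed.
destinationLoadQueuePeon.loadSegment(segment, loadRequestSignal::countDown);
loadRequestSignal.await();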

Example 14 with DataSourcesSnapshot

Use of org.apache.druid.client.DataSourcesSnapshot in project druid by apache.

The class SqlSegmentsMetadataManager, method replaceWithExistingSegmentIfPresent:

/**
 * For the Java garbage collector, it's better to keep new objects short-lived, but once they are old enough
 * (i.e. promoted to the old generation), to keep them alive. In {@link #poll()}, we fetch and deserialize all
 * existing segments each time and then replace them in {@link #dataSourcesSnapshot}. This method reuses
 * already existing (old) segments when possible, effectively interning them a la {@link String#intern} or
 * {@link com.google.common.collect.Interner}, so that the majority of {@link DataSegment} objects become
 * garbage soon after they are deserialized and die in the young generation. This avoids fragmentation of
 * the old generation and full GCs.
 */
private DataSegment replaceWithExistingSegmentIfPresent(DataSegment segment) {
    @MonotonicNonNull DataSourcesSnapshot dataSourcesSnapshot = this.dataSourcesSnapshot;
    if (dataSourcesSnapshot == null) {
        return segment;
    }
    @Nullable ImmutableDruidDataSource dataSource = dataSourcesSnapshot.getDataSource(segment.getDataSource());
    if (dataSource == null) {
        return segment;
    }
    DataSegment alreadyExistingSegment = dataSource.getSegment(segment.getId());
    return alreadyExistingSegment != null ? alreadyExistingSegment : segment;
}
Also used : ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource) MonotonicNonNull(org.checkerframework.checker.nullness.qual.MonotonicNonNull) DataSourcesSnapshot(org.apache.druid.client.DataSourcesSnapshot) DataSegment(org.apache.druid.timeline.DataSegment) Nullable(javax.annotation.Nullable)
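
The same interning idea in isolation, using the Guava Interner that the Javadoc mentions (a standalone sketch, not the Druid implementation; a weak interner lets canonical segments be reclaimed once no snapshot references them):

import com.google.common.collect.Interner;
import com.google.common.collect.Interners;
import org.apache.druid.timeline.DataSegment;

public class SegmentInterning {
    private static final Interner<DataSegment> INTERNER = Interners.newWeakInterner();

    // Returns the canonical instance for an equal, previously seen segment, so the
    // freshly deserialized copy becomes garbage while still in the young generation.
    public static DataSegment canonicalize(DataSegment freshlyDeserialized) {
        return INTERNER.intern(freshlyDeserialized);
    }
}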

Example 15 with DataSourcesSnapshot

Use of org.apache.druid.client.DataSourcesSnapshot in project druid by apache.

The class SqlSegmentsMetadataManagerTest, method testPollOnDemand:

@Test
public void testPollOnDemand() {
    DataSourcesSnapshot dataSourcesSnapshot = sqlSegmentsMetadataManager.getDataSourcesSnapshot();
    Assert.assertNull(dataSourcesSnapshot);
    // This should return false and not wait/poll anything, as no periodic poll was scheduled
    Assert.assertFalse(sqlSegmentsMetadataManager.useLatestSnapshotIfWithinDelay());
    // The local reference is still the pre-poll null; nothing has refreshed it
    Assert.assertNull(dataSourcesSnapshot);
    // This call forces an on-demand poll
    sqlSegmentsMetadataManager.forceOrWaitOngoingDatabasePoll();
    Assert.assertFalse(sqlSegmentsMetadataManager.isPollingDatabasePeriodically());
    Assert.assertTrue(sqlSegmentsMetadataManager.getLatestDatabasePoll() instanceof SqlSegmentsMetadataManager.OnDemandDatabasePoll);
    dataSourcesSnapshot = sqlSegmentsMetadataManager.getDataSourcesSnapshot();
    Assert.assertEquals(ImmutableSet.of("wikipedia"), sqlSegmentsMetadataManager.retrieveAllDataSourceNames());
    Assert.assertEquals(ImmutableList.of("wikipedia"), dataSourcesSnapshot.getDataSourcesWithAllUsedSegments().stream().map(ImmutableDruidDataSource::getName).collect(Collectors.toList()));
    Assert.assertEquals(ImmutableSet.of(segment1, segment2), ImmutableSet.copyOf(dataSourcesSnapshot.getDataSource("wikipedia").getSegments()));
    Assert.assertEquals(ImmutableSet.of(segment1, segment2), ImmutableSet.copyOf(dataSourcesSnapshot.iterateAllUsedSegmentsInSnapshot()));
}
Also used : ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource) DataSourcesSnapshot(org.apache.druid.client.DataSourcesSnapshot) Test(org.junit.Test)
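
For contrast, a periodic-poll variant would flip isPollingDatabasePeriodically(); a sketch using the start/stop methods that SegmentsMetadataManager declares (the assertions are illustrative):

sqlSegmentsMetadataManager.startPollingDatabasePeriodically();
Assert.assertTrue(sqlSegmentsMetadataManager.isPollingDatabasePeriodically());
// Snapshot reads are now served by the scheduled poll instead of an on-demand one.
sqlSegmentsMetadataManager.stopPollingDatabasePeriodically();
Assert.assertFalse(sqlSegmentsMetadataManager.isPollingDatabasePeriodically());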

Aggregations

DataSourcesSnapshot (org.apache.druid.client.DataSourcesSnapshot): 22 uses
ImmutableDruidDataSource (org.apache.druid.client.ImmutableDruidDataSource): 14 uses
DataSegment (org.apache.druid.timeline.DataSegment): 12 uses
Test (org.junit.Test): 8 uses
CountDownLatch (java.util.concurrent.CountDownLatch): 6 uses
AtomicReference (java.util.concurrent.atomic.AtomicReference): 6 uses
JacksonConfigManager (org.apache.druid.common.config.JacksonConfigManager): 6 uses
NoopServiceAnnouncer (org.apache.druid.curator.discovery.NoopServiceAnnouncer): 6 uses
SegmentsMetadataManager (org.apache.druid.metadata.SegmentsMetadataManager): 6 uses
DruidNode (org.apache.druid.server.DruidNode): 6 uses
CoordinatorCustomDutyGroups (org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroups): 6 uses
ZkPathsConfig (org.apache.druid.server.initialization.ZkPathsConfig): 6 uses
HashSet (java.util.HashSet): 4 uses
Nullable (javax.annotation.Nullable): 4 uses
PathChildrenCache (org.apache.curator.framework.recipes.cache.PathChildrenCache): 4 uses
DefaultObjectMapper (org.apache.druid.jackson.DefaultObjectMapper): 4 uses
MetadataRuleManager (org.apache.druid.metadata.MetadataRuleManager): 4 uses
Duration (org.joda.time.Duration): 4 uses
Before (org.junit.Before): 4 uses
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 2 uses