
Example 1 with JacksonConfigManager

Use of org.apache.druid.common.config.JacksonConfigManager in project druid by druid-io.

From the class DruidCoordinatorTest, the method testCoordinatorCustomDutyGroupsRunAsExpected:

@Test(timeout = 3000)
public void testCoordinatorCustomDutyGroupsRunAsExpected() throws Exception {
    // Some necessary setup to start the Coordinator
    JacksonConfigManager configManager = EasyMock.createNiceMock(JacksonConfigManager.class);
    EasyMock.expect(configManager.watch(EasyMock.eq(CoordinatorDynamicConfig.CONFIG_KEY), EasyMock.anyObject(Class.class), EasyMock.anyObject())).andReturn(new AtomicReference<>(CoordinatorDynamicConfig.builder().build())).anyTimes();
    EasyMock.expect(configManager.watch(EasyMock.eq(CoordinatorCompactionConfig.CONFIG_KEY), EasyMock.anyObject(Class.class), EasyMock.anyObject())).andReturn(new AtomicReference<>(CoordinatorCompactionConfig.empty())).anyTimes();
    EasyMock.replay(configManager);
    EasyMock.expect(segmentsMetadataManager.isPollingDatabasePeriodically()).andReturn(true).anyTimes();
    DruidDataSource dataSource = new DruidDataSource("dataSource1", Collections.emptyMap());
    DataSegment dataSegment = new DataSegment("dataSource1", Intervals.of("2010-01-01/P1D"), "v1", null, null, null, null, 0x9, 0);
    dataSource.addSegment(dataSegment);
    DataSourcesSnapshot dataSourcesSnapshot = new DataSourcesSnapshot(ImmutableMap.of(dataSource.getName(), dataSource.toImmutableDruidDataSource()));
    EasyMock.expect(segmentsMetadataManager.getSnapshotOfDataSourcesWithAllUsedSegments()).andReturn(dataSourcesSnapshot).anyTimes();
    EasyMock.replay(segmentsMetadataManager);
    EasyMock.expect(serverInventoryView.isStarted()).andReturn(true).anyTimes();
    EasyMock.replay(serverInventoryView);
    // Create CoordinatorCustomDutyGroups
    // We will have two groups and each group has one duty
    CountDownLatch latch1 = new CountDownLatch(1);
    CoordinatorCustomDuty duty1 = new CoordinatorCustomDuty() {

        @Override
        public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
            latch1.countDown();
            return params;
        }
    };
    CoordinatorCustomDutyGroup group1 = new CoordinatorCustomDutyGroup("group1", Duration.standardSeconds(1), ImmutableList.of(duty1));
    CountDownLatch latch2 = new CountDownLatch(1);
    CoordinatorCustomDuty duty2 = new CoordinatorCustomDuty() {

        @Override
        public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
            latch2.countDown();
            return params;
        }
    };
    CoordinatorCustomDutyGroup group2 = new CoordinatorCustomDutyGroup("group2", Duration.standardSeconds(1), ImmutableList.of(duty2));
    CoordinatorCustomDutyGroups groups = new CoordinatorCustomDutyGroups(ImmutableSet.of(group1, group2));
    coordinator = new DruidCoordinator(druidCoordinatorConfig, new ZkPathsConfig() {

        @Override
        public String getBase() {
            return "druid";
        }
    }, configManager, segmentsMetadataManager, serverInventoryView, metadataRuleManager, () -> curator, serviceEmitter, scheduledExecutorFactory, null, null, new NoopServiceAnnouncer() {

        @Override
        public void announce(DruidNode node) {
            // count down when this coordinator becomes the leader
            leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node) {
            leaderUnannouncerLatch.countDown();
        }
    }, druidNode, loadManagementPeons, null, new HashSet<>(), groups, new CostBalancerStrategyFactory(), EasyMock.createNiceMock(LookupCoordinatorManager.class), new TestDruidLeaderSelector(), null, ZkEnablementConfig.ENABLED);
    coordinator.start();
    // Wait until the group 1 duty has run and counted down latch1
    latch1.await();
    // Wait until the group 2 duty has run and counted down latch2
    latch2.await();
}
Also used : JacksonConfigManager(org.apache.druid.common.config.JacksonConfigManager) AtomicReference(java.util.concurrent.atomic.AtomicReference) CountDownLatch(java.util.concurrent.CountDownLatch) DruidDataSource(org.apache.druid.client.DruidDataSource) ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource) DataSegment(org.apache.druid.timeline.DataSegment) ZkPathsConfig(org.apache.druid.server.initialization.ZkPathsConfig) CoordinatorCustomDuty(org.apache.druid.server.coordinator.duty.CoordinatorCustomDuty) DruidNode(org.apache.druid.server.DruidNode) CoordinatorCustomDutyGroup(org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroup) DataSourcesSnapshot(org.apache.druid.client.DataSourcesSnapshot) CoordinatorCustomDutyGroups(org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroups) NoopServiceAnnouncer(org.apache.druid.curator.discovery.NoopServiceAnnouncer) HashSet(java.util.HashSet) Test(org.junit.Test)
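
The test hinges on a simple latch handshake: each custom duty counts down its latch when it runs, and the test thread blocks on await() until that happens. Below is a minimal, self-contained sketch of that pattern using only JDK classes; the scheduler and class name stand in for the coordinator's duty group and are illustrative, not Druid APIs. A bounded await keeps a misbehaving scheduler from hanging the test, which is also what the @Test(timeout = 3000) above guards against.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class DutyLatchSketch {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch dutyRan = new CountDownLatch(1);
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        // Stand-in for a coordinator duty group: run the "duty" once per second.
        scheduler.scheduleAtFixedRate(dutyRan::countDown, 0, 1, TimeUnit.SECONDS);
        // Block until the duty has run at least once; bound the wait so a broken
        // scheduler fails the test instead of hanging it forever.
        boolean ran = dutyRan.await(10, TimeUnit.SECONDS);
        System.out.println("duty ran: " + ran);
        scheduler.shutdownNow();
    }
}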

Example 2 with JacksonConfigManager

Use of org.apache.druid.common.config.JacksonConfigManager in project druid by druid-io.

From the class KillCompactionConfig, the method run:

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    long currentTimeMillis = System.currentTimeMillis();
    if ((lastKillTime + period) < currentTimeMillis) {
        lastKillTime = currentTimeMillis;
        try {
            RetryUtils.retry(() -> {
                final byte[] currentBytes = CoordinatorCompactionConfig.getConfigInByteFromDb(connector, connectorConfig);
                final CoordinatorCompactionConfig current = CoordinatorCompactionConfig.convertByteToConfig(jacksonConfigManager, currentBytes);
                // If current compaction config is empty then there is nothing to do
                if (CoordinatorCompactionConfig.empty().equals(current)) {
                    log.info("Finished running KillCompactionConfig duty. Nothing to do as compaction config is already empty.");
                    emitMetric(params.getEmitter(), 0);
                    return ConfigManager.SetResult.ok();
                }
                // Get all active datasources
                // Note that we get all active datasources after getting the compaction config to prevent a race
                // condition if a new datasource and its config are added concurrently.
                Set<String> activeDatasources = sqlSegmentsMetadataManager.retrieveAllDataSourceNames();
                final Map<String, DataSourceCompactionConfig> updated = current.getCompactionConfigs().stream().filter(dataSourceCompactionConfig -> activeDatasources.contains(dataSourceCompactionConfig.getDataSource())).collect(Collectors.toMap(DataSourceCompactionConfig::getDataSource, Function.identity()));
                // Calculate number of compaction configs to remove for logging
                int compactionConfigRemoved = current.getCompactionConfigs().size() - updated.size();
                ConfigManager.SetResult result = jacksonConfigManager.set(CoordinatorCompactionConfig.CONFIG_KEY, currentBytes, CoordinatorCompactionConfig.from(current, ImmutableList.copyOf(updated.values())), new AuditInfo("KillCompactionConfig", "CoordinatorDuty for automatic deletion of compaction config", ""));
                if (result.isOk()) {
                    log.info("Finished running KillCompactionConfig duty. Removed %,d compaction configs", compactionConfigRemoved);
                    emitMetric(params.getEmitter(), compactionConfigRemoved);
                } else if (result.isRetryable()) {
                    // Failed but is retryable
                    log.debug("Retrying KillCompactionConfig duty");
                    throw new RetryableException(result.getException());
                } else {
                    // Failed and not retryable
                    log.error(result.getException(), "Failed to kill compaction configurations");
                    emitMetric(params.getEmitter(), 0);
                }
                return result;
            }, e -> e instanceof RetryableException, UPDATE_NUM_RETRY);
        } catch (Exception e) {
            log.error(e, "Failed to kill compaction configurations");
            emitMetric(params.getEmitter(), 0);
        }
    }
    return params;
}
Also used : Logger(org.apache.druid.java.util.common.logger.Logger) AuditInfo(org.apache.druid.audit.AuditInfo) MetadataStorageTablesConfig(org.apache.druid.metadata.MetadataStorageTablesConfig) ServiceMetricEvent(org.apache.druid.java.util.emitter.service.ServiceMetricEvent) Inject(com.google.inject.Inject) CoordinatorCompactionConfig(org.apache.druid.server.coordinator.CoordinatorCompactionConfig) DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams) Set(java.util.Set) JacksonConfigManager(org.apache.druid.common.config.JacksonConfigManager) RetryableException(org.apache.druid.java.util.RetryableException) Function(java.util.function.Function) Collectors(java.util.stream.Collectors) DataSourceCompactionConfig(org.apache.druid.server.coordinator.DataSourceCompactionConfig) MetadataStorageConnector(org.apache.druid.metadata.MetadataStorageConnector) ImmutableList(com.google.common.collect.ImmutableList) ConfigManager(org.apache.druid.common.config.ConfigManager) ServiceEmitter(org.apache.druid.java.util.emitter.service.ServiceEmitter) DruidCoordinatorConfig(org.apache.druid.server.coordinator.DruidCoordinatorConfig) Map(java.util.Map) Preconditions(com.google.common.base.Preconditions) SqlSegmentsMetadataManager(org.apache.druid.metadata.SqlSegmentsMetadataManager) RetryUtils(org.apache.druid.java.util.common.RetryUtils)
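
The duty wraps the delete-and-swap of the compaction config in RetryUtils.retry so that a retryable failure (typically a lost compare-and-set race against a concurrent config update) is retried up to UPDATE_NUM_RETRY times, while a non-retryable failure falls through to the error branch. As a rough, self-contained sketch of that retry shape in plain Java (the retry helper below is a simplified stand-in, not Druid's actual RetryUtils API):

import java.util.concurrent.Callable;
import java.util.function.Predicate;

public class RetrySketch {
    // Simplified stand-in for the RetryUtils.retry(...) call used above:
    // retry the task while the failure is classified as retryable, up to maxTries.
    static <T> T retry(Callable<T> task, Predicate<Throwable> retryable, int maxTries) throws Exception {
        Exception last = null;
        for (int attempt = 1; attempt <= maxTries; attempt++) {
            try {
                return task.call();
            } catch (Exception e) {
                last = e;
                if (!retryable.test(e)) {
                    // Not retryable: fail fast, mirroring the "Failed and not retryable" branch.
                    throw e;
                }
            }
        }
        // Retries exhausted: surface the last failure.
        throw last;
    }

    public static void main(String[] args) throws Exception {
        int[] calls = {0};
        String result = retry(() -> {
            if (++calls[0] < 3) {
                throw new IllegalStateException("transient CAS failure");
            }
            return "ok after " + calls[0] + " tries";
        }, e -> e instanceof IllegalStateException, 5);
        System.out.println(result);
    }
}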

Example 3 with JacksonConfigManager

Use of org.apache.druid.common.config.JacksonConfigManager in project druid by druid-io.

From the class DruidCoordinatorTest, the method testBalancerThreadNumber:

@Test
public void testBalancerThreadNumber() {
    CoordinatorDynamicConfig dynamicConfig = EasyMock.createNiceMock(CoordinatorDynamicConfig.class);
    EasyMock.expect(dynamicConfig.getBalancerComputeThreads()).andReturn(5).times(2);
    EasyMock.expect(dynamicConfig.getBalancerComputeThreads()).andReturn(10).once();
    JacksonConfigManager configManager = EasyMock.createNiceMock(JacksonConfigManager.class);
    EasyMock.expect(configManager.watch(EasyMock.eq(CoordinatorDynamicConfig.CONFIG_KEY), EasyMock.anyObject(Class.class), EasyMock.anyObject())).andReturn(new AtomicReference<>(dynamicConfig)).anyTimes();
    ScheduledExecutorFactory scheduledExecutorFactory = EasyMock.createNiceMock(ScheduledExecutorFactory.class);
    EasyMock.replay(configManager, dynamicConfig, scheduledExecutorFactory);
    DruidCoordinator c = new DruidCoordinator(druidCoordinatorConfig, null, configManager, null, null, null, () -> null, null, scheduledExecutorFactory, null, null, null, null, null, null, new CoordinatorCustomDutyGroups(ImmutableSet.of()), null, null, null, null, ZkEnablementConfig.ENABLED);
    DruidCoordinator.DutiesRunnable duty = c.new DutiesRunnable(Collections.emptyList(), 0, "TEST");
    // before initialization
    Assert.assertEquals(0, c.getCachedBalancerThreadNumber());
    Assert.assertNull(c.getBalancerExec());
    // first initialization
    duty.initBalancerExecutor();
    System.out.println("c.getCachedBalancerThreadNumber(): " + c.getCachedBalancerThreadNumber());
    Assert.assertEquals(5, c.getCachedBalancerThreadNumber());
    ListeningExecutorService firstExec = c.getBalancerExec();
    Assert.assertNotNull(firstExec);
    // second initialization, expect no changes as cachedBalancerThreadNumber is not changed
    duty.initBalancerExecutor();
    Assert.assertEquals(5, c.getCachedBalancerThreadNumber());
    ListeningExecutorService secondExec = c.getBalancerExec();
    Assert.assertNotNull(secondExec);
    Assert.assertTrue(firstExec == secondExec);
    // third initialization, expect executor recreated as cachedBalancerThreadNumber is changed to 10
    duty.initBalancerExecutor();
    Assert.assertEquals(10, c.getCachedBalancerThreadNumber());
    ListeningExecutorService thirdExec = c.getBalancerExec();
    Assert.assertNotNull(thirdExec);
    Assert.assertFalse(secondExec == thirdExec);
    Assert.assertFalse(firstExec == thirdExec);
}
Also used : ScheduledExecutorFactory(org.apache.druid.java.util.common.concurrent.ScheduledExecutorFactory) JacksonConfigManager(org.apache.druid.common.config.JacksonConfigManager) AtomicReference(java.util.concurrent.atomic.AtomicReference) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) CoordinatorCustomDutyGroups(org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroups) Test(org.junit.Test)
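
The interesting bit is the sequential stubbing: EasyMock consumes expectations recorded on the same method in order, so getBalancerComputeThreads() yields 5 for the first two duty runs and 10 for the third, which is what forces the balancer executor to be recreated. A minimal, self-contained illustration of that EasyMock behaviour (the Config interface below is hypothetical, standing in for CoordinatorDynamicConfig):

import org.easymock.EasyMock;

public class SequentialStubSketch {
    // Hypothetical interface standing in for CoordinatorDynamicConfig.
    interface Config {
        int getBalancerComputeThreads();
    }

    public static void main(String[] args) {
        Config config = EasyMock.createNiceMock(Config.class);
        // Expectations on the same method are consumed in recording order:
        // the first two calls return 5, the third returns 10.
        EasyMock.expect(config.getBalancerComputeThreads()).andReturn(5).times(2);
        EasyMock.expect(config.getBalancerComputeThreads()).andReturn(10).once();
        EasyMock.replay(config);
        System.out.println(config.getBalancerComputeThreads()); // 5
        System.out.println(config.getBalancerComputeThreads()); // 5
        System.out.println(config.getBalancerComputeThreads()); // 10
        EasyMock.verify(config);
    }
}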

Example 4 with JacksonConfigManager

Use of org.apache.druid.common.config.JacksonConfigManager in project druid by druid-io.

From the class DruidCoordinatorTest, the method setUp:

@Before
public void setUp() throws Exception {
    druidServer = EasyMock.createMock(DruidServer.class);
    serverInventoryView = EasyMock.createMock(BatchServerInventoryView.class);
    segmentsMetadataManager = EasyMock.createNiceMock(SegmentsMetadataManager.class);
    dataSourcesSnapshot = EasyMock.createNiceMock(DataSourcesSnapshot.class);
    coordinatorRuntimeParams = EasyMock.createNiceMock(DruidCoordinatorRuntimeParams.class);
    metadataRuleManager = EasyMock.createNiceMock(MetadataRuleManager.class);
    JacksonConfigManager configManager = EasyMock.createNiceMock(JacksonConfigManager.class);
    EasyMock.expect(configManager.watch(EasyMock.eq(CoordinatorDynamicConfig.CONFIG_KEY), EasyMock.anyObject(Class.class), EasyMock.anyObject())).andReturn(new AtomicReference<>(CoordinatorDynamicConfig.builder().build())).anyTimes();
    EasyMock.expect(configManager.watch(EasyMock.eq(CoordinatorCompactionConfig.CONFIG_KEY), EasyMock.anyObject(Class.class), EasyMock.anyObject())).andReturn(new AtomicReference<>(CoordinatorCompactionConfig.empty())).anyTimes();
    EasyMock.replay(configManager);
    setupServerAndCurator();
    curator.start();
    curator.blockUntilConnected();
    curator.create().creatingParentsIfNeeded().forPath(LOADPATH);
    objectMapper = new DefaultObjectMapper();
    druidCoordinatorConfig = new TestDruidCoordinatorConfig(new Duration(COORDINATOR_START_DELAY), new Duration(COORDINATOR_PERIOD), null, null, null, new Duration(COORDINATOR_PERIOD), null, null, null, null, null, null, null, null, null, null, 10, new Duration("PT0s"));
    pathChildrenCache = new PathChildrenCache(curator, LOADPATH, true, true, Execs.singleThreaded("coordinator_test_path_children_cache-%d"));
    loadQueuePeon = new CuratorLoadQueuePeon(curator, LOADPATH, objectMapper, Execs.scheduledSingleThreaded("coordinator_test_load_queue_peon_scheduled-%d"), Execs.singleThreaded("coordinator_test_load_queue_peon-%d"), druidCoordinatorConfig);
    loadQueuePeon.start();
    druidNode = new DruidNode("hey", "what", false, 1234, null, true, false);
    loadManagementPeons = new ConcurrentHashMap<>();
    scheduledExecutorFactory = new ScheduledExecutorFactory() {

        @Override
        public ScheduledExecutorService create(int corePoolSize, final String nameFormat) {
            return Executors.newSingleThreadScheduledExecutor();
        }
    };
    leaderAnnouncerLatch = new CountDownLatch(1);
    leaderUnannouncerLatch = new CountDownLatch(1);
    coordinator = new DruidCoordinator(druidCoordinatorConfig, new ZkPathsConfig() {

        @Override
        public String getBase() {
            return "druid";
        }
    }, configManager, segmentsMetadataManager, serverInventoryView, metadataRuleManager, () -> curator, serviceEmitter, scheduledExecutorFactory, null, null, new NoopServiceAnnouncer() {

        @Override
        public void announce(DruidNode node) {
            // count down when this coordinator becomes the leader
            leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node) {
            leaderUnannouncerLatch.countDown();
        }
    }, druidNode, loadManagementPeons, null, new HashSet<>(), new CoordinatorCustomDutyGroups(ImmutableSet.of()), new CostBalancerStrategyFactory(), EasyMock.createNiceMock(LookupCoordinatorManager.class), new TestDruidLeaderSelector(), null, ZkEnablementConfig.ENABLED);
}
Also used : SegmentsMetadataManager(org.apache.druid.metadata.SegmentsMetadataManager) BatchServerInventoryView(org.apache.druid.client.BatchServerInventoryView) MetadataRuleManager(org.apache.druid.metadata.MetadataRuleManager) ZkPathsConfig(org.apache.druid.server.initialization.ZkPathsConfig) CoordinatorCustomDutyGroups(org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroups) HashSet(java.util.HashSet) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) JacksonConfigManager(org.apache.druid.common.config.JacksonConfigManager) DruidServer(org.apache.druid.client.DruidServer) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) AtomicReference(java.util.concurrent.atomic.AtomicReference) Duration(org.joda.time.Duration) CountDownLatch(java.util.concurrent.CountDownLatch) ScheduledExecutorFactory(org.apache.druid.java.util.common.concurrent.ScheduledExecutorFactory) PathChildrenCache(org.apache.curator.framework.recipes.cache.PathChildrenCache) DefaultObjectMapper(org.apache.druid.jackson.DefaultObjectMapper) DruidNode(org.apache.druid.server.DruidNode) DataSourcesSnapshot(org.apache.druid.client.DataSourcesSnapshot) NoopServiceAnnouncer(org.apache.druid.curator.discovery.NoopServiceAnnouncer) Before(org.junit.Before)
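
Most of this setUp() is wiring; the part worth calling out is the ZooKeeper plumbing: the test starts Curator against an embedded ZooKeeper (via setupServerAndCurator(), not shown here), blocks until connected, and pre-creates the load-queue path. A minimal, self-contained sketch of that Curator usage, assuming curator-framework and curator-test on the classpath (the znode path below is illustrative, not the test's actual LOADPATH):

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.RetryOneTime;
import org.apache.curator.test.TestingServer;

public class CuratorSetupSketch {
    public static void main(String[] args) throws Exception {
        // Embedded ZooKeeper server for tests (curator-test module).
        try (TestingServer zk = new TestingServer()) {
            CuratorFramework curator = CuratorFrameworkFactory.newClient(zk.getConnectString(), new RetryOneTime(100));
            curator.start();
            curator.blockUntilConnected();
            // Create the path with any missing parents, as setUp() does for LOADPATH.
            curator.create().creatingParentsIfNeeded().forPath("/druid/loadqueue/localhost:1234");
            System.out.println("created: " + (curator.checkExists().forPath("/druid/loadqueue/localhost:1234") != null));
            curator.close();
        }
    }
}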

Aggregations

JacksonConfigManager (org.apache.druid.common.config.JacksonConfigManager): 4
AtomicReference (java.util.concurrent.atomic.AtomicReference): 3
CoordinatorCustomDutyGroups (org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroups): 3
HashSet (java.util.HashSet): 2
CountDownLatch (java.util.concurrent.CountDownLatch): 2
DataSourcesSnapshot (org.apache.druid.client.DataSourcesSnapshot): 2
NoopServiceAnnouncer (org.apache.druid.curator.discovery.NoopServiceAnnouncer): 2
ScheduledExecutorFactory (org.apache.druid.java.util.common.concurrent.ScheduledExecutorFactory): 2
DruidNode (org.apache.druid.server.DruidNode): 2
ZkPathsConfig (org.apache.druid.server.initialization.ZkPathsConfig): 2
Test (org.junit.Test): 2
Preconditions (com.google.common.base.Preconditions): 1
ImmutableList (com.google.common.collect.ImmutableList): 1
ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService): 1
Inject (com.google.inject.Inject): 1
Map (java.util.Map): 1
Set (java.util.Set): 1
ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService): 1
Function (java.util.function.Function): 1
Collectors (java.util.stream.Collectors): 1