use of org.apache.druid.common.config.JacksonConfigManager in project druid by druid-io.
the class DruidCoordinatorTest method testCoordinatorCustomDutyGroupsRunAsExpected.
@Test(timeout = 3000)
public void testCoordinatorCustomDutyGroupsRunAsExpected() throws Exception {
// Some necessary setup to start the Coordinator
JacksonConfigManager configManager = EasyMock.createNiceMock(JacksonConfigManager.class);
EasyMock.expect(configManager.watch(
    EasyMock.eq(CoordinatorDynamicConfig.CONFIG_KEY),
    EasyMock.anyObject(Class.class),
    EasyMock.anyObject()
)).andReturn(new AtomicReference<>(CoordinatorDynamicConfig.builder().build())).anyTimes();
EasyMock.expect(configManager.watch(
    EasyMock.eq(CoordinatorCompactionConfig.CONFIG_KEY),
    EasyMock.anyObject(Class.class),
    EasyMock.anyObject()
)).andReturn(new AtomicReference<>(CoordinatorCompactionConfig.empty())).anyTimes();
EasyMock.replay(configManager);
EasyMock.expect(segmentsMetadataManager.isPollingDatabasePeriodically()).andReturn(true).anyTimes();
DruidDataSource dataSource = new DruidDataSource("dataSource1", Collections.emptyMap());
DataSegment dataSegment = new DataSegment("dataSource1", Intervals.of("2010-01-01/P1D"), "v1", null, null, null, null, 0x9, 0);
dataSource.addSegment(dataSegment);
DataSourcesSnapshot dataSourcesSnapshot = new DataSourcesSnapshot(ImmutableMap.of(dataSource.getName(), dataSource.toImmutableDruidDataSource()));
EasyMock.expect(segmentsMetadataManager.getSnapshotOfDataSourcesWithAllUsedSegments()).andReturn(dataSourcesSnapshot).anyTimes();
EasyMock.replay(segmentsMetadataManager);
EasyMock.expect(serverInventoryView.isStarted()).andReturn(true).anyTimes();
EasyMock.replay(serverInventoryView);
// Create CoordinatorCustomDutyGroups
// We will have two groups, each with a single duty
CountDownLatch latch1 = new CountDownLatch(1);
CoordinatorCustomDuty duty1 = new CoordinatorCustomDuty() {
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
latch1.countDown();
return params;
}
};
CoordinatorCustomDutyGroup group1 = new CoordinatorCustomDutyGroup("group1", Duration.standardSeconds(1), ImmutableList.of(duty1));
CountDownLatch latch2 = new CountDownLatch(1);
CoordinatorCustomDuty duty2 = new CoordinatorCustomDuty() {
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
latch2.countDown();
return params;
}
};
CoordinatorCustomDutyGroup group2 = new CoordinatorCustomDutyGroup("group2", Duration.standardSeconds(1), ImmutableList.of(duty2));
CoordinatorCustomDutyGroups groups = new CoordinatorCustomDutyGroups(ImmutableSet.of(group1, group2));
coordinator = new DruidCoordinator(druidCoordinatorConfig, new ZkPathsConfig() {
@Override
public String getBase() {
return "druid";
}
}, configManager, segmentsMetadataManager, serverInventoryView, metadataRuleManager,
    () -> curator, serviceEmitter, scheduledExecutorFactory, null, null,
    new NoopServiceAnnouncer() {
@Override
public void announce(DruidNode node) {
// count down when this coordinator becomes the leader
leaderAnnouncerLatch.countDown();
}
@Override
public void unannounce(DruidNode node) {
leaderUnannouncerLatch.countDown();
}
}, druidNode, loadManagementPeons, null, new HashSet<>(), groups,
    new CostBalancerStrategyFactory(), EasyMock.createNiceMock(LookupCoordinatorManager.class),
    new TestDruidLeaderSelector(), null, ZkEnablementConfig.ENABLED);
coordinator.start();
// Wait until the group 1 duty has run and counted down latch1
latch1.await();
// Wait until the group 2 duty has run and counted down latch2
latch2.await();
}
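Since run is the only method the anonymous classes above implement, each duty can be collapsed into a lambda. A minimal sketch, assuming CoordinatorCustomDuty declares no other abstract methods:
CountDownLatch latch1 = new CountDownLatch(1);
// the duty simply signals that it has executed at least once
CoordinatorCustomDuty duty1 = params -> {
    latch1.countDown();
    return params;
};
CoordinatorCustomDutyGroup group1 =
    new CoordinatorCustomDutyGroup("group1", Duration.standardSeconds(1), ImmutableList.of(duty1));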
use of org.apache.druid.common.config.JacksonConfigManager in project druid by druid-io.
the class KillCompactionConfig method run.
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
long currentTimeMillis = System.currentTimeMillis();
if ((lastKillTime + period) < currentTimeMillis) {
lastKillTime = currentTimeMillis;
try {
RetryUtils.retry(() -> {
final byte[] currentBytes = CoordinatorCompactionConfig.getConfigInByteFromDb(connector, connectorConfig);
final CoordinatorCompactionConfig current = CoordinatorCompactionConfig.convertByteToConfig(jacksonConfigManager, currentBytes);
// If current compaction config is empty then there is nothing to do
if (CoordinatorCompactionConfig.empty().equals(current)) {
log.info("Finished running KillCompactionConfig duty. Nothing to do as compaction config is already empty.");
emitMetric(params.getEmitter(), 0);
return ConfigManager.SetResult.ok();
}
// Get all active datasources
// Note that we get all active datasources after getting the compaction config to prevent
// a race condition if a new datasource and its config are added concurrently.
Set<String> activeDatasources = sqlSegmentsMetadataManager.retrieveAllDataSourceNames();
final Map<String, DataSourceCompactionConfig> updated = current
    .getCompactionConfigs()
    .stream()
    .filter(dataSourceCompactionConfig -> activeDatasources.contains(dataSourceCompactionConfig.getDataSource()))
    .collect(Collectors.toMap(DataSourceCompactionConfig::getDataSource, Function.identity()));
// Calculate number of compaction configs to remove for logging
int compactionConfigRemoved = current.getCompactionConfigs().size() - updated.size();
ConfigManager.SetResult result = jacksonConfigManager.set(
    CoordinatorCompactionConfig.CONFIG_KEY,
    currentBytes,
    CoordinatorCompactionConfig.from(current, ImmutableList.copyOf(updated.values())),
    new AuditInfo("KillCompactionConfig", "CoordinatorDuty for automatic deletion of compaction config", "")
);
if (result.isOk()) {
log.info("Finished running KillCompactionConfig duty. Removed %,d compaction configs", compactionConfigRemoved);
emitMetric(params.getEmitter(), compactionConfigRemoved);
} else if (result.isRetryable()) {
// Failed but is retryable
log.debug("Retrying KillCompactionConfig duty");
throw new RetryableException(result.getException());
} else {
// Failed and not retryable
log.error(result.getException(), "Failed to kill compaction configurations");
emitMetric(params.getEmitter(), 0);
}
return result;
}, e -> e instanceof RetryableException, UPDATE_NUM_RETRY);
} catch (Exception e) {
log.error(e, "Failed to kill compaction configurations");
emitMetric(params.getEmitter(), 0);
}
}
return params;
}
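The currentBytes read from the database serve as a compare-and-swap token here: jacksonConfigManager.set applies the update only if the stored config still matches what was read, which is consistent with the retry-on-retryable loop above. A minimal sketch of that read-modify-write pattern, where computeNewConfig is a hypothetical helper:
// Optimistic read-modify-write: read the stored bytes, derive the new config,
// and let set() reject the write if another writer got there first.
byte[] oldBytes = CoordinatorCompactionConfig.getConfigInByteFromDb(connector, connectorConfig);
CoordinatorCompactionConfig oldConfig = CoordinatorCompactionConfig.convertByteToConfig(jacksonConfigManager, oldBytes);
CoordinatorCompactionConfig newConfig = computeNewConfig(oldConfig); // hypothetical helper
ConfigManager.SetResult result = jacksonConfigManager.set(
    CoordinatorCompactionConfig.CONFIG_KEY,
    oldBytes,
    newConfig,
    new AuditInfo("author", "comment", "ip")
);
if (result.isRetryable()) {
    // the stored config changed between the read and the write; retry the whole cycle
}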
use of org.apache.druid.common.config.JacksonConfigManager in project druid by druid-io.
the class DruidCoordinatorTest method testBalancerThreadNumber.
@Test
public void testBalancerThreadNumber() {
CoordinatorDynamicConfig dynamicConfig = EasyMock.createNiceMock(CoordinatorDynamicConfig.class);
EasyMock.expect(dynamicConfig.getBalancerComputeThreads()).andReturn(5).times(2);
EasyMock.expect(dynamicConfig.getBalancerComputeThreads()).andReturn(10).once();
JacksonConfigManager configManager = EasyMock.createNiceMock(JacksonConfigManager.class);
EasyMock.expect(configManager.watch(
    EasyMock.eq(CoordinatorDynamicConfig.CONFIG_KEY),
    EasyMock.anyObject(Class.class),
    EasyMock.anyObject()
)).andReturn(new AtomicReference<>(dynamicConfig)).anyTimes();
ScheduledExecutorFactory scheduledExecutorFactory = EasyMock.createNiceMock(ScheduledExecutorFactory.class);
EasyMock.replay(configManager, dynamicConfig, scheduledExecutorFactory);
DruidCoordinator c = new DruidCoordinator(
    druidCoordinatorConfig, null, configManager, null, null, null, () -> null, null,
    scheduledExecutorFactory, null, null, null, null, null, null,
    new CoordinatorCustomDutyGroups(ImmutableSet.of()), null, null, null, null,
    ZkEnablementConfig.ENABLED
);
DruidCoordinator.DutiesRunnable duty = c.new DutiesRunnable(Collections.emptyList(), 0, "TEST");
// before initialization
Assert.assertEquals(0, c.getCachedBalancerThreadNumber());
Assert.assertNull(c.getBalancerExec());
// first initialization
duty.initBalancerExecutor();
System.out.println("c.getCachedBalancerThreadNumber(): " + c.getCachedBalancerThreadNumber());
Assert.assertEquals(5, c.getCachedBalancerThreadNumber());
ListeningExecutorService firstExec = c.getBalancerExec();
Assert.assertNotNull(firstExec);
// second initialization, expect no changes as cachedBalancerThreadNumber is not changed
duty.initBalancerExecutor();
Assert.assertEquals(5, c.getCachedBalancerThreadNumber());
ListeningExecutorService secondExec = c.getBalancerExec();
Assert.assertNotNull(secondExec);
Assert.assertSame(firstExec, secondExec);
// third initialization, expect executor recreated as cachedBalancerThreadNumber is changed to 10
duty.initBalancerExecutor();
Assert.assertEquals(10, c.getCachedBalancerThreadNumber());
ListeningExecutorService thirdExec = c.getBalancerExec();
Assert.assertNotNull(thirdExec);
Assert.assertNotSame(secondExec, thirdExec);
Assert.assertNotSame(firstExec, thirdExec);
}
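The stubbing at the top of this test relies on EasyMock replaying answers in declaration order: the first two calls to getBalancerComputeThreads() return 5 and the third returns 10, which drives the three initBalancerExecutor() phases. The same sequence can also be written as a single chained expectation, e.g.:
EasyMock.expect(dynamicConfig.getBalancerComputeThreads())
        .andReturn(5).times(2)
        .andReturn(10).once();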
use of org.apache.druid.common.config.JacksonConfigManager in project druid by druid-io.
the class DruidCoordinatorTest method setUp.
@Before
public void setUp() throws Exception {
druidServer = EasyMock.createMock(DruidServer.class);
serverInventoryView = EasyMock.createMock(BatchServerInventoryView.class);
segmentsMetadataManager = EasyMock.createNiceMock(SegmentsMetadataManager.class);
dataSourcesSnapshot = EasyMock.createNiceMock(DataSourcesSnapshot.class);
coordinatorRuntimeParams = EasyMock.createNiceMock(DruidCoordinatorRuntimeParams.class);
metadataRuleManager = EasyMock.createNiceMock(MetadataRuleManager.class);
JacksonConfigManager configManager = EasyMock.createNiceMock(JacksonConfigManager.class);
EasyMock.expect(configManager.watch(
    EasyMock.eq(CoordinatorDynamicConfig.CONFIG_KEY),
    EasyMock.anyObject(Class.class),
    EasyMock.anyObject()
)).andReturn(new AtomicReference<>(CoordinatorDynamicConfig.builder().build())).anyTimes();
EasyMock.expect(configManager.watch(
    EasyMock.eq(CoordinatorCompactionConfig.CONFIG_KEY),
    EasyMock.anyObject(Class.class),
    EasyMock.anyObject()
)).andReturn(new AtomicReference<>(CoordinatorCompactionConfig.empty())).anyTimes();
EasyMock.replay(configManager);
setupServerAndCurator();
curator.start();
curator.blockUntilConnected();
curator.create().creatingParentsIfNeeded().forPath(LOADPATH);
objectMapper = new DefaultObjectMapper();
druidCoordinatorConfig = new TestDruidCoordinatorConfig(
    new Duration(COORDINATOR_START_DELAY),
    new Duration(COORDINATOR_PERIOD),
    null, null, null,
    new Duration(COORDINATOR_PERIOD),
    null, null, null, null, null, null, null, null, null, null,
    10,
    new Duration("PT0s")
);
pathChildrenCache = new PathChildrenCache(curator, LOADPATH, true, true, Execs.singleThreaded("coordinator_test_path_children_cache-%d"));
loadQueuePeon = new CuratorLoadQueuePeon(
    curator,
    LOADPATH,
    objectMapper,
    Execs.scheduledSingleThreaded("coordinator_test_load_queue_peon_scheduled-%d"),
    Execs.singleThreaded("coordinator_test_load_queue_peon-%d"),
    druidCoordinatorConfig
);
loadQueuePeon.start();
druidNode = new DruidNode("hey", "what", false, 1234, null, true, false);
loadManagementPeons = new ConcurrentHashMap<>();
scheduledExecutorFactory = new ScheduledExecutorFactory() {
@Override
public ScheduledExecutorService create(int corePoolSize, final String nameFormat) {
return Executors.newSingleThreadScheduledExecutor();
}
};
leaderAnnouncerLatch = new CountDownLatch(1);
leaderUnannouncerLatch = new CountDownLatch(1);
coordinator = new DruidCoordinator(druidCoordinatorConfig, new ZkPathsConfig() {
@Override
public String getBase() {
return "druid";
}
}, configManager, segmentsMetadataManager, serverInventoryView, metadataRuleManager,
    () -> curator, serviceEmitter, scheduledExecutorFactory, null, null,
    new NoopServiceAnnouncer() {
@Override
public void announce(DruidNode node) {
// count down when this coordinator becomes the leader
leaderAnnouncerLatch.countDown();
}
@Override
public void unannounce(DruidNode node) {
leaderUnannouncerLatch.countDown();
}
}, druidNode, loadManagementPeons, null, new HashSet<>(),
    new CoordinatorCustomDutyGroups(ImmutableSet.of()), new CostBalancerStrategyFactory(),
    EasyMock.createNiceMock(LookupCoordinatorManager.class), new TestDruidLeaderSelector(),
    null, ZkEnablementConfig.ENABLED);
}
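The NoopServiceAnnouncer override above turns leadership changes into latch countdowns, so tests built on this setup can block deterministically instead of sleeping. A sketch of how such a test might use the latches, assuming coordinator.stop() triggers the unannounce callback:
coordinator.start();
// blocks until announce() fires, i.e. until this coordinator becomes the leader
leaderAnnouncerLatch.await();
// ... exercise behavior that requires leadership ...
coordinator.stop();
// blocks until unannounce() fires on leadership loss
leaderUnannouncerLatch.await();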