Use of org.apache.druid.server.DruidNode in project druid by druid-io.
In the class DruidCoordinatorTest, the method testInitializeCompactSegmentsDutyWhenCustomDutyGroupDoesNotContainsCompactSegments:
@Test
public void testInitializeCompactSegmentsDutyWhenCustomDutyGroupDoesNotContainsCompactSegments() {
  CoordinatorCustomDutyGroup group = new CoordinatorCustomDutyGroup("group1", Duration.standardSeconds(1), ImmutableList.of(new KillSupervisorsCustomDuty(new Duration("PT1S"), null)));
  CoordinatorCustomDutyGroups customDutyGroups = new CoordinatorCustomDutyGroups(ImmutableSet.of(group));
  coordinator = new DruidCoordinator(
      druidCoordinatorConfig,
      new ZkPathsConfig() {
        @Override
        public String getBase() {
          return "druid";
        }
      },
      null,
      segmentsMetadataManager,
      serverInventoryView,
      metadataRuleManager,
      () -> curator,
      serviceEmitter,
      scheduledExecutorFactory,
      null,
      null,
      new NoopServiceAnnouncer() {
        @Override
        public void announce(DruidNode node) {
          // count down when this coordinator becomes the leader
          leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node) {
          leaderUnannouncerLatch.countDown();
        }
      },
      druidNode,
      loadManagementPeons,
      ImmutableSet.of(),
      new HashSet<>(),
      customDutyGroups,
      new CostBalancerStrategyFactory(),
      EasyMock.createNiceMock(LookupCoordinatorManager.class),
      new TestDruidLeaderSelector(),
      null,
      ZkEnablementConfig.ENABLED
  );
  // Since CompactSegments is not enabled in the custom duty group, it must be created among the indexing service duties
  List<CoordinatorDuty> indexingDuties = coordinator.makeIndexingServiceDuties();
  Assert.assertTrue(indexingDuties.stream().anyMatch(coordinatorDuty -> coordinatorDuty instanceof CompactSegments));
  // CompactSegments should not exist in any custom duty group
  List<CompactSegments> compactSegmentsDutyFromCustomGroups = coordinator.getCompactSegmentsDutyFromCustomGroups();
  Assert.assertTrue(compactSegmentsDutyFromCustomGroups.isEmpty());
  // The CompactSegments returned by this method should be created using the DruidCoordinatorConfig held by the DruidCoordinator
  CompactSegments duty = coordinator.initializeCompactSegmentsDuty();
  Assert.assertNotNull(duty);
  Assert.assertEquals(druidCoordinatorConfig.getCompactionSkipLockedIntervals(), duty.isSkipLockedIntervals());
}
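The NoopServiceAnnouncer overrides above (repeated in every DruidCoordinator construction on this page) exist only to signal leadership changes to the test through latches. Below is a minimal sketch of that pattern pulled into a named helper; the class name LatchingServiceAnnouncer is ours, and the org.apache.druid.curator.discovery package for NoopServiceAnnouncer is an assumption rather than something stated on this page.

import java.util.concurrent.CountDownLatch;

import org.apache.druid.curator.discovery.NoopServiceAnnouncer; // package assumed
import org.apache.druid.server.DruidNode;

// Hypothetical helper: counts down one latch when the coordinator announces itself
// (i.e. becomes the leader) and another when it unannounces itself.
class LatchingServiceAnnouncer extends NoopServiceAnnouncer {
  private final CountDownLatch leaderAnnouncerLatch;
  private final CountDownLatch leaderUnannouncerLatch;

  LatchingServiceAnnouncer(CountDownLatch leaderAnnouncerLatch, CountDownLatch leaderUnannouncerLatch) {
    this.leaderAnnouncerLatch = leaderAnnouncerLatch;
    this.leaderUnannouncerLatch = leaderUnannouncerLatch;
  }

  @Override
  public void announce(DruidNode node) {
    // count down when this coordinator becomes the leader
    leaderAnnouncerLatch.countDown();
  }

  @Override
  public void unannounce(DruidNode node) {
    leaderUnannouncerLatch.countDown();
  }
}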
Use of org.apache.druid.server.DruidNode in project druid by druid-io.
In the class DruidCoordinatorTest, the method testCoordinatorCustomDutyGroupsRunAsExpected:
@Test(timeout = 3000)
public void testCoordinatorCustomDutyGroupsRunAsExpected() throws Exception {
  // Some necessary setup to start the Coordinator
  JacksonConfigManager configManager = EasyMock.createNiceMock(JacksonConfigManager.class);
  EasyMock.expect(configManager.watch(
      EasyMock.eq(CoordinatorDynamicConfig.CONFIG_KEY),
      EasyMock.anyObject(Class.class),
      EasyMock.anyObject()
  )).andReturn(new AtomicReference(CoordinatorDynamicConfig.builder().build())).anyTimes();
  EasyMock.expect(configManager.watch(
      EasyMock.eq(CoordinatorCompactionConfig.CONFIG_KEY),
      EasyMock.anyObject(Class.class),
      EasyMock.anyObject()
  )).andReturn(new AtomicReference(CoordinatorCompactionConfig.empty())).anyTimes();
  EasyMock.replay(configManager);
  EasyMock.expect(segmentsMetadataManager.isPollingDatabasePeriodically()).andReturn(true).anyTimes();
  DruidDataSource dataSource = new DruidDataSource("dataSource1", Collections.emptyMap());
  DataSegment dataSegment = new DataSegment("dataSource1", Intervals.of("2010-01-01/P1D"), "v1", null, null, null, null, 0x9, 0);
  dataSource.addSegment(dataSegment);
  DataSourcesSnapshot dataSourcesSnapshot =
      new DataSourcesSnapshot(ImmutableMap.of(dataSource.getName(), dataSource.toImmutableDruidDataSource()));
  EasyMock.expect(segmentsMetadataManager.getSnapshotOfDataSourcesWithAllUsedSegments()).andReturn(dataSourcesSnapshot).anyTimes();
  EasyMock.replay(segmentsMetadataManager);
  EasyMock.expect(serverInventoryView.isStarted()).andReturn(true).anyTimes();
  EasyMock.replay(serverInventoryView);
  // Create the CoordinatorCustomDutyGroups: two groups, each with one duty
  CountDownLatch latch1 = new CountDownLatch(1);
  CoordinatorCustomDuty duty1 = new CoordinatorCustomDuty() {
    @Override
    public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
      latch1.countDown();
      return params;
    }
  };
  CoordinatorCustomDutyGroup group1 = new CoordinatorCustomDutyGroup("group1", Duration.standardSeconds(1), ImmutableList.of(duty1));
  CountDownLatch latch2 = new CountDownLatch(1);
  CoordinatorCustomDuty duty2 = new CoordinatorCustomDuty() {
    @Override
    public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
      latch2.countDown();
      return params;
    }
  };
  CoordinatorCustomDutyGroup group2 = new CoordinatorCustomDutyGroup("group2", Duration.standardSeconds(1), ImmutableList.of(duty2));
  CoordinatorCustomDutyGroups groups = new CoordinatorCustomDutyGroups(ImmutableSet.of(group1, group2));
  coordinator = new DruidCoordinator(
      druidCoordinatorConfig,
      new ZkPathsConfig() {
        @Override
        public String getBase() {
          return "druid";
        }
      },
      configManager,
      segmentsMetadataManager,
      serverInventoryView,
      metadataRuleManager,
      () -> curator,
      serviceEmitter,
      scheduledExecutorFactory,
      null,
      null,
      new NoopServiceAnnouncer() {
        @Override
        public void announce(DruidNode node) {
          // count down when this coordinator becomes the leader
          leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node) {
          leaderUnannouncerLatch.countDown();
        }
      },
      druidNode,
      loadManagementPeons,
      null,
      new HashSet<>(),
      groups,
      new CostBalancerStrategyFactory(),
      EasyMock.createNiceMock(LookupCoordinatorManager.class),
      new TestDruidLeaderSelector(),
      null,
      ZkEnablementConfig.ENABLED
  );
  coordinator.start();
  // Wait until the group 1 duty has run and counted down latch1
  latch1.await();
  // Wait until the group 2 duty has run and counted down latch2
  latch2.await();
}
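The test above relies on a common pattern: the duties run asynchronously on the coordinator's schedule, so the test thread blocks on CountDownLatch.await() until each duty has run at least once. A self-contained, JDK-only sketch of that pattern (our own illustration, not Druid code) looks like this:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class LatchPatternSketch {
  public static void main(String[] args) throws InterruptedException {
    CountDownLatch latch1 = new CountDownLatch(1);
    CountDownLatch latch2 = new CountDownLatch(1);
    // Stand-ins for the two custom duty groups, each on its own one-second schedule.
    ScheduledExecutorService exec = Executors.newScheduledThreadPool(2);
    exec.scheduleWithFixedDelay(latch1::countDown, 0, 1, TimeUnit.SECONDS);
    exec.scheduleWithFixedDelay(latch2::countDown, 0, 1, TimeUnit.SECONDS);
    // Equivalent to latch1.await()/latch2.await() in the test, but bounded so the sketch cannot hang.
    if (!latch1.await(5, TimeUnit.SECONDS) || !latch2.await(5, TimeUnit.SECONDS)) {
      throw new AssertionError("duties did not run in time");
    }
    exec.shutdownNow();
  }
}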
Use of org.apache.druid.server.DruidNode in project druid by druid-io.
In the class CuratorDruidCoordinatorTest, the method setupView:
private void setupView() throws Exception {
  baseView = new BatchServerInventoryView(zkPathsConfig, curator, jsonMapper, Predicates.alwaysTrue(), "test") {
    @Override
    public void registerSegmentCallback(Executor exec, final SegmentCallback callback) {
      super.registerSegmentCallback(exec, new SegmentCallback() {
        @Override
        public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
          CallbackAction res = callback.segmentAdded(server, segment);
          segmentAddedLatch.countDown();
          return res;
        }

        @Override
        public CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment) {
          CallbackAction res = callback.segmentRemoved(server, segment);
          segmentRemovedLatch.countDown();
          return res;
        }

        @Override
        public CallbackAction segmentViewInitialized() {
          CallbackAction res = callback.segmentViewInitialized();
          segmentViewInitLatch.countDown();
          return res;
        }
      });
    }
  };
  serverView = new CoordinatorServerView(baseView, new CoordinatorSegmentWatcherConfig());
  baseView.start();
  sourceLoadQueuePeon.start();
  destinationLoadQueuePeon.start();
  coordinator = new DruidCoordinator(
      druidCoordinatorConfig,
      new ZkPathsConfig() {
        @Override
        public String getBase() {
          return "druid";
        }
      },
      configManager,
      segmentsMetadataManager,
      baseView,
      metadataRuleManager,
      () -> curator,
      new NoopServiceEmitter(),
      scheduledExecutorFactory,
      null,
      null,
      new NoopServiceAnnouncer() {
        @Override
        public void announce(DruidNode node) {
          // count down when this coordinator becomes the leader
          leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node) {
          leaderUnannouncerLatch.countDown();
        }
      },
      druidNode,
      loadManagementPeons,
      null,
      null,
      new CoordinatorCustomDutyGroups(ImmutableSet.of()),
      new CostBalancerStrategyFactory(),
      EasyMock.createNiceMock(LookupCoordinatorManager.class),
      new TestDruidLeaderSelector(),
      null,
      ZkEnablementConfig.ENABLED
  );
}
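setupView() wraps the real SegmentCallback so every event is first forwarded to the delegate and then counts down a latch, which lets the test wait for segment events deterministically. The sketch below shows only that wrap-and-signal idea using a stand-in listener interface of our own; it is not Druid's SegmentCallback API.

import java.util.concurrent.CountDownLatch;

// Stand-in for Druid's SegmentCallback, reduced to a single event for illustration.
interface EventListener {
  void onEvent(String event);
}

// Decorator: forwards to the real listener first, then signals the waiting test thread.
final class SignallingListener implements EventListener {
  private final EventListener delegate;
  private final CountDownLatch latch;

  SignallingListener(EventListener delegate, CountDownLatch latch) {
    this.delegate = delegate;
    this.latch = latch;
  }

  @Override
  public void onEvent(String event) {
    delegate.onEvent(event); // preserve the original behavior
    latch.countDown();       // then let the test proceed
  }
}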
Use of org.apache.druid.server.DruidNode in project druid by druid-io.
In the class TieredBrokerHostSelectorTest, the method setUp:
@Before
public void setUp() {
  druidNodeDiscoveryProvider = EasyMock.createStrictMock(DruidNodeDiscoveryProvider.class);
  node1 = new DiscoveryDruidNode(new DruidNode("hotBroker", "hotHost", false, 8080, null, true, false), NodeRole.BROKER, ImmutableMap.of());
  node2 = new DiscoveryDruidNode(new DruidNode("coldBroker", "coldHost1", false, 8080, null, true, false), NodeRole.BROKER, ImmutableMap.of());
  node3 = new DiscoveryDruidNode(new DruidNode("coldBroker", "coldHost2", false, 8080, null, true, false), NodeRole.BROKER, ImmutableMap.of());
  druidNodeDiscovery = new DruidNodeDiscovery() {
    @Override
    public Collection<DiscoveryDruidNode> getAllNodes() {
      return ImmutableSet.of(node1, node2, node3);
    }

    @Override
    public void registerListener(Listener listener) {
      listener.nodesAdded(ImmutableList.of(node1, node2, node3));
      listener.nodeViewInitialized();
    }
  };
  EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.BROKER)).andReturn(druidNodeDiscovery);
  EasyMock.replay(druidNodeDiscoveryProvider);
  brokerSelector = new TieredBrokerHostSelector(
      new TestRuleManager(null, null),
      new TieredBrokerConfig() {
        @Override
        public LinkedHashMap<String, String> getTierToBrokerMap() {
          return new LinkedHashMap<String, String>(
              ImmutableMap.of("hot", "hotBroker", "medium", "mediumBroker", DruidServer.DEFAULT_TIER, "coldBroker")
          );
        }

        @Override
        public String getDefaultBrokerServiceName() {
          return "hotBroker";
        }
      },
      druidNodeDiscoveryProvider,
      Arrays.asList(
          new ManualTieredBrokerSelectorStrategy(null),
          new TimeBoundaryTieredBrokerSelectorStrategy(),
          new PriorityTieredBrokerSelectorStrategy(0, 1)
      )
  );
  brokerSelector.start();
}
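The seven-argument DruidNode constructor appears throughout these tests. A hedged reading of the arguments, inferred from these call sites rather than stated on this page, is:

// Assumed parameter meanings (not confirmed by this page):
// DruidNode(serviceName, host, bindOnHost, plaintextPort, tlsPort, enablePlaintextPort, enableTlsPort)
DruidNode hotBroker = new DruidNode(
    "hotBroker", // service name, matched against the tier-to-broker map above
    "hotHost",   // advertised host
    false,       // bindOnHost
    8080,        // plaintext port
    null,        // TLS port (none)
    true,        // plaintext enabled
    false        // TLS disabled
);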
Use of org.apache.druid.server.DruidNode in project druid by druid-io.
In the class CuratorDruidLeaderSelectorTest, the method testSimple:
@Test(timeout = 60_000L)
public void testSimple() throws Exception {
  curator.start();
  curator.blockUntilConnected();
  AtomicReference<String> currLeader = new AtomicReference<>();
  String latchPath = "/testlatchPath";
  CuratorDruidLeaderSelector leaderSelector1 =
      new CuratorDruidLeaderSelector(curator, new DruidNode("s1", "h1", false, 8080, null, true, false), latchPath);
  leaderSelector1.registerListener(new DruidLeaderSelector.Listener() {
    @Override
    public void becomeLeader() {
      logger.info("listener1.becomeLeader().");
      currLeader.set("h1:8080");
      throw new RuntimeException("I am Rogue.");
    }

    @Override
    public void stopBeingLeader() {
      logger.info("listener1.stopBeingLeader().");
      throw new RuntimeException("I said I am Rogue.");
    }
  });
  while (!"h1:8080".equals(currLeader.get())) {
    logger.info("current leader = [%s]", currLeader.get());
    Thread.sleep(100);
  }
  Assert.assertTrue(leaderSelector1.localTerm() >= 1);
  CuratorDruidLeaderSelector leaderSelector2 =
      new CuratorDruidLeaderSelector(curator, new DruidNode("s2", "h2", false, 8080, null, true, false), latchPath);
  leaderSelector2.registerListener(new DruidLeaderSelector.Listener() {
    private AtomicInteger attemptCount = new AtomicInteger(0);

    @Override
    public void becomeLeader() {
      logger.info("listener2.becomeLeader().");
      if (attemptCount.getAndIncrement() < 1) {
        throw new RuntimeException("will become leader on next attempt.");
      }
      currLeader.set("h2:8080");
    }

    @Override
    public void stopBeingLeader() {
      logger.info("listener2.stopBeingLeader().");
      throw new RuntimeException("I am broken.");
    }
  });
  while (!"h2:8080".equals(currLeader.get())) {
    logger.info("current leader = [%s]", currLeader.get());
    Thread.sleep(100);
  }
  Assert.assertTrue(leaderSelector2.isLeader());
  Assert.assertEquals("http://h2:8080", leaderSelector1.getCurrentLeader());
  Assert.assertEquals(2, leaderSelector2.localTerm());
  CuratorDruidLeaderSelector leaderSelector3 =
      new CuratorDruidLeaderSelector(curator, new DruidNode("s3", "h3", false, 8080, null, true, false), latchPath);
  leaderSelector3.registerListener(new DruidLeaderSelector.Listener() {
    @Override
    public void becomeLeader() {
      logger.info("listener3.becomeLeader().");
      currLeader.set("h3:8080");
    }

    @Override
    public void stopBeingLeader() {
      logger.info("listener3.stopBeingLeader().");
    }
  });
  leaderSelector2.unregisterListener();
  while (!"h3:8080".equals(currLeader.get())) {
    logger.info("current leader = [%s]", currLeader.get());
    Thread.sleep(100);
  }
  Assert.assertTrue(leaderSelector3.isLeader());
  Assert.assertEquals("http://h3:8080", leaderSelector1.getCurrentLeader());
  Assert.assertEquals(1, leaderSelector3.localTerm());
}
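The polling loops in testSimple() spin until the expected leader is observed, bounded only by the @Test timeout. A small, JDK-only helper like the sketch below (our own code, not part of Druid) makes the same wait explicit and fails with a clearer message when a deadline passes:

import java.util.function.BooleanSupplier;

final class WaitFor {
  // Polls the condition every 100 ms until it holds or the deadline is exceeded.
  static void waitFor(BooleanSupplier condition, long timeoutMillis) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (!condition.getAsBoolean()) {
      if (System.currentTimeMillis() > deadline) {
        throw new AssertionError("condition not met within " + timeoutMillis + " ms");
      }
      Thread.sleep(100);
    }
  }
}

// Hypothetical usage in place of the loops above:
// WaitFor.waitFor(() -> "h2:8080".equals(currLeader.get()), 10_000L);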