Use of org.apache.druid.client.ImmutableDruidDataSource in project druid by druid-io.
In the class DruidCoordinatorTest, method testCoordinatorRun:
@Test(timeout = 60_000L)
public void testCoordinatorRun() throws Exception
{
  String dataSource = "dataSource1";
  String tier = "hot";

  // Set up MetadataRuleManager
  Rule foreverLoadRule = new ForeverLoadRule(ImmutableMap.of(tier, 2));
  EasyMock.expect(metadataRuleManager.getRulesWithDefault(EasyMock.anyString()))
          .andReturn(ImmutableList.of(foreverLoadRule))
          .atLeastOnce();
  EasyMock.expect(metadataRuleManager.getAllRules())
          .andReturn(ImmutableMap.of(dataSource, ImmutableList.of(foreverLoadRule)))
          .atLeastOnce();
  metadataRuleManager.stop();
  EasyMock.expectLastCall().once();
  EasyMock.replay(metadataRuleManager);

  // Set up SegmentsMetadataManager
  DruidDataSource[] dataSources = {new DruidDataSource(dataSource, Collections.emptyMap())};
  final DataSegment dataSegment = new DataSegment(
      dataSource,
      Intervals.of("2010-01-01/P1D"),
      "v1",
      null,
      null,
      null,
      null,
      0x9,
      0
  );
  dataSources[0].addSegment(dataSegment);
  setupSegmentsMetadataMock(dataSources[0]);
  ImmutableDruidDataSource immutableDruidDataSource = EasyMock.createNiceMock(ImmutableDruidDataSource.class);
  EasyMock.expect(immutableDruidDataSource.getSegments())
          .andReturn(ImmutableSet.of(dataSegment))
          .atLeastOnce();
  EasyMock.replay(immutableDruidDataSource);

  // Set up ServerInventoryView
  druidServer = new DruidServer("server1", "localhost", null, 5L, ServerType.HISTORICAL, tier, 0);
  loadManagementPeons.put("server1", loadQueuePeon);
  EasyMock.expect(serverInventoryView.getInventory())
          .andReturn(ImmutableList.of(druidServer))
          .atLeastOnce();
  EasyMock.expect(serverInventoryView.isStarted()).andReturn(true).anyTimes();
  EasyMock.replay(serverInventoryView);

  coordinator.start();

  // Wait for this coordinator to become leader
  leaderAnnouncerLatch.await();
  // This coordinator should be leader by now
  Assert.assertTrue(coordinator.isLeader());
  Assert.assertEquals(druidNode.getHostAndPort(), coordinator.getCurrentLeader());

  pathChildrenCache.start();
  final CountDownLatch assignSegmentLatch = createCountDownLatchAndSetPathChildrenCacheListenerWithLatch(
      1,
      pathChildrenCache,
      ImmutableMap.of("2010-01-01T00:00:00.000Z_2010-01-02T00:00:00.000Z", dataSegment),
      druidServer
  );
  assignSegmentLatch.await();

  final CountDownLatch coordinatorRunLatch = new CountDownLatch(2);
  serviceEmitter.latch = coordinatorRunLatch;
  coordinatorRunLatch.await();

  Assert.assertEquals(ImmutableMap.of(dataSource, 100.0), coordinator.getLoadStatus());

  Object2IntMap<String> numsUnavailableUsedSegmentsPerDataSource =
      coordinator.computeNumsUnavailableUsedSegmentsPerDataSource();
  Assert.assertEquals(1, numsUnavailableUsedSegmentsPerDataSource.size());
  Assert.assertEquals(0, numsUnavailableUsedSegmentsPerDataSource.getInt(dataSource));

  Map<String, Object2LongMap<String>> underReplicationCountsPerDataSourcePerTier =
      coordinator.computeUnderReplicationCountsPerDataSourcePerTier();
  Assert.assertNotNull(underReplicationCountsPerDataSourcePerTier);
  Assert.assertEquals(1, underReplicationCountsPerDataSourcePerTier.size());

  Object2LongMap<String> underReplicationCountsPerDataSource = underReplicationCountsPerDataSourcePerTier.get(tier);
  Assert.assertNotNull(underReplicationCountsPerDataSource);
  Assert.assertEquals(1, underReplicationCountsPerDataSource.size());
  //noinspection deprecation
  Assert.assertNotNull(underReplicationCountsPerDataSource.get(dataSource));
  // The segment was added to druidServer during the simulated SegmentChangeRequestLoad event above.
  // The load rule asks for 2 replicas, so 1 replica should still be pending.
  Assert.assertEquals(1L, underReplicationCountsPerDataSource.getLong(dataSource));

  Map<String, Object2LongMap<String>> underReplicationCountsPerDataSourcePerTierUsingClusterView =
      coordinator.computeUnderReplicationCountsPerDataSourcePerTierUsingClusterView();
  Assert.assertNotNull(underReplicationCountsPerDataSourcePerTierUsingClusterView);
  Assert.assertEquals(1, underReplicationCountsPerDataSourcePerTierUsingClusterView.size());

  Object2LongMap<String> underReplicationCountsPerDataSourceUsingClusterView =
      underReplicationCountsPerDataSourcePerTierUsingClusterView.get(tier);
  Assert.assertNotNull(underReplicationCountsPerDataSourceUsingClusterView);
  Assert.assertEquals(1, underReplicationCountsPerDataSourceUsingClusterView.size());
  //noinspection deprecation
  Assert.assertNotNull(underReplicationCountsPerDataSourceUsingClusterView.get(dataSource));
  // The load rule asks for 2 replicas, but there is only 1 historical server in the cluster. The
  // cluster-view computation counts a segment as replicated as many times as the current cluster
  // allows, so from this point of view the datasource should not be under-replicated.
  Assert.assertEquals(0L, underReplicationCountsPerDataSourceUsingClusterView.getLong(dataSource));

  coordinator.stop();
  leaderUnannouncerLatch.await();
  Assert.assertFalse(coordinator.isLeader());
  Assert.assertNull(coordinator.getCurrentLeader());

  EasyMock.verify(serverInventoryView);
  EasyMock.verify(metadataRuleManager);
}
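For context on the class this page tracks: ImmutableDruidDataSource is the read-only snapshot of a mutable DruidDataSource, which is why the test above can hand a nice mock of it to the coordinator. A minimal sketch of that relationship, reusing the dataSegment built in the test and assuming only the toImmutableDruidDataSource() snapshot accessor on DruidDataSource:

// Sketch only, not part of the test above.
DruidDataSource mutableDataSource = new DruidDataSource("dataSource1", Collections.emptyMap());
mutableDataSource.addSegment(dataSegment);
// toImmutableDruidDataSource() hands callers a read-only view of the current segment set.
ImmutableDruidDataSource snapshot = mutableDataSource.toImmutableDruidDataSource();
Collection<DataSegment> segments = snapshot.getSegments();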
Use of org.apache.druid.client.ImmutableDruidDataSource in project druid by druid-io.
In the class CostBalancerStrategyTest, method setupDummyCluster:
/**
 * Create a Druid cluster with serverCount servers holding maxSegments segments each, plus one
 * server holding maxSegments - 2 segments (98 when maxSegments is 100). The cost balancer
 * strategy should assign the next segment to the server with fewer segments.
 */
public static List<ServerHolder> setupDummyCluster(int serverCount, int maxSegments)
{
  List<ServerHolder> serverHolderList = new ArrayList<>();
  // Each server holds maxSegments segments
  for (int i = 0; i < serverCount; i++) {
    LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
    List<DataSegment> segments = IntStream
        .range(0, maxSegments)
        .mapToObj(j -> getSegment(j))
        .collect(Collectors.toList());
    ImmutableDruidDataSource dataSource = new ImmutableDruidDataSource("DUMMY", Collections.emptyMap(), segments);
    String serverName = "DruidServer_Name_" + i;
    ServerHolder serverHolder = new ServerHolder(
        new ImmutableDruidServer(
            new DruidServerMetadata(serverName, "localhost", null, 10000000L, ServerType.HISTORICAL, "hot", 1),
            3000L,
            ImmutableMap.of("DUMMY", dataSource),
            segments.size()
        ),
        fromPeon
    );
    serverHolderList.add(serverHolder);
  }

  // The best candidate for the next segment assignment holds only maxSegments - 2 segments
  LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
  ImmutableDruidServer druidServer = EasyMock.createMock(ImmutableDruidServer.class);
  EasyMock.expect(druidServer.getName()).andReturn("BEST_SERVER").anyTimes();
  EasyMock.expect(druidServer.getCurrSize()).andReturn(3000L).anyTimes();
  EasyMock.expect(druidServer.getMaxSize()).andReturn(10000000L).anyTimes();
  EasyMock.expect(druidServer.getSegment(EasyMock.anyObject())).andReturn(null).anyTimes();
  Map<SegmentId, DataSegment> segments = new HashMap<>();
  for (int j = 0; j < (maxSegments - 2); j++) {
    DataSegment segment = getSegment(j);
    segments.put(segment.getId(), segment);
    EasyMock.expect(druidServer.getSegment(segment.getId())).andReturn(segment).anyTimes();
  }
  ImmutableDruidServerTests.expectSegments(druidServer, segments.values());
  EasyMock.replay(druidServer);
  serverHolderList.add(new ServerHolder(druidServer, fromPeon));
  return serverHolderList;
}
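A hedged sketch of how the surrounding test class exercises this helper. getSegment(int) is the same test helper used above; the CostBalancerStrategy constructor taking a ListeningExecutorService is assumed from this era of the codebase:

// Sketch only: build the dummy cluster and ask the strategy where the next segment should go.
List<ServerHolder> serverHolderList = setupDummyCluster(10, 100);
DataSegment proposalSegment = getSegment(1000);
BalancerStrategy strategy =
    new CostBalancerStrategy(MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1)));
ServerHolder holder = strategy.findNewSegmentHomeReplicator(proposalSegment, serverHolderList);
// The server with the fewest segments should win the cost comparison.
Assert.assertEquals("BEST_SERVER", holder.getServer().getName());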
Use of org.apache.druid.client.ImmutableDruidDataSource in project druid by druid-io.
In the class DiskNormalizedCostBalancerStrategyTest, method setupDummyCluster:
/**
 * Create a Druid cluster with serverCount servers holding maxSegments segments each, plus one
 * server holding the same number of segments but with a 10x larger max size. The disk-normalized
 * cost balancer strategy should assign the next segment to the server with more free disk space.
 */
public static List<ServerHolder> setupDummyCluster(int serverCount, int maxSegments)
{
  List<ServerHolder> serverHolderList = new ArrayList<>();
  // Each server holds maxSegments segments
  for (int i = 0; i < serverCount; i++) {
    LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
    List<DataSegment> segments = IntStream
        .range(0, maxSegments)
        .mapToObj(j -> getSegment(j))
        .collect(Collectors.toList());
    ImmutableDruidDataSource dataSource = new ImmutableDruidDataSource("DUMMY", Collections.emptyMap(), segments);
    serverHolderList.add(new ServerHolder(
        new ImmutableDruidServer(
            new DruidServerMetadata("DruidServer_Name_" + i, "localhost", null, 10000000L, ServerType.HISTORICAL, "hot", 1),
            3000L,
            ImmutableMap.of("DUMMY", dataSource),
            segments.size()
        ),
        fromPeon
    ));
  }

  // The best candidate for the next segment assignment has a greater max size
  LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
  ImmutableDruidServer druidServer = EasyMock.createMock(ImmutableDruidServer.class);
  EasyMock.expect(druidServer.getName()).andReturn("BEST_SERVER").anyTimes();
  EasyMock.expect(druidServer.getCurrSize()).andReturn(3000L).anyTimes();
  EasyMock.expect(druidServer.getMaxSize()).andReturn(100000000L).anyTimes();
  EasyMock.expect(druidServer.getSegment(EasyMock.anyObject())).andReturn(null).anyTimes();
  List<DataSegment> segments = new ArrayList<>();
  for (int j = 0; j < maxSegments; j++) {
    DataSegment segment = getSegment(j);
    segments.add(segment);
    EasyMock.expect(druidServer.getSegment(segment.getId())).andReturn(segment).anyTimes();
  }
  ImmutableDruidServerTests.expectSegments(druidServer, segments);
  EasyMock.replay(druidServer);
  serverHolderList.add(new ServerHolder(druidServer, fromPeon));
  return serverHolderList;
}
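A parallel sketch for the disk-normalized variant, under the same assumptions as the CostBalancerStrategy example above (ListeningExecutorService constructor, getSegment(int) test helper). The design difference: DiskNormalizedCostBalancerStrategy scales the raw cost by how full each server's disk is, so a server with identical segments but a much larger max size looks cheaper:

// Sketch only: every server holds the same segments, so disk headroom decides the winner.
List<ServerHolder> serverHolderList = setupDummyCluster(10, 100);
DataSegment proposalSegment = getSegment(1000);
BalancerStrategy strategy =
    new DiskNormalizedCostBalancerStrategy(MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1)));
ServerHolder holder = strategy.findNewSegmentHomeReplicator(proposalSegment, serverHolderList);
// The disk-usage normalization should steer the assignment to the server with the larger max size.
Assert.assertEquals("BEST_SERVER", holder.getServer().getName());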