Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
In class DataSourcesResourceTest, method testGetDatasourceLoadstatusFull:
@Test
public void testGetDatasourceLoadstatusFull()
{
  DataSegment datasource1Segment1 = new DataSegment(
      "datasource1", Intervals.of("2010-01-01/P1D"), "", null, null, null, null, 0x9, 10
  );
  DataSegment datasource1Segment2 = new DataSegment(
      "datasource1", Intervals.of("2010-01-22/P1D"), "", null, null, null, null, 0x9, 20
  );
  List<DataSegment> segments = ImmutableList.of(datasource1Segment1, datasource1Segment2);

  final Map<String, Object2LongMap<String>> underReplicationCountsPerDataSourcePerTier = new HashMap<>();
  Object2LongMap<String> tier1 = new Object2LongOpenHashMap<>();
  tier1.put("datasource1", 0L);
  Object2LongMap<String> tier2 = new Object2LongOpenHashMap<>();
  tier2.put("datasource1", 3L);
  underReplicationCountsPerDataSourcePerTier.put("tier1", tier1);
  underReplicationCountsPerDataSourcePerTier.put("tier2", tier2);

  // Test when datasource fully loaded
  EasyMock.expect(
      segmentsMetadataManager.iterateAllUsedNonOvershadowedSegmentsForDatasourceInterval(
          EasyMock.eq("datasource1"), EasyMock.anyObject(Interval.class), EasyMock.anyBoolean()
      )
  ).andReturn(Optional.of(segments)).once();
  DruidCoordinator druidCoordinator = EasyMock.createMock(DruidCoordinator.class);
  EasyMock.expect(druidCoordinator.computeUnderReplicationCountsPerDataSourcePerTierForSegments(segments))
          .andReturn(underReplicationCountsPerDataSourcePerTier)
          .once();
  EasyMock.replay(segmentsMetadataManager, druidCoordinator);

  DataSourcesResource dataSourcesResource =
      new DataSourcesResource(inventoryView, segmentsMetadataManager, null, null, null, druidCoordinator);
  Response response = dataSourcesResource.getDatasourceLoadstatus("datasource1", true, null, null, "full", null);

  Assert.assertEquals(200, response.getStatus());
  Assert.assertNotNull(response.getEntity());
  Assert.assertEquals(2, ((Map) response.getEntity()).size());
  Assert.assertEquals(1, ((Map) ((Map) response.getEntity()).get("tier1")).size());
  Assert.assertEquals(1, ((Map) ((Map) response.getEntity()).get("tier2")).size());
  Assert.assertEquals(0L, ((Map) ((Map) response.getEntity()).get("tier1")).get("datasource1"));
  Assert.assertEquals(3L, ((Map) ((Map) response.getEntity()).get("tier2")).get("datasource1"));
  EasyMock.verify(segmentsMetadataManager);
}
Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
In class DataSourcesResourceTest, method testGetDatasourceLoadstatusDefault:
@Test
public void testGetDatasourceLoadstatusDefault()
{
  DataSegment datasource1Segment1 = new DataSegment(
      "datasource1", Intervals.of("2010-01-01/P1D"), "", null, null, null, null, 0x9, 10
  );
  DataSegment datasource1Segment2 = new DataSegment(
      "datasource1", Intervals.of("2010-01-22/P1D"), "", null, null, null, null, 0x9, 20
  );
  DataSegment datasource2Segment1 = new DataSegment(
      "datasource2", Intervals.of("2010-01-01/P1D"), "", null, null, null, null, 0x9, 30
  );
  List<DataSegment> segments = ImmutableList.of(datasource1Segment1, datasource1Segment2);
  Map<SegmentId, SegmentLoadInfo> completedLoadInfoMap = ImmutableMap.of(
      datasource1Segment1.getId(), new SegmentLoadInfo(datasource1Segment1),
      datasource1Segment2.getId(), new SegmentLoadInfo(datasource1Segment2),
      datasource2Segment1.getId(), new SegmentLoadInfo(datasource2Segment1)
  );
  Map<SegmentId, SegmentLoadInfo> halfLoadedInfoMap = ImmutableMap.of(
      datasource1Segment1.getId(), new SegmentLoadInfo(datasource1Segment1)
  );

  // Test when datasource fully loaded
  EasyMock.expect(
      segmentsMetadataManager.iterateAllUsedNonOvershadowedSegmentsForDatasourceInterval(
          EasyMock.eq("datasource1"), EasyMock.anyObject(Interval.class), EasyMock.anyBoolean()
      )
  ).andReturn(Optional.of(segments)).once();
  EasyMock.expect(inventoryView.getSegmentLoadInfos()).andReturn(completedLoadInfoMap).once();
  EasyMock.replay(segmentsMetadataManager, inventoryView);

  DataSourcesResource dataSourcesResource =
      new DataSourcesResource(inventoryView, segmentsMetadataManager, null, null, null, null);
  Response response = dataSourcesResource.getDatasourceLoadstatus("datasource1", true, null, null, null, null);
  Assert.assertEquals(200, response.getStatus());
  Assert.assertNotNull(response.getEntity());
  Assert.assertEquals(1, ((Map) response.getEntity()).size());
  Assert.assertTrue(((Map) response.getEntity()).containsKey("datasource1"));
  Assert.assertEquals(100.0, ((Map) response.getEntity()).get("datasource1"));
  EasyMock.verify(segmentsMetadataManager, inventoryView);
  EasyMock.reset(segmentsMetadataManager, inventoryView);

  // Test when datasource half loaded
  EasyMock.expect(
      segmentsMetadataManager.iterateAllUsedNonOvershadowedSegmentsForDatasourceInterval(
          EasyMock.eq("datasource1"), EasyMock.anyObject(Interval.class), EasyMock.anyBoolean()
      )
  ).andReturn(Optional.of(segments)).once();
  EasyMock.expect(inventoryView.getSegmentLoadInfos()).andReturn(halfLoadedInfoMap).once();
  EasyMock.replay(segmentsMetadataManager, inventoryView);

  dataSourcesResource = new DataSourcesResource(inventoryView, segmentsMetadataManager, null, null, null, null);
  response = dataSourcesResource.getDatasourceLoadstatus("datasource1", true, null, null, null, null);
  Assert.assertEquals(200, response.getStatus());
  Assert.assertNotNull(response.getEntity());
  Assert.assertEquals(1, ((Map) response.getEntity()).size());
  Assert.assertTrue(((Map) response.getEntity()).containsKey("datasource1"));
  Assert.assertEquals(50.0, ((Map) response.getEntity()).get("datasource1"));
  EasyMock.verify(segmentsMetadataManager, inventoryView);
}
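Both DataSourcesResourceTest snippets above call methods on segmentsMetadataManager and inventoryView without showing where those mocks come from. A minimal sketch of the fixture setup they assume follows; the field types (SegmentsMetadataManager and CoordinatorServerView) are inferred from the calls made on them and may not match the real test class exactly.

// Assumed fixture for the DataSourcesResourceTest snippets above; types are inferred
// from usage (iterateAllUsedNonOvershadowedSegmentsForDatasourceInterval, getSegmentLoadInfos)
// and the actual test may declare them differently.
private SegmentsMetadataManager segmentsMetadataManager;
private CoordinatorServerView inventoryView;

@Before
public void setUp()
{
  segmentsMetadataManager = EasyMock.createStrictMock(SegmentsMetadataManager.class);
  inventoryView = EasyMock.createStrictMock(CoordinatorServerView.class);
}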
Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
In class LoadRuleTest, method testDrop:
@Test
public void testDrop()
{
  final LoadQueuePeon mockPeon = createEmptyPeon();
  mockPeon.dropSegment(EasyMock.anyObject(), EasyMock.anyObject());
  EasyMock.expectLastCall().atLeastOnce();
  EasyMock.expect(mockBalancerStrategy.pickServersToDrop(EasyMock.anyObject(), EasyMock.anyObject()))
          .andDelegateTo(balancerStrategy)
          .times(4);
  EasyMock.replay(throttler, mockPeon, mockBalancerStrategy);

  LoadRule rule = createLoadRule(ImmutableMap.of("hot", 0, DruidServer.DEFAULT_TIER, 0));
  final DataSegment segment = createDataSegment("foo");

  DruidServer server1 = new DruidServer("serverHot", "hostHot", null, 1000, ServerType.HISTORICAL, "hot", 0);
  server1.addDataSegment(segment);
  DruidServer server2 = new DruidServer("serverNorm", "hostNorm", null, 1000, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0);
  server2.addDataSegment(segment);
  DruidServer server3 = new DruidServer("serverNormNotServing", "hostNorm", null, 10, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0);

  DruidCluster druidCluster = DruidClusterBuilder
      .newBuilder()
      .addTier("hot", new ServerHolder(server1.toImmutableDruidServer(), mockPeon))
      .addTier(
          DruidServer.DEFAULT_TIER,
          new ServerHolder(server2.toImmutableDruidServer(), mockPeon),
          new ServerHolder(server3.toImmutableDruidServer(), mockPeon)
      )
      .build();

  CoordinatorStats stats = rule.run(null, makeCoordinatorRuntimeParams(druidCluster, segment), segment);
  Assert.assertEquals(1L, stats.getTieredStat("droppedCount", "hot"));
  Assert.assertEquals(1L, stats.getTieredStat("droppedCount", DruidServer.DEFAULT_TIER));
  EasyMock.verify(throttler, mockPeon);
}
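testDrop relies on helper methods that this listing does not include. A hedged sketch of what createEmptyPeon and createDataSegment might look like; the exact mock expectations and segment fields in Druid's LoadRuleTest may differ.

// Sketch of helpers assumed by testDrop; not copied verbatim from LoadRuleTest.
private static LoadQueuePeon createEmptyPeon()
{
  // A peon reporting an empty load queue; individual tests add load/drop expectations on it.
  final LoadQueuePeon mockPeon = EasyMock.createMock(LoadQueuePeon.class);
  EasyMock.expect(mockPeon.getSegmentsToLoad()).andReturn(new HashSet<>()).anyTimes();
  EasyMock.expect(mockPeon.getSegmentsMarkedToDrop()).andReturn(new HashSet<>()).anyTimes();
  EasyMock.expect(mockPeon.getLoadQueueSize()).andReturn(0L).anyTimes();
  return mockPeon;
}

private static DataSegment createDataSegment(String dataSource)
{
  // One small segment over a wide interval is enough for the rule to act on.
  return new DataSegment(
      dataSource,
      Intervals.of("0/3000"),
      DateTimes.nowUtc().toString(),
      new HashMap<>(),
      new ArrayList<>(),
      new ArrayList<>(),
      NoneShardSpec.instance(),
      0,
      0
  );
}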
Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
In class LoadRuleTest, method testLoadDecommissioning:
/**
 * Two servers in different tiers; the first is decommissioning.
 * Should not load a segment onto the server that is decommissioning.
 */
@Test
public void testLoadDecommissioning()
{
  final LoadQueuePeon mockPeon1 = createEmptyPeon();
  final LoadQueuePeon mockPeon2 = createOneCallPeonMock();
  LoadRule rule = createLoadRule(ImmutableMap.of("tier1", 1, "tier2", 1));
  final DataSegment segment = createDataSegment("foo");

  EasyMock.expect(mockBalancerStrategy.findNewSegmentHomeReplicator(EasyMock.anyObject(), EasyMock.anyObject()))
          .andDelegateTo(balancerStrategy)
          .times(1);
  EasyMock.replay(mockPeon1, mockPeon2, mockBalancerStrategy);

  DruidCluster druidCluster = DruidClusterBuilder
      .newBuilder()
      .addTier("tier1", createServerHolder("tier1", mockPeon1, true))
      .addTier("tier2", createServerHolder("tier2", mockPeon2, false))
      .build();

  CoordinatorStats stats = rule.run(null, makeCoordinatorRuntimeParams(druidCluster, segment), segment);
  Assert.assertEquals(1L, stats.getTieredStat(LoadRule.ASSIGNED_COUNT, "tier2"));
  EasyMock.verify(mockPeon1, mockPeon2, mockBalancerStrategy);
}
Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
In class LoadRuleTest, method testLoadReplicaDuringDecommissioning:
/**
 * Two tiers with two servers each; one server in the second tier is decommissioning.
 * Should not load a segment onto the server that is decommissioning.
 */
@Test
public void testLoadReplicaDuringDecommissioning()
{
  EasyMock.expect(throttler.canCreateReplicant(EasyMock.anyString())).andReturn(true).anyTimes();
  final LoadQueuePeon mockPeon1 = createEmptyPeon();
  final LoadQueuePeon mockPeon2 = createOneCallPeonMock();
  final LoadQueuePeon mockPeon3 = createOneCallPeonMock();
  final LoadQueuePeon mockPeon4 = createOneCallPeonMock();
  LoadRule rule = createLoadRule(ImmutableMap.of("tier1", 2, "tier2", 2));
  final DataSegment segment = createDataSegment("foo");

  throttler.registerReplicantCreation(EasyMock.eq("tier2"), EasyMock.anyObject(), EasyMock.anyObject());
  EasyMock.expectLastCall().times(2);

  ServerHolder holder1 = createServerHolder("tier1", mockPeon1, true);
  ServerHolder holder2 = createServerHolder("tier1", mockPeon2, false);
  ServerHolder holder3 = createServerHolder("tier2", mockPeon3, false);
  ServerHolder holder4 = createServerHolder("tier2", mockPeon4, false);

  EasyMock.expect(mockBalancerStrategy.findNewSegmentHomeReplicator(segment, ImmutableList.of(holder2))).andReturn(holder2);
  EasyMock.expect(mockBalancerStrategy.findNewSegmentHomeReplicator(segment, ImmutableList.of(holder4, holder3))).andReturn(holder3);
  EasyMock.expect(mockBalancerStrategy.findNewSegmentHomeReplicator(segment, ImmutableList.of(holder4))).andReturn(holder4);
  EasyMock.replay(throttler, mockPeon1, mockPeon2, mockPeon3, mockPeon4, mockBalancerStrategy);

  DruidCluster druidCluster = DruidClusterBuilder
      .newBuilder()
      .addTier("tier1", holder1, holder2)
      .addTier("tier2", holder3, holder4)
      .build();

  CoordinatorStats stats = rule.run(null, makeCoordinatorRuntimeParams(druidCluster, segment), segment);
  Assert.assertEquals(1L, stats.getTieredStat(LoadRule.ASSIGNED_COUNT, "tier1"));
  Assert.assertEquals(2L, stats.getTieredStat(LoadRule.ASSIGNED_COUNT, "tier2"));
  EasyMock.verify(throttler, mockPeon1, mockPeon2, mockPeon3, mockPeon4, mockBalancerStrategy);
}
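The two decommissioning tests also call createOneCallPeonMock and createServerHolder, which are not shown above. A rough sketch of those helpers, assuming a ServerHolder constructor that takes a decommissioning flag; details such as the server size and naming are illustrative only.

// Sketch of helpers assumed by the decommissioning tests; details are illustrative.
private static LoadQueuePeon createOneCallPeonMock()
{
  // An empty peon that additionally expects exactly one loadSegment(...) call.
  final LoadQueuePeon mockPeon = createEmptyPeon();
  mockPeon.loadSegment(EasyMock.anyObject(), EasyMock.anyObject());
  EasyMock.expectLastCall().once();
  return mockPeon;
}

private static int serverId = 0;

private static ServerHolder createServerHolder(String tier, LoadQueuePeon mockPeon, boolean isDecommissioning)
{
  // Each holder wraps a fresh historical server in the given tier.
  DruidServer server = new DruidServer(
      "server_" + tier + "_" + serverId++,
      "127.0.0.1",
      null,
      10L << 30,
      ServerType.HISTORICAL,
      tier,
      0
  );
  return new ServerHolder(server.toImmutableDruidServer(), mockPeon, isDecommissioning);
}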