Use of it.unimi.dsi.fastutil.objects.Object2LongOpenHashMap in project druid by druid-io.
From the class DataSourcesResourceTest, method testGetDatasourceLoadstatusFullAndComputeUsingClusterView:
@Test
public void testGetDatasourceLoadstatusFullAndComputeUsingClusterView() {
  DataSegment datasource1Segment1 =
      new DataSegment("datasource1", Intervals.of("2010-01-01/P1D"), "", null, null, null, null, 0x9, 10);
  DataSegment datasource1Segment2 =
      new DataSegment("datasource1", Intervals.of("2010-01-22/P1D"), "", null, null, null, null, 0x9, 20);
  List<DataSegment> segments = ImmutableList.of(datasource1Segment1, datasource1Segment2);

  final Map<String, Object2LongMap<String>> underReplicationCountsPerDataSourcePerTier = new HashMap<>();
  Object2LongMap<String> tier1 = new Object2LongOpenHashMap<>();
  tier1.put("datasource1", 0L);
  Object2LongMap<String> tier2 = new Object2LongOpenHashMap<>();
  tier2.put("datasource1", 3L);
  underReplicationCountsPerDataSourcePerTier.put("tier1", tier1);
  underReplicationCountsPerDataSourcePerTier.put("tier2", tier2);

  // Test when datasource fully loaded
  EasyMock
      .expect(segmentsMetadataManager.iterateAllUsedNonOvershadowedSegmentsForDatasourceInterval(
          EasyMock.eq("datasource1"),
          EasyMock.anyObject(Interval.class),
          EasyMock.anyBoolean()))
      .andReturn(Optional.of(segments))
      .once();
  DruidCoordinator druidCoordinator = EasyMock.createMock(DruidCoordinator.class);
  EasyMock
      .expect(druidCoordinator.computeUnderReplicationCountsPerDataSourcePerTierForSegmentsUsingClusterView(segments))
      .andReturn(underReplicationCountsPerDataSourcePerTier)
      .once();
  EasyMock.replay(segmentsMetadataManager, druidCoordinator);

  DataSourcesResource dataSourcesResource =
      new DataSourcesResource(inventoryView, segmentsMetadataManager, null, null, null, druidCoordinator);
  Response response = dataSourcesResource.getDatasourceLoadstatus(
      "datasource1", true, null, null, "full", "computeUsingClusterView");

  Assert.assertEquals(200, response.getStatus());
  Assert.assertNotNull(response.getEntity());
  Assert.assertEquals(2, ((Map) response.getEntity()).size());
  Assert.assertEquals(1, ((Map) ((Map) response.getEntity()).get("tier1")).size());
  Assert.assertEquals(1, ((Map) ((Map) response.getEntity()).get("tier2")).size());
  Assert.assertEquals(0L, ((Map) ((Map) response.getEntity()).get("tier1")).get("datasource1"));
  Assert.assertEquals(3L, ((Map) ((Map) response.getEntity()).get("tier2")).get("datasource1"));
  EasyMock.verify(segmentsMetadataManager);
}
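The fixture above pairs a plain HashMap of tiers with a primitive-valued Object2LongMap per tier. A minimal standalone sketch of that pattern follows; the class name and main method are illustrative, not part of the Druid test:

import it.unimi.dsi.fastutil.objects.Object2LongMap;
import it.unimi.dsi.fastutil.objects.Object2LongOpenHashMap;
import java.util.HashMap;
import java.util.Map;

public class UnderReplicationCountsSketch {
  public static void main(String[] args) {
    // Tier name -> (datasource name -> number of segments still waiting to be replicated).
    final Map<String, Object2LongMap<String>> countsPerTier = new HashMap<>();

    Object2LongMap<String> tier1 = new Object2LongOpenHashMap<>();
    tier1.put("datasource1", 0L); // fully loaded on tier1
    countsPerTier.put("tier1", tier1);

    Object2LongMap<String> tier2 = new Object2LongOpenHashMap<>();
    tier2.put("datasource1", 3L); // three segments still under-replicated on tier2
    countsPerTier.put("tier2", tier2);

    // getLong reads the primitive value directly, with no boxing on the read path.
    long pending = countsPerTier.get("tier2").getLong("datasource1");
    System.out.println("tier2 pending replicas: " + pending); // prints 3
  }
}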
Use of it.unimi.dsi.fastutil.objects.Object2LongOpenHashMap in project druid by druid-io.
From the class DataSourcesResourceTest, method testGetDatasourceLoadstatusFull:
@Test
public void testGetDatasourceLoadstatusFull() {
  DataSegment datasource1Segment1 =
      new DataSegment("datasource1", Intervals.of("2010-01-01/P1D"), "", null, null, null, null, 0x9, 10);
  DataSegment datasource1Segment2 =
      new DataSegment("datasource1", Intervals.of("2010-01-22/P1D"), "", null, null, null, null, 0x9, 20);
  List<DataSegment> segments = ImmutableList.of(datasource1Segment1, datasource1Segment2);

  final Map<String, Object2LongMap<String>> underReplicationCountsPerDataSourcePerTier = new HashMap<>();
  Object2LongMap<String> tier1 = new Object2LongOpenHashMap<>();
  tier1.put("datasource1", 0L);
  Object2LongMap<String> tier2 = new Object2LongOpenHashMap<>();
  tier2.put("datasource1", 3L);
  underReplicationCountsPerDataSourcePerTier.put("tier1", tier1);
  underReplicationCountsPerDataSourcePerTier.put("tier2", tier2);

  // Test when datasource fully loaded
  EasyMock
      .expect(segmentsMetadataManager.iterateAllUsedNonOvershadowedSegmentsForDatasourceInterval(
          EasyMock.eq("datasource1"),
          EasyMock.anyObject(Interval.class),
          EasyMock.anyBoolean()))
      .andReturn(Optional.of(segments))
      .once();
  DruidCoordinator druidCoordinator = EasyMock.createMock(DruidCoordinator.class);
  EasyMock
      .expect(druidCoordinator.computeUnderReplicationCountsPerDataSourcePerTierForSegments(segments))
      .andReturn(underReplicationCountsPerDataSourcePerTier)
      .once();
  EasyMock.replay(segmentsMetadataManager, druidCoordinator);

  DataSourcesResource dataSourcesResource =
      new DataSourcesResource(inventoryView, segmentsMetadataManager, null, null, null, druidCoordinator);
  Response response = dataSourcesResource.getDatasourceLoadstatus(
      "datasource1", true, null, null, "full", null);

  Assert.assertEquals(200, response.getStatus());
  Assert.assertNotNull(response.getEntity());
  Assert.assertEquals(2, ((Map) response.getEntity()).size());
  Assert.assertEquals(1, ((Map) ((Map) response.getEntity()).get("tier1")).size());
  Assert.assertEquals(1, ((Map) ((Map) response.getEntity()).get("tier2")).size());
  Assert.assertEquals(0L, ((Map) ((Map) response.getEntity()).get("tier1")).get("datasource1"));
  Assert.assertEquals(3L, ((Map) ((Map) response.getEntity()).get("tier2")).get("datasource1"));
  EasyMock.verify(segmentsMetadataManager);
}
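A related detail of the fastutil API used in both tests: an Object2LongOpenHashMap returns a primitive default (0 unless configured otherwise) for absent keys instead of null. A short sketch of that behavior, independent of the Druid code and with an invented class name:

import it.unimi.dsi.fastutil.objects.Object2LongOpenHashMap;

public class DefaultReturnValueSketch {
  public static void main(String[] args) {
    Object2LongOpenHashMap<String> counts = new Object2LongOpenHashMap<>();
    counts.put("datasource1", 3L);

    // A missing key yields the map's default return value, which starts at 0.
    System.out.println(counts.getLong("datasource2")); // 0

    // The default can be changed, e.g. to tell "absent" apart from "zero under-replicated".
    counts.defaultReturnValue(-1L);
    System.out.println(counts.getLong("datasource2")); // -1
  }
}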
Use of it.unimi.dsi.fastutil.objects.Object2LongOpenHashMap in project geode by apache.
From the class HeapEvictor, method getAllSortedRegionList:
private List<LocalRegion> getAllSortedRegionList() {
  List<LocalRegion> allRegionList = getAllRegionList();

  // Capture the sizes so that they do not change while sorting
  final Object2LongOpenHashMap<LocalRegion> sizes = new Object2LongOpenHashMap<>(allRegionList.size());
  for (LocalRegion r : allRegionList) {
    long size = r instanceof BucketRegion ? ((BucketRegion) r).getSizeForEviction() : r.size();
    sizes.put(r, size);
  }

  // Sort with respect to other PR buckets also in case of multiple PRs
  Collections.sort(allRegionList, new Comparator<LocalRegion>() {
    @Override
    public int compare(LocalRegion r1, LocalRegion r2) {
      // Descending by the captured size; getLong reads the primitive directly.
      return Long.compare(sizes.getLong(r2), sizes.getLong(r1));
    }
  });
  return allRegionList;
}
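The capture-then-sort idiom above generalizes: snapshot a quantity that may change concurrently into an Object2LongOpenHashMap once, then sort against the frozen values so the comparator's contract cannot be violated mid-sort. A minimal sketch with plain strings standing in for regions; the class name and sample data are invented for illustration:

import it.unimi.dsi.fastutil.objects.Object2LongOpenHashMap;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class SnapshotSortSketch {
  public static void main(String[] args) {
    List<String> regions = new ArrayList<>(Arrays.asList("regionA", "regionB", "regionC"));

    // Capture each size once so concurrent updates cannot change
    // the ordering while the sort is running.
    Object2LongOpenHashMap<String> sizes = new Object2LongOpenHashMap<>(regions.size());
    sizes.put("regionA", 10L);
    sizes.put("regionB", 30L);
    sizes.put("regionC", 20L);

    // Largest first, matching HeapEvictor's descending order.
    regions.sort(Comparator.comparingLong((String r) -> sizes.getLong(r)).reversed());
    System.out.println(regions); // [regionB, regionC, regionA]
  }
}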
Use of it.unimi.dsi.fastutil.objects.Object2LongOpenHashMap in project druid by druid-io.
From the class HistoricalMetricsMonitor, method doMonitor:
@Override
public boolean doMonitor(ServiceEmitter emitter) {
  emitter.emit(new ServiceMetricEvent.Builder().build("segment/max", serverConfig.getMaxSize()));

  // Aggregate pending-delete bytes per datasource without boxing.
  final Object2LongOpenHashMap<String> pendingDeleteSizes = new Object2LongOpenHashMap<>();
  for (DataSegment segment : segmentLoadDropMgr.getPendingDeleteSnapshot()) {
    pendingDeleteSizes.addTo(segment.getDataSource(), segment.getSize());
  }
  for (final Object2LongMap.Entry<String> entry : pendingDeleteSizes.object2LongEntrySet()) {
    final String dataSource = entry.getKey();
    final long pendingDeleteSize = entry.getLongValue();
    emitter.emit(
        new ServiceMetricEvent.Builder()
            .setDimension(DruidMetrics.DATASOURCE, dataSource)
            .setDimension("tier", serverConfig.getTier())
            .setDimension("priority", String.valueOf(serverConfig.getPriority()))
            .build("segment/pendingDelete", pendingDeleteSize)
    );
  }

  for (Map.Entry<String, Long> entry : segmentManager.getDataSourceSizes().entrySet()) {
    String dataSource = entry.getKey();
    long used = entry.getValue();
    final ServiceMetricEvent.Builder builder = new ServiceMetricEvent.Builder()
        .setDimension(DruidMetrics.DATASOURCE, dataSource)
        .setDimension("tier", serverConfig.getTier())
        .setDimension("priority", String.valueOf(serverConfig.getPriority()));
    emitter.emit(builder.build("segment/used", used));
    final double usedPercent = serverConfig.getMaxSize() == 0 ? 0 : used / (double) serverConfig.getMaxSize();
    emitter.emit(builder.build("segment/usedPercent", usedPercent));
  }

  for (Map.Entry<String, Long> entry : segmentManager.getDataSourceCounts().entrySet()) {
    String dataSource = entry.getKey();
    long count = entry.getValue();
    final ServiceMetricEvent.Builder builder = new ServiceMetricEvent.Builder()
        .setDimension(DruidMetrics.DATASOURCE, dataSource)
        .setDimension("tier", serverConfig.getTier())
        .setDimension("priority", String.valueOf(serverConfig.getPriority()));
    emitter.emit(builder.build("segment/count", count));
  }
  return true;
}
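Two fastutil details carry the aggregation step above: addTo accumulates into an existing entry without a get-then-put round trip, and the Object2LongMap.Entry view exposes getLongValue so iteration never boxes the values. A self-contained sketch of the same step, with an invented class name and datasource names:

import it.unimi.dsi.fastutil.objects.Object2LongMap;
import it.unimi.dsi.fastutil.objects.Object2LongMaps;
import it.unimi.dsi.fastutil.objects.Object2LongOpenHashMap;

public class PendingDeleteSketch {
  public static void main(String[] args) {
    Object2LongOpenHashMap<String> pendingDeleteSizes = new Object2LongOpenHashMap<>();

    // addTo starts from 0 for absent keys (the default return value)
    // and adds in place, so each segment costs one hash lookup.
    pendingDeleteSizes.addTo("datasource1", 100L);
    pendingDeleteSizes.addTo("datasource1", 250L);
    pendingDeleteSizes.addTo("datasource2", 40L);

    // fastIterable reuses a single Entry instance across the loop,
    // avoiding one allocation per entry on large maps.
    for (Object2LongMap.Entry<String> e : Object2LongMaps.fastIterable(pendingDeleteSizes)) {
      System.out.println(e.getKey() + " -> " + e.getLongValue());
    }
  }
}

The monitor above iterates object2LongEntrySet() directly, which is also fine; fastIterable is the variant to reach for when per-entry allocation matters.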