Use of io.druid.client.DruidDataSource in project druid by druid-io.

In class DatasourcesResourceTest, method testSimpleGetTheDataSource:
@Test
public void testSimpleGetTheDataSource() throws Exception {
    DruidDataSource dataSource1 = new DruidDataSource("datasource1", new HashMap());
    dataSource1.addSegment(
        "partition",
        new DataSegment("datasegment1", new Interval("2010-01-01/P1D"), null, null, null, null, null, 0x9, 10)
    );
    EasyMock.expect(server.getDataSource("datasource1")).andReturn(dataSource1).atLeastOnce();
    EasyMock.expect(server.getTier()).andReturn(null).atLeastOnce();
    EasyMock.expect(inventoryView.getInventory()).andReturn(ImmutableList.of(server)).atLeastOnce();
    EasyMock.replay(inventoryView, server);

    DatasourcesResource datasourcesResource = new DatasourcesResource(inventoryView, null, null, new AuthConfig());
    Response response = datasourcesResource.getTheDataSource("datasource1", null);
    Assert.assertEquals(200, response.getStatus());

    Map<String, Map<String, Object>> result = (Map<String, Map<String, Object>>) response.getEntity();
    // The mocked server's tier is null, so the per-tier stats are keyed by null.
    Assert.assertEquals(1, ((Map) (result.get("tiers").get(null))).get("segmentCount"));
    Assert.assertEquals(10L, ((Map) (result.get("tiers").get(null))).get("size"));
    Assert.assertNotNull(result.get("segments"));
    Assert.assertEquals("2010-01-01T00:00:00.000Z", result.get("segments").get("minTime").toString());
    Assert.assertEquals("2010-01-02T00:00:00.000Z", result.get("segments").get("maxTime").toString());
    Assert.assertEquals(1, result.get("segments").get("count"));
    Assert.assertEquals(10L, result.get("segments").get("size"));
    EasyMock.verify(inventoryView, server);
}
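The server and inventoryView fields used above are EasyMock mocks that each test configures with expect(...), activates with replay(...), and checks with verify(...). The test class's fixture is not shown in the snippet; a minimal sketch of a plausible setup (the @Before shape and the createStrictMock choice are assumptions, not taken from the original):

private InventoryView inventoryView;
private DruidServer server;

@Before
public void setUp() {
    // Strict mocks fail the test if an unexpected or out-of-order call occurs.
    inventoryView = EasyMock.createStrictMock(InventoryView.class);
    server = EasyMock.createStrictMock(DruidServer.class);
}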
In class DatasourcesResourceTest, method testFullGetTheDataSource:
@Test
public void testFullGetTheDataSource() throws Exception {
    DruidDataSource dataSource1 = new DruidDataSource("datasource1", new HashMap());
    EasyMock.expect(server.getDataSource("datasource1")).andReturn(dataSource1).atLeastOnce();
    EasyMock.expect(inventoryView.getInventory()).andReturn(ImmutableList.of(server)).atLeastOnce();
    EasyMock.replay(inventoryView, server);

    DatasourcesResource datasourcesResource = new DatasourcesResource(inventoryView, null, null, new AuthConfig());
    Response response = datasourcesResource.getTheDataSource("datasource1", "full");
    DruidDataSource result = (DruidDataSource) response.getEntity();
    Assert.assertEquals(200, response.getStatus());
    Assert.assertEquals(dataSource1, result);
    EasyMock.verify(inventoryView, server);
}
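Note the contrast with the simple variant above: passing "full" makes the resource return the raw DruidDataSource object itself, whereas passing null returns the computed summary maps of per-tier and per-segment statistics.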
In class ClientInfoResource, method getSegmentsForDatasources:
private Map<String, List<DataSegment>> getSegmentsForDatasources() {
    final Map<String, List<DataSegment>> dataSourceMap = Maps.newHashMap();
    for (DruidServer server : serverInventoryView.getInventory()) {
        for (DruidDataSource dataSource : server.getDataSources()) {
            // Group every served segment under its datasource name, across all servers.
            if (!dataSourceMap.containsKey(dataSource.getName())) {
                dataSourceMap.put(dataSource.getName(), Lists.<DataSegment>newArrayList());
            }
            List<DataSegment> segments = dataSourceMap.get(dataSource.getName());
            segments.addAll(dataSource.getSegments());
        }
    }
    return dataSourceMap;
}
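On Java 8+, the containsKey/put/get sequence above collapses into a single Map.computeIfAbsent call. A minimal equivalent sketch, substituting java.util.HashMap and java.util.ArrayList for the Guava Maps/Lists factories (serverInventoryView and the Druid types are as in the original):

private Map<String, List<DataSegment>> getSegmentsForDatasources() {
    final Map<String, List<DataSegment>> dataSourceMap = new HashMap<>();
    for (DruidServer server : serverInventoryView.getInventory()) {
        for (DruidDataSource dataSource : server.getDataSources()) {
            // Create the per-datasource list on first encounter, then append.
            dataSourceMap.computeIfAbsent(dataSource.getName(), k -> new ArrayList<>())
                         .addAll(dataSource.getSegments());
        }
    }
    return dataSourceMap;
}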
In class DruidCoordinatorLogger, method run:
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    DruidCluster cluster = params.getDruidCluster();
    CoordinatorStats stats = params.getCoordinatorStats();
    ServiceEmitter emitter = params.getEmitter();

    // Log and emit per-tier assignment and drop counts.
    Map<String, AtomicLong> assigned = stats.getPerTierStats().get("assignedCount");
    if (assigned != null) {
        for (Map.Entry<String, AtomicLong> entry : assigned.entrySet()) {
            log.info("[%s] : Assigned %s segments among %,d servers", entry.getKey(), entry.getValue().get(), cluster.get(entry.getKey()).size());
        }
    }
    emitTieredStats(emitter, "segment/assigned/count", assigned);

    Map<String, AtomicLong> dropped = stats.getPerTierStats().get("droppedCount");
    if (dropped != null) {
        for (Map.Entry<String, AtomicLong> entry : dropped.entrySet()) {
            log.info("[%s] : Dropped %s segments among %,d servers", entry.getKey(), entry.getValue().get(), cluster.get(entry.getKey()).size());
        }
    }
    emitTieredStats(emitter, "segment/dropped/count", dropped);

    // Cost-balancer and movement stats.
    emitTieredStats(emitter, "segment/cost/raw", stats.getPerTierStats().get("initialCost"));
    emitTieredStats(emitter, "segment/cost/normalization", stats.getPerTierStats().get("normalization"));
    emitTieredStats(emitter, "segment/moved/count", stats.getPerTierStats().get("movedCount"));
    emitTieredStats(emitter, "segment/deleted/count", stats.getPerTierStats().get("deletedCount"));

    Map<String, AtomicLong> normalized = stats.getPerTierStats().get("normalizedInitialCostTimesOneThousand");
    if (normalized != null) {
        // The stat is stored as cost * 1000 in an AtomicLong; divide it back out before emitting.
        emitTieredStats(emitter, "segment/cost/normalized", Maps.transformEntries(normalized, new Maps.EntryTransformer<String, AtomicLong, Number>() {
            @Override
            public Number transformEntry(String key, AtomicLong value) {
                return value.doubleValue() / 1000d;
            }
        }));
    }

    Map<String, AtomicLong> unneeded = stats.getPerTierStats().get("unneededCount");
    if (unneeded != null) {
        for (Map.Entry<String, AtomicLong> entry : unneeded.entrySet()) {
            log.info("[%s] : Removed %s unneeded segments among %,d servers", entry.getKey(), entry.getValue().get(), cluster.get(entry.getKey()).size());
        }
    }
    emitTieredStats(emitter, "segment/unneeded/count", unneeded);

    emitter.emit(new ServiceMetricEvent.Builder().build("segment/overShadowed/count", stats.getGlobalStats().get("overShadowedCount")));

    Map<String, AtomicLong> moved = stats.getPerTierStats().get("movedCount");
    if (moved != null) {
        for (Map.Entry<String, AtomicLong> entry : moved.entrySet()) {
            log.info("[%s] : Moved %,d segment(s)", entry.getKey(), entry.getValue().get());
        }
    }
    final Map<String, AtomicLong> unmoved = stats.getPerTierStats().get("unmovedCount");
    if (unmoved != null) {
        for (Map.Entry<String, AtomicLong> entry : unmoved.entrySet()) {
            log.info("[%s] : Let alone %,d segment(s)", entry.getKey(), entry.getValue().get());
        }
    }

    // Log per-server load queue state.
    log.info("Load Queues:");
    for (MinMaxPriorityQueue<ServerHolder> serverHolders : cluster.getSortedServersByTier()) {
        for (ServerHolder serverHolder : serverHolders) {
            ImmutableDruidServer server = serverHolder.getServer();
            LoadQueuePeon queuePeon = serverHolder.getPeon();
            log.info(
                "Server[%s, %s, %s] has %,d left to load, %,d left to drop, %,d bytes queued, %,d bytes served.",
                server.getName(), server.getType(), server.getTier(),
                queuePeon.getSegmentsToLoad().size(), queuePeon.getSegmentsToDrop().size(),
                queuePeon.getLoadQueueSize(), server.getCurrSize()
            );
            if (log.isDebugEnabled()) {
                for (DataSegment segment : queuePeon.getSegmentsToLoad()) {
                    log.debug("Segment to load[%s]", segment);
                }
                for (DataSegment segment : queuePeon.getSegmentsToDrop()) {
                    log.debug("Segment to drop[%s]", segment);
                }
            }
        }
    }

    // Emit coordinator metrics
    final Set<Map.Entry<String, LoadQueuePeon>> peonEntries = params.getLoadManagementPeons().entrySet();
    for (Map.Entry<String, LoadQueuePeon> entry : peonEntries) {
        String serverName = entry.getKey();
        LoadQueuePeon queuePeon = entry.getValue();
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/size", queuePeon.getLoadQueueSize()));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/failed", queuePeon.getAndResetFailedAssignCount()));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/count", queuePeon.getSegmentsToLoad().size()));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/dropQueue/count", queuePeon.getSegmentsToDrop().size()));
    }
    for (Map.Entry<String, AtomicLong> entry : coordinator.getSegmentAvailability().entrySet()) {
        String datasource = entry.getKey();
        Long count = entry.getValue().get();
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, datasource).build("segment/unavailable/count", count));
    }
    for (Map.Entry<String, CountingMap<String>> entry : coordinator.getReplicationStatus().entrySet()) {
        String tier = entry.getKey();
        CountingMap<String> datasourceAvailabilities = entry.getValue();
        for (Map.Entry<String, AtomicLong> datasourceAvailability : datasourceAvailabilities.entrySet()) {
            String datasource = datasourceAvailability.getKey();
            Long count = datasourceAvailability.getValue().get();
            emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.TIER, tier).setDimension(DruidMetrics.DATASOURCE, datasource).build("segment/underReplicated/count", count));
        }
    }

    // Emit segment metrics
    CountingMap<String> segmentSizes = new CountingMap<String>();
    CountingMap<String> segmentCounts = new CountingMap<String>();
    for (DruidDataSource dataSource : params.getDataSources()) {
        for (DataSegment segment : dataSource.getSegments()) {
            segmentSizes.add(dataSource.getName(), segment.getSize());
            segmentCounts.add(dataSource.getName(), 1L);
        }
    }
    for (Map.Entry<String, Long> entry : segmentSizes.snapshot().entrySet()) {
        String dataSource = entry.getKey();
        Long size = entry.getValue();
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/size", size));
    }
    for (Map.Entry<String, Long> entry : segmentCounts.snapshot().entrySet()) {
        String dataSource = entry.getKey();
        Long count = entry.getValue();
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/count", count));
    }
    return params;
}
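run() leans repeatedly on an emitTieredStats helper that the snippet omits. A plausible sketch of its shape, assuming it tolerates a null stat map (callers pass the raw lookup result, which may be absent for a given run) and tags each event with the tier dimension; the actual helper in DruidCoordinatorLogger may differ in signature, for instance by using separate overloads for AtomicLong and double-valued maps:

private void emitTieredStats(final ServiceEmitter emitter, final String metricName, final Map<String, ? extends Number> statMap) {
    if (statMap != null) {
        for (Map.Entry<String, ? extends Number> entry : statMap.entrySet()) {
            // One event per tier, tagged with the tier dimension.
            emitter.emit(new ServiceMetricEvent.Builder()
                .setDimension(DruidMetrics.TIER, entry.getKey())
                .build(metricName, entry.getValue().doubleValue()));
        }
    }
}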
In class DruidCoordinator, method getLoadStatus:
public Map<String, Double> getLoadStatus() {
    Map<String, Double> loadStatus = Maps.newHashMap();
    for (DruidDataSource dataSource : metadataSegmentManager.getInventory()) {
        final Set<DataSegment> segments = Sets.newHashSet(dataSource.getSegments());
        final int availableSegmentSize = segments.size();

        // Remove loaded segments: anything any server reports serving is subtracted,
        // leaving only the segments that are not yet loaded anywhere.
        for (DruidServer druidServer : serverInventoryView.getInventory()) {
            final DruidDataSource loadedView = druidServer.getDataSource(dataSource.getName());
            if (loadedView != null) {
                segments.removeAll(loadedView.getSegments());
            }
        }
        final int unloadedSegmentSize = segments.size();
        loadStatus.put(
            dataSource.getName(),
            100 * ((double) (availableSegmentSize - unloadedSegmentSize) / (double) availableSegmentSize)
        );
    }
    return loadStatus;
}
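To make the percentage arithmetic concrete: a datasource with 10 available segments of which 2 remain unloaded reports 100 * (10 - 2) / 10 = 80.0. A standalone sketch of the computation; the zero-segment guard is an addition for illustration, since the method above would divide by zero for a datasource with no available segments:

// Percentage of a datasource's available segments that are already served.
static double loadPercentage(int availableSegments, int unloadedSegments) {
    if (availableSegments == 0) {
        return 100.0; // assumption: treat an empty datasource as fully loaded
    }
    return 100.0 * (availableSegments - unloadedSegments) / availableSegments;
}

// loadPercentage(10, 2) == 80.0
// loadPercentage(5, 5)  == 0.0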