use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
the class UnloadUnusedSegments method handleUnusedSegmentsForServer.
private void handleUnusedSegmentsForServer(
    ServerHolder serverHolder,
    Set<DataSegment> usedSegments,
    DruidCoordinatorRuntimeParams params,
    CoordinatorStats stats,
    boolean dropBroadcastOnly,
    Map<String, Boolean> broadcastStatusByDatasource
)
{
  ImmutableDruidServer server = serverHolder.getServer();
  for (ImmutableDruidDataSource dataSource : server.getDataSources()) {
    boolean isBroadcastDatasource = broadcastStatusByDatasource.computeIfAbsent(dataSource.getName(), (dataSourceName) -> {
      List<Rule> rules = params.getDatabaseRuleManager().getRulesWithDefault(dataSource.getName());
      for (Rule rule : rules) {
        // A datasource is considered a broadcast datasource if it has any broadcast rules.
        if (rule instanceof BroadcastDistributionRule) {
          return true;
        }
      }
      return false;
    });
    // A task/indexer may be serving segments it created itself; we cannot identify
    // that set of segments here to exclude them, so for such servers we only drop
    // broadcast segments and skip non-broadcast datasources entirely.
    if (dropBroadcastOnly && !isBroadcastDatasource) {
      continue;
    }
    for (DataSegment segment : dataSource.getSegments()) {
      if (!usedSegments.contains(segment)) {
        LoadQueuePeon queuePeon = params.getLoadManagementPeons().get(server.getName());
        if (!queuePeon.getSegmentsToDrop().contains(segment)) {
          queuePeon.dropSegment(segment, () -> {});
          stats.addToTieredStat("unneededCount", server.getTier(), 1);
          log.info("Dropping unneeded segment [%s] from server [%s] in tier [%s]", segment.getId(), server.getName(), server.getTier());
        }
      }
    }
  }
}
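The broadcast check above is memoized with Map.computeIfAbsent, so the rule list is scanned at most once per datasource per coordinator run. Below is a minimal, self-contained sketch of that caching pattern; the rulesByDatasource map is a hypothetical stand-in for params.getDatabaseRuleManager().getRulesWithDefault(...):

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class BroadcastCheckSketch
{
  public static void main(String[] args)
  {
    // Hypothetical stand-in for the coordinator's rule lookup:
    // maps each datasource to the names of its rules.
    Map<String, List<String>> rulesByDatasource = Map.of(
        "broadcast-ds", List.of("broadcastForever"),
        "plain-ds", List.of("loadForever"));

    Map<String, Boolean> broadcastStatusByDatasource = new HashMap<>();
    for (String name : List.of("broadcast-ds", "plain-ds", "broadcast-ds")) {
      // The lambda runs at most once per datasource; repeat lookups hit the cache.
      boolean isBroadcast = broadcastStatusByDatasource.computeIfAbsent(
          name,
          n -> rulesByDatasource.get(n).stream().anyMatch(r -> r.startsWith("broadcast"))
      );
      System.out.println(name + " -> " + isBroadcast);
    }
  }
}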
use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
the class BroadcastDistributionRule method run.
@Override
public CoordinatorStats run(DruidCoordinator coordinator, DruidCoordinatorRuntimeParams params, DataSegment segment)
{
  final Set<ServerHolder> dropServerHolders = new HashSet<>();
  // Find servers where we need to load the broadcast segments
  final Set<ServerHolder> loadServerHolders = params
      .getDruidCluster()
      .getAllServers()
      .stream()
      .filter((serverHolder) -> {
        ServerType serverType = serverHolder.getServer().getType();
        if (!serverType.isSegmentBroadcastTarget()) {
          return false;
        }
        final boolean isServingSegment = serverHolder.isServingSegment(segment);
        if (serverHolder.isDecommissioning()) {
          if (isServingSegment && !serverHolder.isDroppingSegment(segment)) {
            dropServerHolders.add(serverHolder);
          }
          return false;
        }
        return !isServingSegment && !serverHolder.isLoadingSegment(segment);
      })
      .collect(Collectors.toSet());
  final CoordinatorStats stats = new CoordinatorStats();
  return stats
      .accumulate(assign(loadServerHolders, segment))
      .accumulate(drop(dropServerHolders, segment));
}
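Note that the filter above does double duty: it selects the servers that still need the broadcast segment while collecting decommissioning servers into dropServerHolders as a side effect. A self-contained sketch of the same partitioning, with a hypothetical record standing in for ServerHolder:

import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class BroadcastPartitionSketch
{
  // Minimal stand-in for ServerHolder: name, whether it already serves
  // the segment, and whether it is being decommissioned.
  record Server(String name, boolean serving, boolean decommissioning) {}

  public static void main(String[] args)
  {
    List<Server> cluster = List.of(
        new Server("historical-1", false, false),  // needs a load
        new Server("historical-2", true, false),   // already serving, nothing to do
        new Server("historical-3", true, true));   // decommissioning, needs a drop

    Set<Server> dropTargets = new HashSet<>();
    Set<Server> loadTargets = cluster.stream().filter(server -> {
      if (server.decommissioning()) {
        if (server.serving()) {
          dropTargets.add(server);  // side effect, as in the rule above
        }
        return false;
      }
      return !server.serving();
    }).collect(Collectors.toSet());

    System.out.println("load: " + loadTargets + ", drop: " + dropTargets);
  }
}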
use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
the class DataSourcesResourceTest method testGetDatasourceLoadstatusFullAndComputeUsingClusterView.
@Test
public void testGetDatasourceLoadstatusFullAndComputeUsingClusterView()
{
  DataSegment datasource1Segment1 = new DataSegment("datasource1", Intervals.of("2010-01-01/P1D"), "", null, null, null, null, 0x9, 10);
  DataSegment datasource1Segment2 = new DataSegment("datasource1", Intervals.of("2010-01-22/P1D"), "", null, null, null, null, 0x9, 20);
  List<DataSegment> segments = ImmutableList.of(datasource1Segment1, datasource1Segment2);

  final Map<String, Object2LongMap<String>> underReplicationCountsPerDataSourcePerTier = new HashMap<>();
  Object2LongMap<String> tier1 = new Object2LongOpenHashMap<>();
  tier1.put("datasource1", 0L);
  Object2LongMap<String> tier2 = new Object2LongOpenHashMap<>();
  tier2.put("datasource1", 3L);
  underReplicationCountsPerDataSourcePerTier.put("tier1", tier1);
  underReplicationCountsPerDataSourcePerTier.put("tier2", tier2);

  // Test the "full" load status computed using the cluster view
  EasyMock
      .expect(segmentsMetadataManager.iterateAllUsedNonOvershadowedSegmentsForDatasourceInterval(
          EasyMock.eq("datasource1"),
          EasyMock.anyObject(Interval.class),
          EasyMock.anyBoolean()
      ))
      .andReturn(Optional.of(segments))
      .once();
  DruidCoordinator druidCoordinator = EasyMock.createMock(DruidCoordinator.class);
  EasyMock
      .expect(druidCoordinator.computeUnderReplicationCountsPerDataSourcePerTierForSegmentsUsingClusterView(segments))
      .andReturn(underReplicationCountsPerDataSourcePerTier)
      .once();
  EasyMock.replay(segmentsMetadataManager, druidCoordinator);

  DataSourcesResource dataSourcesResource = new DataSourcesResource(inventoryView, segmentsMetadataManager, null, null, null, druidCoordinator);
  Response response = dataSourcesResource.getDatasourceLoadstatus("datasource1", true, null, null, "full", "computeUsingClusterView");
  Assert.assertEquals(200, response.getStatus());
  Assert.assertNotNull(response.getEntity());
  Assert.assertEquals(2, ((Map) response.getEntity()).size());
  Assert.assertEquals(1, ((Map) ((Map) response.getEntity()).get("tier1")).size());
  Assert.assertEquals(1, ((Map) ((Map) response.getEntity()).get("tier2")).size());
  Assert.assertEquals(0L, ((Map) ((Map) response.getEntity()).get("tier1")).get("datasource1"));
  Assert.assertEquals(3L, ((Map) ((Map) response.getEntity()).get("tier2")).get("datasource1"));
  EasyMock.verify(segmentsMetadataManager);
}
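For reference, the entity those assertions unpack is a tier-to-datasource map of under-replicated segment counts; serialized as JSON, the response body would look roughly like this:

{
  "tier1" : { "datasource1" : 0 },
  "tier2" : { "datasource1" : 3 }
}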
use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
the class DataSourcesResourceTest method testGetDatasourceLoadstatusNoSegmentForInterval.
@Test
public void testGetDatasourceLoadstatusNoSegmentForInterval()
{
  List<DataSegment> segments = ImmutableList.of();
  // Test when no used segments exist for the requested interval
  EasyMock
      .expect(segmentsMetadataManager.iterateAllUsedNonOvershadowedSegmentsForDatasourceInterval(
          EasyMock.eq("datasource1"),
          EasyMock.anyObject(Interval.class),
          EasyMock.anyBoolean()
      ))
      .andReturn(Optional.of(segments))
      .once();
  EasyMock.replay(segmentsMetadataManager);

  DataSourcesResource dataSourcesResource = new DataSourcesResource(inventoryView, segmentsMetadataManager, null, null, null, null);
  Response response = dataSourcesResource.getDatasourceLoadstatus("datasource1", true, null, null, null, null);
  Assert.assertEquals(204, response.getStatus());
}
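The 204 here is the no-content path: when the interval holds no used segments, the resource short-circuits before computing any load status. A minimal sketch of that guard, assuming the JAX-RS Response API that DataSourcesResource uses (loadstatusFor is a hypothetical reduction, not the real method):

import java.util.List;
import javax.ws.rs.core.Response;

public class LoadstatusSketch
{
  // Hypothetical guard: an empty segment list yields 204 No Content,
  // which is exactly what the test above asserts.
  static Response loadstatusFor(List<?> segments)
  {
    if (segments.isEmpty()) {
      return Response.noContent().build();  // HTTP 204
    }
    return Response.ok(segments).build();   // HTTP 200 with a body otherwise
  }

  public static void main(String[] args)
  {
    System.out.println(loadstatusFor(List.of()).getStatus());  // prints 204
  }
}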
use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
the class DataSourcesResourceTest method testSimpleGetTheDataSource.
@Test
public void testSimpleGetTheDataSource()
{
  DruidDataSource dataSource1 = new DruidDataSource("datasource1", new HashMap<>());
  dataSource1.addSegment(new DataSegment("datasegment1", Intervals.of("2010-01-01/P1D"), "", null, null, null, null, 0x9, 10));
  EasyMock.expect(server.getDataSource("datasource1")).andReturn(dataSource1).atLeastOnce();
  EasyMock.expect(server.getTier()).andReturn(null).atLeastOnce();
  EasyMock.expect(inventoryView.getInventory()).andReturn(ImmutableList.of(server)).atLeastOnce();
  EasyMock.replay(inventoryView, server);

  DataSourcesResource dataSourcesResource = new DataSourcesResource(inventoryView, null, null, null, null, null);
  Response response = dataSourcesResource.getDataSource("datasource1", null);
  Assert.assertEquals(200, response.getStatus());

  Map<String, Map<String, Object>> result = (Map<String, Map<String, Object>>) response.getEntity();
  Assert.assertEquals(1, ((Map) (result.get("tiers").get(null))).get("segmentCount"));
  Assert.assertEquals(10L, ((Map) (result.get("tiers").get(null))).get("size"));
  Assert.assertEquals(10L, ((Map) (result.get("tiers").get(null))).get("replicatedSize"));
  Assert.assertNotNull(result.get("segments"));
  Assert.assertEquals("2010-01-01T00:00:00.000Z", result.get("segments").get("minTime").toString());
  Assert.assertEquals("2010-01-02T00:00:00.000Z", result.get("segments").get("maxTime").toString());
  Assert.assertEquals(1, result.get("segments").get("count"));
  Assert.assertEquals(10L, result.get("segments").get("size"));
  Assert.assertEquals(10L, result.get("segments").get("replicatedSize"));
  EasyMock.verify(inventoryView, server);
}
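The minTime/maxTime assertions fall out of the single segment's interval: 2010-01-01/P1D starts at UTC midnight and ends exactly one day later. A small sketch using Joda-Time (the library behind Druid's Intervals helper; the equivalence to Intervals.of is an assumption here) that reproduces both endpoints:

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;
import org.joda.time.Period;

public class IntervalSketch
{
  public static void main(String[] args)
  {
    // A one-day interval pinned to UTC, like the segment's 2010-01-01/P1D.
    Interval interval = new Interval(new DateTime("2010-01-01", DateTimeZone.UTC), Period.days(1));
    System.out.println(interval.getStart());  // 2010-01-01T00:00:00.000Z -> minTime
    System.out.println(interval.getEnd());    // 2010-01-02T00:00:00.000Z -> maxTime
  }
}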