Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.
The class RunRules, method run.
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
replicatorThrottler.updateParams(
    coordinator.getDynamicConfigs().getReplicationThrottleLimit(),
    coordinator.getDynamicConfigs().getReplicantLifetime(),
    false
);
CoordinatorStats stats = new CoordinatorStats();
DruidCluster cluster = params.getDruidCluster();
if (cluster.isEmpty()) {
log.warn("Uh... I have no servers. Not assigning anything...");
return params;
}
// Get used segments which are overshadowed by other used segments. Those would not need to be loaded and
// eventually will be unloaded from Historical servers. Segments overshadowed by *served* used segments are marked
// as unused in MarkAsUnusedOvershadowedSegments, and then eventually Coordinator sends commands to Historical nodes
// to unload such segments in UnloadUnusedSegments.
Set<SegmentId> overshadowed = params.getDataSourcesSnapshot().getOvershadowedSegments();
for (String tier : cluster.getTierNames()) {
replicatorThrottler.updateReplicationState(tier);
}
DruidCoordinatorRuntimeParams paramsWithReplicationManager = params
    .buildFromExistingWithoutSegmentsMetadata()
    .withReplicationManager(replicatorThrottler)
    .build();
// Run through all matched rules for used segments
DateTime now = DateTimes.nowUtc();
MetadataRuleManager databaseRuleManager = paramsWithReplicationManager.getDatabaseRuleManager();
final List<SegmentId> segmentsWithMissingRules = Lists.newArrayListWithCapacity(MAX_MISSING_RULES);
int missingRules = 0;
final Set<String> broadcastDatasources = new HashSet<>();
for (ImmutableDruidDataSource dataSource : params.getDataSourcesSnapshot().getDataSourcesMap().values()) {
List<Rule> rules = databaseRuleManager.getRulesWithDefault(dataSource.getName());
for (Rule rule : rules) {
// A datasource is a broadcast datasource if it has any broadcast rules. The resulting set
// is consumed by BalanceSegments, which is why RunRules executes before BalanceSegments.
if (rule instanceof BroadcastDistributionRule) {
broadcastDatasources.add(dataSource.getName());
break;
}
}
}
for (DataSegment segment : params.getUsedSegments()) {
if (overshadowed.contains(segment.getId())) {
// Skipping overshadowed segments
continue;
}
List<Rule> rules = databaseRuleManager.getRulesWithDefault(segment.getDataSource());
boolean foundMatchingRule = false;
for (Rule rule : rules) {
if (rule.appliesTo(segment, now)) {
if (stats.getGlobalStat("totalNonPrimaryReplicantsLoaded") >= paramsWithReplicationManager.getCoordinatorDynamicConfig().getMaxNonPrimaryReplicantsToLoad()
    && !paramsWithReplicationManager.getReplicationManager().isLoadPrimaryReplicantsOnly()) {
  log.info(
      "Maximum number of non-primary replicants [%d] have been loaded for the current RunRules execution."
          + " Only loading primary replicants from here on for this coordinator run cycle.",
      paramsWithReplicationManager.getCoordinatorDynamicConfig().getMaxNonPrimaryReplicantsToLoad()
  );
  paramsWithReplicationManager.getReplicationManager().setLoadPrimaryReplicantsOnly(true);
}
stats.accumulate(rule.run(coordinator, paramsWithReplicationManager, segment));
foundMatchingRule = true;
break;
}
}
if (!foundMatchingRule) {
if (segmentsWithMissingRules.size() < MAX_MISSING_RULES) {
segmentsWithMissingRules.add(segment.getId());
}
missingRules++;
}
}
if (!segmentsWithMissingRules.isEmpty()) {
log.makeAlert("Unable to find matching rules!")
   .addData("segmentsWithMissingRulesCount", missingRules)
   .addData("segmentsWithMissingRules", segmentsWithMissingRules)
   .emit();
}
return params.buildFromExisting()
             .withCoordinatorStats(stats)
             .withBroadcastDatasources(broadcastDatasources)
             .build();
}
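RunRules treats SegmentId as a plain value key: the overshadowed check above is simple set membership, which works because SegmentId defines equality and hashing over its fields. Below is a minimal standalone sketch of that pattern, not the coordinator's own code; it assumes the SegmentId.of(dataSource, interval, version, partitionNum) factory and the Intervals helper from org.apache.druid.java.util.common.

import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.timeline.SegmentId;

import java.util.HashSet;
import java.util.Set;

public class OvershadowedCheckSketch
{
  public static void main(String[] args)
  {
    // Two ids for the same datasource, interval, and partition but different versions.
    SegmentId v1 = SegmentId.of("wikipedia", Intervals.of("2014/2015"), "v1", 0);
    SegmentId v2 = SegmentId.of("wikipedia", Intervals.of("2014/2015"), "v2", 0);

    // In RunRules the overshadowed set comes from the DataSourcesSnapshot; here it is
    // populated by hand purely to demonstrate the membership test.
    Set<SegmentId> overshadowed = new HashSet<>();
    overshadowed.add(v1);

    System.out.println(overshadowed.contains(v1)); // true  -> segment is skipped
    System.out.println(overshadowed.contains(v2)); // false -> rules are applied
  }
}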
Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.
The class MetadataResource, method getAllUsedSegmentsWithOvershadowedStatus.
private Response getAllUsedSegmentsWithOvershadowedStatus(HttpServletRequest req, @Nullable Set<String> dataSources) {
DataSourcesSnapshot dataSourcesSnapshot = segmentsMetadataManager.getSnapshotOfDataSourcesWithAllUsedSegments();
Collection<ImmutableDruidDataSource> dataSourcesWithUsedSegments = dataSourcesSnapshot.getDataSourcesWithAllUsedSegments();
if (dataSources != null && !dataSources.isEmpty()) {
dataSourcesWithUsedSegments = dataSourcesWithUsedSegments
    .stream()
    .filter(dataSourceWithUsedSegments -> dataSources.contains(dataSourceWithUsedSegments.getName()))
    .collect(Collectors.toList());
}
final Stream<DataSegment> usedSegments = dataSourcesWithUsedSegments
    .stream()
    .flatMap(t -> t.getSegments().stream());
final Set<SegmentId> overshadowedSegments = dataSourcesSnapshot.getOvershadowedSegments();
final Stream<SegmentWithOvershadowedStatus> usedSegmentsWithOvershadowedStatus = usedSegments.map(
    segment -> new SegmentWithOvershadowedStatus(segment, overshadowedSegments.contains(segment.getId()))
);
final Function<SegmentWithOvershadowedStatus, Iterable<ResourceAction>> raGenerator = segment ->
    Collections.singletonList(AuthorizationUtils.DATASOURCE_READ_RA_GENERATOR.apply(segment.getDataSegment().getDataSource()));
final Iterable<SegmentWithOvershadowedStatus> authorizedSegments = AuthorizationUtils.filterAuthorizedResources(
    req,
    usedSegmentsWithOvershadowedStatus::iterator,
    raGenerator,
    authorizerMapper
);
Response.ResponseBuilder builder = Response.status(Response.Status.OK);
return builder.entity(authorizedSegments).build();
}
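The endpoint derives each segment's overshadowed flag purely from SegmentId membership in the snapshot's overshadowed set. A minimal sketch of that map step follows; the TaggedSegment class is hypothetical, standing in for SegmentWithOvershadowedStatus with everything but the id stripped away.

import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.timeline.SegmentId;

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class OvershadowedTagSketch
{
  // Hypothetical stand-in for SegmentWithOvershadowedStatus, reduced to the id.
  static class TaggedSegment
  {
    final SegmentId id;
    final boolean overshadowed;

    TaggedSegment(SegmentId id, boolean overshadowed)
    {
      this.id = id;
      this.overshadowed = overshadowed;
    }
  }

  public static void main(String[] args)
  {
    SegmentId a = SegmentId.of("wikipedia", Intervals.of("2014/2015"), "v1", 0);
    SegmentId b = SegmentId.of("wikipedia", Intervals.of("2015/2016"), "v1", 0);
    Set<SegmentId> overshadowed = Collections.singleton(a);

    // The same map step as the resource method: set membership becomes a per-segment flag.
    List<TaggedSegment> tagged = Arrays.asList(a, b)
        .stream()
        .map(id -> new TaggedSegment(id, overshadowed.contains(id)))
        .collect(Collectors.toList());
    tagged.forEach(t -> System.out.println(t.id + " overshadowed=" + t.overshadowed));
  }
}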
Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.
The class CachingClusteredClientCacheKeyManagerTest, method makeServerSelector.
/**
 * Using partitionNumber, it's possible to create segments with different ids.
 */
private SegmentServerSelector makeServerSelector(boolean isHistorical, int partitionNumber) {
ServerSelector serverSelector = mock(ServerSelector.class);
QueryableDruidServer queryableDruidServer = mock(QueryableDruidServer.class);
DruidServer server = mock(DruidServer.class);
SegmentId segmentId = SegmentId.dummy("data-source", partitionNumber);
DataSegment segment = new DataSegment(
    segmentId,
    null,
    null,
    null,
    new NumberedShardSpec(partitionNumber, 10),
    null,
    0,
    0
);
expect(server.isSegmentReplicationTarget()).andReturn(isHistorical).anyTimes();
expect(serverSelector.pick(query)).andReturn(queryableDruidServer).anyTimes();
expect(queryableDruidServer.getServer()).andReturn(server).anyTimes();
expect(serverSelector.getSegment()).andReturn(segment).anyTimes();
replay(serverSelector, queryableDruidServer, server);
return new SegmentServerSelector(serverSelector, segmentId.toDescriptor());
}
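The selector returned above addresses the segment through segmentId.toDescriptor() rather than the full id. A small sketch of what that conversion yields, assuming SegmentDescriptor's getInterval(), getVersion(), and getPartitionNumber() accessors:

import org.apache.druid.query.SegmentDescriptor;
import org.apache.druid.timeline.SegmentId;

public class DescriptorSketch
{
  public static void main(String[] args)
  {
    // SegmentId.dummy(...) fabricates an id with a placeholder interval and version,
    // which is why the test above only has to vary the partition number.
    SegmentId segmentId = SegmentId.dummy("data-source", 3);

    // toDescriptor() keeps just the coordinates a query needs to address the segment.
    SegmentDescriptor descriptor = segmentId.toDescriptor();
    System.out.println(descriptor.getInterval());
    System.out.println(descriptor.getVersion());
    System.out.println(descriptor.getPartitionNumber());
  }
}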
Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.
The class HttpServerInventoryViewTest, method testSimple.
@Test(timeout = 60_000L)
public void testSimple() throws Exception {
ObjectMapper jsonMapper = TestHelper.makeJsonMapper();
TestDruidNodeDiscovery druidNodeDiscovery = new TestDruidNodeDiscovery();
DruidNodeDiscoveryProvider druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class);
EasyMock.expect(druidNodeDiscoveryProvider.getForService(DataNodeService.DISCOVERY_SERVICE_KEY)).andReturn(druidNodeDiscovery);
EasyMock.replay(druidNodeDiscoveryProvider);
final DataSegment segment1 = new DataSegment("test1", Intervals.of("2014/2015"), "v1", null, null, null, null, 0, 0);
final DataSegment segment2 = new DataSegment("test2", Intervals.of("2014/2015"), "v1", null, null, null, null, 0, 0);
final DataSegment segment3 = new DataSegment("test3", Intervals.of("2014/2015"), "v1", null, null, null, null, 0, 0);
final DataSegment segment4 = new DataSegment("test4", Intervals.of("2014/2015"), "v1", null, null, null, null, 0, 0);
final DataSegment segment5 = new DataSegment("non-loading-datasource", Intervals.of("2014/2015"), "v1", null, null, null, null, 0, 0);
TestHttpClient httpClient = new TestHttpClient(
    ImmutableList.of(
        Futures.immediateFuture(
            new ByteArrayInputStream(
                jsonMapper.writerWithType(HttpServerInventoryView.SEGMENT_LIST_RESP_TYPE_REF).writeValueAsBytes(
                    new ChangeRequestsSnapshot(
                        false,
                        null,
                        ChangeRequestHistory.Counter.ZERO,
                        ImmutableList.of(new SegmentChangeRequestLoad(segment1))
                    )
                )
            )
        ),
        Futures.immediateFuture(
            new ByteArrayInputStream(
                jsonMapper.writerWithType(HttpServerInventoryView.SEGMENT_LIST_RESP_TYPE_REF).writeValueAsBytes(
                    new ChangeRequestsSnapshot(
                        false,
                        null,
                        ChangeRequestHistory.Counter.ZERO,
                        ImmutableList.of(
                            new SegmentChangeRequestDrop(segment1),
                            new SegmentChangeRequestLoad(segment2),
                            new SegmentChangeRequestLoad(segment3)
                        )
                    )
                )
            )
        ),
        Futures.immediateFuture(
            new ByteArrayInputStream(
                jsonMapper.writerWithType(HttpServerInventoryView.SEGMENT_LIST_RESP_TYPE_REF).writeValueAsBytes(
                    new ChangeRequestsSnapshot(true, "force reset counter", ChangeRequestHistory.Counter.ZERO, ImmutableList.of())
                )
            )
        ),
        Futures.immediateFuture(
            new ByteArrayInputStream(
                jsonMapper.writerWithType(HttpServerInventoryView.SEGMENT_LIST_RESP_TYPE_REF).writeValueAsBytes(
                    new ChangeRequestsSnapshot(
                        false,
                        null,
                        ChangeRequestHistory.Counter.ZERO,
                        ImmutableList.of(
                            new SegmentChangeRequestLoad(segment3),
                            new SegmentChangeRequestLoad(segment4),
                            new SegmentChangeRequestLoad(segment5)
                        )
                    )
                )
            )
        )
    )
);
DiscoveryDruidNode druidNode = new DiscoveryDruidNode(
    new DruidNode("service", "host", false, 8080, null, true, false),
    NodeRole.HISTORICAL,
    ImmutableMap.of(DataNodeService.DISCOVERY_SERVICE_KEY, new DataNodeService("tier", 1000, ServerType.HISTORICAL, 0))
);
HttpServerInventoryView httpServerInventoryView = new HttpServerInventoryView(
    jsonMapper,
    httpClient,
    druidNodeDiscoveryProvider,
    (pair) -> !pair.rhs.getDataSource().equals("non-loading-datasource"),
    new HttpServerInventoryViewConfig(null, null, null),
    "test"
);
CountDownLatch initializeCallback1 = new CountDownLatch(1);
Map<SegmentId, CountDownLatch> segmentAddLatches = ImmutableMap.of(
    segment1.getId(), new CountDownLatch(1),
    segment2.getId(), new CountDownLatch(1),
    segment3.getId(), new CountDownLatch(1),
    segment4.getId(), new CountDownLatch(1)
);
Map<SegmentId, CountDownLatch> segmentDropLatches = ImmutableMap.of(
    segment1.getId(), new CountDownLatch(1),
    segment2.getId(), new CountDownLatch(1)
);
httpServerInventoryView.registerSegmentCallback(Execs.directExecutor(), new ServerView.SegmentCallback() {
@Override
public ServerView.CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
segmentAddLatches.get(segment.getId()).countDown();
return ServerView.CallbackAction.CONTINUE;
}
@Override
public ServerView.CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment) {
segmentDropLatches.get(segment.getId()).countDown();
return ServerView.CallbackAction.CONTINUE;
}
@Override
public ServerView.CallbackAction segmentViewInitialized() {
initializeCallback1.countDown();
return ServerView.CallbackAction.CONTINUE;
}
});
final CountDownLatch serverRemovedCalled = new CountDownLatch(1);
httpServerInventoryView.registerServerRemovedCallback(Execs.directExecutor(), new ServerView.ServerRemovedCallback() {
@Override
public ServerView.CallbackAction serverRemoved(DruidServer server) {
if (server.getName().equals("host:8080")) {
serverRemovedCalled.countDown();
return ServerView.CallbackAction.CONTINUE;
} else {
throw new RE("Unknown server [%s]", server.getName());
}
}
});
httpServerInventoryView.start();
druidNodeDiscovery.listener.nodesAdded(ImmutableList.of(druidNode));
initializeCallback1.await();
segmentAddLatches.get(segment1.getId()).await();
segmentDropLatches.get(segment1.getId()).await();
segmentAddLatches.get(segment2.getId()).await();
segmentAddLatches.get(segment3.getId()).await();
segmentAddLatches.get(segment4.getId()).await();
segmentDropLatches.get(segment2.getId()).await();
DruidServer druidServer = httpServerInventoryView.getInventoryValue("host:8080");
Assert.assertEquals(
    ImmutableMap.of(segment3.getId(), segment3, segment4.getId(), segment4),
    Maps.uniqueIndex(druidServer.iterateAllSegments(), DataSegment::getId)
);
druidNodeDiscovery.listener.nodesRemoved(ImmutableList.of(druidNode));
serverRemovedCalled.await();
Assert.assertNull(httpServerInventoryView.getInventoryValue("host:8080"));
EasyMock.verify(druidNodeDiscoveryProvider);
httpServerInventoryView.stop();
}
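The final assertion indexes the server's inventory by id with Maps.uniqueIndex, which relies on SegmentId's value equality to act as a unique key. A self-contained sketch of that assertion-side pattern, reusing the same nine-argument DataSegment constructor the test uses:

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Maps;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.timeline.DataSegment;
import org.apache.druid.timeline.SegmentId;

import java.util.Map;

public class IndexByIdSketch
{
  public static void main(String[] args)
  {
    DataSegment segment1 = new DataSegment("test1", Intervals.of("2014/2015"), "v1", null, null, null, null, 0, 0);
    DataSegment segment2 = new DataSegment("test2", Intervals.of("2014/2015"), "v1", null, null, null, null, 0, 0);

    // Each segment's id is unique, so uniqueIndex never sees a duplicate key.
    Map<SegmentId, DataSegment> byId = Maps.uniqueIndex(ImmutableList.of(segment1, segment2), DataSegment::getId);
    System.out.println(byId.get(segment1.getId()).getDataSource()); // test1
  }
}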
Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.
The class DataSourcesResourceTest, method testMarkSegmentsAsUnused.
@Test
public void testMarkSegmentsAsUnused() {
final DruidDataSource dataSource1 = new DruidDataSource("datasource1", new HashMap<>());
final Set<SegmentId> segmentIds = dataSegmentList
    .stream()
    .filter(segment -> segment.getDataSource().equals(dataSource1.getName()))
    .map(DataSegment::getId)
    .collect(Collectors.toSet());
EasyMock.expect(inventoryView.getInventory()).andReturn(ImmutableList.of(server)).once();
EasyMock.expect(server.getDataSource("datasource1")).andReturn(dataSource1).once();
EasyMock.expect(segmentsMetadataManager.markSegmentsAsUnused(segmentIds)).andReturn(1).once();
EasyMock.replay(segmentsMetadataManager, inventoryView, server);
final DataSourcesResource.MarkDataSourceSegmentsPayload payload = new DataSourcesResource.MarkDataSourceSegmentsPayload(
    null,
    segmentIds.stream().map(SegmentId::toString).collect(Collectors.toSet())
);
DataSourcesResource dataSourcesResource = new DataSourcesResource(inventoryView, segmentsMetadataManager, null, null, null, null);
Response response = dataSourcesResource.markSegmentsAsUnused("datasource1", payload);
Assert.assertEquals(200, response.getStatus());
Assert.assertEquals(ImmutableMap.of("numChangedSegments", 1), response.getEntity());
EasyMock.verify(segmentsMetadataManager, inventoryView, server);
}
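The payload converts each SegmentId to its string form via SegmentId::toString before the REST call. A short sketch of the full round trip, assuming the SegmentId.of(...) and SegmentId.tryParse(dataSource, idString) factories:

import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.timeline.SegmentId;

public class RoundTripSketch
{
  public static void main(String[] args)
  {
    SegmentId id = SegmentId.of("datasource1", Intervals.of("2014/2015"), "v1", 0);

    // The payload above ships ids over HTTP as plain strings.
    String serialized = id.toString();

    // Parsing takes the datasource as a hint because datasource names may themselves
    // contain the separator character; tryParse returns null if the string does not parse.
    SegmentId parsed = SegmentId.tryParse("datasource1", serialized);
    System.out.println(id.equals(parsed)); // true
  }
}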