
Example 46 with SegmentId

Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.

The class RunRules, method run:

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    replicatorThrottler.updateParams(
        coordinator.getDynamicConfigs().getReplicationThrottleLimit(),
        coordinator.getDynamicConfigs().getReplicantLifetime(),
        false
    );
    CoordinatorStats stats = new CoordinatorStats();
    DruidCluster cluster = params.getDruidCluster();
    if (cluster.isEmpty()) {
        log.warn("Uh... I have no servers. Not assigning anything...");
        return params;
    }
    // Get used segments which are overshadowed by other used segments. Those would not need to be loaded and
    // eventually will be unloaded from Historical servers. Segments overshadowed by *served* used segments are marked
    // as unused in MarkAsUnusedOvershadowedSegments, and then eventually Coordinator sends commands to Historical nodes
    // to unload such segments in UnloadUnusedSegments.
    Set<SegmentId> overshadowed = params.getDataSourcesSnapshot().getOvershadowedSegments();
    for (String tier : cluster.getTierNames()) {
        replicatorThrottler.updateReplicationState(tier);
    }
    DruidCoordinatorRuntimeParams paramsWithReplicationManager = params
        .buildFromExistingWithoutSegmentsMetadata()
        .withReplicationManager(replicatorThrottler)
        .build();
    // Run through all matched rules for used segments
    DateTime now = DateTimes.nowUtc();
    MetadataRuleManager databaseRuleManager = paramsWithReplicationManager.getDatabaseRuleManager();
    final List<SegmentId> segmentsWithMissingRules = Lists.newArrayListWithCapacity(MAX_MISSING_RULES);
    int missingRules = 0;
    final Set<String> broadcastDatasources = new HashSet<>();
    for (ImmutableDruidDataSource dataSource : params.getDataSourcesSnapshot().getDataSourcesMap().values()) {
        List<Rule> rules = databaseRuleManager.getRulesWithDefault(dataSource.getName());
        for (Rule rule : rules) {
            // A datasource is considered a broadcast datasource if it has any broadcast rules.
            // The set of broadcast datasources is used by BalanceSegments, so RunRules must execute
            // before BalanceSegments.
            if (rule instanceof BroadcastDistributionRule) {
                broadcastDatasources.add(dataSource.getName());
                break;
            }
        }
    }
    for (DataSegment segment : params.getUsedSegments()) {
        if (overshadowed.contains(segment.getId())) {
            // Skipping overshadowed segments
            continue;
        }
        List<Rule> rules = databaseRuleManager.getRulesWithDefault(segment.getDataSource());
        boolean foundMatchingRule = false;
        for (Rule rule : rules) {
            if (rule.appliesTo(segment, now)) {
                if (stats.getGlobalStat("totalNonPrimaryReplicantsLoaded") >= paramsWithReplicationManager.getCoordinatorDynamicConfig().getMaxNonPrimaryReplicantsToLoad()
                        && !paramsWithReplicationManager.getReplicationManager().isLoadPrimaryReplicantsOnly()) {
                    log.info(
                        "Maximum number of non-primary replicants [%d] have been loaded for the current RunRules execution. "
                            + "Only loading primary replicants from here on for this coordinator run cycle.",
                        paramsWithReplicationManager.getCoordinatorDynamicConfig().getMaxNonPrimaryReplicantsToLoad()
                    );
                    paramsWithReplicationManager.getReplicationManager().setLoadPrimaryReplicantsOnly(true);
                }
                stats.accumulate(rule.run(coordinator, paramsWithReplicationManager, segment));
                foundMatchingRule = true;
                break;
            }
        }
        if (!foundMatchingRule) {
            if (segmentsWithMissingRules.size() < MAX_MISSING_RULES) {
                segmentsWithMissingRules.add(segment.getId());
            }
            missingRules++;
        }
    }
    if (!segmentsWithMissingRules.isEmpty()) {
        log.makeAlert("Unable to find matching rules!")
           .addData("segmentsWithMissingRulesCount", missingRules)
           .addData("segmentsWithMissingRules", segmentsWithMissingRules)
           .emit();
    }
    return params.buildFromExisting()
                 .withCoordinatorStats(stats)
                 .withBroadcastDatasources(broadcastDatasources)
                 .build();
}
Also used : DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) MetadataRuleManager(org.apache.druid.metadata.MetadataRuleManager) ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource) SegmentId(org.apache.druid.timeline.SegmentId) DruidCluster(org.apache.druid.server.coordinator.DruidCluster) DataSegment(org.apache.druid.timeline.DataSegment) DateTime(org.joda.time.DateTime) BroadcastDistributionRule(org.apache.druid.server.coordinator.rules.BroadcastDistributionRule) Rule(org.apache.druid.server.coordinator.rules.Rule) HashSet(java.util.HashSet)
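
As a footnote to this example: the SegmentId values collected in segmentsWithMissingRules are ordinary value objects that can be built and parsed outside the coordinator. A minimal sketch, assuming the SegmentId.of and SegmentId.tryParse factories present in recent Druid versions:

import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.timeline.SegmentId;

public class SegmentIdSketch {
    public static void main(String[] args) {
        // Build an id from datasource, interval, version, and partition number.
        SegmentId id = SegmentId.of("wikipedia", Intervals.of("2014/2015"), "v1", 3);
        // The canonical string form embeds all four components.
        String serialized = id.toString();
        // Parsing takes the datasource as a hint, since datasource names may contain '_'.
        SegmentId parsed = SegmentId.tryParse("wikipedia", serialized);
        System.out.println(id.equals(parsed)); // expected: true
    }
}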

Example 47 with SegmentId

Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.

The class MetadataResource, method getAllUsedSegmentsWithOvershadowedStatus:

private Response getAllUsedSegmentsWithOvershadowedStatus(HttpServletRequest req, @Nullable Set<String> dataSources) {
    DataSourcesSnapshot dataSourcesSnapshot = segmentsMetadataManager.getSnapshotOfDataSourcesWithAllUsedSegments();
    Collection<ImmutableDruidDataSource> dataSourcesWithUsedSegments = dataSourcesSnapshot.getDataSourcesWithAllUsedSegments();
    if (dataSources != null && !dataSources.isEmpty()) {
        dataSourcesWithUsedSegments = dataSourcesWithUsedSegments
            .stream()
            .filter(dataSourceWithUsedSegments -> dataSources.contains(dataSourceWithUsedSegments.getName()))
            .collect(Collectors.toList());
    }
    final Stream<DataSegment> usedSegments = dataSourcesWithUsedSegments.stream().flatMap(t -> t.getSegments().stream());
    final Set<SegmentId> overshadowedSegments = dataSourcesSnapshot.getOvershadowedSegments();
    final Stream<SegmentWithOvershadowedStatus> usedSegmentsWithOvershadowedStatus = usedSegments.map(
        segment -> new SegmentWithOvershadowedStatus(segment, overshadowedSegments.contains(segment.getId()))
    );
    final Function<SegmentWithOvershadowedStatus, Iterable<ResourceAction>> raGenerator = segment ->
        Collections.singletonList(AuthorizationUtils.DATASOURCE_READ_RA_GENERATOR.apply(segment.getDataSegment().getDataSource()));
    final Iterable<SegmentWithOvershadowedStatus> authorizedSegments = AuthorizationUtils.filterAuthorizedResources(
        req,
        usedSegmentsWithOvershadowedStatus::iterator,
        raGenerator,
        authorizerMapper
    );
    Response.ResponseBuilder builder = Response.status(Response.Status.OK);
    return builder.entity(authorizedSegments).build();
}
Also used : Iterables(com.google.common.collect.Iterables) PathParam(javax.ws.rs.PathParam) Produces(javax.ws.rs.Produces) GET(javax.ws.rs.GET) AuthorizerMapper(org.apache.druid.server.security.AuthorizerMapper) Inject(com.google.inject.Inject) SegmentWithOvershadowedStatus(org.apache.druid.timeline.SegmentWithOvershadowedStatus) Path(javax.ws.rs.Path) Collections2(com.google.common.collect.Collections2) ResourceFilters(com.sun.jersey.spi.container.ResourceFilters) DataSourcesSnapshot(org.apache.druid.client.DataSourcesSnapshot) TreeSet(java.util.TreeSet) Interval(org.joda.time.Interval) HttpServletRequest(javax.servlet.http.HttpServletRequest) MediaType(javax.ws.rs.core.MediaType) QueryParam(javax.ws.rs.QueryParam) ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource) Nullable(javax.annotation.Nullable) JettyUtils(org.apache.druid.server.JettyUtils) Function(com.google.common.base.Function) POST(javax.ws.rs.POST) Context(javax.ws.rs.core.Context) SegmentsMetadataManager(org.apache.druid.metadata.SegmentsMetadataManager) Collection(java.util.Collection) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Segments(org.apache.druid.indexing.overlord.Segments) Set(java.util.Set) Json(org.apache.druid.guice.annotations.Json) AuthorizationUtils(org.apache.druid.server.security.AuthorizationUtils) Collectors(java.util.stream.Collectors) DatasourceResourceFilter(org.apache.druid.server.http.security.DatasourceResourceFilter) List(java.util.List) Stream(java.util.stream.Stream) Response(javax.ws.rs.core.Response) ResourceAction(org.apache.druid.server.security.ResourceAction) IndexerMetadataStorageCoordinator(org.apache.druid.indexing.overlord.IndexerMetadataStorageCoordinator) DataSegment(org.apache.druid.timeline.DataSegment) UriInfo(javax.ws.rs.core.UriInfo) SegmentId(org.apache.druid.timeline.SegmentId) Collections(java.util.Collections)
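
Stripped of the JAX-RS and authorization plumbing, the core of this endpoint is a join between the used segments and the overshadowed-id set. A minimal sketch using only the snapshot methods that appear above:

import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.druid.client.DataSourcesSnapshot;
import org.apache.druid.timeline.SegmentId;
import org.apache.druid.timeline.SegmentWithOvershadowedStatus;

public class OvershadowedStatusSketch {
    static List<SegmentWithOvershadowedStatus> annotate(DataSourcesSnapshot snapshot) {
        Set<SegmentId> overshadowed = snapshot.getOvershadowedSegments();
        return snapshot.getDataSourcesWithAllUsedSegments()
                       .stream()
                       .flatMap(dataSource -> dataSource.getSegments().stream())
                       // A segment is overshadowed iff its id is in the snapshot's overshadowed set.
                       .map(segment -> new SegmentWithOvershadowedStatus(segment, overshadowed.contains(segment.getId())))
                       .collect(Collectors.toList());
    }
}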

Example 48 with SegmentId

Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.

The class CachingClusteredClientCacheKeyManagerTest, method makeServerSelector:

/**
 * Using partitionNumber, it is possible to create segments with different ids.
 */
private SegmentServerSelector makeServerSelector(boolean isHistorical, int partitionNumber) {
    ServerSelector serverSelector = mock(ServerSelector.class);
    QueryableDruidServer queryableDruidServer = mock(QueryableDruidServer.class);
    DruidServer server = mock(DruidServer.class);
    SegmentId segmentId = SegmentId.dummy("data-source", partitionNumber);
    DataSegment segment = new DataSegment(segmentId, null, null, null, new NumberedShardSpec(partitionNumber, 10), null, 0, 0);
    expect(server.isSegmentReplicationTarget()).andReturn(isHistorical).anyTimes();
    expect(serverSelector.pick(query)).andReturn(queryableDruidServer).anyTimes();
    expect(queryableDruidServer.getServer()).andReturn(server).anyTimes();
    expect(serverSelector.getSegment()).andReturn(segment).anyTimes();
    replay(serverSelector, queryableDruidServer, server);
    return new SegmentServerSelector(serverSelector, segmentId.toDescriptor());
}
Also used : ServerSelector(org.apache.druid.client.selector.ServerSelector) SegmentId(org.apache.druid.timeline.SegmentId) QueryableDruidServer(org.apache.druid.client.selector.QueryableDruidServer) DataSegment(org.apache.druid.timeline.DataSegment) NumberedShardSpec(org.apache.druid.timeline.partition.NumberedShardSpec)
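
A hypothetical test method showing how the helper might be used; it assumes SegmentServerSelector exposes getSegmentDescriptor(), as in recent Druid versions:

@Test
public void testDistinctPartitionNumbersYieldDistinctSegments() {
    SegmentServerSelector first = makeServerSelector(true, 1);
    SegmentServerSelector second = makeServerSelector(true, 2);
    // SegmentId.dummy varies only the partition number here, so the descriptors should differ.
    Assert.assertNotEquals(first.getSegmentDescriptor(), second.getSegmentDescriptor());
}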

Example 49 with SegmentId

Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.

The class HttpServerInventoryViewTest, method testSimple:

@Test(timeout = 60_000L)
public void testSimple() throws Exception {
    ObjectMapper jsonMapper = TestHelper.makeJsonMapper();
    TestDruidNodeDiscovery druidNodeDiscovery = new TestDruidNodeDiscovery();
    DruidNodeDiscoveryProvider druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class);
    EasyMock.expect(druidNodeDiscoveryProvider.getForService(DataNodeService.DISCOVERY_SERVICE_KEY)).andReturn(druidNodeDiscovery);
    EasyMock.replay(druidNodeDiscoveryProvider);
    final DataSegment segment1 = new DataSegment("test1", Intervals.of("2014/2015"), "v1", null, null, null, null, 0, 0);
    final DataSegment segment2 = new DataSegment("test2", Intervals.of("2014/2015"), "v1", null, null, null, null, 0, 0);
    final DataSegment segment3 = new DataSegment("test3", Intervals.of("2014/2015"), "v1", null, null, null, null, 0, 0);
    final DataSegment segment4 = new DataSegment("test4", Intervals.of("2014/2015"), "v1", null, null, null, null, 0, 0);
    final DataSegment segment5 = new DataSegment("non-loading-datasource", Intervals.of("2014/2015"), "v1", null, null, null, null, 0, 0);
    TestHttpClient httpClient = new TestHttpClient(
        ImmutableList.of(
            Futures.immediateFuture(new ByteArrayInputStream(
                jsonMapper.writerWithType(HttpServerInventoryView.SEGMENT_LIST_RESP_TYPE_REF).writeValueAsBytes(
                    new ChangeRequestsSnapshot(false, null, ChangeRequestHistory.Counter.ZERO,
                        ImmutableList.of(new SegmentChangeRequestLoad(segment1)))))),
            Futures.immediateFuture(new ByteArrayInputStream(
                jsonMapper.writerWithType(HttpServerInventoryView.SEGMENT_LIST_RESP_TYPE_REF).writeValueAsBytes(
                    new ChangeRequestsSnapshot(false, null, ChangeRequestHistory.Counter.ZERO,
                        ImmutableList.of(new SegmentChangeRequestDrop(segment1), new SegmentChangeRequestLoad(segment2), new SegmentChangeRequestLoad(segment3)))))),
            Futures.immediateFuture(new ByteArrayInputStream(
                jsonMapper.writerWithType(HttpServerInventoryView.SEGMENT_LIST_RESP_TYPE_REF).writeValueAsBytes(
                    new ChangeRequestsSnapshot(true, "force reset counter", ChangeRequestHistory.Counter.ZERO, ImmutableList.of())))),
            Futures.immediateFuture(new ByteArrayInputStream(
                jsonMapper.writerWithType(HttpServerInventoryView.SEGMENT_LIST_RESP_TYPE_REF).writeValueAsBytes(
                    new ChangeRequestsSnapshot(false, null, ChangeRequestHistory.Counter.ZERO,
                        ImmutableList.of(new SegmentChangeRequestLoad(segment3), new SegmentChangeRequestLoad(segment4), new SegmentChangeRequestLoad(segment5))))))));
    DiscoveryDruidNode druidNode = new DiscoveryDruidNode(
        new DruidNode("service", "host", false, 8080, null, true, false),
        NodeRole.HISTORICAL,
        ImmutableMap.of(DataNodeService.DISCOVERY_SERVICE_KEY, new DataNodeService("tier", 1000, ServerType.HISTORICAL, 0))
    );
    HttpServerInventoryView httpServerInventoryView = new HttpServerInventoryView(
        jsonMapper,
        httpClient,
        druidNodeDiscoveryProvider,
        (pair) -> !pair.rhs.getDataSource().equals("non-loading-datasource"),
        new HttpServerInventoryViewConfig(null, null, null),
        "test"
    );
    CountDownLatch initializeCallback1 = new CountDownLatch(1);
    Map<SegmentId, CountDownLatch> segmentAddLatches = ImmutableMap.of(
        segment1.getId(), new CountDownLatch(1),
        segment2.getId(), new CountDownLatch(1),
        segment3.getId(), new CountDownLatch(1),
        segment4.getId(), new CountDownLatch(1)
    );
    Map<SegmentId, CountDownLatch> segmentDropLatches = ImmutableMap.of(
        segment1.getId(), new CountDownLatch(1),
        segment2.getId(), new CountDownLatch(1)
    );
    httpServerInventoryView.registerSegmentCallback(Execs.directExecutor(), new ServerView.SegmentCallback() {

        @Override
        public ServerView.CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
            segmentAddLatches.get(segment.getId()).countDown();
            return ServerView.CallbackAction.CONTINUE;
        }

        @Override
        public ServerView.CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment) {
            segmentDropLatches.get(segment.getId()).countDown();
            return ServerView.CallbackAction.CONTINUE;
        }

        @Override
        public ServerView.CallbackAction segmentViewInitialized() {
            initializeCallback1.countDown();
            return ServerView.CallbackAction.CONTINUE;
        }
    });
    final CountDownLatch serverRemovedCalled = new CountDownLatch(1);
    httpServerInventoryView.registerServerRemovedCallback(Execs.directExecutor(), new ServerView.ServerRemovedCallback() {

        @Override
        public ServerView.CallbackAction serverRemoved(DruidServer server) {
            if (server.getName().equals("host:8080")) {
                serverRemovedCalled.countDown();
                return ServerView.CallbackAction.CONTINUE;
            } else {
                throw new RE("Unknown server [%s]", server.getName());
            }
        }
    });
    httpServerInventoryView.start();
    druidNodeDiscovery.listener.nodesAdded(ImmutableList.of(druidNode));
    initializeCallback1.await();
    segmentAddLatches.get(segment1.getId()).await();
    segmentDropLatches.get(segment1.getId()).await();
    segmentAddLatches.get(segment2.getId()).await();
    segmentAddLatches.get(segment3.getId()).await();
    segmentAddLatches.get(segment4.getId()).await();
    segmentDropLatches.get(segment2.getId()).await();
    DruidServer druidServer = httpServerInventoryView.getInventoryValue("host:8080");
    Assert.assertEquals(
        ImmutableMap.of(segment3.getId(), segment3, segment4.getId(), segment4),
        Maps.uniqueIndex(druidServer.iterateAllSegments(), DataSegment::getId)
    );
    druidNodeDiscovery.listener.nodesRemoved(ImmutableList.of(druidNode));
    serverRemovedCalled.await();
    Assert.assertNull(httpServerInventoryView.getInventoryValue("host:8080"));
    EasyMock.verify(druidNodeDiscoveryProvider);
    httpServerInventoryView.stop();
}
Also used : DataSegment(org.apache.druid.timeline.DataSegment) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) SegmentChangeRequestLoad(org.apache.druid.server.coordination.SegmentChangeRequestLoad) SegmentId(org.apache.druid.timeline.SegmentId) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) CountDownLatch(java.util.concurrent.CountDownLatch) RE(org.apache.druid.java.util.common.RE) SegmentChangeRequestDrop(org.apache.druid.server.coordination.SegmentChangeRequestDrop) DiscoveryDruidNode(org.apache.druid.discovery.DiscoveryDruidNode) ByteArrayInputStream(java.io.ByteArrayInputStream) DruidNodeDiscoveryProvider(org.apache.druid.discovery.DruidNodeDiscoveryProvider) ChangeRequestsSnapshot(org.apache.druid.server.coordination.ChangeRequestsSnapshot) DruidNode(org.apache.druid.server.DruidNode) DataNodeService(org.apache.druid.discovery.DataNodeService) Test(org.junit.Test)
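
The Map<SegmentId, CountDownLatch> pattern in this test is a general technique for waiting on asynchronous callbacks keyed by segment. A self-contained sketch of the same idea, with hypothetical class and method names:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class LatchPerKeySketch {
    // One latch per expected key; callbacks count down, the test thread awaits.
    private final Map<String, CountDownLatch> latches = new ConcurrentHashMap<>();

    public void expect(String key) {
        latches.put(key, new CountDownLatch(1));
    }

    // Invoked by the asynchronous callback (segmentAdded/segmentRemoved in the test above).
    public void onEvent(String key) {
        CountDownLatch latch = latches.get(key);
        if (latch != null) {
            latch.countDown();
        }
    }

    // Returns false if the event did not arrive within the timeout.
    public boolean awaitEvent(String key, long timeoutMillis) throws InterruptedException {
        return latches.get(key).await(timeoutMillis, TimeUnit.MILLISECONDS);
    }
}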

Example 50 with SegmentId

Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.

The class DataSourcesResourceTest, method testMarkSegmentsAsUnused:

@Test
public void testMarkSegmentsAsUnused() {
    final DruidDataSource dataSource1 = new DruidDataSource("datasource1", new HashMap<>());
    final Set<SegmentId> segmentIds = dataSegmentList
        .stream()
        .filter(segment -> segment.getDataSource().equals(dataSource1.getName()))
        .map(DataSegment::getId)
        .collect(Collectors.toSet());
    EasyMock.expect(inventoryView.getInventory()).andReturn(ImmutableList.of(server)).once();
    EasyMock.expect(server.getDataSource("datasource1")).andReturn(dataSource1).once();
    EasyMock.expect(segmentsMetadataManager.markSegmentsAsUnused(segmentIds)).andReturn(1).once();
    EasyMock.replay(segmentsMetadataManager, inventoryView, server);
    final DataSourcesResource.MarkDataSourceSegmentsPayload payload = new DataSourcesResource.MarkDataSourceSegmentsPayload(
        null,
        segmentIds.stream().map(SegmentId::toString).collect(Collectors.toSet())
    );
    DataSourcesResource dataSourcesResource = new DataSourcesResource(inventoryView, segmentsMetadataManager, null, null, null, null);
    Response response = dataSourcesResource.markSegmentsAsUnused("datasource1", payload);
    Assert.assertEquals(200, response.getStatus());
    Assert.assertEquals(ImmutableMap.of("numChangedSegments", 1), response.getEntity());
    EasyMock.verify(segmentsMetadataManager, inventoryView, server);
}
Also used : Response(javax.ws.rs.core.Response) SegmentId(org.apache.druid.timeline.SegmentId) DruidDataSource(org.apache.druid.client.DruidDataSource) ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource) Test(org.junit.Test)
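
Payloads such as MarkDataSourceSegmentsPayload carry segment ids as strings, so callers round-trip between SegmentId and String. A small sketch of that conversion, assuming SegmentId.tryParse as in recent Druid versions:

import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.druid.timeline.SegmentId;

public class SegmentIdRoundTripSketch {
    // Serialize ids for a JSON payload.
    static Set<String> toStrings(Set<SegmentId> ids) {
        return ids.stream().map(SegmentId::toString).collect(Collectors.toSet());
    }

    // Parse them back; tryParse returns null for strings that are not valid ids
    // of the given datasource, so such entries are filtered out here.
    static Set<SegmentId> fromStrings(String dataSource, Set<String> idStrings) {
        return idStrings.stream()
                        .map(s -> SegmentId.tryParse(dataSource, s))
                        .filter(Objects::nonNull)
                        .collect(Collectors.toSet());
    }
}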

Aggregations

SegmentId (org.apache.druid.timeline.SegmentId): 63 usages
DataSegment (org.apache.druid.timeline.DataSegment): 32 usages
Test (org.junit.Test): 21 usages
Interval (org.joda.time.Interval): 14 usages
ISE (org.apache.druid.java.util.common.ISE): 13 usages
ArrayList (java.util.ArrayList): 12 usages
Map (java.util.Map): 12 usages
Set (java.util.Set): 12 usages
ImmutableDruidDataSource (org.apache.druid.client.ImmutableDruidDataSource): 12 usages
List (java.util.List): 11 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 10 usages
IOException (java.io.IOException): 9 usages
TreeMap (java.util.TreeMap): 9 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 9 usages
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 8 usages
Collectors (java.util.stream.Collectors): 8 usages
Optional (java.util.Optional): 7 usages
Sets (com.google.common.collect.Sets): 6 usages
Nullable (javax.annotation.Nullable): 6 usages
Response (javax.ws.rs.core.Response): 6 usages