
Example 16 with DruidServerMetadata

Use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

The class BalancerStrategyBenchmark, method setup.

@Setup(Level.Trial)
public void setup() {
    switch(mode) {
        case "50percentOfSegmentsToConsiderPerMove":
            percentOfSegmentsToConsider = 50;
            useBatchedSegmentSampler = false;
            break;
        case "useBatchedSegmentSampler":
            reservoirSize = maxSegmentsToMove;
            useBatchedSegmentSampler = true;
            break;
        default:
    }
    // Distribute the segments uniformly at random across NUMBER_OF_SERVERS servers.
    List<List<DataSegment>> segmentList = new ArrayList<>(NUMBER_OF_SERVERS);
    IntStream.range(0, NUMBER_OF_SERVERS).forEach(i -> segmentList.add(new ArrayList<>()));
    for (int i = 0; i < numberOfSegments; i++) {
        segmentList.get(RANDOM.nextInt(NUMBER_OF_SERVERS)).add(
            new DataSegment("test", TEST_SEGMENT_INTERVAL, String.valueOf(i), Collections.emptyMap(),
                Collections.emptyList(), Collections.emptyList(), null, 0, 10L));
    }
    // Wrap each server's segment list in a ServerHolder backed by an ImmutableDruidServer.
    for (List<DataSegment> segments : segmentList) {
        serverHolders.add(new ServerHolder(
            new ImmutableDruidServer(
                new DruidServerMetadata("id", "host", null, 10000000L, ServerType.HISTORICAL, "hot", 1),
                3000L,
                ImmutableMap.of("test", new ImmutableDruidDataSource("test", Collections.emptyMap(), segments)),
                segments.size()),
            new LoadQueuePeonTester()));
    }
}
Also used : ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource) ArrayList(java.util.ArrayList) List(java.util.List) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) DataSegment(org.apache.druid.timeline.DataSegment) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) Setup(org.openjdk.jmh.annotations.Setup)
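The mode string driving the switch above is most naturally a JMH parameter, so each branch runs as its own trial. Below is a minimal sketch of how the fields referenced in setup() could be declared; the @Param values, defaults, and field types are assumptions for illustration, not copied from the benchmark class.

// Hypothetical state declarations for the benchmark; the names mirror the fields used in
// setup() above, but the annotation values and defaults here are assumptions.
@Param({"50percentOfSegmentsToConsiderPerMove", "useBatchedSegmentSampler", "default"})
private String mode;

@Param({"10000"})
private int numberOfSegments;

private int maxSegmentsToMove = 100;
private double percentOfSegmentsToConsider = 100;
private boolean useBatchedSegmentSampler = false;
private int reservoirSize = 1;
private final List<ServerHolder> serverHolders = new ArrayList<>();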

Example 17 with DruidServerMetadata

Use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

The class DruidSchemaInternRowSignatureBenchmark, method setup.

@Setup
public void setup() {
    druidSchema = new DruidSchemaForBenchmark(
        EasyMock.mock(QueryLifecycleFactory.class),
        EasyMock.mock(TimelineServerView.class),
        null, null,
        EasyMock.mock(PlannerConfig.class),
        null, null);
    DruidServerMetadata serverMetadata = new DruidServerMetadata("dummy", "dummy", "dummy", 42, ServerType.HISTORICAL, "tier-0", 0);
    // One builder is reused for every segment; only the interval differs per segment.
    DataSegment.Builder builder = DataSegment.builder()
        .dataSource("dummy")
        .shardSpec(new LinearShardSpec(0))
        .dimensions(ImmutableList.of("col1", "col2", "col3", "col4"))
        .version("1")
        .size(0);
    for (int i = 0; i < 10000; ++i) {
        DataSegment dataSegment = builder.interval(Intervals.of(i + "/" + (i + 1))).build();
        druidSchema.addSegment(serverMetadata, dataSegment);
    }
}
Also used : LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) DataSegment(org.apache.druid.timeline.DataSegment) Setup(org.openjdk.jmh.annotations.Setup)
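A JMH method that consumes this state might look like the sketch below. The method name, the Blackhole usage, and the interval are illustrative assumptions; only druidSchema.addSegment(...) and the DataSegment builder chain come from the snippet above.

// Hypothetical benchmark method: registers one more segment so that addSegment(), and with it
// the row-signature handling, is exercised on every invocation. Not the actual benchmark body.
@Benchmark
public void addOneSegment(Blackhole blackhole) {
    DruidServerMetadata serverMetadata = new DruidServerMetadata("dummy", "dummy", "dummy", 42, ServerType.HISTORICAL, "tier-0", 0);
    DataSegment dataSegment = DataSegment.builder()
        .dataSource("dummy")
        .shardSpec(new LinearShardSpec(0))
        .dimensions(ImmutableList.of("col1", "col2", "col3", "col4"))
        .version("1")
        .size(0)
        .interval(Intervals.of("2021/2022"))
        .build();
    druidSchema.addSegment(serverMetadata, dataSegment);
    blackhole.consume(dataSegment);
}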

Example 18 with DruidServerMetadata

Use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

The class ServerViewUtil, method getTargetLocations.

public static List<LocatedSegmentDescriptor> getTargetLocations(TimelineServerView serverView, DataSource datasource, List<Interval> intervals, int numCandidates) {
    final DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(datasource);
    final Optional<? extends TimelineLookup<String, ServerSelector>> maybeTimeline = serverView.getTimeline(analysis);
    if (!maybeTimeline.isPresent()) {
        return Collections.emptyList();
    }
    List<LocatedSegmentDescriptor> located = new ArrayList<>();
    for (Interval interval : intervals) {
        for (TimelineObjectHolder<String, ServerSelector> holder : maybeTimeline.get().lookup(interval)) {
            for (PartitionChunk<ServerSelector> chunk : holder.getObject()) {
                ServerSelector selector = chunk.getObject();
                final SegmentDescriptor descriptor = new SegmentDescriptor(holder.getInterval(), holder.getVersion(), chunk.getChunkNumber());
                long size = selector.getSegment().getSize();
                List<DruidServerMetadata> candidates = selector.getCandidates(numCandidates);
                located.add(new LocatedSegmentDescriptor(descriptor, size, candidates));
            }
        }
    }
    return located;
}
Also used : ArrayList(java.util.ArrayList) DataSourceAnalysis(org.apache.druid.query.planning.DataSourceAnalysis) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) ServerSelector(org.apache.druid.client.selector.ServerSelector) LocatedSegmentDescriptor(org.apache.druid.query.LocatedSegmentDescriptor) SegmentDescriptor(org.apache.druid.query.SegmentDescriptor) Interval(org.joda.time.Interval)
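A hedged sketch of how a caller could use this helper. The serverView instance, datasource name, interval, and candidate count are illustrative assumptions; the method signature itself is the one shown above.

// Hypothetical caller: ask for up to 3 candidate servers per segment of the "wikipedia"
// datasource that overlaps calendar year 2014, then print each located descriptor.
List<LocatedSegmentDescriptor> locations = ServerViewUtil.getTargetLocations(
    serverView,                                           // an injected TimelineServerView (assumed)
    new TableDataSource("wikipedia"),
    Collections.singletonList(Intervals.of("2014-01-01/2015-01-01")),
    3);
for (LocatedSegmentDescriptor location : locations) {
    System.out.println(location);
}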

Example 19 with DruidServerMetadata

Use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

The class BrokerServerViewTest, method setupViews.

private void setupViews(Set<String> watchedTiers, Set<String> ignoredTiers, boolean watchRealtimeTasks) throws Exception {
    baseView = new BatchServerInventoryView(zkPathsConfig, curator, jsonMapper, Predicates.alwaysTrue(), "test") {

        @Override
        public void registerSegmentCallback(Executor exec, final SegmentCallback callback) {
            super.registerSegmentCallback(exec, new SegmentCallback() {

                @Override
                public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
                    CallbackAction res = callback.segmentAdded(server, segment);
                    segmentAddedLatch.countDown();
                    return res;
                }

                @Override
                public CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment) {
                    CallbackAction res = callback.segmentRemoved(server, segment);
                    segmentRemovedLatch.countDown();
                    return res;
                }

                @Override
                public CallbackAction segmentViewInitialized() {
                    CallbackAction res = callback.segmentViewInitialized();
                    segmentViewInitLatch.countDown();
                    return res;
                }
            });
        }
    };
    brokerServerView = new BrokerServerView(
        EasyMock.createMock(QueryToolChestWarehouse.class),
        EasyMock.createMock(QueryWatcher.class),
        getSmileMapper(),
        EasyMock.createMock(HttpClient.class),
        baseView,
        new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy()),
        new NoopServiceEmitter(),
        new BrokerSegmentWatcherConfig() {

        @Override
        public Set<String> getWatchedTiers() {
            return watchedTiers;
        }

        @Override
        public boolean isWatchRealtimeTasks() {
            return watchRealtimeTasks;
        }

        @Override
        public Set<String> getIgnoredTiers() {
            return ignoredTiers;
        }
    });
    baseView.start();
}
Also used : NoopServiceEmitter(org.apache.druid.server.metrics.NoopServiceEmitter) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) DataSegment(org.apache.druid.timeline.DataSegment) Executor(java.util.concurrent.Executor) HighestPriorityTierSelectorStrategy(org.apache.druid.client.selector.HighestPriorityTierSelectorStrategy) RandomServerSelectorStrategy(org.apache.druid.client.selector.RandomServerSelectorStrategy)
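Tests built on setupViews() typically wait on the three latches before asserting, since the inventory callbacks arrive asynchronously from Curator. A sketch of that pattern follows; the timeout, the latch counts, and the tier arguments are assumptions, not taken from the test class.

// Hypothetical test fragment: the latches are assumed to be created with the number of
// callbacks the test expects, before servers and segments are announced.
segmentViewInitLatch = new CountDownLatch(1);
segmentAddedLatch = new CountDownLatch(5);
segmentRemovedLatch = new CountDownLatch(0);
setupViews(null, null, true);
// ... announce servers and segments through the curator-backed inventory here ...
Assert.assertTrue(segmentViewInitLatch.await(30, TimeUnit.SECONDS));
Assert.assertTrue(segmentAddedLatch.await(30, TimeUnit.SECONDS));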

Example 20 with DruidServerMetadata

Use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

The class HttpServerInventoryViewTest, method testSimple.

@Test(timeout = 60_000L)
public void testSimple() throws Exception {
    ObjectMapper jsonMapper = TestHelper.makeJsonMapper();
    TestDruidNodeDiscovery druidNodeDiscovery = new TestDruidNodeDiscovery();
    DruidNodeDiscoveryProvider druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class);
    EasyMock.expect(druidNodeDiscoveryProvider.getForService(DataNodeService.DISCOVERY_SERVICE_KEY)).andReturn(druidNodeDiscovery);
    EasyMock.replay(druidNodeDiscoveryProvider);
    final DataSegment segment1 = new DataSegment("test1", Intervals.of("2014/2015"), "v1", null, null, null, null, 0, 0);
    final DataSegment segment2 = new DataSegment("test2", Intervals.of("2014/2015"), "v1", null, null, null, null, 0, 0);
    final DataSegment segment3 = new DataSegment("test3", Intervals.of("2014/2015"), "v1", null, null, null, null, 0, 0);
    final DataSegment segment4 = new DataSegment("test4", Intervals.of("2014/2015"), "v1", null, null, null, null, 0, 0);
    final DataSegment segment5 = new DataSegment("non-loading-datasource", Intervals.of("2014/2015"), "v1", null, null, null, null, 0, 0);
    // Four canned responses, each a serialized ChangeRequestsSnapshot of segment load/drop requests,
    // returned by the mocked data node in order.
    TestHttpClient httpClient = new TestHttpClient(ImmutableList.of(
        Futures.immediateFuture(new ByteArrayInputStream(
            jsonMapper.writerWithType(HttpServerInventoryView.SEGMENT_LIST_RESP_TYPE_REF).writeValueAsBytes(
                new ChangeRequestsSnapshot(false, null, ChangeRequestHistory.Counter.ZERO,
                    ImmutableList.of(new SegmentChangeRequestLoad(segment1)))))),
        Futures.immediateFuture(new ByteArrayInputStream(
            jsonMapper.writerWithType(HttpServerInventoryView.SEGMENT_LIST_RESP_TYPE_REF).writeValueAsBytes(
                new ChangeRequestsSnapshot(false, null, ChangeRequestHistory.Counter.ZERO,
                    ImmutableList.of(new SegmentChangeRequestDrop(segment1),
                        new SegmentChangeRequestLoad(segment2),
                        new SegmentChangeRequestLoad(segment3)))))),
        Futures.immediateFuture(new ByteArrayInputStream(
            jsonMapper.writerWithType(HttpServerInventoryView.SEGMENT_LIST_RESP_TYPE_REF).writeValueAsBytes(
                new ChangeRequestsSnapshot(true, "force reset counter", ChangeRequestHistory.Counter.ZERO,
                    ImmutableList.of())))),
        Futures.immediateFuture(new ByteArrayInputStream(
            jsonMapper.writerWithType(HttpServerInventoryView.SEGMENT_LIST_RESP_TYPE_REF).writeValueAsBytes(
                new ChangeRequestsSnapshot(false, null, ChangeRequestHistory.Counter.ZERO,
                    ImmutableList.of(new SegmentChangeRequestLoad(segment3),
                        new SegmentChangeRequestLoad(segment4),
                        new SegmentChangeRequestLoad(segment5))))))));
    DiscoveryDruidNode druidNode = new DiscoveryDruidNode(
        new DruidNode("service", "host", false, 8080, null, true, false),
        NodeRole.HISTORICAL,
        ImmutableMap.of(DataNodeService.DISCOVERY_SERVICE_KEY, new DataNodeService("tier", 1000, ServerType.HISTORICAL, 0)));
    HttpServerInventoryView httpServerInventoryView = new HttpServerInventoryView(
        jsonMapper,
        httpClient,
        druidNodeDiscoveryProvider,
        (pair) -> !pair.rhs.getDataSource().equals("non-loading-datasource"),
        new HttpServerInventoryViewConfig(null, null, null),
        "test");
    CountDownLatch initializeCallback1 = new CountDownLatch(1);
    // One latch per expected segment-add / segment-drop callback.
    Map<SegmentId, CountDownLatch> segmentAddLatches = ImmutableMap.of(
        segment1.getId(), new CountDownLatch(1),
        segment2.getId(), new CountDownLatch(1),
        segment3.getId(), new CountDownLatch(1),
        segment4.getId(), new CountDownLatch(1));
    Map<SegmentId, CountDownLatch> segmentDropLatches = ImmutableMap.of(
        segment1.getId(), new CountDownLatch(1),
        segment2.getId(), new CountDownLatch(1));
    httpServerInventoryView.registerSegmentCallback(Execs.directExecutor(), new ServerView.SegmentCallback() {

        @Override
        public ServerView.CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
            segmentAddLatches.get(segment.getId()).countDown();
            return ServerView.CallbackAction.CONTINUE;
        }

        @Override
        public ServerView.CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment) {
            segmentDropLatches.get(segment.getId()).countDown();
            return ServerView.CallbackAction.CONTINUE;
        }

        @Override
        public ServerView.CallbackAction segmentViewInitialized() {
            initializeCallback1.countDown();
            return ServerView.CallbackAction.CONTINUE;
        }
    });
    final CountDownLatch serverRemovedCalled = new CountDownLatch(1);
    httpServerInventoryView.registerServerRemovedCallback(Execs.directExecutor(), new ServerView.ServerRemovedCallback() {

        @Override
        public ServerView.CallbackAction serverRemoved(DruidServer server) {
            if (server.getName().equals("host:8080")) {
                serverRemovedCalled.countDown();
                return ServerView.CallbackAction.CONTINUE;
            } else {
                throw new RE("Unknown server [%s]", server.getName());
            }
        }
    });
    httpServerInventoryView.start();
    druidNodeDiscovery.listener.nodesAdded(ImmutableList.of(druidNode));
    initializeCallback1.await();
    segmentAddLatches.get(segment1.getId()).await();
    segmentDropLatches.get(segment1.getId()).await();
    segmentAddLatches.get(segment2.getId()).await();
    segmentAddLatches.get(segment3.getId()).await();
    segmentAddLatches.get(segment4.getId()).await();
    segmentDropLatches.get(segment2.getId()).await();
    DruidServer druidServer = httpServerInventoryView.getInventoryValue("host:8080");
    Assert.assertEquals(ImmutableMap.of(segment3.getId(), segment3, segment4.getId(), segment4), Maps.uniqueIndex(druidServer.iterateAllSegments(), DataSegment::getId));
    druidNodeDiscovery.listener.nodesRemoved(ImmutableList.of(druidNode));
    serverRemovedCalled.await();
    Assert.assertNull(httpServerInventoryView.getInventoryValue("host:8080"));
    EasyMock.verify(druidNodeDiscoveryProvider);
    httpServerInventoryView.stop();
}
Also used : DataSegment(org.apache.druid.timeline.DataSegment) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) SegmentChangeRequestLoad(org.apache.druid.server.coordination.SegmentChangeRequestLoad) SegmentId(org.apache.druid.timeline.SegmentId) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) CountDownLatch(java.util.concurrent.CountDownLatch) RE(org.apache.druid.java.util.common.RE) SegmentChangeRequestDrop(org.apache.druid.server.coordination.SegmentChangeRequestDrop) DiscoveryDruidNode(org.apache.druid.discovery.DiscoveryDruidNode) ByteArrayInputStream(java.io.ByteArrayInputStream) DruidNodeDiscoveryProvider(org.apache.druid.discovery.DruidNodeDiscoveryProvider) ChangeRequestsSnapshot(org.apache.druid.server.coordination.ChangeRequestsSnapshot) DruidNode(org.apache.druid.server.DruidNode) DataNodeService(org.apache.druid.discovery.DataNodeService) Test(org.junit.Test)
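Once the view has synced, the inventory can be inspected with the same accessors the test uses; a minimal sketch (the println is illustrative only):

// Minimal sketch: look up a synced server by name and walk the segments it serves, using only
// getInventoryValue() and iterateAllSegments() as seen in the test above.
DruidServer server = httpServerInventoryView.getInventoryValue("host:8080");
if (server != null) {
    for (DataSegment served : server.iterateAllSegments()) {
        System.out.println(served.getId() + " @ " + served.getInterval());
    }
}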

Aggregations

DruidServerMetadata (org.apache.druid.server.coordination.DruidServerMetadata) 37
DataSegment (org.apache.druid.timeline.DataSegment) 28
Test (org.junit.Test) 25
CountDownLatch (java.util.concurrent.CountDownLatch) 16
ImmutableDruidServer (org.apache.druid.client.ImmutableDruidServer) 16
List (java.util.List) 14
BrokerInternalQueryConfig (org.apache.druid.client.BrokerInternalQueryConfig) 13
MapJoinableFactory (org.apache.druid.segment.join.MapJoinableFactory) 13
NoopEscalator (org.apache.druid.server.security.NoopEscalator) 13
SegmentId (org.apache.druid.timeline.SegmentId) 13
Pair (org.apache.druid.java.util.common.Pair) 12
ImmutableList (com.google.common.collect.ImmutableList) 11
ImmutableMap (com.google.common.collect.ImmutableMap) 11
Collectors (java.util.stream.Collectors) 11
Intervals (org.apache.druid.java.util.common.Intervals) 11
Before (org.junit.Before) 11
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper) 10
Map (java.util.Map) 10
TableDataSource (org.apache.druid.query.TableDataSource) 10
ImmutableSet (com.google.common.collect.ImmutableSet) 9