
Example 36 with DruidServer

Use of org.apache.druid.client.DruidServer in project druid by druid-io.

From class TierSelectorStrategyTest, method testEmptyCustomPriorityTierSelectorStrategy.

@Test
public void testEmptyCustomPriorityTierSelectorStrategy() {
    DirectDruidClient client = EasyMock.createMock(DirectDruidClient.class);
    QueryableDruidServer lowPriority = new QueryableDruidServer(
        new DruidServer("test1", "localhost", null, 0, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, -1),
        client
    );
    QueryableDruidServer mediumPriority = new QueryableDruidServer(
        new DruidServer("test1", "localhost", null, 0, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0),
        client
    );
    QueryableDruidServer highPriority = new QueryableDruidServer(
        new DruidServer("test1", "localhost", null, 0, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 1),
        client
    );
    testTierSelectorStrategy(new CustomTierSelectorStrategy(new ConnectionCountServerSelectorStrategy(), new CustomTierSelectorStrategyConfig() {

        @Override
        public List<Integer> getPriorities() {
            return new ArrayList<>();
        }
    }), highPriority, mediumPriority, lowPriority);
}
Also used: DirectDruidClient (org.apache.druid.client.DirectDruidClient), ArrayList (java.util.ArrayList), DruidServer (org.apache.druid.client.DruidServer), Test (org.junit.Test)
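
The seven-argument DruidServer constructor is terse, so here is a hedged annotation of the call shape used above. The per-argument comments are descriptive guesses inferred from how the test exercises each value, not the declared parameter names:

new DruidServer(
    "test1",                  // server name
    "localhost",              // host (and port) for plaintext connections
    null,                     // host for TLS connections; null when TLS is unused
    0,                        // maximum segment cache size in bytes (irrelevant in this test)
    ServerType.HISTORICAL,    // server type, consulted when routing queries
    DruidServer.DEFAULT_TIER, // tier name
    -1                        // priority; the only argument varied across the three servers
);

Note also the argument order passed to testTierSelectorStrategy (high, medium, low): it suggests that with an empty custom priority list the strategy falls back to highest-priority-first ordering, though that convention lives in the test helper, not in this snippet.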

Example 37 with DruidServer

Use of org.apache.druid.client.DruidServer in project druid by druid-io.

From class DatasourceOptimizerTest, method setUp.

@Before
public void setUp() throws Exception {
    TestDerbyConnector derbyConnector = derbyConnectorRule.getConnector();
    derbyConnector.createDataSourceTable();
    derbyConnector.createSegmentTable();
    MaterializedViewConfig viewConfig = new MaterializedViewConfig();
    jsonMapper = TestHelper.makeJsonMapper();
    jsonMapper.registerSubtypes(new NamedType(DerivativeDataSourceMetadata.class, "view"));
    metadataStorageCoordinator = EasyMock.createMock(IndexerSQLMetadataStorageCoordinator.class);
    derivativesManager = new DerivativeDataSourceManager(viewConfig, derbyConnectorRule.metadataTablesConfigSupplier(), jsonMapper, derbyConnector);
    metadataStorageCoordinator = new IndexerSQLMetadataStorageCoordinator(jsonMapper, derbyConnectorRule.metadataTablesConfigSupplier().get(), derbyConnector);
    setupServerAndCurator();
    curator.start();
    curator.blockUntilConnected();
    zkPathsConfig = new ZkPathsConfig();
    setupViews();
    druidServer = new DruidServer("localhost:1234", "localhost:1234", null, 10000000L, ServerType.HISTORICAL, "default_tier", 0);
    setupZNodeForServer(druidServer, new ZkPathsConfig(), jsonMapper);
    optimizer = new DataSourceOptimizer(brokerServerView);
}
Also used: IndexerSQLMetadataStorageCoordinator (org.apache.druid.metadata.IndexerSQLMetadataStorageCoordinator), DerivativeDataSourceMetadata (org.apache.druid.indexing.materializedview.DerivativeDataSourceMetadata), NamedType (com.fasterxml.jackson.databind.jsontype.NamedType), ZkPathsConfig (org.apache.druid.server.initialization.ZkPathsConfig), DruidServer (org.apache.druid.client.DruidServer), TestDerbyConnector (org.apache.druid.metadata.TestDerbyConnector), Before (org.junit.Before)
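
The registerSubtypes call is what lets the "view" JSON type tag deserialize into DerivativeDataSourceMetadata. A minimal, self-contained sketch of the same Jackson pattern, using a hypothetical Animal/Dog hierarchy in place of the Druid classes:

import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.jsontype.NamedType;

public class SubtypeDemo {

    @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
    interface Animal {
    }

    static class Dog implements Animal {
        public String name;
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // "dog" plays the role that "view" plays for DerivativeDataSourceMetadata above.
        mapper.registerSubtypes(new NamedType(Dog.class, "dog"));
        Animal a = mapper.readValue("{\"type\":\"dog\",\"name\":\"rex\"}", Animal.class);
        System.out.println(a instanceof Dog); // true
    }
}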

Example 38 with DruidServer

Use of org.apache.druid.client.DruidServer in project druid by druid-io.

From class MovingAverageQueryTest, method testQuery.

/**
 * Validate that the specified query behaves correctly.
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
public void testQuery() throws IOException {
    Query<?> query = jsonMapper.readValue(getQueryString(), Query.class);
    Assert.assertThat(query, IsInstanceOf.instanceOf(getExpectedQueryType()));
    List<MapBasedRow> expectedResults = jsonMapper.readValue(getExpectedResultString(), getExpectedResultType());
    Assert.assertNotNull(expectedResults);
    Assert.assertThat(expectedResults, IsInstanceOf.instanceOf(List.class));
    CachingClusteredClient baseClient = new CachingClusteredClient(warehouse, new TimelineServerView() {

        @Override
        public Optional<? extends TimelineLookup<String, ServerSelector>> getTimeline(DataSourceAnalysis analysis) {
            return Optional.empty();
        }

        @Override
        public List<ImmutableDruidServer> getDruidServers() {
            return null;
        }

        @Override
        public <T> QueryRunner<T> getQueryRunner(DruidServer server) {
            return null;
        }

        @Override
        public void registerTimelineCallback(Executor exec, TimelineCallback callback) {
        }

        @Override
        public void registerSegmentCallback(Executor exec, SegmentCallback callback) {
        }

        @Override
        public void registerServerRemovedCallback(Executor exec, ServerRemovedCallback callback) {
        }
    }, MapCache.create(100000), jsonMapper, new ForegroundCachePopulator(jsonMapper, new CachePopulatorStats(), -1), new CacheConfig(), new DruidHttpClientConfig() {

        @Override
        public long getMaxQueuedBytes() {
            return 0L;
        }
    }, new DruidProcessingConfig() {

        @Override
        public String getFormatString() {
            return null;
        }
    }, ForkJoinPool.commonPool(), QueryStackTests.DEFAULT_NOOP_SCHEDULER, new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()), new NoopServiceEmitter());
    ClientQuerySegmentWalker walker = new ClientQuerySegmentWalker(new ServiceEmitter("", "", null) {

        @Override
        public void emit(Event event) {
        }
    }, baseClient, null, /* local client; unused in this test, so pass in null */
    warehouse, new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()), retryConfig, jsonMapper, serverConfig, null, new CacheConfig());
    defineMocks();
    QueryPlus queryPlus = QueryPlus.wrap(query);
    final Sequence<?> res = query.getRunner(walker).run(queryPlus);
    List actualResults = new ArrayList();
    actualResults = (List<MapBasedRow>) res.accumulate(actualResults, Accumulators.list());
    expectedResults = consistentTypeCasting(expectedResults);
    actualResults = consistentTypeCasting(actualResults);
    Assert.assertEquals(expectedResults, actualResults);
}
Also used: ServiceEmitter (org.apache.druid.java.util.emitter.service.ServiceEmitter), NoopServiceEmitter (org.apache.druid.server.metrics.NoopServiceEmitter), ArrayList (java.util.ArrayList), DataSourceAnalysis (org.apache.druid.query.planning.DataSourceAnalysis), DruidHttpClientConfig (org.apache.druid.guice.http.DruidHttpClientConfig), MapBasedRow (org.apache.druid.data.input.MapBasedRow), Executor (java.util.concurrent.Executor), CachePopulatorStats (org.apache.druid.client.cache.CachePopulatorStats), List (java.util.List), TimelineServerView (org.apache.druid.client.TimelineServerView), CacheConfig (org.apache.druid.client.cache.CacheConfig), MapJoinableFactory (org.apache.druid.segment.join.MapJoinableFactory), QueryPlus (org.apache.druid.query.QueryPlus), CachingClusteredClient (org.apache.druid.client.CachingClusteredClient), Optional (java.util.Optional), DruidServer (org.apache.druid.client.DruidServer), ImmutableDruidServer (org.apache.druid.client.ImmutableDruidServer), QueryRunner (org.apache.druid.query.QueryRunner), ClientQuerySegmentWalker (org.apache.druid.server.ClientQuerySegmentWalker), Event (org.apache.druid.java.util.emitter.core.Event), ForegroundCachePopulator (org.apache.druid.client.cache.ForegroundCachePopulator), DruidProcessingConfig (org.apache.druid.query.DruidProcessingConfig), TimelineLookup (org.apache.druid.timeline.TimelineLookup), InitializedNullHandlingTest (org.apache.druid.testing.InitializedNullHandlingTest), Test (org.junit.Test)
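
One pattern worth pulling out of this test: instead of building fully populated config objects, it overrides single getters in anonymous subclasses so every other setting keeps its default. A minimal sketch of that pattern (the class and constant names here are invented for illustration):

import org.apache.druid.guice.http.DruidHttpClientConfig;

public class TestConfigs {

    // Pin only the setting the test cares about; everything else stays at its default.
    static final DruidHttpClientConfig NO_QUEUE_LIMIT = new DruidHttpClientConfig() {
        @Override
        public long getMaxQueuedBytes() {
            return 0L; // disable queued-bytes limiting for the test
        }
    };
}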

Example 39 with DruidServer

Use of org.apache.druid.client.DruidServer in project druid by druid-io.

From class DataSourcesResource, method getSimpleDatasource.

private Map<String, Map<String, Object>> getSimpleDatasource(String dataSourceName) {
    Map<String, Object> tiers = new HashMap<>();
    Map<String, Object> segments = new HashMap<>();
    Map<String, Map<String, Object>> retVal = ImmutableMap.of("tiers", tiers, "segments", segments);
    Set<SegmentId> totalDistinctSegments = new HashSet<>();
    Map<String, HashSet<Object>> tierDistinctSegments = new HashMap<>();
    long totalSegmentSize = 0;
    long totalReplicatedSize = 0;
    DateTime minTime = DateTimes.MAX;
    DateTime maxTime = DateTimes.MIN;
    String tier;
    for (DruidServer druidServer : serverInventoryView.getInventory()) {
        DruidDataSource druidDataSource = druidServer.getDataSource(dataSourceName);
        tier = druidServer.getTier();
        if (druidDataSource == null) {
            continue;
        }
        tierDistinctSegments.computeIfAbsent(tier, t -> new HashSet<>());
        long dataSourceSegmentSize = 0;
        long replicatedSegmentSize = 0;
        for (DataSegment dataSegment : druidDataSource.getSegments()) {
            // tier segments stats
            if (!tierDistinctSegments.get(tier).contains(dataSegment.getId())) {
                dataSourceSegmentSize += dataSegment.getSize();
                tierDistinctSegments.get(tier).add(dataSegment.getId());
            }
            // total segments stats
            if (totalDistinctSegments.add(dataSegment.getId())) {
                totalSegmentSize += dataSegment.getSize();
                minTime = DateTimes.min(minTime, dataSegment.getInterval().getStart());
                maxTime = DateTimes.max(maxTime, dataSegment.getInterval().getEnd());
            }
            totalReplicatedSize += dataSegment.getSize();
            replicatedSegmentSize += dataSegment.getSize();
        }
        // tier stats
        Map<String, Object> tierStats = (Map) tiers.get(tier);
        if (tierStats == null) {
            tierStats = new HashMap<>();
            tiers.put(druidServer.getTier(), tierStats);
        }
        tierStats.put("segmentCount", tierDistinctSegments.get(tier).size());
        long segmentSize = MapUtils.getLong(tierStats, "size", 0L);
        tierStats.put("size", segmentSize + dataSourceSegmentSize);
        long replicatedSize = MapUtils.getLong(tierStats, "replicatedSize", 0L);
        tierStats.put("replicatedSize", replicatedSize + replicatedSegmentSize);
    }
    segments.put("count", totalDistinctSegments.size());
    segments.put("size", totalSegmentSize);
    segments.put("replicatedSize", totalReplicatedSize);
    segments.put("minTime", minTime);
    segments.put("maxTime", maxTime);
    return retVal;
}
Also used: HashMap (java.util.HashMap), SegmentId (org.apache.druid.timeline.SegmentId), DruidServer (org.apache.druid.client.DruidServer), DruidDataSource (org.apache.druid.client.DruidDataSource), ImmutableDruidDataSource (org.apache.druid.client.ImmutableDruidDataSource), DataSegment (org.apache.druid.timeline.DataSegment), DateTime (org.joda.time.DateTime), Map (java.util.Map), ImmutableMap (com.google.common.collect.ImmutableMap), EnumMap (java.util.EnumMap), SortedMap (java.util.SortedMap), Object2LongMap (it.unimi.dsi.fastutil.objects.Object2LongMap), TreeMap (java.util.TreeMap), HashSet (java.util.HashSet)
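
For orientation, the map this method returns serializes to JSON roughly as sketched below. The field names come from the puts above; the tier name and the numbers are invented for illustration:

{
  "tiers": {
    "_default_tier": {
      "segmentCount": 42,
      "size": 1073741824,
      "replicatedSize": 3221225472
    }
  },
  "segments": {
    "count": 42,
    "size": 1073741824,
    "replicatedSize": 3221225472,
    "minTime": "2012-01-01T00:00:00.000Z",
    "maxTime": "2012-01-03T00:00:00.000Z"
  }
}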

Example 40 with DruidServer

Use of org.apache.druid.client.DruidServer in project druid by druid-io.

From class DataSourcesResource, method getServersWhereSegmentIsServed.

@Nullable
private Pair<DataSegment, Set<String>> getServersWhereSegmentIsServed(SegmentId segmentId) {
    DataSegment theSegment = null;
    Set<String> servers = new HashSet<>();
    for (DruidServer druidServer : serverInventoryView.getInventory()) {
        DataSegment currSegment = druidServer.getSegment(segmentId);
        if (currSegment != null) {
            theSegment = currSegment;
            servers.add(druidServer.getHost());
        }
    }
    if (theSegment == null) {
        return null;
    }
    return new Pair<>(theSegment, servers);
}
Also used: DruidServer (org.apache.druid.client.DruidServer), DataSegment (org.apache.druid.timeline.DataSegment), HashSet (java.util.HashSet), Pair (org.apache.druid.java.util.common.Pair), Nullable (javax.annotation.Nullable)
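
Two details worth noting. First, when several servers serve the same segment, theSegment is simply overwritten on each match; every replica of a given SegmentId carries the same metadata, so any copy will do. Second, the returned Pair exposes its halves as lhs and rhs. A hypothetical caller (the variable names are assumed, not from the source) might look like:

// Hypothetical usage; segmentId would come from a parsed request parameter.
Pair<DataSegment, Set<String>> served = getServersWhereSegmentIsServed(segmentId);
if (served == null) {
    // No server in the inventory view currently serves this segment.
} else {
    DataSegment segment = served.lhs; // the segment's metadata
    Set<String> hosts = served.rhs;   // hosts currently serving a replica
}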

Aggregations

DruidServer (org.apache.druid.client.DruidServer): 73 uses
Test (org.junit.Test): 57 uses
DataSegment (org.apache.druid.timeline.DataSegment): 43 uses
ImmutableDruidServer (org.apache.druid.client.ImmutableDruidServer): 21 uses
IntervalLoadRule (org.apache.druid.server.coordinator.rules.IntervalLoadRule): 18 uses
ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService): 17 uses
ServerHolder (org.apache.druid.server.coordinator.ServerHolder): 14 uses
CoordinatorStats (org.apache.druid.server.coordinator.CoordinatorStats): 12 uses
DruidCluster (org.apache.druid.server.coordinator.DruidCluster): 12 uses
LoadQueuePeon (org.apache.druid.server.coordinator.LoadQueuePeon): 11 uses
HashMap (java.util.HashMap): 9 uses
Object2LongMap (it.unimi.dsi.fastutil.objects.Object2LongMap): 8 uses
ArrayList (java.util.ArrayList): 8 uses
Response (javax.ws.rs.core.Response): 8 uses
ForeverLoadRule (org.apache.druid.server.coordinator.rules.ForeverLoadRule): 8 uses
HashSet (java.util.HashSet): 7 uses
Map (java.util.Map): 7 uses
CountDownLatch (java.util.concurrent.CountDownLatch): 7 uses
DirectDruidClient (org.apache.druid.client.DirectDruidClient): 7 uses
ImmutableDruidDataSource (org.apache.druid.client.ImmutableDruidDataSource): 7 uses