Use of org.apache.druid.client.DruidServer in project druid by druid-io.
From the class TierSelectorStrategyTest, method testEmptyCustomPriorityTierSelectorStrategy.
@Test
public void testEmptyCustomPriorityTierSelectorStrategy()
{
  DirectDruidClient client = EasyMock.createMock(DirectDruidClient.class);
  QueryableDruidServer lowPriority = new QueryableDruidServer(
      new DruidServer("test1", "localhost", null, 0, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, -1),
      client
  );
  QueryableDruidServer mediumPriority = new QueryableDruidServer(
      new DruidServer("test1", "localhost", null, 0, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0),
      client
  );
  QueryableDruidServer highPriority = new QueryableDruidServer(
      new DruidServer("test1", "localhost", null, 0, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 1),
      client
  );
  testTierSelectorStrategy(
      new CustomTierSelectorStrategy(
          new ConnectionCountServerSelectorStrategy(),
          new CustomTierSelectorStrategyConfig()
          {
            @Override
            public List<Integer> getPriorities()
            {
              return new ArrayList<>();
            }
          }
      ),
      highPriority,
      mediumPriority,
      lowPriority
  );
}
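For contrast with the empty priority list above, here is a minimal hedged sketch of the same strategy configured with explicit priorities. The values (0, -1, 1) and the variable names are illustrative only; they are not taken from the test.

// Hedged sketch for contrast: CustomTierSelectorStrategy with a non-empty
// priority list. The values below are illustrative, not from the test.
CustomTierSelectorStrategyConfig explicitConfig = new CustomTierSelectorStrategyConfig()
{
  @Override
  public List<Integer> getPriorities()
  {
    // rank servers with priority 0 first, then -1, then 1
    return Arrays.asList(0, -1, 1);
  }
};
TierSelectorStrategy customStrategy = new CustomTierSelectorStrategy(
    new ConnectionCountServerSelectorStrategy(),
    explicitConfig
);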
Use of org.apache.druid.client.DruidServer in project druid by druid-io.
From the class DatasourceOptimizerTest, method setUp.
@Before
public void setUp() throws Exception
{
  TestDerbyConnector derbyConnector = derbyConnectorRule.getConnector();
  derbyConnector.createDataSourceTable();
  derbyConnector.createSegmentTable();
  MaterializedViewConfig viewConfig = new MaterializedViewConfig();
  jsonMapper = TestHelper.makeJsonMapper();
  jsonMapper.registerSubtypes(new NamedType(DerivativeDataSourceMetadata.class, "view"));
  metadataStorageCoordinator = EasyMock.createMock(IndexerSQLMetadataStorageCoordinator.class);
  derivativesManager = new DerivativeDataSourceManager(
      viewConfig,
      derbyConnectorRule.metadataTablesConfigSupplier(),
      jsonMapper,
      derbyConnector
  );
  // the mock above is immediately replaced by a real coordinator backed by Derby
  metadataStorageCoordinator = new IndexerSQLMetadataStorageCoordinator(
      jsonMapper,
      derbyConnectorRule.metadataTablesConfigSupplier().get(),
      derbyConnector
  );
  setupServerAndCurator();
  curator.start();
  curator.blockUntilConnected();
  zkPathsConfig = new ZkPathsConfig();
  setupViews();
  druidServer = new DruidServer(
      "localhost:1234",
      "localhost:1234",
      null,
      10000000L,
      ServerType.HISTORICAL,
      "default_tier",
      0
  );
  setupZNodeForServer(druidServer, new ZkPathsConfig(), jsonMapper);
  optimizer = new DataSourceOptimizer(brokerServerView);
}
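setupZNodeForServer comes from the shared curator test base class and is not shown on this page. A hedged sketch of what it plausibly does: announce the server under the ZK announcements path so the broker view wired up in setupViews() can discover it. The path layout and serialization below are assumptions, not the actual helper.

// Hedged sketch, not the real helper: announce druidServer in ZooKeeper.
String announcementPath = ZKPaths.makePath(
    zkPathsConfig.getAnnouncementsPath(),  // e.g. <base>/announcements (assumed layout)
    druidServer.getHost()
);
curator.create()
       .creatingParentsIfNeeded()
       .withMode(CreateMode.EPHEMERAL)     // servers announce themselves ephemerally
       .forPath(announcementPath, jsonMapper.writeValueAsBytes(druidServer.getMetadata()));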
Use of org.apache.druid.client.DruidServer in project druid by druid-io.
From the class MovingAverageQueryTest, method testQuery.
/**
* Validate that the specified query behaves correctly.
*/
@SuppressWarnings({"unchecked", "rawtypes"})
@Test
public void testQuery() throws IOException
{
  Query<?> query = jsonMapper.readValue(getQueryString(), Query.class);
  Assert.assertThat(query, IsInstanceOf.instanceOf(getExpectedQueryType()));
  List<MapBasedRow> expectedResults = jsonMapper.readValue(getExpectedResultString(), getExpectedResultType());
  Assert.assertNotNull(expectedResults);
  Assert.assertThat(expectedResults, IsInstanceOf.instanceOf(List.class));
  CachingClusteredClient baseClient = new CachingClusteredClient(
      warehouse,
      new TimelineServerView()
      {
        @Override
        public Optional<? extends TimelineLookup<String, ServerSelector>> getTimeline(DataSourceAnalysis analysis)
        {
          return Optional.empty();
        }

        @Override
        public List<ImmutableDruidServer> getDruidServers()
        {
          return null;
        }

        @Override
        public <T> QueryRunner<T> getQueryRunner(DruidServer server)
        {
          return null;
        }

        @Override
        public void registerTimelineCallback(Executor exec, TimelineCallback callback)
        {
        }

        @Override
        public void registerSegmentCallback(Executor exec, SegmentCallback callback)
        {
        }

        @Override
        public void registerServerRemovedCallback(Executor exec, ServerRemovedCallback callback)
        {
        }
      },
      MapCache.create(100000),
      jsonMapper,
      new ForegroundCachePopulator(jsonMapper, new CachePopulatorStats(), -1),
      new CacheConfig(),
      new DruidHttpClientConfig()
      {
        @Override
        public long getMaxQueuedBytes()
        {
          return 0L;
        }
      },
      new DruidProcessingConfig()
      {
        @Override
        public String getFormatString()
        {
          return null;
        }
      },
      ForkJoinPool.commonPool(),
      QueryStackTests.DEFAULT_NOOP_SCHEDULER,
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      new NoopServiceEmitter()
  );
  ClientQuerySegmentWalker walker = new ClientQuerySegmentWalker(
      new ServiceEmitter("", "", null)
      {
        @Override
        public void emit(Event event)
        {
        }
      },
      baseClient,
      null, /* local client; unused in this test, so pass in null */
      warehouse,
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      retryConfig,
      jsonMapper,
      serverConfig,
      null,
      new CacheConfig()
  );
  defineMocks();
  QueryPlus queryPlus = QueryPlus.wrap(query);
  final Sequence<?> res = query.getRunner(walker).run(queryPlus);
  List actualResults = new ArrayList();
  actualResults = (List<MapBasedRow>) res.accumulate(actualResults, Accumulators.list());
  expectedResults = consistentTypeCasting(expectedResults);
  actualResults = consistentTypeCasting(actualResults);
  Assert.assertEquals(expectedResults, actualResults);
}
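consistentTypeCasting is a helper from the same test class that is not shown on this page. A hedged guess at its job: normalize numeric event values so that JSON-parsed expected rows (which may deserialize as Integer or Double) compare equal to actual rows (often Long or Float). The sketch below is one plausible implementation, not the test's actual code.

// Hedged sketch, not the test's actual helper: widen every numeric event
// value to double so 1, 1L and 1.0 all compare equal in assertEquals.
private List<MapBasedRow> consistentTypeCasting(List<MapBasedRow> rows)
{
  List<MapBasedRow> casted = new ArrayList<>();
  for (MapBasedRow row : rows) {
    Map<String, Object> event = new LinkedHashMap<>();
    for (Map.Entry<String, Object> entry : row.getEvent().entrySet()) {
      Object value = entry.getValue();
      event.put(entry.getKey(), value instanceof Number ? ((Number) value).doubleValue() : value);
    }
    casted.add(new MapBasedRow(row.getTimestamp(), event));
  }
  return casted;
}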
Use of org.apache.druid.client.DruidServer in project druid by druid-io.
From the class DataSourcesResource, method getSimpleDatasource.
private Map<String, Map<String, Object>> getSimpleDatasource(String dataSourceName)
{
  Map<String, Object> tiers = new HashMap<>();
  Map<String, Object> segments = new HashMap<>();
  Map<String, Map<String, Object>> retVal = ImmutableMap.of("tiers", tiers, "segments", segments);
  Set<SegmentId> totalDistinctSegments = new HashSet<>();
  Map<String, HashSet<Object>> tierDistinctSegments = new HashMap<>();
  long totalSegmentSize = 0;
  long totalReplicatedSize = 0;
  DateTime minTime = DateTimes.MAX;
  DateTime maxTime = DateTimes.MIN;
  String tier;
  for (DruidServer druidServer : serverInventoryView.getInventory()) {
    DruidDataSource druidDataSource = druidServer.getDataSource(dataSourceName);
    tier = druidServer.getTier();
    if (druidDataSource == null) {
      continue;
    }
    tierDistinctSegments.computeIfAbsent(tier, t -> new HashSet<>());
    long dataSourceSegmentSize = 0;
    long replicatedSegmentSize = 0;
    for (DataSegment dataSegment : druidDataSource.getSegments()) {
      // tier segments stats
      if (!tierDistinctSegments.get(tier).contains(dataSegment.getId())) {
        dataSourceSegmentSize += dataSegment.getSize();
        tierDistinctSegments.get(tier).add(dataSegment.getId());
      }
      // total segments stats
      if (totalDistinctSegments.add(dataSegment.getId())) {
        totalSegmentSize += dataSegment.getSize();
        minTime = DateTimes.min(minTime, dataSegment.getInterval().getStart());
        maxTime = DateTimes.max(maxTime, dataSegment.getInterval().getEnd());
      }
      totalReplicatedSize += dataSegment.getSize();
      replicatedSegmentSize += dataSegment.getSize();
    }
    // tier stats
    Map<String, Object> tierStats = (Map) tiers.get(tier);
    if (tierStats == null) {
      tierStats = new HashMap<>();
      tiers.put(druidServer.getTier(), tierStats);
    }
    tierStats.put("segmentCount", tierDistinctSegments.get(tier).size());
    long segmentSize = MapUtils.getLong(tierStats, "size", 0L);
    tierStats.put("size", segmentSize + dataSourceSegmentSize);
    long replicatedSize = MapUtils.getLong(tierStats, "replicatedSize", 0L);
    tierStats.put("replicatedSize", replicatedSize + replicatedSegmentSize);
  }
  segments.put("count", totalDistinctSegments.size());
  segments.put("size", totalSegmentSize);
  segments.put("replicatedSize", totalReplicatedSize);
  segments.put("minTime", minTime);
  segments.put("maxTime", maxTime);
  return retVal;
}
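A hedged usage sketch of the map this method returns, using only the key names that appear above; the datasource name "wikipedia" is illustrative. Note that "size" counts each distinct segment once while "replicatedSize" counts every replica, so their ratio approximates the average replication factor.

// Hedged usage sketch; "wikipedia" is an illustrative datasource name.
Map<String, Map<String, Object>> summary = getSimpleDatasource("wikipedia");
Map<String, Object> segmentStats = summary.get("segments");
long size = MapUtils.getLong(segmentStats, "size", 0L);
long replicatedSize = MapUtils.getLong(segmentStats, "replicatedSize", 0L);
// distinct bytes vs. replica bytes: the ratio approximates the replication factor
double replicationFactor = size > 0 ? (double) replicatedSize / size : 0.0;
for (Map.Entry<String, Object> tierEntry : summary.get("tiers").entrySet()) {
  Map<String, Object> tierStats = (Map<String, Object>) tierEntry.getValue();
  System.out.printf("tier %s: %s segments%n", tierEntry.getKey(), tierStats.get("segmentCount"));
}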
Use of org.apache.druid.client.DruidServer in project druid by druid-io.
From the class DataSourcesResource, method getServersWhereSegmentIsServed.
@Nullable
private Pair<DataSegment, Set<String>> getServersWhereSegmentIsServed(SegmentId segmentId)
{
  DataSegment theSegment = null;
  Set<String> servers = new HashSet<>();
  for (DruidServer druidServer : serverInventoryView.getInventory()) {
    DataSegment currSegment = druidServer.getSegment(segmentId);
    if (currSegment != null) {
      theSegment = currSegment;
      servers.add(druidServer.getHost());
    }
  }
  if (theSegment == null) {
    return null;
  }
  return new Pair<>(theSegment, servers);
}
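A hedged sketch of how a resource endpoint might consume this helper; the JAX-RS response wiring below is illustrative, not the actual endpoint from DataSourcesResource.

// Hedged usage sketch; the response wiring is illustrative only.
Pair<DataSegment, Set<String>> served = getServersWhereSegmentIsServed(segmentId);
if (served == null) {
  // no server in the inventory view is currently serving this segment
  return Response.status(Response.Status.NOT_FOUND).build();
}
// Pair.lhs holds the segment's metadata, Pair.rhs the hosts serving it
return Response.ok(ImmutableMap.of("metadata", served.lhs, "servers", served.rhs)).build();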