Use of io.druid.client.DruidServer in project druid by druid-io.
The class DatasourcesResource, method getDataSource.
private DruidDataSource getDataSource(final String dataSourceName) {
  // Ask every server in the inventory for its view of this datasource;
  // servers that do not serve it return null.
  Iterable<DruidDataSource> dataSources = Iterables.concat(
      Iterables.transform(
          serverInventoryView.getInventory(),
          new Function<DruidServer, DruidDataSource>() {
            @Override
            public DruidDataSource apply(DruidServer input) {
              return input.getDataSource(dataSourceName);
            }
          }
      )
  );
  List<DruidDataSource> validDataSources = Lists.newArrayList();
  for (DruidDataSource dataSource : dataSources) {
    if (dataSource != null) {
      validDataSources.add(dataSource);
    }
  }
  if (validDataSources.isEmpty()) {
    return null;
  }
  // Merge the per-server views, keying by segment identifier so that a
  // segment announced by several servers appears only once.
  Map<String, DataSegment> segmentMap = Maps.newHashMap();
  for (DruidDataSource dataSource : validDataSources) {
    for (DataSegment segment : dataSource.getSegments()) {
      segmentMap.put(segment.getIdentifier(), segment);
    }
  }
  return new DruidDataSource(dataSourceName, ImmutableMap.<String, String>of()).addSegments(segmentMap);
}
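The heart of this method is a merge keyed by segment identifier. A minimal standalone sketch of that pattern, with plain strings standing in for DataSegment and made-up identifiers, so it runs without any Druid classes:

import java.util.*;

public class SegmentMergeSketch {
  public static void main(String[] args) {
    // Each inner list stands in for one server's view of a datasource's segments.
    List<List<String>> perServerSegments = Arrays.asList(
        Arrays.asList("seg_2012-01-01_v1", "seg_2012-01-02_v1"),
        Arrays.asList("seg_2012-01-02_v1", "seg_2012-01-03_v1")  // overlaps with server 0
    );
    // Keying by identifier deduplicates segments announced by multiple servers;
    // last write wins, exactly as with segmentMap.put() above.
    Map<String, String> segmentMap = new HashMap<>();
    for (List<String> segments : perServerSegments) {
      for (String id : segments) {
        segmentMap.put(id, id);
      }
    }
    System.out.println(segmentMap.keySet());  // three distinct identifiers
  }
}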
Use of io.druid.client.DruidServer in project druid by druid-io.
The class ServersResource, method getServerSegments.
@GET
@Path("/{serverName}/segments")
@Produces(MediaType.APPLICATION_JSON)
public Response getServerSegments(@PathParam("serverName") String serverName, @QueryParam("full") String full) {
  Response.ResponseBuilder builder = Response.status(Response.Status.OK);
  DruidServer server = serverInventoryView.getInventoryValue(serverName);
  if (server == null) {
    return Response.status(Response.Status.NOT_FOUND).build();
  }
  // ?full returns the complete DataSegment objects.
  if (full != null) {
    return builder.entity(server.getSegments().values()).build();
  }
  // Otherwise return only the segment identifiers, as a lazy view.
  return builder.entity(
      Collections2.transform(
          server.getSegments().values(),
          new Function<DataSegment, String>() {
            @Override
            public String apply(DataSegment segment) {
              return segment.getIdentifier();
            }
          }
      )
  ).build();
}
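Note that Guava's Collections2.transform returns a live, lazily evaluated view of the underlying collection rather than a copy, which keeps the non-full response cheap. A self-contained sketch of that behavior (the identifiers here are invented):

import com.google.common.base.Function;
import com.google.common.collect.Collections2;
import java.util.*;

public class TransformViewSketch {
  public static void main(String[] args) {
    List<String> identifiers = new ArrayList<>(Arrays.asList("seg_a", "seg_b"));
    // A view over 'identifiers'; elements are transformed on iteration, not eagerly.
    Collection<String> upper = Collections2.transform(identifiers, new Function<String, String>() {
      @Override
      public String apply(String id) {
        return id.toUpperCase(Locale.ROOT);
      }
    });
    identifiers.add("seg_c");   // the view reflects later mutations
    System.out.println(upper);  // [SEG_A, SEG_B, SEG_C]
  }
}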
Use of io.druid.client.DruidServer in project druid by druid-io.
The class TiersResource, method getTiers.
@GET
@Produces(MediaType.APPLICATION_JSON)
public Response getTiers(@QueryParam("simple") String simple) {
  Response.ResponseBuilder builder = Response.status(Response.Status.OK);
  // ?simple returns per-tier size totals; otherwise just the tier names.
  if (simple != null) {
    Map<String, Map<String, Long>> metadata = Maps.newHashMap();
    for (DruidServer druidServer : serverInventoryView.getInventory()) {
      Map<String, Long> tierMetadata = metadata.get(druidServer.getTier());
      if (tierMetadata == null) {
        tierMetadata = Maps.newHashMap();
        metadata.put(druidServer.getTier(), tierMetadata);
      }
      Long currSize = tierMetadata.get("currSize");
      tierMetadata.put("currSize", ((currSize == null) ? 0 : currSize) + druidServer.getCurrSize());
      Long maxSize = tierMetadata.get("maxSize");
      tierMetadata.put("maxSize", ((maxSize == null) ? 0 : maxSize) + druidServer.getMaxSize());
    }
    return builder.entity(metadata).build();
  }
  Set<String> tiers = Sets.newHashSet();
  for (DruidServer server : serverInventoryView.getInventory()) {
    tiers.add(server.getTier());
  }
  return builder.entity(tiers).build();
}
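On Java 8, the null-check-and-put accumulation used for currSize and maxSize collapses into Map.merge. A standalone sketch of the idiom, with made-up tier names and sizes:

import java.util.*;

public class TierSizeSketch {
  public static void main(String[] args) {
    // (tier, currSize) pairs as they might arrive from the server inventory.
    String[][] servers = {{"hot", "30"}, {"hot", "20"}, {"_default_tier", "10"}};
    Map<String, Long> currSizeByTier = new HashMap<>();
    for (String[] s : servers) {
      // merge() initializes the entry on first sight and sums afterwards,
      // replacing the explicit null check in getTiers().
      currSizeByTier.merge(s[0], Long.parseLong(s[1]), Long::sum);
    }
    System.out.println(currSizeByTier);  // hot=50, _default_tier=10 (map order may vary)
  }
}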
Use of io.druid.client.DruidServer in project druid by druid-io.
The class DruidCoordinatorTest, method setUp.
@Before
public void setUp() throws Exception {
  // Mocks for the coordinator's collaborators.
  taskMaster = EasyMock.createMock(LoadQueueTaskMaster.class);
  druidServer = EasyMock.createMock(DruidServer.class);
  serverInventoryView = EasyMock.createMock(SingleServerInventoryView.class);
  databaseSegmentManager = EasyMock.createNiceMock(MetadataSegmentManager.class);
  metadataRuleManager = EasyMock.createNiceMock(MetadataRuleManager.class);
  configManager = EasyMock.createNiceMock(JacksonConfigManager.class);
  // Any config watch returns the default dynamic config.
  EasyMock.expect(
      configManager.watch(EasyMock.anyString(), EasyMock.anyObject(Class.class), EasyMock.anyObject())
  ).andReturn(new AtomicReference(new CoordinatorDynamicConfig.Builder().build())).anyTimes();
  EasyMock.replay(configManager);
  // Embedded ZooKeeper / Curator setup.
  setupServerAndCurator();
  curator.start();
  curator.blockUntilConnected();
  curator.create().creatingParentsIfNeeded().forPath(LOADPATH);
  objectMapper = new DefaultObjectMapper();
  druidCoordinatorConfig = new TestDruidCoordinatorConfig(new Duration(COORDINATOR_START_DELAY), new Duration(COORDINATOR_PERIOD), null, null, new Duration(COORDINATOR_PERIOD), null, 10, null, false, false, new Duration("PT0s"));
  pathChildrenCache = new PathChildrenCache(curator, LOADPATH, true, true, Execs.singleThreaded("coordinator_test_path_children_cache-%d"));
  loadQueuePeon = new LoadQueuePeon(curator, LOADPATH, objectMapper, Execs.scheduledSingleThreaded("coordinator_test_load_queue_peon_scheduled-%d"), Execs.singleThreaded("coordinator_test_load_queue_peon-%d"), druidCoordinatorConfig);
  loadQueuePeon.start();
  druidNode = new DruidNode("hey", "what", 1234);
  loadManagementPeons = new MapMaker().makeMap();
  scheduledExecutorFactory = new ScheduledExecutorFactory() {
    @Override
    public ScheduledExecutorService create(int corePoolSize, final String nameFormat) {
      return Executors.newSingleThreadScheduledExecutor();
    }
  };
  // Latches let tests wait for leader announcement/unannouncement.
  leaderAnnouncerLatch = new CountDownLatch(1);
  leaderUnannouncerLatch = new CountDownLatch(1);
  coordinator = new DruidCoordinator(
      druidCoordinatorConfig,
      new ZkPathsConfig() {
        @Override
        public String getBase() {
          return "druid";
        }
      },
      configManager,
      databaseSegmentManager,
      serverInventoryView,
      metadataRuleManager,
      curator,
      new NoopServiceEmitter(),
      scheduledExecutorFactory,
      null,
      taskMaster,
      new NoopServiceAnnouncer() {
        @Override
        public void announce(DruidNode node) {
          // count down when this coordinator becomes the leader
          leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node) {
          leaderUnannouncerLatch.countDown();
        }
      },
      druidNode,
      loadManagementPeons,
      null,
      new CostBalancerStrategyFactory()
  );
}
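The mock setup follows EasyMock's record/replay lifecycle: expectations are recorded against a fresh mock, replay arms them, and verify (not used in setUp itself) checks that they were met. A minimal standalone sketch against a hypothetical interface standing in for the collaborators above:

import static org.easymock.EasyMock.*;

public class EasyMockLifecycleSketch {
  // Hypothetical interface, standing in for LoadQueueTaskMaster etc.
  interface Inventory {
    int size(String tier);
  }

  public static void main(String[] args) {
    Inventory inventory = createMock(Inventory.class);
    expect(inventory.size("hot")).andReturn(3).anyTimes();  // record an expectation
    replay(inventory);                                      // switch to replay mode
    System.out.println(inventory.size("hot"));              // 3
    verify(inventory);                                      // all expectations satisfied
  }
}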
Use of io.druid.client.DruidServer in project druid by druid-io.
The class DruidCoordinatorBalancerProfiler, method bigProfiler.
public void bigProfiler() {
  Stopwatch watch = Stopwatch.createUnstarted();
  int numSegments = 55000;
  int numServers = 50;
  EasyMock.expect(manager.getAllRules()).andReturn(ImmutableMap.<String, List<Rule>>of("test", rules)).anyTimes();
  EasyMock.expect(manager.getRules(EasyMock.<String>anyObject())).andReturn(rules).anyTimes();
  EasyMock.expect(manager.getRulesWithDefault(EasyMock.<String>anyObject())).andReturn(rules).anyTimes();
  EasyMock.replay(manager);
  coordinator.moveSegment(EasyMock.<ImmutableDruidServer>anyObject(), EasyMock.<ImmutableDruidServer>anyObject(), EasyMock.<String>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
  EasyMock.expectLastCall().anyTimes();
  EasyMock.replay(coordinator);
  List<DruidServer> serverList = Lists.newArrayList();
  Map<String, LoadQueuePeon> peonMap = Maps.newHashMap();
  List<ServerHolder> serverHolderList = Lists.newArrayList();
  Map<String, DataSegment> segmentMap = Maps.newHashMap();
  for (int i = 0; i < numSegments; i++) {
    segmentMap.put(
        "segment" + i,
        new DataSegment(
            "datasource" + i,
            new Interval(new DateTime("2012-01-01"), (new DateTime("2012-01-01")).plusHours(1)),
            (new DateTime("2012-03-01")).toString(),
            Maps.<String, Object>newHashMap(),
            Lists.<String>newArrayList(),
            Lists.<String>newArrayList(),
            NoneShardSpec.instance(),
            0,
            4L
        )
    );
  }
  // Mock servers: server 0 holds every segment and the rest are empty,
  // giving the balancer maximal work to do.
  for (int i = 0; i < numServers; i++) {
    ImmutableDruidServer server = EasyMock.createMock(ImmutableDruidServer.class);
    EasyMock.expect(server.getMetadata()).andReturn(null).anyTimes();
    EasyMock.expect(server.getCurrSize()).andReturn(30L).atLeastOnce();
    EasyMock.expect(server.getMaxSize()).andReturn(100L).atLeastOnce();
    EasyMock.expect(server.getTier()).andReturn("normal").anyTimes();
    EasyMock.expect(server.getName()).andReturn(Integer.toString(i)).atLeastOnce();
    EasyMock.expect(server.getHost()).andReturn(Integer.toString(i)).anyTimes();
    if (i == 0) {
      EasyMock.expect(server.getSegments()).andReturn(segmentMap).anyTimes();
    } else {
      EasyMock.expect(server.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
    }
    EasyMock.expect(server.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(server);
    LoadQueuePeon peon = new LoadQueuePeonTester();
    peonMap.put(Integer.toString(i), peon);
    serverHolderList.add(new ServerHolder(server, peon));
  }
  DruidCoordinatorRuntimeParams params = DruidCoordinatorRuntimeParams
      .newBuilder()
      .withDruidCluster(new DruidCluster(ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of("normal", MinMaxPriorityQueue.orderedBy(DruidCoordinatorBalancerTester.percentUsedComparator).create(serverHolderList))))
      .withLoadManagementPeons(peonMap)
      .withAvailableSegments(segmentMap.values())
      .withDynamicConfigs(new CoordinatorDynamicConfig.Builder().withMaxSegmentsToMove(MAX_SEGMENTS_TO_MOVE).withReplicantLifetime(500).withReplicationThrottleLimit(5).build())
      .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
      .withEmitter(emitter)
      .withDatabaseRuleManager(manager)
      .withReplicationManager(new ReplicationThrottler(2, 500))
      .withSegmentReplicantLookup(SegmentReplicantLookup.make(new DruidCluster(ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of("normal", MinMaxPriorityQueue.orderedBy(DruidCoordinatorBalancerTester.percentUsedComparator).create(serverHolderList)))))
      .build();
  DruidCoordinatorBalancerTester tester = new DruidCoordinatorBalancerTester(coordinator);
  DruidCoordinatorRuleRunner runner = new DruidCoordinatorRuleRunner(coordinator);
  watch.start();
  DruidCoordinatorRuntimeParams balanceParams = tester.run(params);
  DruidCoordinatorRuntimeParams assignParams = runner.run(params);
  System.out.println(watch.stop());
}
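The timing relies on Guava's Stopwatch, whose toString prints a human-readable elapsed duration when passed to println (stop() returns the stopwatch itself). A minimal sketch of that API, with a sleep standing in for the profiled balancer run:

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchSketch {
  public static void main(String[] args) throws InterruptedException {
    Stopwatch watch = Stopwatch.createUnstarted();
    watch.start();
    Thread.sleep(50);  // placeholder for tester.run(params) / runner.run(params)
    watch.stop();
    // toString() prints a human-readable duration, as in the profiler above;
    // elapsed() gives a numeric value for assertions or logging.
    System.out.println(watch);
    System.out.println(watch.elapsed(TimeUnit.MILLISECONDS) + " ms");
  }
}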