Use of io.druid.client.DruidServer in project druid by druid-io.
From the class DruidSchema, method start():
@LifecycleStart
public void start()
{
  cacheExec.submit(
      new Runnable()
      {
        @Override
        public void run()
        {
          try {
            while (!Thread.currentThread().isInterrupted()) {
              final Set<String> dataSources = Sets.newHashSet();
              try {
                synchronized (lock) {
                  final long nextRefresh = new DateTime(lastRefresh).plus(config.getMetadataRefreshPeriod()).getMillis();
                  // Sleep until the server view is initialized, something needs refreshing,
                  // and either an immediate refresh was requested or the refresh period elapsed.
                  while (!(isServerViewInitialized
                           && !dataSourcesNeedingRefresh.isEmpty()
                           && (refreshImmediately || nextRefresh < System.currentTimeMillis()))) {
                    lock.wait(Math.max(1, nextRefresh - System.currentTimeMillis()));
                  }
                  dataSources.addAll(dataSourcesNeedingRefresh);
                  dataSourcesNeedingRefresh.clear();
                  lastRefresh = System.currentTimeMillis();
                  refreshImmediately = false;
                }

                // Refresh dataSources.
                for (final String dataSource : dataSources) {
                  log.debug("Refreshing metadata for dataSource[%s].", dataSource);
                  final long startTime = System.currentTimeMillis();
                  final DruidTable druidTable = computeTable(dataSource);
                  if (druidTable == null) {
                    if (tables.remove(dataSource) != null) {
                      log.info("Removed dataSource[%s] from the list of active dataSources.", dataSource);
                    }
                  } else {
                    tables.put(dataSource, druidTable);
                    log.info(
                        "Refreshed metadata for dataSource[%s] in %,dms.",
                        dataSource,
                        System.currentTimeMillis() - startTime
                    );
                  }
                }

                initializationLatch.countDown();
              }
              catch (InterruptedException e) {
                // Fall through.
                throw e;
              }
              catch (Exception e) {
                log.warn(e, "Metadata refresh failed for dataSources[%s], trying again soon.", Joiner.on(", ").join(dataSources));
                synchronized (lock) {
                  // Add dataSources back to the refresh list.
                  dataSourcesNeedingRefresh.addAll(dataSources);
                  lock.notifyAll();
                }
              }
            }
          }
          catch (InterruptedException e) {
            // Just exit.
          }
          catch (Throwable e) {
            // Throwables that fall out to here (not caught by an inner try/catch) are
            // potentially gnarly, like OOMEs. Anyway, let's just emit an alert and stop
            // refreshing metadata.
            log.makeAlert(e, "Metadata refresh failed permanently").emit();
            throw e;
          }
          finally {
            log.info("Metadata refresh stopped.");
          }
        }
      }
  );

  serverView.registerSegmentCallback(
      MoreExecutors.sameThreadExecutor(),
      new ServerView.SegmentCallback()
      {
        @Override
        public ServerView.CallbackAction segmentViewInitialized()
        {
          synchronized (lock) {
            isServerViewInitialized = true;
            lock.notifyAll();
          }
          return ServerView.CallbackAction.CONTINUE;
        }

        @Override
        public ServerView.CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment)
        {
          synchronized (lock) {
            dataSourcesNeedingRefresh.add(segment.getDataSource());
            // A dataSource we have no table for yet should show up right away.
            if (!tables.containsKey(segment.getDataSource())) {
              refreshImmediately = true;
            }
            lock.notifyAll();
          }
          return ServerView.CallbackAction.CONTINUE;
        }

        @Override
        public ServerView.CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment)
        {
          synchronized (lock) {
            dataSourcesNeedingRefresh.add(segment.getDataSource());
            lock.notifyAll();
          }
          return ServerView.CallbackAction.CONTINUE;
        }
      }
  );

  serverView.registerServerCallback(
      MoreExecutors.sameThreadExecutor(),
      new ServerView.ServerCallback()
      {
        @Override
        public ServerView.CallbackAction serverRemoved(DruidServer server)
        {
          final List<String> dataSourceNames = Lists.newArrayList();
          for (DruidDataSource druidDataSource : server.getDataSources()) {
            dataSourceNames.add(druidDataSource.getName());
          }
          synchronized (lock) {
            dataSourcesNeedingRefresh.addAll(dataSourceNames);
            lock.notifyAll();
          }
          return ServerView.CallbackAction.CONTINUE;
        }
      }
  );
}
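The start() method above is a guarded-wait loop: the segment and server callbacks mark dataSources dirty and call notifyAll(), while a single background thread sleeps on the lock until the server view is initialized, something needs refreshing, and either an immediate refresh was requested or the refresh period has elapsed. Below is a minimal, self-contained sketch of that pattern; the names MetadataRefreshLoop, markDirty, and refreshOne are hypothetical stand-ins, not Druid APIs, and the fixed refresh period is an assumption.

import java.util.HashSet;
import java.util.Set;

public class MetadataRefreshLoop
{
  private final Object lock = new Object();
  private final Set<String> needingRefresh = new HashSet<>();
  private final long refreshPeriodMillis = 60_000L; // assumed period
  private long lastRefresh = 0L;

  // Producer side: a callback marks a dataSource dirty and wakes the loop.
  public void markDirty(String dataSource)
  {
    synchronized (lock) {
      needingRefresh.add(dataSource);
      lock.notifyAll();
    }
  }

  // Consumer side: drain the dirty set under the lock, then do the slow
  // refresh work outside it, exactly as start() does above.
  public void run() throws InterruptedException
  {
    while (!Thread.currentThread().isInterrupted()) {
      final Set<String> toRefresh = new HashSet<>();
      synchronized (lock) {
        final long nextRefresh = lastRefresh + refreshPeriodMillis;
        // Re-check the condition after every wakeup; this also guards
        // against spurious wakeups.
        while (needingRefresh.isEmpty() || nextRefresh > System.currentTimeMillis()) {
          lock.wait(Math.max(1, nextRefresh - System.currentTimeMillis()));
        }
        toRefresh.addAll(needingRefresh);
        needingRefresh.clear();
        lastRefresh = System.currentTimeMillis();
      }
      for (String dataSource : toRefresh) {
        refreshOne(dataSource); // slow work happens without holding the lock
      }
    }
  }

  private void refreshOne(String dataSource)
  {
    System.out.println("refreshing " + dataSource);
  }
}

Draining the dirty set while holding the lock, then refreshing outside it, keeps an expensive call like computeTable() from blocking the callbacks.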
Use of io.druid.client.DruidServer in project druid by druid-io.
From the class DruidCoordinatorTest, method setUp():
@Before
public void setUp() throws Exception
{
  taskMaster = EasyMock.createMock(LoadQueueTaskMaster.class);
  druidServer = EasyMock.createMock(DruidServer.class);
  serverInventoryView = EasyMock.createMock(SingleServerInventoryView.class);
  databaseSegmentManager = EasyMock.createNiceMock(MetadataSegmentManager.class);
  metadataRuleManager = EasyMock.createNiceMock(MetadataRuleManager.class);
  configManager = EasyMock.createNiceMock(JacksonConfigManager.class);
  EasyMock.expect(configManager.watch(EasyMock.anyString(), EasyMock.anyObject(Class.class), EasyMock.anyObject()))
          .andReturn(new AtomicReference(new CoordinatorDynamicConfig.Builder().build()))
          .anyTimes();
  EasyMock.replay(configManager);
  setupServerAndCurator();
  curator.start();
  curator.blockUntilConnected();
  curator.create().creatingParentsIfNeeded().forPath(LOADPATH);
  objectMapper = new DefaultObjectMapper();
  druidCoordinatorConfig = new TestDruidCoordinatorConfig(
      new Duration(COORDINATOR_START_DELAY), new Duration(COORDINATOR_PERIOD), null, null,
      new Duration(COORDINATOR_PERIOD), null, 10, null, false, false, new Duration("PT0s")
  );
  pathChildrenCache = new PathChildrenCache(
      curator, LOADPATH, true, true, Execs.singleThreaded("coordinator_test_path_children_cache-%d")
  );
  loadQueuePeon = new LoadQueuePeon(
      curator, LOADPATH, objectMapper,
      Execs.scheduledSingleThreaded("coordinator_test_load_queue_peon_scheduled-%d"),
      Execs.singleThreaded("coordinator_test_load_queue_peon-%d"),
      druidCoordinatorConfig
  );
  loadQueuePeon.start();
  druidNode = new DruidNode("hey", "what", 1234);
  loadManagementPeons = new MapMaker().makeMap();
  scheduledExecutorFactory = new ScheduledExecutorFactory()
  {
    @Override
    public ScheduledExecutorService create(int corePoolSize, final String nameFormat)
    {
      return Executors.newSingleThreadScheduledExecutor();
    }
  };
  leaderAnnouncerLatch = new CountDownLatch(1);
  leaderUnannouncerLatch = new CountDownLatch(1);
  coordinator = new DruidCoordinator(
      druidCoordinatorConfig,
      new ZkPathsConfig()
      {
        @Override
        public String getBase()
        {
          return "druid";
        }
      },
      configManager, databaseSegmentManager, serverInventoryView, metadataRuleManager, curator,
      new NoopServiceEmitter(), scheduledExecutorFactory, null, taskMaster,
      new NoopServiceAnnouncer()
      {
        @Override
        public void announce(DruidNode node)
        {
          // count down when this coordinator becomes the leader
          leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node)
        {
          leaderUnannouncerLatch.countDown();
        }
      },
      druidNode, loadManagementPeons, null, new CostBalancerStrategyFactory()
  );
}
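Every collaborator in this setUp goes through EasyMock's record/replay lifecycle: createMock builds a strict mock that fails on unexpected calls, createNiceMock returns type defaults for anything unexpected, and replay switches a mock from recording expectations to serving them. A minimal sketch of the full record/replay/verify cycle, using a hypothetical Warehouse interface rather than a Druid type:

import org.easymock.EasyMock;

public class EasyMockLifecycle
{
  // A hypothetical collaborator to mock.
  interface Warehouse
  {
    int inventoryOf(String sku);
  }

  public static void main(String[] args)
  {
    // 1. Record: a strict mock rejects calls that were never expected;
    //    EasyMock.createNiceMock would return type defaults instead.
    Warehouse warehouse = EasyMock.createMock(Warehouse.class);
    EasyMock.expect(warehouse.inventoryOf("widget")).andReturn(42).anyTimes();

    // 2. Replay: switch from recording expectations to serving them.
    EasyMock.replay(warehouse);
    System.out.println(warehouse.inventoryOf("widget")); // 42

    // 3. Verify: fail if a cardinality such as atLeastOnce() went unmet.
    EasyMock.verify(warehouse);
  }
}

The tests later in this listing follow exactly this cycle, with EasyMock.verify(mockPeon) or EasyMock.verify(coordinator) as the final step.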
Use of io.druid.client.DruidServer in project druid by druid-io.
From the class DruidCoordinatorBalancerProfiler, method bigProfiler():
public void bigProfiler()
{
  Stopwatch watch = Stopwatch.createUnstarted();
  int numSegments = 55000;
  int numServers = 50;
  EasyMock.expect(manager.getAllRules()).andReturn(ImmutableMap.<String, List<Rule>>of("test", rules)).anyTimes();
  EasyMock.expect(manager.getRules(EasyMock.<String>anyObject())).andReturn(rules).anyTimes();
  EasyMock.expect(manager.getRulesWithDefault(EasyMock.<String>anyObject())).andReturn(rules).anyTimes();
  EasyMock.replay(manager);
  coordinator.moveSegment(
      EasyMock.<ImmutableDruidServer>anyObject(), EasyMock.<ImmutableDruidServer>anyObject(),
      EasyMock.<String>anyObject(), EasyMock.<LoadPeonCallback>anyObject()
  );
  EasyMock.expectLastCall().anyTimes();
  EasyMock.replay(coordinator);
  List<DruidServer> serverList = Lists.newArrayList();
  Map<String, LoadQueuePeon> peonMap = Maps.newHashMap();
  List<ServerHolder> serverHolderList = Lists.newArrayList();
  Map<String, DataSegment> segmentMap = Maps.newHashMap();
  for (int i = 0; i < numSegments; i++) {
    segmentMap.put(
        "segment" + i,
        new DataSegment(
            "datasource" + i,
            new Interval(new DateTime("2012-01-01"), new DateTime("2012-01-01").plusHours(1)),
            new DateTime("2012-03-01").toString(),
            Maps.<String, Object>newHashMap(), Lists.<String>newArrayList(), Lists.<String>newArrayList(),
            NoneShardSpec.instance(), 0, 4L
        )
    );
  }
  for (int i = 0; i < numServers; i++) {
    ImmutableDruidServer server = EasyMock.createMock(ImmutableDruidServer.class);
    EasyMock.expect(server.getMetadata()).andReturn(null).anyTimes();
    EasyMock.expect(server.getCurrSize()).andReturn(30L).atLeastOnce();
    EasyMock.expect(server.getMaxSize()).andReturn(100L).atLeastOnce();
    EasyMock.expect(server.getTier()).andReturn("normal").anyTimes();
    EasyMock.expect(server.getName()).andReturn(Integer.toString(i)).atLeastOnce();
    EasyMock.expect(server.getHost()).andReturn(Integer.toString(i)).anyTimes();
    if (i == 0) {
      // All 55,000 segments start on the first server, so the cluster is maximally unbalanced.
      EasyMock.expect(server.getSegments()).andReturn(segmentMap).anyTimes();
    } else {
      EasyMock.expect(server.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
    }
    EasyMock.expect(server.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(server);
    LoadQueuePeon peon = new LoadQueuePeonTester();
    peonMap.put(Integer.toString(i), peon);
    serverHolderList.add(new ServerHolder(server, peon));
  }
  DruidCoordinatorRuntimeParams params = DruidCoordinatorRuntimeParams
      .newBuilder()
      .withDruidCluster(new DruidCluster(ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of(
          "normal",
          MinMaxPriorityQueue.orderedBy(DruidCoordinatorBalancerTester.percentUsedComparator).create(serverHolderList)
      )))
      .withLoadManagementPeons(peonMap)
      .withAvailableSegments(segmentMap.values())
      .withDynamicConfigs(
          new CoordinatorDynamicConfig.Builder()
              .withMaxSegmentsToMove(MAX_SEGMENTS_TO_MOVE)
              .withReplicantLifetime(500)
              .withReplicationThrottleLimit(5)
              .build()
      )
      .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
      .withEmitter(emitter)
      .withDatabaseRuleManager(manager)
      .withReplicationManager(new ReplicationThrottler(2, 500))
      .withSegmentReplicantLookup(SegmentReplicantLookup.make(new DruidCluster(ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of(
          "normal",
          MinMaxPriorityQueue.orderedBy(DruidCoordinatorBalancerTester.percentUsedComparator).create(serverHolderList)
      ))))
      .build();
  DruidCoordinatorBalancerTester tester = new DruidCoordinatorBalancerTester(coordinator);
  DruidCoordinatorRuleRunner runner = new DruidCoordinatorRuleRunner(coordinator);
  watch.start();
  DruidCoordinatorRuntimeParams balanceParams = tester.run(params);
  DruidCoordinatorRuntimeParams assignParams = runner.run(params);
  System.out.println(watch.stop());
}
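The only instrumentation in bigProfiler() is the Guava Stopwatch wrapped around the balancer and rule-runner passes; stop() returns the Stopwatch itself, so System.out.println(watch.stop()) prints a human-readable elapsed time. A minimal sketch of the idiom, with a throwaway loop standing in for the profiled work:

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchExample
{
  public static void main(String[] args)
  {
    Stopwatch watch = Stopwatch.createUnstarted();
    watch.start();
    long sum = 0;
    for (int i = 0; i < 10_000_000; i++) {
      sum += i; // stand-in for the balancer/rule-runner work being timed
    }
    watch.stop();
    // Stopwatch.toString() renders a readable duration, e.g. "12.34 ms".
    System.out.println(watch + " (sum=" + sum + ")");
    System.out.println(watch.elapsed(TimeUnit.MICROSECONDS) + " us");
  }
}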
Use of io.druid.client.DruidServer in project druid by druid-io.
From the class DruidCoordinatorRuleRunnerTest, method testRunThreeTiersOneReplicant():
/**
* Nodes:
* hot - 1 replicant
* normal - 1 replicant
* cold - 1 replicant
*
* @throws Exception
*/
@Test
public void testRunThreeTiersOneReplicant() throws Exception
{
  mockCoordinator();
  mockPeon.loadSegment(EasyMock.<DataSegment>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
  EasyMock.expectLastCall().atLeastOnce();
  EasyMock.expect(mockPeon.getSegmentsToLoad()).andReturn(Sets.<DataSegment>newHashSet()).atLeastOnce();
  EasyMock.expect(mockPeon.getLoadQueueSize()).andReturn(0L).atLeastOnce();
  EasyMock.replay(mockPeon);
  EasyMock.expect(databaseRuleManager.getRulesWithDefault(EasyMock.<String>anyObject())).andReturn(
      Lists.<Rule>newArrayList(
          new IntervalLoadRule(new Interval("2012-01-01T00:00:00.000Z/2012-01-01T06:00:00.000Z"), ImmutableMap.<String, Integer>of("hot", 1)),
          new IntervalLoadRule(new Interval("2012-01-01T00:00:00.000Z/2012-01-01T12:00:00.000Z"), ImmutableMap.<String, Integer>of("normal", 1)),
          new IntervalLoadRule(new Interval("2012-01-01T00:00:00.000Z/2012-01-02T00:00:00.000Z"), ImmutableMap.<String, Integer>of("cold", 1))
      )
  ).atLeastOnce();
  EasyMock.replay(databaseRuleManager);
  DruidCluster druidCluster = new DruidCluster(ImmutableMap.of(
      "hot", MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse()).create(Arrays.asList(
          new ServerHolder(new DruidServer("serverHot", "hostHot", 1000, "historical", "hot", 0).toImmutableDruidServer(), mockPeon))),
      "normal", MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse()).create(Arrays.asList(
          new ServerHolder(new DruidServer("serverNorm", "hostNorm", 1000, "historical", "normal", 0).toImmutableDruidServer(), mockPeon))),
      "cold", MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse()).create(Arrays.asList(
          new ServerHolder(new DruidServer("serverCold", "hostCold", 1000, "historical", "cold", 0).toImmutableDruidServer(), mockPeon)))
  ));
  ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
  BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
  DruidCoordinatorRuntimeParams params = new DruidCoordinatorRuntimeParams.Builder()
      .withDruidCluster(druidCluster)
      .withAvailableSegments(availableSegments)
      .withDatabaseRuleManager(databaseRuleManager)
      .withSegmentReplicantLookup(SegmentReplicantLookup.make(new DruidCluster()))
      .withBalancerStrategy(balancerStrategy)
      .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
      .withDynamicConfigs(new CoordinatorDynamicConfig.Builder().withMaxSegmentsToMove(5).build())
      .build();
  DruidCoordinatorRuntimeParams afterParams = ruleRunner.run(params);
  CoordinatorStats stats = afterParams.getCoordinatorStats();
  Assert.assertTrue(stats.getPerTierStats().get("assignedCount").get("hot").get() == 6);
  Assert.assertTrue(stats.getPerTierStats().get("assignedCount").get("normal").get() == 6);
  Assert.assertTrue(stats.getPerTierStats().get("assignedCount").get("cold").get() == 12);
  Assert.assertTrue(stats.getPerTierStats().get("unassignedCount") == null);
  Assert.assertTrue(stats.getPerTierStats().get("unassignedSize") == null);
  exec.shutdown();
  EasyMock.verify(mockPeon);
}
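The expected counts follow from first-match rule evaluation: the coordinator consults the rules in order, and only the first rule whose interval contains the segment applies. The sums imply availableSegments holds one segment per hour of 2012-01-01, so hours 0-5 match the "hot" rule (6 segments), hours 6-11 fall through to "normal" (6), and hours 12-23 reach "cold" (12). A minimal sketch of that evaluation order, using a hypothetical SimpleRule in place of Druid's Rule interface:

import java.util.Arrays;
import java.util.List;

public class FirstMatchExample
{
  // Hypothetical stand-in for an interval load rule: covers [startHour, endHour).
  static class SimpleRule
  {
    final int startHour, endHour;
    final String tier;

    SimpleRule(int startHour, int endHour, String tier)
    {
      this.startHour = startHour;
      this.endHour = endHour;
      this.tier = tier;
    }

    boolean appliesTo(int hour)
    {
      return hour >= startHour && hour < endHour;
    }
  }

  public static void main(String[] args)
  {
    List<SimpleRule> rules = Arrays.asList(
        new SimpleRule(0, 6, "hot"),
        new SimpleRule(0, 12, "normal"),
        new SimpleRule(0, 24, "cold")
    );
    int hot = 0, normal = 0, cold = 0;
    for (int hour = 0; hour < 24; hour++) {
      for (SimpleRule rule : rules) {
        if (rule.appliesTo(hour)) {
          // First match wins; later rules never see this segment.
          if ("hot".equals(rule.tier)) hot++;
          else if ("normal".equals(rule.tier)) normal++;
          else cold++;
          break;
        }
      }
    }
    System.out.println(hot + "/" + normal + "/" + cold); // prints 6/6/12
  }
}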
Use of io.druid.client.DruidServer in project druid by druid-io.
From the class DruidCoordinatorRuleRunnerTest, method testDropRemove():
@Test
public void testDropRemove() throws Exception
{
  mockPeon.dropSegment(EasyMock.<DataSegment>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
  EasyMock.expectLastCall().atLeastOnce();
  EasyMock.expect(mockPeon.getSegmentsToLoad()).andReturn(Sets.<DataSegment>newHashSet()).atLeastOnce();
  EasyMock.expect(mockPeon.getLoadQueueSize()).andReturn(0L).atLeastOnce();
  EasyMock.replay(mockPeon);
  EasyMock.expect(coordinator.getDynamicConfigs()).andReturn(
      new CoordinatorDynamicConfig(0, 0, 0, 0, 1, 24, 0, false, null, false)
  ).anyTimes();
  coordinator.removeSegment(EasyMock.<DataSegment>anyObject());
  EasyMock.expectLastCall().atLeastOnce();
  EasyMock.replay(coordinator);
  EasyMock.expect(databaseRuleManager.getRulesWithDefault(EasyMock.<String>anyObject())).andReturn(
      Lists.<Rule>newArrayList(
          new IntervalLoadRule(new Interval("2012-01-01T00:00:00.000Z/2012-01-01T12:00:00.000Z"), ImmutableMap.<String, Integer>of("normal", 1)),
          new IntervalDropRule(new Interval("2012-01-01T00:00:00.000Z/2012-01-02T00:00:00.000Z"))
      )
  ).atLeastOnce();
  EasyMock.replay(databaseRuleManager);
  DruidServer server = new DruidServer("serverNorm", "hostNorm", 1000, "historical", "normal", 0);
  for (DataSegment segment : availableSegments) {
    server.addDataSegment(segment.getIdentifier(), segment);
  }
  DruidCluster druidCluster = new DruidCluster(ImmutableMap.of(
      "normal",
      MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse()).create(
          Arrays.asList(new ServerHolder(server.toImmutableDruidServer(), mockPeon))
      )
  ));
  SegmentReplicantLookup segmentReplicantLookup = SegmentReplicantLookup.make(druidCluster);
  ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
  BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
  DruidCoordinatorRuntimeParams params = new DruidCoordinatorRuntimeParams.Builder()
      .withDruidCluster(druidCluster)
      .withDynamicConfigs(new CoordinatorDynamicConfig.Builder().withMillisToWaitBeforeDeleting(0L).build())
      .withAvailableSegments(availableSegments)
      .withDatabaseRuleManager(databaseRuleManager)
      .withSegmentReplicantLookup(segmentReplicantLookup)
      .withBalancerStrategy(balancerStrategy)
      .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
      .build();
  DruidCoordinatorRuntimeParams afterParams = ruleRunner.run(params);
  CoordinatorStats stats = afterParams.getCoordinatorStats();
  Assert.assertTrue(stats.getGlobalStats().get("deletedCount").get() == 12);
  exec.shutdown();
  EasyMock.verify(coordinator);
}
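First-match evaluation also explains deletedCount == 12 here: segments in the first twelve hours match the load rule, while the remaining twelve fall through to the drop rule and are deleted, which is presumably why the test sets withMillisToWaitBeforeDeleting(0L). Separately, both rule-runner tests arrange each tier's ServerHolders in a Guava MinMaxPriorityQueue ordered by a reversed natural ordering, so both ends of the ordering are cheaply accessible. A minimal sketch of that queue, using integers in place of ServerHolders:

import com.google.common.collect.MinMaxPriorityQueue;
import com.google.common.collect.Ordering;
import java.util.Arrays;

public class MinMaxQueueExample
{
  public static void main(String[] args)
  {
    // Reversed natural ordering, as in the tests above: the largest element
    // sits at the head of the queue.
    MinMaxPriorityQueue<Integer> queue = MinMaxPriorityQueue
        .orderedBy(Ordering.<Integer>natural().reverse())
        .create(Arrays.asList(3, 1, 2));

    // Unlike a plain PriorityQueue, both extremes are available in O(1).
    System.out.println(queue.peekFirst()); // 3 (greatest under natural order)
    System.out.println(queue.peekLast());  // 1 (least)
  }
}

Having both extremes in O(1) is presumably what lets the coordinator inspect the most- and least-loaded servers in a tier without re-sorting.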