Use of io.druid.java.util.common.concurrent.ScheduledExecutorFactory in project druid by druid-io.
From class DruidCoordinatorTest, method testMoveSegment.
@Test
public void testMoveSegment() throws Exception {
  loadQueuePeon = EasyMock.createNiceMock(LoadQueuePeon.class);
  EasyMock.expect(loadQueuePeon.getLoadQueueSize()).andReturn(1L);
  EasyMock.replay(loadQueuePeon);

  segment = EasyMock.createNiceMock(DataSegment.class);
  EasyMock.replay(segment);

  scheduledExecutorFactory = EasyMock.createNiceMock(ScheduledExecutorFactory.class);
  EasyMock.replay(scheduledExecutorFactory);
  EasyMock.replay(metadataRuleManager);

  // "from" holds the segment being moved; "to" is the destination server.
  EasyMock.expect(druidServer.toImmutableDruidServer()).andReturn(
      new ImmutableDruidServer(
          new DruidServerMetadata("from", null, 5L, null, null, 0),
          1L,
          null,
          ImmutableMap.of("dummySegment", segment)
      )
  ).atLeastOnce();
  EasyMock.replay(druidServer);

  druidServer2 = EasyMock.createMock(DruidServer.class);
  EasyMock.expect(druidServer2.toImmutableDruidServer()).andReturn(
      new ImmutableDruidServer(
          new DruidServerMetadata("to", null, 5L, null, null, 0),
          1L,
          null,
          ImmutableMap.of("dummySegment2", segment)
      )
  ).atLeastOnce();
  EasyMock.replay(druidServer2);

  // Both servers are backed by the same peon in this test.
  loadManagementPeons.put("from", loadQueuePeon);
  loadManagementPeons.put("to", loadQueuePeon);

  EasyMock.expect(serverInventoryView.getInventoryManagerConfig()).andReturn(
      new InventoryManagerConfig() {
        @Override
        public String getContainerPath() {
          return "";
        }

        @Override
        public String getInventoryPath() {
          return "";
        }
      }
  );
  EasyMock.replay(serverInventoryView);

  coordinator.moveSegment(
      druidServer.toImmutableDruidServer(),
      druidServer2.toImmutableDruidServer(),
      "dummySegment",
      null
  );

  EasyMock.verify(druidServer);
  EasyMock.verify(druidServer2);
  EasyMock.verify(loadQueuePeon);
  EasyMock.verify(serverInventoryView);
  EasyMock.verify(metadataRuleManager);
}
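For reference across all of the examples on this page, the factory being mocked here is a single-method interface. Below is a minimal sketch of its shape, inferred from the create(corePoolSize, nameFormat) calls and anonymous overrides shown in these snippets; treat it as illustrative rather than the verbatim druid source.

import java.util.concurrent.ScheduledExecutorService;

public interface ScheduledExecutorFactory {
  // Returns a scheduled pool of the given size whose threads are named
  // after nameFormat, e.g. "coordinator_test-%d".
  ScheduledExecutorService create(int corePoolSize, String nameFormat);
}

Because the interface is this small, tests can freely swap in EasyMock nice mocks (as above) or hand-rolled implementations (as in the later examples) without touching production wiring.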
Use of io.druid.java.util.common.concurrent.ScheduledExecutorFactory in project druid by druid-io.
From class CliCoordinator, method getModules.
@Override
protected List<? extends Module> getModules() {
  List<Module> modules = new ArrayList<>();
  modules.add(new Module() {
    @Override
    public void configure(Binder binder) {
      binder.bindConstant().annotatedWith(Names.named("serviceName")).to(TieredBrokerConfig.DEFAULT_COORDINATOR_SERVICE_NAME);
      binder.bindConstant().annotatedWith(Names.named("servicePort")).to(8081);
      ConfigProvider.bind(binder, DruidCoordinatorConfig.class);
      binder.bind(MetadataStorage.class).toProvider(MetadataStorageProvider.class);
      JsonConfigProvider.bind(binder, "druid.manager.segments", MetadataSegmentManagerConfig.class);
      JsonConfigProvider.bind(binder, "druid.manager.rules", MetadataRuleManagerConfig.class);
      JsonConfigProvider.bind(binder, "druid.manager.lookups", LookupCoordinatorManagerConfig.class);
      JsonConfigProvider.bind(binder, "druid.coordinator.balancer", BalancerStrategyFactory.class);
      binder.bind(RedirectFilter.class).in(LazySingleton.class);
      if (beOverlord) {
        binder.bind(RedirectInfo.class).to(CoordinatorOverlordRedirectInfo.class).in(LazySingleton.class);
      } else {
        binder.bind(RedirectInfo.class).to(CoordinatorRedirectInfo.class).in(LazySingleton.class);
      }
      binder.bind(MetadataSegmentManager.class).toProvider(MetadataSegmentManagerProvider.class).in(ManageLifecycle.class);
      binder.bind(MetadataRuleManager.class).toProvider(MetadataRuleManagerProvider.class).in(ManageLifecycle.class);
      binder.bind(AuditManager.class).toProvider(AuditManagerProvider.class).in(ManageLifecycle.class);
      binder.bind(IndexingServiceClient.class).in(LazySingleton.class);
      binder.bind(CoordinatorServerView.class).in(LazySingleton.class);
      binder.bind(DruidCoordinator.class);
      binder.bind(LookupCoordinatorManager.class).in(ManageLifecycle.class);
      binder.bind(ListenerDiscoverer.class).in(ManageLifecycle.class);
      LifecycleModule.register(binder, ListenerDiscoverer.class);
      LifecycleModule.register(binder, MetadataStorage.class);
      LifecycleModule.register(binder, DruidCoordinator.class);
      LifecycleModule.register(binder, LookupCoordinatorManager.class);
      binder.bind(JettyServerInitializer.class).to(CoordinatorJettyServerInitializer.class);
      Jerseys.addResource(binder, CoordinatorResource.class);
      Jerseys.addResource(binder, CoordinatorDynamicConfigsResource.class);
      Jerseys.addResource(binder, TiersResource.class);
      Jerseys.addResource(binder, RulesResource.class);
      Jerseys.addResource(binder, ServersResource.class);
      Jerseys.addResource(binder, DatasourcesResource.class);
      Jerseys.addResource(binder, MetadataResource.class);
      Jerseys.addResource(binder, IntervalsResource.class);
      Jerseys.addResource(binder, LookupCoordinatorResource.class);
      LifecycleModule.register(binder, Server.class);
      LifecycleModule.register(binder, DatasourcesResource.class);
      ConditionalMultibind
          .create(properties, binder, DruidCoordinatorHelper.class, CoordinatorIndexingServiceHelper.class)
          .addConditionBinding("druid.coordinator.merge.on", Predicates.equalTo("true"), DruidCoordinatorSegmentMerger.class)
          .addConditionBinding("druid.coordinator.conversion.on", Predicates.equalTo("true"), DruidCoordinatorVersionConverter.class)
          .addConditionBinding("druid.coordinator.kill.on", Predicates.equalTo("true"), DruidCoordinatorSegmentKiller.class);
    }

    @Provides
    @LazySingleton
    public LoadQueueTaskMaster getLoadQueueTaskMaster(CuratorFramework curator, ObjectMapper jsonMapper, ScheduledExecutorFactory factory, DruidCoordinatorConfig config) {
      return new LoadQueueTaskMaster(curator, jsonMapper, factory.create(1, "Master-PeonExec--%d"), Executors.newSingleThreadExecutor(), config);
    }
  });
  if (beOverlord) {
    modules.addAll(new CliOverlord().getModules(false));
  }
  return modules;
}
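The @Provides method above asks the injected ScheduledExecutorFactory for a single-threaded, named scheduled executor. As a hedged sketch of where that factory typically comes from at runtime: druid's java-util exposes ScheduledExecutors.createFactory(lifecycle), which ties executor shutdown to the service Lifecycle. Treat the exact wiring below as illustrative rather than verbatim module code.

Lifecycle lifecycle = new Lifecycle();
ScheduledExecutorFactory factory = ScheduledExecutors.createFactory(lifecycle);
// Same call shape as in getLoadQueueTaskMaster above: pool size plus thread-name format.
ScheduledExecutorService exec = factory.create(1, "Master-PeonExec--%d");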
Use of io.druid.java.util.common.concurrent.ScheduledExecutorFactory in project druid by druid-io.
From class RemoteTaskRunnerFactoryTest, method testExecNotSharedBetweenRunners.
@Test
public void testExecNotSharedBetweenRunners() {
  final AtomicInteger executorCount = new AtomicInteger(0);
  RemoteTaskRunnerConfig config = new RemoteTaskRunnerConfig();
  IndexerZkConfig indexerZkConfig = new IndexerZkConfig(new ZkPathsConfig() {
    @Override
    public String getBase() {
      return basePath;
    }
  }, null, null, null, null, null);
  HttpClient httpClient = EasyMock.createMock(HttpClient.class);
  Supplier<WorkerBehaviorConfig> workerBehaviorConfig = EasyMock.createMock(Supplier.class);
  // Counting decorator: every executor the factory hands out bumps executorCount.
  ScheduledExecutorFactory executorFactory = new ScheduledExecutorFactory() {
    @Override
    public ScheduledExecutorService create(int i, String s) {
      executorCount.incrementAndGet();
      return ScheduledExecutors.fixed(i, s);
    }
  };
  SimpleWorkerResourceManagementConfig resourceManagementConfig = new SimpleWorkerResourceManagementConfig();
  ResourceManagementSchedulerConfig resourceManagementSchedulerConfig = new ResourceManagementSchedulerConfig() {
    @Override
    public boolean isDoAutoscale() {
      return true;
    }
  };
  RemoteTaskRunnerFactory factory = new RemoteTaskRunnerFactory(
      cf,
      config,
      indexerZkConfig,
      jsonMapper,
      httpClient,
      workerBehaviorConfig,
      executorFactory,
      resourceManagementSchedulerConfig,
      new SimpleWorkerResourceManagementStrategy(resourceManagementConfig, workerBehaviorConfig, resourceManagementSchedulerConfig, executorFactory)
  );
  // One executor is created while constructing the factory itself...
  Assert.assertEquals(1, executorCount.get());
  RemoteTaskRunner remoteTaskRunner1 = factory.build();
  // ...and each build() allocates a fresh executor instead of sharing one.
  Assert.assertEquals(2, executorCount.get());
  RemoteTaskRunner remoteTaskRunner2 = factory.build();
  Assert.assertEquals(3, executorCount.get());
}
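The delegate here, ScheduledExecutors.fixed(i, s), hands back a fixed-size scheduled pool with the given thread-name format. A rough standalone equivalent, assuming Guava's ThreadFactoryBuilder as used elsewhere on this page (a sketch of the likely behavior, not the verbatim utility):

ScheduledExecutorService pool = Executors.newScheduledThreadPool(
    2,
    new ThreadFactoryBuilder().setDaemon(true).setNameFormat("example-exec-%d").build()
);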
Use of io.druid.java.util.common.concurrent.ScheduledExecutorFactory in project druid by druid-io.
From class DruidCoordinatorTest, method setUp.
@Before
public void setUp() throws Exception {
  taskMaster = EasyMock.createMock(LoadQueueTaskMaster.class);
  druidServer = EasyMock.createMock(DruidServer.class);
  serverInventoryView = EasyMock.createMock(SingleServerInventoryView.class);
  databaseSegmentManager = EasyMock.createNiceMock(MetadataSegmentManager.class);
  metadataRuleManager = EasyMock.createNiceMock(MetadataRuleManager.class);
  configManager = EasyMock.createNiceMock(JacksonConfigManager.class);
  EasyMock.expect(
      configManager.watch(EasyMock.anyString(), EasyMock.anyObject(Class.class), EasyMock.anyObject())
  ).andReturn(new AtomicReference(new CoordinatorDynamicConfig.Builder().build())).anyTimes();
  EasyMock.replay(configManager);

  setupServerAndCurator();
  curator.start();
  curator.blockUntilConnected();
  curator.create().creatingParentsIfNeeded().forPath(LOADPATH);

  objectMapper = new DefaultObjectMapper();
  druidCoordinatorConfig = new TestDruidCoordinatorConfig(
      new Duration(COORDINATOR_START_DELAY), new Duration(COORDINATOR_PERIOD), null, null,
      new Duration(COORDINATOR_PERIOD), null, 10, null, false, false, new Duration("PT0s")
  );
  pathChildrenCache = new PathChildrenCache(curator, LOADPATH, true, true, Execs.singleThreaded("coordinator_test_path_children_cache-%d"));
  loadQueuePeon = new LoadQueuePeon(
      curator, LOADPATH, objectMapper,
      Execs.scheduledSingleThreaded("coordinator_test_load_queue_peon_scheduled-%d"),
      Execs.singleThreaded("coordinator_test_load_queue_peon-%d"),
      druidCoordinatorConfig
  );
  loadQueuePeon.start();
  druidNode = new DruidNode("hey", "what", 1234);
  loadManagementPeons = new MapMaker().makeMap();
  // A single-threaded executor is enough for tests; pool size and name format are ignored.
  scheduledExecutorFactory = new ScheduledExecutorFactory() {
    @Override
    public ScheduledExecutorService create(int corePoolSize, final String nameFormat) {
      return Executors.newSingleThreadScheduledExecutor();
    }
  };
  leaderAnnouncerLatch = new CountDownLatch(1);
  leaderUnannouncerLatch = new CountDownLatch(1);
  coordinator = new DruidCoordinator(
      druidCoordinatorConfig,
      new ZkPathsConfig() {
        @Override
        public String getBase() {
          return "druid";
        }
      },
      configManager,
      databaseSegmentManager,
      serverInventoryView,
      metadataRuleManager,
      curator,
      new NoopServiceEmitter(),
      scheduledExecutorFactory,
      null,
      taskMaster,
      new NoopServiceAnnouncer() {
        @Override
        public void announce(DruidNode node) {
          // count down when this coordinator becomes the leader
          leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node) {
          leaderUnannouncerLatch.countDown();
        }
      },
      druidNode,
      loadManagementPeons,
      null,
      new CostBalancerStrategyFactory()
  );
}
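The NoopServiceAnnouncer override turns leadership changes into latch countdowns, so a test body can block deterministically instead of polling. A hedged sketch of how a test might use these latches (the timeout value is illustrative):

coordinator.start();
// announce() fires when this coordinator wins leadership and counts the latch down.
Assert.assertTrue(leaderAnnouncerLatch.await(10, TimeUnit.SECONDS));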
Use of io.druid.java.util.common.concurrent.ScheduledExecutorFactory in project druid by druid-io.
From class ZkCoordinatorTest, method setUp.
@Before
public void setUp() throws Exception {
  setupServerAndCurator();
  curator.start();
  curator.blockUntilConnected();
  try {
    infoDir = new File(File.createTempFile("blah", "blah2").getParent(), "ZkCoordinatorTest");
    infoDir.mkdirs();
    for (File file : infoDir.listFiles()) {
      file.delete();
    }
    log.info("Creating tmp test files in [%s]", infoDir);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  scheduledRunnable = Lists.newArrayList();
  segmentLoader = new CacheTestSegmentLoader();
  serverManager = new ServerManager(
      segmentLoader,
      new NoopQueryRunnerFactoryConglomerate(),
      new NoopServiceEmitter(),
      MoreExecutors.sameThreadExecutor(),
      MoreExecutors.sameThreadExecutor(),
      new DefaultObjectMapper(),
      new LocalCacheProvider().get(),
      new CacheConfig()
  );
  final ZkPathsConfig zkPaths = new ZkPathsConfig() {
    @Override
    public String getBase() {
      return "/druid";
    }
  };
  segmentsAnnouncedByMe = new ConcurrentSkipListSet<>();
  announceCount = new AtomicInteger(0);
  // Wrap the real announcer so the test can track exactly which segments get announced.
  announcer = new DataSegmentAnnouncer() {
    private final DataSegmentAnnouncer delegate = new BatchDataSegmentAnnouncer(
        me, new BatchDataSegmentAnnouncerConfig(), zkPaths, new Announcer(curator, Execs.singleThreaded("blah")), jsonMapper
    );

    @Override
    public void announceSegment(DataSegment segment) throws IOException {
      segmentsAnnouncedByMe.add(segment);
      announceCount.incrementAndGet();
      delegate.announceSegment(segment);
    }

    @Override
    public void unannounceSegment(DataSegment segment) throws IOException {
      segmentsAnnouncedByMe.remove(segment);
      announceCount.decrementAndGet();
      delegate.unannounceSegment(segment);
    }

    @Override
    public void announceSegments(Iterable<DataSegment> segments) throws IOException {
      for (DataSegment segment : segments) {
        segmentsAnnouncedByMe.add(segment);
      }
      announceCount.addAndGet(Iterables.size(segments));
      delegate.announceSegments(segments);
    }

    @Override
    public void unannounceSegments(Iterable<DataSegment> segments) throws IOException {
      for (DataSegment segment : segments) {
        segmentsAnnouncedByMe.remove(segment);
      }
      announceCount.addAndGet(-Iterables.size(segments));
      delegate.unannounceSegments(segments);
    }

    @Override
    public boolean isAnnounced(DataSegment segment) {
      return segmentsAnnouncedByMe.contains(segment);
    }
  };
  zkCoordinator = new ZkCoordinator(jsonMapper, new SegmentLoaderConfig() {
    @Override
    public File getInfoDir() {
      return infoDir;
    }

    @Override
    public int getNumLoadingThreads() {
      return 5;
    }

    @Override
    public int getAnnounceIntervalMillis() {
      return 50;
    }

    @Override
    public int getDropSegmentDelayMillis() {
      return 0;
    }
  }, zkPaths, me, announcer, curator, serverManager, new ScheduledExecutorFactory() {
    @Override
    public ScheduledExecutorService create(int corePoolSize, String nameFormat) {
      /*
       * Override the normal behavior by collecting the runnables into a list, so the
       * test can make sure all the scheduled runnables are executed by explicitly
       * calling run() on each item in the list.
       */
      return new ScheduledThreadPoolExecutor(corePoolSize, new ThreadFactoryBuilder().setDaemon(true).setNameFormat(nameFormat).build()) {
        @Override
        public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
          scheduledRunnable.add(command);
          return null;
        }
      };
    }
  });
}
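Since the overridden schedule(...) only records each command, nothing runs until the test says so. A test can then drain the captured tasks deterministically, along these lines (a sketch based on the comment in the factory above):

// Execute every task the coordinator tried to schedule, synchronously and in order.
for (Runnable runnable : scheduledRunnable) {
  runnable.run();
}
scheduledRunnable.clear();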