Use of com.google.inject.Singleton in project gerrit by GerritCodeReview.
The class SshAddressesModule, method getAdvertisedAddresses.
@Provides
@Singleton
@SshAdvertisedAddresses
List<String> getAdvertisedAddresses(
    @GerritServerConfig Config cfg, @SshListenAddresses List<SocketAddress> listen) {
  String[] want = cfg.getStringList("sshd", null, "advertisedaddress");
  if (want.length > 0) {
    return Arrays.asList(want);
  }
  List<InetSocketAddress> pub = new ArrayList<>();
  List<InetSocketAddress> local = new ArrayList<>();
  for (SocketAddress addr : listen) {
    if (addr instanceof InetSocketAddress) {
      InetSocketAddress inetAddr = (InetSocketAddress) addr;
      if (inetAddr.getAddress().isLoopbackAddress()) {
        local.add(inetAddr);
      } else {
        pub.add(inetAddr);
      }
    }
  }
  if (pub.isEmpty()) {
    pub = local;
  }
  List<String> adv = Lists.newArrayListWithCapacity(pub.size());
  for (InetSocketAddress addr : pub) {
    adv.add(SocketUtil.format(addr, IANA_SSH_PORT));
  }
  return adv;
}
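Because the provider method is annotated @Singleton, Guice invokes it at most once and caches the returned list; every consumer that injects the @SshAdvertisedAddresses binding receives the same instance. A minimal sketch of such a consumer, assuming it lives in the same injector; the class AdvertisedAddressLogger is hypothetical, not part of Gerrit:

import com.google.inject.Inject;
import com.google.inject.Singleton;
import java.util.List;

// Hypothetical consumer; assumes Gerrit's @SshAdvertisedAddresses annotation is on the classpath.
@Singleton
class AdvertisedAddressLogger {
  private final List<String> advertised;

  @Inject
  AdvertisedAddressLogger(@SshAdvertisedAddresses List<String> advertised) {
    // Guice calls getAdvertisedAddresses() once; this is the cached singleton list.
    this.advertised = advertised;
  }

  void log() {
    advertised.forEach(a -> System.out.println("advertised ssh address: " + a));
  }
}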
Use of com.google.inject.Singleton in project gerrit by GerritCodeReview.
The class InMemoryModule, method configure.
@Override
protected void configure() {
  // Do NOT bind @RemotePeer, as it is bound in a child injector of
  // ChangeMergeQueue (bound via GerritGlobalModule below), so there cannot be
  // a binding in the parent injector. If you need @RemotePeer, you must bind
  // it in a child injector of the one containing InMemoryModule. But unless
  // you really need to test something request-scoped, you likely don't
  // actually need it.
  // For simplicity, don't create child injectors, just use this one to get a
  // few required modules.
  Injector cfgInjector = Guice.createInjector(new AbstractModule() {
    @Override
    protected void configure() {
      bind(Config.class).annotatedWith(GerritServerConfig.class).toInstance(cfg);
    }
  });
  bind(MetricMaker.class).to(DisabledMetricMaker.class);
  install(cfgInjector.getInstance(GerritGlobalModule.class));
  install(new DefaultPermissionBackendModule());
  install(new SearchingChangeCacheImpl.Module());
  factory(GarbageCollection.Factory.class);
  bindScope(RequestScoped.class, PerThreadRequestScope.REQUEST);
  // TODO(dborowitz): Use jimfs.
  bind(Path.class).annotatedWith(SitePath.class).toInstance(Paths.get("."));
  bind(Config.class).annotatedWith(GerritServerConfig.class).toInstance(cfg);
  bind(GerritOptions.class).toInstance(new GerritOptions(cfg, false, false, false));
  bind(PersonIdent.class)
      .annotatedWith(GerritPersonIdent.class)
      .toProvider(GerritPersonIdentProvider.class);
  bind(String.class)
      .annotatedWith(AnonymousCowardName.class)
      .toProvider(AnonymousCowardNameProvider.class);
  bind(String.class).annotatedWith(GerritServerId.class).toInstance("gerrit");
  bind(AllProjectsName.class).toProvider(AllProjectsNameProvider.class);
  bind(AllUsersName.class).toProvider(AllUsersNameProvider.class);
  bind(GitRepositoryManager.class).to(InMemoryRepositoryManager.class);
  bind(InMemoryRepositoryManager.class).in(SINGLETON);
  bind(TrackingFooters.class).toProvider(TrackingFootersProvider.class).in(SINGLETON);
  bind(NotesMigration.class).toInstance(notesMigration);
  bind(ListeningExecutorService.class)
      .annotatedWith(ChangeUpdateExecutor.class)
      .toInstance(MoreExecutors.newDirectExecutorService());
  bind(DataSourceType.class).to(InMemoryH2Type.class);
  bind(ChangeBundleReader.class).to(GwtormChangeBundleReader.class);
  bind(SecureStore.class).to(DefaultSecureStore.class);
  TypeLiteral<SchemaFactory<ReviewDb>> schemaFactory =
      new TypeLiteral<SchemaFactory<ReviewDb>>() {};
  bind(schemaFactory).to(NotesMigrationSchemaFactory.class);
  bind(Key.get(schemaFactory, ReviewDbFactory.class)).to(InMemoryDatabase.class);
  install(NoSshKeyCache.module());
  install(new CanonicalWebUrlModule() {
    @Override
    protected Class<? extends Provider<String>> provider() {
      return CanonicalWebUrlProvider.class;
    }
  });
  // Replacement for DiffExecutorModule so tests do not use a thread pool.
  install(new AbstractModule() {
    @Override
    protected void configure() {}

    @Provides
    @Singleton
    @DiffExecutor
    public ExecutorService createDiffExecutor() {
      return MoreExecutors.newDirectExecutorService();
    }
  });
  install(new DefaultCacheFactory.Module());
  install(new FakeEmailSender.Module());
  install(new SignedTokenEmailTokenVerifier.Module());
  install(new GpgModule(cfg));
  install(new H2AccountPatchReviewStore.InMemoryModule());
  bind(AllAccountsIndexer.class).toProvider(Providers.of(null));
  bind(AllChangesIndexer.class).toProvider(Providers.of(null));
  bind(AllGroupsIndexer.class).toProvider(Providers.of(null));
  IndexType indexType = null;
  try {
    indexType = cfg.getEnum("index", null, "type", IndexType.LUCENE);
  } catch (IllegalArgumentException e) {
    // Custom index type, caller must provide their own module.
  }
  if (indexType != null) {
    switch (indexType) {
      case LUCENE:
        install(luceneIndexModule());
        break;
      case ELASTICSEARCH:
        install(elasticIndexModule());
        break;
      default:
        throw new ProvisionException("index type unsupported in tests: " + indexType);
    }
  }
}
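The schemaFactory lines above show the standard Guice recipe for binding a parameterized type: a raw Class literal cannot express SchemaFactory<ReviewDb>, so the full type is captured in an anonymous TypeLiteral subclass, and Key.get(typeLiteral, annotation) addresses the annotated variant of the same type. A self-contained sketch of the technique, using hypothetical Repository/UserRepository types in place of Gerrit's classes:

import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Key;
import com.google.inject.TypeLiteral;

// Hypothetical generic interface and implementation, for illustration only.
interface Repository<T> {}
class UserRepository implements Repository<String> {}

class GenericBindingModule extends AbstractModule {
  @Override
  protected void configure() {
    // The anonymous subclass preserves the type parameter at runtime.
    TypeLiteral<Repository<String>> repoType = new TypeLiteral<Repository<String>>() {};
    bind(repoType).to(UserRepository.class);
  }
}

class GenericBindingDemo {
  public static void main(String[] args) {
    Injector injector = Guice.createInjector(new GenericBindingModule());
    // Look the binding up by the same TypeLiteral-backed Key.
    Repository<String> repo =
        injector.getInstance(Key.get(new TypeLiteral<Repository<String>>() {}));
    System.out.println(repo.getClass().getSimpleName()); // prints UserRepository
  }
}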
Use of com.google.inject.Singleton in project cdap by caskdata.
The class UpgradeTool, method createInjector.
@VisibleForTesting
Injector createInjector() throws Exception {
  return Guice.createInjector(
      new ConfigModule(cConf, hConf),
      new LocationRuntimeModule().getDistributedModules(),
      new ZKClientModule(),
      new DiscoveryRuntimeModule().getDistributedModules(),
      new MessagingClientModule(),
      Modules.override(new DataSetsModules().getDistributedModules()).with(
          new AbstractModule() {
            @Override
            protected void configure() {
              bind(DatasetFramework.class).to(InMemoryDatasetFramework.class).in(Scopes.SINGLETON);
              // DataSetsModules().getDistributedModules() binds the named framework to
              // RemoteDatasetFramework, so override it to the same InMemoryDatasetFramework.
              bind(DatasetFramework.class)
                  .annotatedWith(Names.named(DataSetsModules.BASE_DATASET_FRAMEWORK))
                  .to(DatasetFramework.class);
              install(new FactoryModuleBuilder()
                  .implement(DatasetDefinitionRegistry.class, DefaultDatasetDefinitionRegistry.class)
                  .build(DatasetDefinitionRegistryFactory.class));
              // CDAP-5954 Upgrade tool does not need to record lineage and metadata changes for now.
              bind(LineageWriter.class).to(NoOpLineageWriter.class);
            }
          }),
      new ViewAdminModules().getDistributedModules(),
      new StreamAdminModules().getDistributedModules(),
      new NotificationFeedClientModule(),
      new TwillModule(),
      new ExploreClientModule(),
      new ProgramRunnerRuntimeModule().getDistributedModules(),
      new ServiceStoreModules().getDistributedModules(),
      new SystemDatasetRuntimeModule().getDistributedModules(),
      // don't need real notifications for upgrade, so use the in-memory implementations
      new NotificationServiceRuntimeModule().getInMemoryModules(),
      new KafkaClientModule(),
      new NamespaceStoreModule().getDistributedModules(),
      new AuthenticationContextModules().getMasterModule(),
      new AuthorizationModule(),
      new AuthorizationEnforcementModule().getMasterModule(),
      new SecureStoreModules().getDistributedModules(),
      new DataFabricModules(UpgradeTool.class.getName()).getDistributedModules(),
      new AppFabricServiceRuntimeModule().getDistributedModules(),
      new AbstractModule() {
        @Override
        protected void configure() {
          // DataFabricDistributedModule needs a MetricsCollectionService binding; since the
          // upgrade tool does nothing with metrics, bind it to NoOpMetricsCollectionService.
          bind(MetricsCollectionService.class).to(NoOpMetricsCollectionService.class).in(Scopes.SINGLETON);
          bind(MetricDatasetFactory.class).to(DefaultMetricDatasetFactory.class).in(Scopes.SINGLETON);
          bind(MetricStore.class).to(DefaultMetricStore.class);
        }

        @Provides
        @Singleton
        @Named("datasetInstanceManager")
        @SuppressWarnings("unused")
        public DatasetInstanceManager getDatasetInstanceManager(
            TransactionSystemClientService txClient,
            TransactionExecutorFactory txExecutorFactory,
            @Named("datasetMDS") DatasetFramework framework) {
          return new DatasetInstanceManager(txClient, txExecutorFactory, framework);
        }

        // This is needed because the LocalApplicationManager
        // expects a dsframework injection named datasetMDS
        @Provides
        @Singleton
        @Named("datasetMDS")
        @SuppressWarnings("unused")
        public DatasetFramework getInDsFramework(DatasetFramework dsFramework) {
          return dsFramework;
        }
      });
}
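The load-bearing technique here is Modules.override: the resulting module keeps every binding from DataSetsModules().getDistributedModules() except those the overriding module redefines, which is how the tool swaps in InMemoryDatasetFramework without touching the production module. A minimal sketch of the pattern, with hypothetical Service/ProdService/FakeService types:

import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.util.Modules;

// Hypothetical types, for illustration only.
interface Service {}
class ProdService implements Service {}
class FakeService implements Service {}

class ProdModule extends AbstractModule {
  @Override
  protected void configure() {
    bind(Service.class).to(ProdService.class);
  }
}

class OverrideDemo {
  public static void main(String[] args) {
    // The overriding module's binding for Service wins; all other ProdModule bindings survive.
    Injector injector = Guice.createInjector(
        Modules.override(new ProdModule()).with(new AbstractModule() {
          @Override
          protected void configure() {
            bind(Service.class).to(FakeService.class);
          }
        }));
    System.out.println(injector.getInstance(Service.class).getClass().getSimpleName()); // FakeService
  }
}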
Use of com.google.inject.Singleton in project cdap by caskdata.
The class CoprocessorBuildTool, method main.
public static void main(final String[] args) throws ParseException {
  Options options = new Options()
      .addOption(new Option("h", "help", false, "Print this usage message."))
      .addOption(new Option("f", "force", false, "Overwrites any coprocessors that already exist."));
  CommandLineParser parser = new BasicParser();
  CommandLine commandLine = parser.parse(options, args);
  String[] commandArgs = commandLine.getArgs();
  // if help is an option, or if there isn't a single 'check' command, print usage and exit.
  if (commandLine.hasOption("h") || commandArgs.length != 1 || !"check".equalsIgnoreCase(commandArgs[0])) {
    HelpFormatter helpFormatter = new HelpFormatter();
    helpFormatter.printHelp(
        CoprocessorBuildTool.class.getName() + " check",
        "Checks that HBase coprocessors required by CDAP are loaded onto HDFS. "
            + "If not, the coprocessors are built and placed on HDFS.",
        options, "");
    System.exit(0);
  }
  boolean overwrite = commandLine.hasOption("f");
  CConfiguration cConf = CConfiguration.create();
  Configuration hConf = HBaseConfiguration.create();
  Injector injector = Guice.createInjector(
      new ConfigModule(cConf, hConf),
      // for LocationFactory
      new PrivateModule() {
        @Override
        protected void configure() {
          bind(FileContext.class).toProvider(FileContextProvider.class).in(Scopes.SINGLETON);
          expose(LocationFactory.class);
        }

        @Provides
        @Singleton
        private LocationFactory providesLocationFactory(Configuration hConf, CConfiguration cConf, FileContext fc) {
          final String namespace = cConf.get(Constants.CFG_HDFS_NAMESPACE);
          if (UserGroupInformation.isSecurityEnabled()) {
            return new FileContextLocationFactory(hConf, namespace);
          }
          return new InsecureFileContextLocationFactory(hConf, namespace, fc);
        }
      });
  try {
    SecurityUtil.loginForMasterService(cConf);
  } catch (Exception e) {
    LOG.error("Failed to login as CDAP user", e);
    System.exit(1);
  }
  LocationFactory locationFactory = injector.getInstance(LocationFactory.class);
  HBaseTableUtil tableUtil = new HBaseTableUtilFactory(cConf).get();
  CoprocessorManager coprocessorManager = new CoprocessorManager(cConf, locationFactory, tableUtil);
  try {
    Location location = coprocessorManager.ensureCoprocessorExists(overwrite);
    LOG.info("coprocessor exists at {}.", location);
  } catch (IOException e) {
    LOG.error("Unable to build and upload coprocessor jars.", e);
    System.exit(1);
  }
}
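PrivateModule inverts Guice's usual visibility: bindings declared inside it (FileContext here) stay private to the module, and only what is explicitly expose()d (LocationFactory) becomes visible to the rest of the injector. A minimal sketch of the pattern, with hypothetical Helper/PublicApi types:

import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.PrivateModule;
import com.google.inject.Provides;
import com.google.inject.Singleton;

// Hypothetical types, for illustration only.
class Helper {}
class PublicApi {
  final Helper helper;
  PublicApi(Helper helper) { this.helper = helper; }
}

class PrivateModuleDemo {
  public static void main(String[] args) {
    Injector injector = Guice.createInjector(new PrivateModule() {
      @Override
      protected void configure() {
        bind(Helper.class);      // visible only inside this private module
        expose(PublicApi.class); // the single binding the outside world can see
      }

      @Provides
      @Singleton
      PublicApi providePublicApi(Helper helper) {
        return new PublicApi(helper);
      }
    });
    System.out.println(injector.getInstance(PublicApi.class) != null); // true
    // injector.getInstance(Helper.class) would fail: Helper is not exposed.
  }
}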