Use of org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentNodeStoreBuilder in project jackrabbit-oak by apache.
The class UnlockUpgradeCommand, method execute.
@Override
public void execute(String... args) throws Exception {
    OptionParser parser = new OptionParser();
    // RDB specific options
    OptionSpec<String> rdbjdbcuser = parser.accepts("rdbjdbcuser", "RDB JDBC user").withOptionalArg().defaultsTo("");
    OptionSpec<String> rdbjdbcpasswd = parser.accepts("rdbjdbcpasswd", "RDB JDBC password").withOptionalArg().defaultsTo("");
    OptionSpec<String> nonOption = parser.nonOptions("unlockUpgrade {<jdbc-uri> | <mongodb-uri>}");
    OptionSpec help = parser.acceptsAll(asList("h", "?", "help"), "show help").forHelp();
    OptionSet options = parser.parse(args);
    List<String> nonOptions = nonOption.values(options);
    if (options.has(help)) {
        parser.printHelpOn(System.out);
        return;
    }
    if (nonOptions.isEmpty()) {
        parser.printHelpOn(System.err);
        return;
    }
    DocumentStore store = null;
    try {
        String uri = nonOptions.get(0);
        if (uri.startsWith(MONGODB_PREFIX)) {
            MongoClientURI clientURI = new MongoClientURI(uri);
            if (clientURI.getDatabase() == null) {
                System.err.println("Database missing in MongoDB URI: " + clientURI.getURI());
            } else {
                MongoConnection mongo = new MongoConnection(clientURI.getURI());
                store = new MongoDocumentStore(mongo.getMongoClient(), mongo.getDBName(), new MongoDocumentNodeStoreBuilder());
            }
        } else if (uri.startsWith("jdbc")) {
            DataSource ds = RDBDataSourceFactory.forJdbcUrl(uri, rdbjdbcuser.value(options), rdbjdbcpasswd.value(options));
            store = new RDBDocumentStore(ds, new DocumentNodeStoreBuilder());
        } else {
            System.err.println("Unrecognized URI: " + uri);
        }
        if (store != null && VERSION.writeTo(store)) {
            System.out.println("Format version set to " + VERSION);
        }
    } catch (DocumentStoreException e) {
        System.err.println(e.getMessage());
    } finally {
        if (store != null) {
            store.dispose();
        }
    }
}
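For orientation, here is a minimal standalone sketch of the same MongoDB path, assuming a reachable MongoDB instance and the Oak document store module on the classpath; the class name, URI, database name and blob cache size are placeholders, not values from the command above.

import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore;
import org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentNodeStoreBuilder;

import static org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentNodeStoreBuilder.newMongoDocumentNodeStoreBuilder;

public class MongoStoreSketch {

    public static void main(String[] args) {
        // Placeholder connection settings; adjust to the target deployment.
        String uri = "mongodb://localhost:27017";
        String db = "oak";
        int blobCacheSizeMB = 16;
        // Configure the builder against MongoDB, mirroring the snippet above.
        MongoDocumentNodeStoreBuilder builder = newMongoDocumentNodeStoreBuilder();
        builder.setMongoDB(uri, db, blobCacheSizeMB);
        DocumentNodeStore ns = builder.build();
        try {
            // use the node store here
        } finally {
            // always dispose the store to release the connection
            ns.dispose();
        }
    }
}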
Use of org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentNodeStoreBuilder in project jackrabbit-oak by apache.
The class DocumentFixtureProvider, method configureDocumentMk.
static DocumentNodeStore configureDocumentMk(Options options, BlobStore blobStore, Whiteboard wb, Closer closer, boolean readOnly) throws IOException {
    CommonOptions commonOpts = options.getOptionBean(CommonOptions.class);
    DocumentNodeStoreBuilder<?> builder;
    if (commonOpts.isMongo()) {
        builder = newMongoDocumentNodeStoreBuilder();
    } else if (commonOpts.isRDB()) {
        builder = newRDBDocumentNodeStoreBuilder();
    } else {
        throw new IllegalStateException("Unknown DocumentStore");
    }
    StatisticsProvider statisticsProvider = checkNotNull(getService(wb, StatisticsProvider.class));
    DocumentBuilderCustomizer customizer = getService(wb, DocumentBuilderCustomizer.class);
    if (customizer != null) {
        customizer.customize(builder);
    }
    if (blobStore != null) {
        builder.setBlobStore(blobStore);
    }
    DocumentNodeStoreOptions docStoreOpts = options.getOptionBean(DocumentNodeStoreOptions.class);
    builder.setClusterId(docStoreOpts.getClusterId());
    builder.setStatisticsProvider(statisticsProvider);
    if (readOnly) {
        builder.setReadOnlyMode();
    }
    int cacheSize = docStoreOpts.getCacheSize();
    if (cacheSize != 0) {
        builder.memoryCacheSize(cacheSize * FileUtils.ONE_MB);
    }
    if (docStoreOpts.disableBranchesSpec()) {
        builder.disableBranches();
    }
    if (docStoreOpts.isCacheDistributionDefined()) {
        builder.memoryCacheDistribution(docStoreOpts.getNodeCachePercentage(), docStoreOpts.getPrevDocCachePercentage(), docStoreOpts.getChildrenCachePercentage(), docStoreOpts.getDiffCachePercentage());
    }
    DocumentNodeStore dns;
    if (commonOpts.isMongo()) {
        MongoClientURI uri = new MongoClientURI(commonOpts.getStoreArg());
        if (uri.getDatabase() == null) {
            System.err.println("Database missing in MongoDB URI: " + uri.getURI());
            System.exit(1);
        }
        MongoConnection mongo = new MongoConnection(uri.getURI());
        wb.register(MongoConnection.class, mongo, emptyMap());
        closer.register(mongo::close);
        ((MongoDocumentNodeStoreBuilder) builder).setMongoDB(mongo.getDB());
        dns = builder.build();
        wb.register(MongoDocumentStore.class, (MongoDocumentStore) builder.getDocumentStore(), emptyMap());
    } else if (commonOpts.isRDB()) {
        RDBStoreOptions rdbOpts = options.getOptionBean(RDBStoreOptions.class);
        DataSource ds = RDBDataSourceFactory.forJdbcUrl(commonOpts.getStoreArg(), rdbOpts.getUser(), rdbOpts.getPassword());
        wb.register(DataSource.class, ds, emptyMap());
        ((RDBDocumentNodeStoreBuilder) builder).setRDBConnection(ds);
        dns = builder.build();
        wb.register(RDBDocumentStore.class, (RDBDocumentStore) builder.getDocumentStore(), emptyMap());
    } else {
        throw new IllegalStateException("Unknown DocumentStore");
    }
    closer.register(dns::dispose);
    return dns;
}
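The core decision in this fixture is choosing between the Mongo and the RDB builder before applying common settings. A condensed sketch of that decision, kept to calls that appear above, might look as follows; DocumentStoreChooser and its parameters are hypothetical, and error handling, whiteboard registration and the Closer wiring are omitted.

import javax.sql.DataSource;

import org.apache.commons.io.FileUtils;
import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore;
import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreBuilder;
import org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentNodeStoreBuilder;
import org.apache.jackrabbit.oak.plugins.document.rdb.RDBDataSourceFactory;
import org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentNodeStoreBuilder;

import static org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentNodeStoreBuilder.newMongoDocumentNodeStoreBuilder;
import static org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentNodeStoreBuilder.newRDBDocumentNodeStoreBuilder;

class DocumentStoreChooser {

    // Hypothetical helper: pick the matching builder for a store URI,
    // apply a shared cache setting and build the DocumentNodeStore.
    static DocumentNodeStore open(String uri, String db, String user, String password, int cacheSizeMB) {
        DocumentNodeStoreBuilder<?> builder;
        if (uri.startsWith("mongodb:")) {
            // Mongo branch: db names the database, user/password are unused.
            MongoDocumentNodeStoreBuilder mongoBuilder = newMongoDocumentNodeStoreBuilder();
            mongoBuilder.setMongoDB(uri, db, 16);
            builder = mongoBuilder;
        } else {
            // RDB branch: uri is a JDBC URL, db is unused.
            DataSource ds = RDBDataSourceFactory.forJdbcUrl(uri, user, password);
            RDBDocumentNodeStoreBuilder rdbBuilder = newRDBDocumentNodeStoreBuilder();
            rdbBuilder.setRDBConnection(ds);
            builder = rdbBuilder;
        }
        builder.memoryCacheSize(cacheSizeMB * FileUtils.ONE_MB);
        return builder.build();
    }
}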
Use of org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentNodeStoreBuilder in project jackrabbit-oak by apache.
The class DocumentNodeStoreService, method registerNodeStore.
private void registerNodeStore() throws IOException {
    DocumentNodeStoreBuilder<?> mkBuilder;
    if (documentStoreType == DocumentStoreType.RDB) {
        RDBDocumentNodeStoreBuilder builder = newRDBDocumentNodeStoreBuilder();
        configureBuilder(builder);
        checkNotNull(dataSource, "DataStore type set [%s] but DataSource reference not initialized", PROP_DS_TYPE);
        if (!customBlobStore) {
            checkNotNull(blobDataSource, "DataStore type set [%s] but BlobDataSource reference not initialized", PROP_DS_TYPE);
            builder.setRDBConnection(dataSource, blobDataSource);
            log.info("Connected to datasources {} {}", dataSource, blobDataSource);
        } else {
            if (blobDataSource != null && blobDataSource != dataSource) {
                log.info("Ignoring blobDataSource {} as custom blob store takes precedence.", blobDataSource);
            }
            builder.setRDBConnection(dataSource);
            log.info("Connected to datasource {}", dataSource);
        }
        mkBuilder = builder;
    } else {
        String uri = config.mongouri();
        String db = config.db();
        boolean soKeepAlive = config.socketKeepAlive();
        MongoClientURI mongoURI = new MongoClientURI(uri);
        String persistentCache = resolvePath(config.persistentCache(), DEFAULT_PERSISTENT_CACHE);
        String journalCache = resolvePath(config.journalCache(), DEFAULT_JOURNAL_CACHE);
        if (log.isInfoEnabled()) {
            // Take care not to log the URI directly as it
            // might contain passwords
log.info("Starting DocumentNodeStore with host={}, db={}, cache size (MB)={}, persistentCache={}, " + "journalCache={}, blobCacheSize (MB)={}, maxReplicationLagInSecs={}", mongoURI.getHosts(), db, config.cache(), persistentCache, journalCache, config.blobCacheSize(), config.maxReplicationLagInSecs());
log.info("Mongo Connection details {}", MongoConnection.toString(mongoURI.getOptions()));
}
MongoDocumentNodeStoreBuilder builder = newMongoDocumentNodeStoreBuilder();
configureBuilder(builder);
builder.setMaxReplicationLag(config.maxReplicationLagInSecs(), TimeUnit.SECONDS);
builder.setSocketKeepAlive(soKeepAlive);
builder.setMongoDB(uri, db, config.blobCacheSize());
mkBuilder = builder;
log.info("Connected to database '{}'", db);
}
if (!customBlobStore) {
defaultBlobStore = mkBuilder.getBlobStore();
log.info("Registering the BlobStore with ServiceRegistry");
blobStoreReg = context.getBundleContext().registerService(BlobStore.class.getName(), defaultBlobStore, null);
}
// Set wrapping blob store after setting the DB
if (isWrappingCustomBlobStore()) {
((BlobStoreWrapper) blobStore).setBlobStore(mkBuilder.getBlobStore());
mkBuilder.setBlobStore(blobStore);
}
// attach GCMonitor
final GCMonitorTracker gcMonitor = new GCMonitorTracker();
gcMonitor.start(whiteboard);
closer.register(asCloseable(gcMonitor));
Logger vgcLogger = LoggerFactory.getLogger(VersionGarbageCollector.class);
GCMonitor loggingGCMonitor;
if (isContinuousRevisionGC()) {
// log less chatty with continuous RevisionGC
loggingGCMonitor = new QuietGCMonitor(vgcLogger);
} else {
loggingGCMonitor = new LoggingGCMonitor(vgcLogger);
}
mkBuilder.setGCMonitor(new DelegatingGCMonitor(newArrayList(gcMonitor, loggingGCMonitor)));
nodeStore = mkBuilder.build();
// ensure a clusterId is initialized
// and expose it as 'oak.clusterid' repository descriptor
GenericDescriptors clusterIdDesc = new GenericDescriptors();
clusterIdDesc.put(ClusterRepositoryInfo.OAK_CLUSTERID_REPOSITORY_DESCRIPTOR_KEY, new SimpleValueFactory().createValue(ClusterRepositoryInfo.getOrCreateId(nodeStore)), true, false);
whiteboard.register(Descriptors.class, clusterIdDesc, Collections.emptyMap());
// If a shared data store register the repo id in the data store
if (SharedDataStoreUtils.isShared(blobStore)) {
String repoId = null;
try {
repoId = ClusterRepositoryInfo.getOrCreateId(nodeStore);
((SharedDataStore) blobStore).addMetadataRecord(new ByteArrayInputStream(new byte[0]), SharedDataStoreUtils.SharedStoreRecordType.REPOSITORY.getNameFromId(repoId));
} catch (Exception e) {
throw new IOException("Could not register a unique repositoryId", e);
}
if (blobStore instanceof BlobTrackingStore) {
BlobTrackingStore trackingStore = (BlobTrackingStore) blobStore;
if (trackingStore.getTracker() != null) {
trackingStore.getTracker().close();
}
((BlobTrackingStore) blobStore).addTracker(new BlobIdTracker(getRepositoryHome(), repoId, config.blobTrackSnapshotIntervalInSecs(), (SharedDataStore) blobStore));
}
}
registerJMXBeans(nodeStore, mkBuilder);
registerLastRevRecoveryJob(nodeStore);
registerJournalGC(nodeStore);
registerVersionGCJob(nodeStore);
registerDocumentStoreMetrics(mkBuilder.getDocumentStore());
if (!isNodeStoreProvider()) {
observerTracker = new ObserverTracker(nodeStore);
observerTracker.start(context.getBundleContext());
}
journalPropertyHandlerFactory.start(whiteboard);
DocumentStore ds = nodeStore.getDocumentStore();
// OAK-2682: time difference detection applied at startup with a default
// max time diff of 2000 millis (2sec)
final long maxDiff = Long.parseLong(System.getProperty("oak.documentMK.maxServerTimeDiffMillis", "2000"));
try {
if (maxDiff >= 0) {
final long timeDiff = ds.determineServerTimeDifferenceMillis();
log.info("registerNodeStore: server time difference: {}ms (max allowed: {}ms)", timeDiff, maxDiff);
if (Math.abs(timeDiff) > maxDiff) {
throw new AssertionError("Server clock seems off (" + timeDiff + "ms) by more than configured amount (" + maxDiff + "ms)");
}
}
} catch (RuntimeException e) {
// no checked exception
// in case of a RuntimeException, just log but continue
log.warn("registerNodeStore: got RuntimeException while trying to determine time difference to server: " + e, e);
}
String[] serviceClasses;
if (isNodeStoreProvider()) {
registerNodeStoreProvider(nodeStore);
serviceClasses = new String[] { DocumentNodeStore.class.getName(), Clusterable.class.getName() };
} else {
serviceClasses = new String[] { NodeStore.class.getName(), DocumentNodeStore.class.getName(), Clusterable.class.getName() };
}
Dictionary<String, Object> props = new Hashtable<String, Object>();
props.put(Constants.SERVICE_PID, DocumentNodeStore.class.getName());
props.put(DESCRIPTION, getMetadata(ds));
// OAK-2844: in order to allow DocumentDiscoveryLiteService to directly
// require a service DocumentNodeStore (instead of having to do an 'instanceof')
// the registration is now done for both NodeStore and DocumentNodeStore here.
nodeStoreReg = context.getBundleContext().registerService(serviceClasses, nodeStore, props);
}
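Outside OSGi, the Mongo-specific builder calls used by the service can be sketched roughly as follows; MongoServiceSketch is a hypothetical class, and the replication lag, keep-alive and blob cache values are illustrative placeholders rather than the values the service reads from its configuration.

import java.util.concurrent.TimeUnit;

import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore;
import org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentNodeStoreBuilder;

import static org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentNodeStoreBuilder.newMongoDocumentNodeStoreBuilder;

class MongoServiceSketch {

    static DocumentNodeStore start(String uri, String db) {
        MongoDocumentNodeStoreBuilder builder = newMongoDocumentNodeStoreBuilder();
        // Same Mongo-specific knobs the service sets, with illustrative values.
        builder.setMaxReplicationLag(6 * 60 * 60, TimeUnit.SECONDS);
        builder.setSocketKeepAlive(true);
        builder.setMongoDB(uri, db, 16);
        return builder.build();
    }
}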
Use of org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentNodeStoreBuilder in project jackrabbit-oak by apache.
The class MongoFactory, method create.
@Override
public NodeStore create(BlobStore blobStore, Closer closer) throws IOException {
    System.setProperty(DocumentNodeStore.SYS_PROP_DISABLE_JOURNAL, "true");
    MongoDocumentNodeStoreBuilder builder = baseConfiguration(newMongoDocumentNodeStoreBuilder(), cacheSize);
    builder.setMongoDB(createClient(closer), getDBName());
    if (blobStore != null) {
        builder.setBlobStore(blobStore);
    }
    if (readOnly) {
        builder.setReadOnlyMode();
    }
    DocumentNodeStore documentNodeStore = builder.build();
    // TODO probably we should disable all observers, see OAK-5651
    documentNodeStore.getBundlingConfigHandler().unregisterObserver();
    closer.register(documentNodeStore::dispose);
    return documentNodeStore;
}
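A reduced sketch of the same read-only pattern with an explicit Guava Closer, assuming the Closer type used in the factory above; ReadOnlyInspection, its arguments and the blob cache size are hypothetical.

import java.io.IOException;

import com.google.common.io.Closer;

import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore;
import org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentNodeStoreBuilder;

import static org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentNodeStoreBuilder.newMongoDocumentNodeStoreBuilder;

class ReadOnlyInspection {

    static void inspect(String uri, String db) throws IOException {
        Closer closer = Closer.create();
        try {
            MongoDocumentNodeStoreBuilder builder = newMongoDocumentNodeStoreBuilder();
            builder.setMongoDB(uri, db, 16);
            // Open the store read-only, as the factory does for read-only fixtures.
            builder.setReadOnlyMode();
            DocumentNodeStore ns = builder.build();
            // Disposal happens when the Closer is closed.
            closer.register(ns::dispose);
            // read-only inspection of the repository goes here
        } finally {
            closer.close();
        }
    }
}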