use of org.apache.jackrabbit.oak.plugins.document.locks.StripedNodeDocumentLocks in project jackrabbit-oak by apache.
the class RDBDocumentStore method initialize.
private void initialize(DataSource ds, DocumentMK.Builder builder, RDBOptions options) throws Exception {
    this.stats = builder.getDocumentStoreStatsCollector();
    this.tableMeta.put(Collection.NODES, new RDBTableMetaData(createTableName(options.getTablePrefix(), TABLEMAP.get(Collection.NODES))));
    this.tableMeta.put(Collection.CLUSTER_NODES, new RDBTableMetaData(createTableName(options.getTablePrefix(), TABLEMAP.get(Collection.CLUSTER_NODES))));
    this.tableMeta.put(Collection.JOURNAL, new RDBTableMetaData(createTableName(options.getTablePrefix(), TABLEMAP.get(Collection.JOURNAL))));
    this.tableMeta.put(Collection.SETTINGS, new RDBTableMetaData(createTableName(options.getTablePrefix(), TABLEMAP.get(Collection.SETTINGS))));
    this.ch = new RDBConnectionHandler(ds);
    this.callStack = LOG.isDebugEnabled() ? new Exception("call stack of RDBDocumentStore creation") : null;
    this.locks = new StripedNodeDocumentLocks();
    this.nodesCache = builder.buildNodeDocumentCache(this, locks);
    Connection con = this.ch.getRWConnection();
    int isolation = con.getTransactionIsolation();
    String isolationDiags = RDBJDBCTools.isolationLevelToString(isolation);
    if (isolation != Connection.TRANSACTION_READ_COMMITTED) {
        LOG.info("Detected transaction isolation level " + isolationDiags + " is " + (isolation < Connection.TRANSACTION_READ_COMMITTED ? "lower" : "higher") + " than expected " + RDBJDBCTools.isolationLevelToString(Connection.TRANSACTION_READ_COMMITTED) + " - check datasource configuration");
    }
    DatabaseMetaData md = con.getMetaData();
    String dbDesc = String.format("%s %s (%d.%d)", md.getDatabaseProductName(), md.getDatabaseProductVersion(), md.getDatabaseMajorVersion(), md.getDatabaseMinorVersion()).replaceAll("[\r\n\t]", " ").trim();
    String driverDesc = String.format("%s %s (%d.%d)", md.getDriverName(), md.getDriverVersion(), md.getDriverMajorVersion(), md.getDriverMinorVersion()).replaceAll("[\r\n\t]", " ").trim();
    String dbUrl = md.getURL();
    this.dbInfo = RDBDocumentStoreDB.getValue(md.getDatabaseProductName());
    this.db = new RDBDocumentStoreJDBC(this.dbInfo, this.ser, QUERYHITSLIMIT, QUERYTIMELIMIT);
    this.metadata = ImmutableMap.<String, String>builder().put("type", "rdb").put("db", md.getDatabaseProductName()).put("version", md.getDatabaseProductVersion()).build();
    String versionDiags = dbInfo.checkVersion(md);
    if (!versionDiags.isEmpty()) {
        LOG.error(versionDiags);
    }
    if (!"".equals(dbInfo.getInitializationStatement())) {
        Statement stmt = null;
        try {
            stmt = con.createStatement();
            stmt.execute(dbInfo.getInitializationStatement());
            stmt.close();
            con.commit();
        } finally {
            closeStatement(stmt);
        }
    }
    List<String> tablesCreated = new ArrayList<String>();
    List<String> tablesPresent = new ArrayList<String>();
    StringBuilder tableDiags = new StringBuilder();
    try {
        createTableFor(con, Collection.CLUSTER_NODES, this.tableMeta.get(Collection.CLUSTER_NODES), tablesCreated, tablesPresent, tableDiags, options.getInitialSchema(), options.getUpgradeToSchema());
        createTableFor(con, Collection.NODES, this.tableMeta.get(Collection.NODES), tablesCreated, tablesPresent, tableDiags, options.getInitialSchema(), options.getUpgradeToSchema());
        createTableFor(con, Collection.SETTINGS, this.tableMeta.get(Collection.SETTINGS), tablesCreated, tablesPresent, tableDiags, options.getInitialSchema(), options.getUpgradeToSchema());
        createTableFor(con, Collection.JOURNAL, this.tableMeta.get(Collection.JOURNAL), tablesCreated, tablesPresent, tableDiags, options.getInitialSchema(), options.getUpgradeToSchema());
    } finally {
        con.commit();
        con.close();
    }
    if (options.isDropTablesOnClose()) {
        tablesToBeDropped.addAll(tablesCreated);
    }
    if (tableDiags.length() != 0) {
        tableDiags.insert(0, ", ");
    }
    String diag = dbInfo.getAdditionalDiagnostics(this.ch, this.tableMeta.get(Collection.NODES).getName());
    LOG.info("RDBDocumentStore (" + OakVersion.getVersion() + ") instantiated for database " + dbDesc + ", using driver: " + driverDesc + ", connecting to: " + dbUrl + (diag.isEmpty() ? "" : (", properties: " + diag)) + ", transaction isolation level: " + isolationDiags + tableDiags);
    if (!tablesPresent.isEmpty()) {
        LOG.info("Tables present upon startup: " + tablesPresent);
    }
    if (!tablesCreated.isEmpty()) {
        LOG.info("Tables created upon startup: " + tablesCreated + (options.isDropTablesOnClose() ? " (will be dropped on exit)" : ""));
    }
}
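In both initialize variants, the StripedNodeDocumentLocks instance is handed to builder.buildNodeDocumentCache(this, locks), so the node document cache and the store share the same per-key locking. As a rough illustration only: the acquire/unlock pattern below assumes the NodeDocumentLocks interface exposes Lock acquire(String key), which is not shown in the snippet above, so verify it against the Oak version in use.
// Hypothetical sketch, not taken from the snippet above: guarding a cache update with the striped locks.
// Assumes NodeDocumentLocks.acquire(String) returns a java.util.concurrent.locks.Lock.
NodeDocumentLocks locks = new StripedNodeDocumentLocks();
Lock lock = locks.acquire(doc.getId());
try {
    // update the cached entry for this document id while holding its stripe lock
    nodesCache.put(doc);
} finally {
    lock.unlock();
}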
use of org.apache.jackrabbit.oak.plugins.document.locks.StripedNodeDocumentLocks in project jackrabbit-oak by apache.
the class CacheChangesTrackerTest method createCache.
private NodeDocumentCache createCache() {
    Cache<CacheValue, NodeDocument> nodeDocumentsCache = new CacheLIRS<CacheValue, NodeDocument>(10);
    Cache<StringValue, NodeDocument> prevDocumentsCache = new CacheLIRS<StringValue, NodeDocument>(10);
    CacheStats nodeDocumentsCacheStats = Mockito.mock(CacheStats.class);
    CacheStats prevDocumentsCacheStats = Mockito.mock(CacheStats.class);
    NodeDocumentLocks locks = new StripedNodeDocumentLocks();
    return new NodeDocumentCache(nodeDocumentsCache, nodeDocumentsCacheStats, prevDocumentsCache, prevDocumentsCacheStats, locks);
}
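Since this helper backs CacheChangesTrackerTest, a typical use of the returned cache is to register a change tracker and then check whether a given key might have been affected by later cache updates. The method names below (registerTracker, mightBeenAffected) are assumptions about the Oak NodeDocumentCache/CacheChangesTracker API, not part of the snippet above, and should be checked against the version under test.
// Hypothetical usage sketch for the createCache() helper above; method names are assumed.
NodeDocumentCache cache = createCache();
CacheChangesTracker tracker = cache.registerTracker("1:/a", "1:/z");
try {
    cache.put(doc);
    boolean affected = tracker.mightBeenAffected(doc.getId());
} finally {
    tracker.close();
}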
use of org.apache.jackrabbit.oak.plugins.document.locks.StripedNodeDocumentLocks in project jackrabbit-oak by apache.
the class RDBDocumentStore method initialize.
private void initialize(DataSource ds, DocumentNodeStoreBuilder<?> builder, RDBOptions options) throws Exception {
    this.stats = builder.getDocumentStoreStatsCollector();
    this.callStack = LOG.isDebugEnabled() ? new Exception("call stack of RDBDocumentStore creation") : null;
    this.ch = new RDBConnectionHandler(ds);
    Connection con = this.ch.getRWConnection();
    String catalog = con.getCatalog();
    DatabaseMetaData md = con.getMetaData();
    if (null == catalog) {
        // Oracle
        catalog = md.getUserName();
    }
    this.tableMeta.put(Collection.NODES, new RDBTableMetaData(catalog, createTableName(options.getTablePrefix(), TABLEMAP.get(Collection.NODES))));
    this.tableMeta.put(Collection.CLUSTER_NODES, new RDBTableMetaData(catalog, createTableName(options.getTablePrefix(), TABLEMAP.get(Collection.CLUSTER_NODES))));
    this.tableMeta.put(Collection.JOURNAL, new RDBTableMetaData(catalog, createTableName(options.getTablePrefix(), TABLEMAP.get(Collection.JOURNAL))));
    this.tableMeta.put(Collection.SETTINGS, new RDBTableMetaData(catalog, createTableName(options.getTablePrefix(), TABLEMAP.get(Collection.SETTINGS))));
    this.locks = new StripedNodeDocumentLocks();
    this.nodesCache = builder.buildNodeDocumentCache(this, locks);
    int isolation = con.getTransactionIsolation();
    String isolationDiags = RDBJDBCTools.isolationLevelToString(isolation);
    if (isolation != Connection.TRANSACTION_READ_COMMITTED) {
        LOG.info("Detected transaction isolation level " + isolationDiags + " is " + (isolation < Connection.TRANSACTION_READ_COMMITTED ? "lower" : "higher") + " than expected " + RDBJDBCTools.isolationLevelToString(Connection.TRANSACTION_READ_COMMITTED) + " - check datasource configuration");
    }
    String dbDesc = String.format("%s %s (%d.%d)", md.getDatabaseProductName(), md.getDatabaseProductVersion(), md.getDatabaseMajorVersion(), md.getDatabaseMinorVersion()).replaceAll("[\r\n\t]", " ").trim();
    String driverDesc = String.format("%s %s (%d.%d)", md.getDriverName(), md.getDriverVersion(), md.getDriverMajorVersion(), md.getDriverMinorVersion()).replaceAll("[\r\n\t]", " ").trim();
    String dbUrl = md.getURL();
    this.dbInfo = RDBDocumentStoreDB.getValue(md.getDatabaseProductName());
    this.db = new RDBDocumentStoreJDBC(this.dbInfo, this.ser, QUERYHITSLIMIT, QUERYTIMELIMIT);
    this.metadata = ImmutableMap.<String, String>builder().put("type", "rdb").put("db", md.getDatabaseProductName()).put("version", md.getDatabaseProductVersion()).put("driver", md.getDriverName()).put("driverVersion", md.getDriverVersion()).build();
    String versionDiags = dbInfo.checkVersion(md);
    if (!versionDiags.isEmpty()) {
        LOG.error(versionDiags);
    }
    if (!"".equals(dbInfo.getInitializationStatement())) {
        Statement stmt = null;
        try {
            stmt = con.createStatement();
            stmt.execute(dbInfo.getInitializationStatement());
            stmt.close();
            con.commit();
        } finally {
            closeStatement(stmt);
        }
    }
    List<String> tablesCreated = new ArrayList<String>();
    List<String> tablesPresent = new ArrayList<String>();
    try {
        createTableFor(con, Collection.CLUSTER_NODES, this.tableMeta.get(Collection.CLUSTER_NODES), tablesCreated, tablesPresent, options.getInitialSchema(), options.getUpgradeToSchema());
        createTableFor(con, Collection.NODES, this.tableMeta.get(Collection.NODES), tablesCreated, tablesPresent, options.getInitialSchema(), options.getUpgradeToSchema());
        createTableFor(con, Collection.SETTINGS, this.tableMeta.get(Collection.SETTINGS), tablesCreated, tablesPresent, options.getInitialSchema(), options.getUpgradeToSchema());
        createTableFor(con, Collection.JOURNAL, this.tableMeta.get(Collection.JOURNAL), tablesCreated, tablesPresent, options.getInitialSchema(), options.getUpgradeToSchema());
    } finally {
        con.commit();
        con.close();
    }
    StringBuilder tableDiags = new StringBuilder();
    RDBTableMetaData nodesMeta = this.tableMeta.get(Collection.NODES);
    tableDiags.append(nodesMeta.getSchemaInfo());
    if (!nodesMeta.getIndexInfo().isEmpty()) {
        tableDiags.append(" /* ").append(nodesMeta.getIndexInfo()).append(" */");
    }
    if (options.isDropTablesOnClose()) {
        tablesToBeDropped.addAll(tablesCreated);
    }
    if (tableDiags.length() != 0) {
        tableDiags.insert(0, ", ");
    }
    String diag = dbInfo.getAdditionalDiagnostics(this.ch, this.tableMeta.get(Collection.NODES).getName());
    LOG.info("RDBDocumentStore (" + getModuleVersion() + ") instantiated for database " + dbDesc + ", using driver: " + driverDesc + ", connecting to: " + dbUrl + (diag.isEmpty() ? "" : (", properties: " + diag)) + ", transaction isolation level: " + isolationDiags + tableDiags);
    if (!tablesPresent.isEmpty()) {
        LOG.info("Tables present upon startup: " + tablesPresent);
    }
    if (!tablesCreated.isEmpty()) {
        LOG.info("Tables created upon startup: " + tablesCreated + (options.isDropTablesOnClose() ? " (will be dropped on exit)" : ""));
    }
}
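For context, this second initialize variant runs when an RDBDocumentStore is created through a DocumentNodeStoreBuilder rather than the older DocumentMK.Builder. A minimal setup sketch follows; the factory and setter names below are assumptions about the Oak RDB API and are not part of the snippet above, so verify them against the version in use.
// Hypothetical setup sketch; class and method names outside the snippets above are assumptions.
DataSource ds = RDBDataSourceFactory.forJdbcUrl("jdbc:h2:mem:oak", "sa", "");
RDBOptions options = new RDBOptions().tablePrefix("T").dropTablesOnClose(true);
DocumentNodeStore store = RDBDocumentNodeStoreBuilder.newRDBDocumentNodeStoreBuilder()
        .setRDBConnection(ds, options)
        .build();
// ... use the store, then shut it down ...
store.dispose();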