Use of org.apache.jackrabbit.oak.plugins.document.DocumentStoreException in the jackrabbit-oak project (Apache).
Example from class RDBDocumentStore, method internalGetAggregate.
/**
 * Executes an SQL aggregate query (e.g. COUNT, MIN, MAX) over the given
 * collection, restricted to an ID range, exclusion patterns and conditions
 * on indexed properties.
 *
 * @param collection the document collection to query
 * @param aggregate the SQL aggregate function name passed down to the JDBC layer
 * @param field the column the aggregate is applied to
 * @param fromKey lower bound of the document ID range
 * @param toKey upper bound of the document ID range
 * @param excludeKeyPatterns ID LIKE-patterns to exclude from the result (may be empty)
 * @param conditions additional conditions; each must refer to an indexed property
 * @return the aggregate value reported by the database
 * @throws DocumentStoreException if a condition refers to a non-indexed
 *         property, or if the underlying SQL query fails
 */
private <T extends Document> long internalGetAggregate(final Collection<T> collection, final String aggregate, String field, String fromKey, String toKey, final List<String> excludeKeyPatterns, final List<QueryCondition> conditions) {
    final RDBTableMetaData tmd = getTable(collection);
    // validate conditions up front: only indexed properties can be queried
    for (QueryCondition cond : conditions) {
        if (!INDEXEDPROPERTIES.contains(cond.getPropertyName())) {
            // fixed message quoting: operator and value now form one quoted clause
            // (was: "query was '" + op + "'" + value + "'" -> mismatched quotes)
            String message = "indexed property " + cond.getPropertyName() + " not supported, query was '" + cond.getOperator() + " " + cond.getValue() + "'; supported properties are " + INDEXEDPROPERTIES;
            LOG.info(message);
            throw new DocumentStoreException(message);
        }
    }
    // for NODES, the sentinel min/max IDs mean "unbounded"; pass null so the
    // JDBC layer omits the range predicate entirely
    final String from = collection == Collection.NODES && NodeDocument.MIN_ID_VALUE.equals(fromKey) ? null : fromKey;
    final String to = collection == Collection.NODES && NodeDocument.MAX_ID_VALUE.equals(toKey) ? null : toKey;
    Connection connection = null;
    try {
        connection = ch.getROConnection();
        long result = db.getLong(connection, tmd, aggregate, field, from, to, excludeKeyPatterns, conditions);
        // read-only connection still needs a commit to release the transaction
        connection.commit();
        return result;
    } catch (SQLException ex) {
        LOG.error("SQL exception on query", ex);
        throw new DocumentStoreException(ex);
    } finally {
        this.ch.closeConnection(connection);
    }
}
Use of org.apache.jackrabbit.oak.plugins.document.DocumentStoreException in the jackrabbit-oak project (Apache).
Example from class RDBDocumentStore, method internalUpdate.
/**
 * Applies {@code update} to the document, using optimistic concurrency
 * control on MODCOUNT and retrying up to {@code maxRetries} times when a
 * concurrent writer wins the race.
 *
 * @param collection the document collection
 * @param update the update operation to apply
 * @param oldDoc the assumed current state of the document
 * @param checkConditions whether the update's conditions must hold against
 *        the current document state
 * @param maxRetries maximum number of optimistic-locking attempts
 * @return previous version of document or <code>null</code>
 * @throws DocumentStoreException if all retries are exhausted without a
 *         successful conditional update
 */
@CheckForNull
private <T extends Document> T internalUpdate(Collection<T> collection, UpdateOp update, T oldDoc, boolean checkConditions, int maxRetries) {
if (checkConditions && !UpdateUtils.checkConditions(oldDoc, update.getConditions())) {
// conditions not met against the assumed state: signal "no update" to caller
return null;
} else {
maintainUpdateStats(collection, update.getId());
addUpdateCounters(update);
// build the candidate new document from the assumed old state
T doc = createNewDocument(collection, oldDoc, update);
// per-ID lock serializes concurrent updates within this instance
Lock l = locks.acquire(update.getId());
final Stopwatch watch = startWatch();
boolean success = false;
int retries = maxRetries;
try {
while (!success && retries > 0) {
long lastmodcount = modcountOf(oldDoc);
// conditional update: only succeeds if MODCOUNT is still lastmodcount
success = updateDocument(collection, doc, update, lastmodcount);
if (!success) {
retries -= 1;
// refresh our view of the document; start with the cache
oldDoc = readDocumentCached(collection, update.getId(), Integer.MAX_VALUE);
if (oldDoc != null) {
long newmodcount = modcountOf(oldDoc);
if (lastmodcount == newmodcount) {
// cached copy did not change so it probably was
// updated by a different instance, get a fresh one
oldDoc = readDocumentUncached(collection, update.getId(), null);
}
}
if (oldDoc == null) {
// document was there but is now gone
LOG.debug("failed to apply update because document is gone in the meantime: " + update.getId(), new Exception("call stack"));
return null;
}
if (checkConditions && !UpdateUtils.checkConditions(oldDoc, update.getConditions())) {
// conditions no longer hold against the refreshed state
return null;
} else {
// rebuild the candidate document from the refreshed state and retry
addUpdateCounters(update);
doc = createNewDocument(collection, oldDoc, update);
}
} else {
if (collection == Collection.NODES) {
// keep the node cache coherent with the persisted state
nodesCache.replaceCachedDocument((NodeDocument) oldDoc, (NodeDocument) doc);
}
}
}
if (!success) {
throw new DocumentStoreException("failed update of " + doc.getId() + " (race?) after " + maxRetries + " retries");
}
return oldDoc;
} finally {
l.unlock();
// NOTE(review): on a first-try success this evaluates to -1 (retries is
// still maxRetries); confirm whether stats expects zero-based attempts
int numOfAttempts = maxRetries - retries - 1;
stats.doneFindAndModify(watch.elapsed(TimeUnit.NANOSECONDS), collection, update.getId(), false, success, numOfAttempts);
}
}
}
Use of org.apache.jackrabbit.oak.plugins.document.DocumentStoreException in the jackrabbit-oak project (Apache).
Example from class RDBDocumentStore, method createTableFor.
/**
 * Ensures the backing table for the given collection exists, creating it
 * (plus indexes) when absent and upgrading its schema when requested.
 * Existence is detected by probing with a SELECT: a SQLException from the
 * probe is treated as "table missing" and triggers creation.
 *
 * @param con the JDBC connection (committed/rolled back explicitly here)
 * @param col the collection the table backs
 * @param tmd table metadata; column flags are populated from result-set metadata
 * @param tablesCreated receives the table name if this call created it
 * @param tablesPresent receives the table name if it already existed
 * @param diagnostics receives column/index diagnostics for the NODES table
 * @param initialSchema schema level used when creating the table
 * @param upgradeToSchema schema level to upgrade an existing/new table to
 * @throws SQLException if table creation fails (upgrade failures are logged
 *         and tolerated)
 */
private void createTableFor(Connection con, Collection<? extends Document> col, RDBTableMetaData tmd, List<String> tablesCreated, List<String> tablesPresent, StringBuilder diagnostics, int initialSchema, int upgradeToSchema) throws SQLException {
String dbname = this.dbInfo.toString();
if (con.getMetaData().getURL() != null) {
dbname += " (" + con.getMetaData().getURL() + ")";
}
String tableName = tmd.getName();
PreparedStatement checkStatement = null;
ResultSet checkResultSet = null;
Statement creatStatement = null;
Statement upgradeStatement = null;
try {
// probe query: doubles as existence check and metadata source
checkStatement = con.prepareStatement("select * from " + tableName + " where ID = ?");
checkStatement.setString(1, "0:/");
checkResultSet = checkStatement.executeQuery();
// try to discover size of DATA column and binary-ness of ID
ResultSetMetaData met = checkResultSet.getMetaData();
obtainFlagsFromResultSetMeta(met, tmd);
// check that all required columns are present
Set<String> requiredColumns = new HashSet<String>(REQUIREDCOLUMNS);
Set<String> unknownColumns = new HashSet<String>();
boolean hasVersionColumn = false;
for (int i = 1; i <= met.getColumnCount(); i++) {
String cname = met.getColumnName(i).toLowerCase(Locale.ENGLISH);
if (!requiredColumns.remove(cname)) {
if (!OPTIONALCOLUMNS.contains(cname)) {
unknownColumns.add(cname);
}
}
if (cname.equals("version")) {
hasVersionColumn = true;
}
}
if (!requiredColumns.isEmpty()) {
String message = String.format("Table %s: the following required columns are missing: %s", tableName, requiredColumns.toString());
LOG.error(message);
throw new DocumentStoreException(message);
}
if (!unknownColumns.isEmpty()) {
String message = String.format("Table %s: the following columns are unknown and will not be maintained: %s", tableName, unknownColumns.toString());
LOG.info(message);
}
if (col == Collection.NODES) {
// record column and index layout for diagnostics of the NODES table
String tableInfo = RDBJDBCTools.dumpResultSetMeta(met);
diagnostics.append(tableInfo);
String indexInfo = dumpIndexData(con.getMetaData(), met, tableName);
if (!indexInfo.isEmpty()) {
diagnostics.append(" ").append(indexInfo);
}
}
// existing table lacking the VERSION column: attempt in-place upgrade,
// but tolerate failure (older DB permissions may forbid ALTER)
if (!hasVersionColumn && upgradeToSchema >= 1) {
String upStatement1 = this.dbInfo.getTableUpgradeStatement(tableName, 1);
try {
upgradeStatement = con.createStatement();
upgradeStatement.execute(upStatement1);
upgradeStatement.close();
con.commit();
LOG.info("Upgraded " + tableName + " to DB level 1 using '" + upStatement1 + "'");
} catch (SQLException exup) {
con.rollback();
LOG.info("Attempted to upgrade " + tableName + " to DB level 1 using '" + upStatement1 + "', but failed - will continue without.", exup);
}
}
tablesPresent.add(tableName);
} catch (SQLException ex) {
// table does not appear to exist
con.rollback();
PreparedStatement checkStatement2 = null;
ResultSet checkResultSet2 = null;
try {
creatStatement = con.createStatement();
creatStatement.execute(this.dbInfo.getTableCreationStatement(tableName, initialSchema));
creatStatement.close();
for (String ic : this.dbInfo.getIndexCreationStatements(tableName)) {
creatStatement = con.createStatement();
creatStatement.execute(ic);
creatStatement.close();
}
con.commit();
// freshly created table may still need an upgrade to the target schema
if (initialSchema < 1 && upgradeToSchema >= 1) {
String upStatement1 = this.dbInfo.getTableUpgradeStatement(tableName, 1);
try {
upgradeStatement = con.createStatement();
upgradeStatement.execute(upStatement1);
upgradeStatement.close();
con.commit();
LOG.info("Upgraded " + tableName + " to DB level 1 using '" + upStatement1 + "'");
} catch (SQLException exup) {
con.rollback();
LOG.info("Attempted to upgrade " + tableName + " to DB level 1 using '" + upStatement1 + "', but failed - will continue without.", exup);
}
}
tablesCreated.add(tableName);
// re-probe the newly created table to populate column flags
checkStatement2 = con.prepareStatement("select * from " + tableName + " where ID = ?");
checkStatement2.setString(1, "0:/");
checkResultSet2 = checkStatement2.executeQuery();
// try to discover size of DATA column and binary-ness of ID
ResultSetMetaData met = checkResultSet2.getMetaData();
obtainFlagsFromResultSetMeta(met, tmd);
if (col == Collection.NODES) {
String tableInfo = RDBJDBCTools.dumpResultSetMeta(met);
diagnostics.append(tableInfo);
String indexInfo = dumpIndexData(con.getMetaData(), met, tableName);
if (!indexInfo.isEmpty()) {
diagnostics.append(" ").append(indexInfo);
}
}
} catch (SQLException ex2) {
LOG.error("Failed to create table " + tableName + " in " + dbname, ex2);
throw ex2;
} finally {
closeResultSet(checkResultSet2);
closeStatement(checkStatement2);
}
} finally {
closeResultSet(checkResultSet);
closeStatement(checkStatement);
closeStatement(creatStatement);
closeStatement(upgradeStatement);
}
}
Use of org.apache.jackrabbit.oak.plugins.document.DocumentStoreException in the jackrabbit-oak project (Apache).
Example from class RDBDocumentStoreJDBC, method buildWhereClause.
/**
 * Builds the WHERE clause for a range query: an ID range, optional
 * exclusion patterns, and conditions on indexed properties. Placeholders
 * ({@code ?}) are emitted for every value; the caller binds them in the
 * same order. Operators are validated against a whitelist before being
 * interpolated, so no unchecked text reaches the SQL string.
 *
 * @param minId lower ID bound (exclusive), or null for unbounded
 * @param maxId upper ID bound (exclusive), or null for unbounded
 * @param excludeKeyPatterns LIKE-patterns whose matches are excluded; may be null or empty
 * @param conditions conditions on indexed properties
 * @return the assembled WHERE clause (may be empty)
 * @throws DocumentStoreException on an unsupported operator or a
 *         non-indexed property
 */
private static String buildWhereClause(String minId, String maxId, List<String> excludeKeyPatterns, List<QueryCondition> conditions) {
    StringBuilder where = new StringBuilder();
    String sep = "";
    if (minId != null) {
        where.append("ID > ?");
        sep = " and ";
    }
    if (maxId != null) {
        where.append(sep).append("ID < ?");
        sep = " and ";
    }
    if (excludeKeyPatterns != null && !excludeKeyPatterns.isEmpty()) {
        // negated disjunction: not (ID like ? or ID like ? ...)
        where.append(sep).append("not (");
        sep = " and ";
        for (int idx = 0; idx < excludeKeyPatterns.size(); idx++) {
            if (idx > 0) {
                where.append(" or ");
            }
            where.append("ID like ?");
        }
        where.append(")");
    }
    for (QueryCondition condition : conditions) {
        String operator = condition.getOperator();
        if (!SUPPORTED_OPS.contains(operator)) {
            throw new DocumentStoreException("unsupported operator: " + operator);
        }
        String mappedColumn = INDEXED_PROP_MAPPING.get(condition.getPropertyName());
        if (mappedColumn == null) {
            throw new DocumentStoreException("unsupported indexed property: " + condition.getPropertyName());
        }
        where.append(sep).append(mappedColumn).append(" ").append(operator).append(" ?");
        sep = " and ";
    }
    return where.toString();
}
Use of org.apache.jackrabbit.oak.plugins.document.DocumentStoreException in the jackrabbit-oak project (Apache).
Example from class RDBConnectionHandler, method closeConnection.
/**
 * Closes the given {@link Connection}, if non-null.
 * <p>
 * When {@code CHECKCONNECTIONONCLOSE} is set, the connection's read-only
 * flag is toggled twice first; a connection in a bad state fails that
 * round-trip, which is logged and surfaced as a
 * {@link DocumentStoreException} (this escapes the method — only the plain
 * {@code close()} failure is swallowed).
 */
public void closeConnection(Connection c) {
    if (c == null) {
        return;
    }
    try {
        if (CHECKCONNECTIONONCLOSE) {
            try {
                // flip the read-only flag there and back; a dirty
                // connection raises SQLException on this round-trip
                setReadOnly(c, !c.isReadOnly());
                setReadOnly(c, !c.isReadOnly());
            } catch (SQLException dirty) {
                LOG.error("got dirty connection", dirty);
                throw new DocumentStoreException("dirty connection on close", dirty);
            }
        }
        c.close();
    } catch (SQLException closeFailure) {
        LOG.error("exception on connection close (ignored)", closeFailure);
    }
}
Aggregations