Use of org.apache.jackrabbit.oak.plugins.document.DocumentStoreException in project jackrabbit-oak by apache.
The class RDBDocumentStore, method internalCreate:
@CheckForNull
private <T extends Document> boolean internalCreate(Collection<T> collection, List<UpdateOp> updates) {
    final Stopwatch watch = startWatch();
    List<String> ids = new ArrayList<String>(updates.size());
    boolean success = true;
    try {
        // try up to CHUNKSIZE ops in one transaction
        for (List<UpdateOp> chunks : Lists.partition(updates, CHUNKSIZE)) {
            List<T> docs = new ArrayList<T>();
            for (UpdateOp update : chunks) {
                ids.add(update.getId());
                maintainUpdateStats(collection, update.getId());
                UpdateUtils.assertUnconditional(update);
                T doc = collection.newDocument(this);
                addUpdateCounters(update);
                UpdateUtils.applyChanges(doc, update);
                if (!update.getId().equals(doc.getId())) {
                    throw new DocumentStoreException("ID mismatch - UpdateOp: " + update.getId() + ", ID property: " + doc.getId());
                }
                docs.add(doc);
            }
            boolean done = insertDocuments(collection, docs);
            if (done) {
                if (collection == Collection.NODES) {
                    for (T doc : docs) {
                        nodesCache.putIfAbsent((NodeDocument) doc);
                    }
                }
            } else {
                success = false;
            }
        }
        return success;
    } catch (DocumentStoreException ex) {
        return false;
    } finally {
        stats.doneCreate(watch.elapsed(TimeUnit.NANOSECONDS), collection, ids, success);
    }
}
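This is the creation-side use of the exception: a DocumentStoreException raised anywhere in the chunked insert (for example by the ID mismatch check) is swallowed and converted into a plain false return value, while the finally block still records the call in the store statistics. A minimal sketch of that same shape, assuming hypothetical helpers insertChunk and recordCreateStats in place of the Oak internals:

    // Sketch only: insertChunk and recordCreateStats are illustrative placeholders, not Oak API.
    private boolean createAll(List<UpdateOp> updates) {
        long start = System.nanoTime();
        boolean success = true;
        try {
            for (List<UpdateOp> chunk : Lists.partition(updates, 64)) {
                if (!insertChunk(chunk)) {
                    // some rows could not be inserted (e.g. they already existed)
                    success = false;
                }
            }
            return success;
        } catch (DocumentStoreException ex) {
            // creation failed; the caller sees false instead of an exception
            return false;
        } finally {
            recordCreateStats(System.nanoTime() - start, success);
        }
    }

The notable design choice is that the exception never escapes this method; failure is folded into the boolean contract, and the statistics callback runs either way.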
Use of org.apache.jackrabbit.oak.plugins.document.DocumentStoreException in project jackrabbit-oak by apache.
The class RDBDocumentStore, method readDocumentUncached:
@CheckForNull
private <T extends Document> T readDocumentUncached(Collection<T> collection, String id, NodeDocument cachedDoc) {
    Connection connection = null;
    RDBTableMetaData tmd = getTable(collection);
    final Stopwatch watch = startWatch();
    boolean docFound = true;
    try {
        long lastmodcount = -1, lastmodified = -1;
        if (cachedDoc != null) {
            lastmodcount = modcountOf(cachedDoc);
            lastmodified = modifiedOf(cachedDoc);
        }
        connection = this.ch.getROConnection();
        RDBRow row = db.read(connection, tmd, id, lastmodcount, lastmodified);
        connection.commit();
        if (row == null) {
            docFound = false;
            return null;
        } else {
            if (lastmodcount == row.getModcount() && lastmodified == row.getModified() && lastmodified >= 1) {
                // we can re-use the cached document
                cachedDoc.markUpToDate(System.currentTimeMillis());
                return castAsT(cachedDoc);
            } else {
                return convertFromDBObject(collection, row);
            }
        }
    } catch (Exception ex) {
        throw new DocumentStoreException(ex);
    } finally {
        this.ch.closeConnection(connection);
        stats.doneFindUncached(watch.elapsed(TimeUnit.NANOSECONDS), collection, id, docFound, false);
    }
}
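From a caller's perspective this read path produces two distinct signals: a null return means the document does not exist, while any lower-level problem (the SQL read, the connection handling) is wrapped into an unchecked DocumentStoreException. A hedged caller-side sketch, assuming store is a DocumentStore and the fallback behaviour is purely illustrative:

    NodeDocument doc;
    try {
        doc = store.find(Collection.NODES, id);
    } catch (DocumentStoreException ex) {
        // the RDB layer wraps SQLExceptions and similar failures into this unchecked exception
        doc = null; // or retry / rethrow, depending on the caller's contract
    }
    if (doc == null) {
        // document genuinely absent, or the lookup above failed
    }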
Use of org.apache.jackrabbit.oak.plugins.document.DocumentStoreException in project jackrabbit-oak by apache.
The class RDBDocumentStore, method internalQuery:
private <T extends Document> List<T> internalQuery(Collection<T> collection, String fromKey, String toKey, List<String> excludeKeyPatterns, List<QueryCondition> conditions, int limit) {
    Connection connection = null;
    RDBTableMetaData tmd = getTable(collection);
    for (QueryCondition cond : conditions) {
        if (!INDEXEDPROPERTIES.contains(cond.getPropertyName())) {
            String message = "indexed property " + cond.getPropertyName() + " not supported, query was '" + cond.getOperator() + "'" + cond.getValue() + "'; supported properties are " + INDEXEDPROPERTIES;
            LOG.info(message);
            throw new DocumentStoreException(message);
        }
    }
    final Stopwatch watch = startWatch();
    int resultSize = 0;
    CacheChangesTracker tracker = null;
    try {
        if (collection == Collection.NODES) {
            tracker = nodesCache.registerTracker(fromKey, toKey);
        }
        long now = System.currentTimeMillis();
        connection = this.ch.getROConnection();
        String from = collection == Collection.NODES && NodeDocument.MIN_ID_VALUE.equals(fromKey) ? null : fromKey;
        String to = collection == Collection.NODES && NodeDocument.MAX_ID_VALUE.equals(toKey) ? null : toKey;
        List<RDBRow> dbresult = db.query(connection, tmd, from, to, excludeKeyPatterns, conditions, limit);
        connection.commit();
        int size = dbresult.size();
        List<T> result = new ArrayList<T>(size);
        for (int i = 0; i < size; i++) {
            // free RDBRow as early as possible
            RDBRow row = dbresult.set(i, null);
            T doc = getIfCached(collection, row.getId(), row.getModcount());
            if (doc == null) {
                // parse DB contents into document if and only if it's not
                // already in the cache
                doc = convertFromDBObject(collection, row);
            } else {
                // we got a document from the cache, thus collection is NODES
                // and a tracker is present
                long lastmodified = modifiedOf(doc);
                if (lastmodified == row.getModified() && lastmodified >= 1) {
                    Lock lock = locks.acquire(row.getId());
                    try {
                        if (!tracker.mightBeenAffected(row.getId())) {
                            // otherwise mark it as fresh
                            ((NodeDocument) doc).markUpToDate(now);
                        }
                    } finally {
                        lock.unlock();
                    }
                } else {
                    // we need a fresh document instance
                    doc = convertFromDBObject(collection, row);
                }
            }
            result.add(doc);
        }
        if (collection == Collection.NODES) {
            nodesCache.putNonConflictingDocs(tracker, castAsNodeDocumentList(result));
        }
        resultSize = result.size();
        return result;
    } catch (Exception ex) {
        LOG.error("SQL exception on query", ex);
        throw new DocumentStoreException(ex);
    } finally {
        if (tracker != null) {
            tracker.close();
        }
        this.ch.closeConnection(connection);
        stats.doneQuery(watch.elapsed(TimeUnit.NANOSECONDS), collection, fromKey, toKey, !conditions.isEmpty(), resultSize, -1, false);
    }
}
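Two different failure modes end in a DocumentStoreException here: a condition on a non-indexed property is rejected before any SQL is executed, and any database error during the query itself is wrapped and rethrown. To the caller both look identical; a sketch of invoking the public query API under that assumption (store is an existing DocumentStore, and the timestamp value is illustrative):

    try {
        // only indexed properties such as NodeDocument.MODIFIED_IN_SECS ("_modified") are accepted;
        // anything else is rejected up front with a DocumentStoreException
        List<NodeDocument> recentlyModified = store.query(Collection.NODES,
                NodeDocument.MIN_ID_VALUE, NodeDocument.MAX_ID_VALUE,
                NodeDocument.MODIFIED_IN_SECS, someTimestampInSeconds, 100);
    } catch (DocumentStoreException ex) {
        // unsupported query condition or an underlying SQL failure; both surface here
    }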
Use of org.apache.jackrabbit.oak.plugins.document.DocumentStoreException in project jackrabbit-oak by apache.
The class RDBDocumentStore, method determineServerTimeDifferenceMillis:
@Override
public long determineServerTimeDifferenceMillis() {
    Connection connection = null;
    try {
        connection = this.ch.getROConnection();
        long result = this.db.determineServerTimeDifferenceMillis(connection);
        connection.commit();
        return result;
    } catch (SQLException ex) {
        LOG.error("Trying to determine time difference to server", ex);
        throw new DocumentStoreException(ex);
    } finally {
        this.ch.closeConnection(connection);
    }
}
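This is the shortest use of the pattern: the SQLException is logged and then rethrown wrapped in a DocumentStoreException, so a caller interested in clock skew must be prepared for the measurement to fail entirely. A minimal, hypothetical caller that only logs the reported difference (store and LOG are assumed to be an existing DocumentStore and SLF4J logger):

    try {
        long skewMillis = store.determineServerTimeDifferenceMillis();
        if (Math.abs(skewMillis) > TimeUnit.SECONDS.toMillis(2)) {
            LOG.warn("Clock difference between this instance and the RDB backend: {} ms", skewMillis);
        }
    } catch (DocumentStoreException ex) {
        // the database could not be reached; the skew remains unknown
        LOG.warn("Unable to determine server time difference", ex);
    }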
Use of org.apache.jackrabbit.oak.plugins.document.DocumentStoreException in project jackrabbit-oak by apache.
The class RDBExport, method dumpFile:
private static void dumpFile(String filename, String lobdir, Format format, PrintStream out, List<String> fieldNames, List<String> columnNames, RDBDocumentSerializer ser) throws IOException {
    File f = new File(filename);
    File lobDirectory = lobdir == null ? new File(f.getParentFile(), "lobdir") : new File(lobdir);
    int iId = columnNames.indexOf("id");
    int iModified = columnNames.indexOf("modified");
    int iHasBinary = columnNames.indexOf("hasbinary");
    int iDeletedOnce = columnNames.indexOf("deletedonce");
    int iModCount = columnNames.indexOf("modcount");
    int iCModCount = columnNames.indexOf("cmodcount");
    int iData = columnNames.indexOf("data");
    int iBData = columnNames.indexOf("bdata");
    if (iId < 0 || iModified < 0 || iHasBinary < 0 || iDeletedOnce < 0 || iModCount < 0 || iCModCount < 0 || iData < 0 || iBData < 0) {
        throw new IOException("required columns: id, modified, hasbinary, deletedonce, modcount, cmodcount, data, bdata");
    }
    FileInputStream fis = new FileInputStream(f);
    InputStreamReader ir = new InputStreamReader(fis, UTF8);
    BufferedReader br = new BufferedReader(ir);
    if (format == Format.JSONARRAY) {
        out.println("[");
    } else if (format == Format.CSV) {
        out.println(dumpFieldNames(fieldNames));
    }
    boolean needComma = format == Format.JSONARRAY;
    String line = br.readLine();
    while (line != null) {
        List<String> fields = parseDel(line);
        String id = fields.get(iId);
        String smodified = fields.get(iModified);
        String shasbinary = fields.get(iHasBinary);
        String sdeletedonce = fields.get(iDeletedOnce);
        String smodcount = fields.get(iModCount);
        String scmodcount = fields.get(iCModCount);
        String sdata = fields.get(iData);
        String sbdata = fields.get(iBData);
        byte[] bytes = null;
        if (sbdata.length() != 0) {
            String lobfile = sbdata.replace("/", "");
            int lastdot = lobfile.lastIndexOf('.');
            String length = lobfile.substring(lastdot + 1);
            lobfile = lobfile.substring(0, lastdot);
            lastdot = lobfile.lastIndexOf('.');
            String startpos = lobfile.substring(lastdot + 1);
            lobfile = lobfile.substring(0, lastdot);
            int s = Integer.valueOf(startpos);
            int l = Integer.valueOf(length);
            File lf = new File(lobDirectory, lobfile);
            InputStream is = new FileInputStream(lf);
            bytes = new byte[l];
            IOUtils.skip(is, s);
            IOUtils.read(is, bytes, 0, l);
            IOUtils.closeQuietly(is);
        }
        try {
            RDBRow row = new RDBRow(id, "1".equals(shasbinary) ? 1L : 0L, "1".equals(sdeletedonce), smodified.length() == 0 ? 0 : Long.parseLong(smodified), Long.parseLong(smodcount), Long.parseLong(scmodcount), sdata, bytes);
            StringBuilder fulljson = dumpRow(ser, id, row);
            if (format == Format.CSV) {
                out.println(asCSV(fieldNames, fulljson));
            } else {
                fulljson = asJSON(fieldNames, fulljson);
                if (format == Format.JSONARRAY && needComma) {
                    fulljson.append(",");
                }
                out.println(fulljson);
                needComma = true;
            }
        } catch (DocumentStoreException ex) {
            System.err.println("Error: skipping line for ID " + id + " because of " + ex.getMessage());
        }
        line = br.readLine();
    }
    br.close();
    if (format == Format.JSONARRAY) {
        out.println("]");
    }
}
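The export tool is deliberately lenient: a DocumentStoreException thrown while converting a single row (dumpRow is handed the RDBDocumentSerializer) only skips that row, so one corrupt entry does not abort the whole dump. The same skip-and-continue shape, reduced to a hypothetical minimum with parseRow standing in for the real serializer call:

    for (String rawLine : rawLines) {
        try {
            out.println(parseRow(rawLine)); // may throw DocumentStoreException on corrupt data
        } catch (DocumentStoreException ex) {
            System.err.println("skipping corrupt row: " + ex.getMessage());
            // continue with the next line instead of failing the export
        }
    }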