
Example 1 with CacheChangesTracker

Use of org.apache.jackrabbit.oak.plugins.document.cache.CacheChangesTracker in project jackrabbit-oak by Apache.

Class MongoDocumentStore, method bulkUpdate:

private <T extends Document> Map<UpdateOp, T> bulkUpdate(Collection<T> collection, List<UpdateOp> updateOperations, Map<String, T> oldDocs) {
    Map<String, UpdateOp> bulkOperations = createMap(updateOperations);
    Set<String> lackingDocs = difference(bulkOperations.keySet(), oldDocs.keySet());
    oldDocs.putAll(findDocuments(collection, lackingDocs));
    CacheChangesTracker tracker = null;
    if (collection == Collection.NODES) {
        tracker = nodesCache.registerTracker(bulkOperations.keySet());
    }
    try {
        BulkUpdateResult bulkResult = sendBulkUpdate(collection, bulkOperations.values(), oldDocs);
        if (collection == Collection.NODES) {
            List<NodeDocument> docsToCache = new ArrayList<NodeDocument>();
            // upserted documents did not exist before: build them from the update ops
            for (UpdateOp op : filterKeys(bulkOperations, in(bulkResult.upserts)).values()) {
                NodeDocument doc = Collection.NODES.newDocument(this);
                UpdateUtils.applyChanges(doc, op);
                docsToCache.add(doc);
            }
            // for the remaining successful updates, apply the changes to the old documents
            for (String key : difference(bulkOperations.keySet(), bulkResult.failedUpdates)) {
                T oldDoc = oldDocs.get(key);
                if (oldDoc != null && oldDoc != NodeDocument.NULL) {
                    NodeDocument newDoc = (NodeDocument) applyChanges(collection, oldDoc, bulkOperations.get(key));
                    docsToCache.add(newDoc);
                }
            }
            for (NodeDocument doc : docsToCache) {
                updateLocalChanges(doc);
            }
            nodesCache.putNonConflictingDocs(tracker, docsToCache);
        }
        oldDocs.keySet().removeAll(bulkResult.failedUpdates);
        Map<UpdateOp, T> result = new HashMap<UpdateOp, T>();
        for (Entry<String, UpdateOp> entry : bulkOperations.entrySet()) {
            if (bulkResult.failedUpdates.contains(entry.getKey())) {
                continue;
            } else if (bulkResult.upserts.contains(entry.getKey())) {
                result.put(entry.getValue(), null);
            } else {
                result.put(entry.getValue(), oldDocs.get(entry.getKey()));
            }
        }
        return result;
    } finally {
        if (tracker != null) {
            tracker.close();
        }
    }
}
Also used: UpdateOp (org.apache.jackrabbit.oak.plugins.document.UpdateOp), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), ArrayList (java.util.ArrayList), NodeDocument (org.apache.jackrabbit.oak.plugins.document.NodeDocument), CacheChangesTracker (org.apache.jackrabbit.oak.plugins.document.cache.CacheChangesTracker)
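
The tracker handling in bulkUpdate follows the general write-side pattern for CacheChangesTracker: register a tracker for the affected keys before the write, hand the resulting documents to putNonConflictingDocs afterwards, and always close the tracker in a finally block. The following minimal sketch isolates that pattern; the class and method names (TrackerLifecycleSketch, cacheAfterWrite) are hypothetical, and nodesCache is assumed to be the NodeDocumentCache used in the example.

import java.util.List;
import java.util.Set;

import org.apache.jackrabbit.oak.plugins.document.NodeDocument;
import org.apache.jackrabbit.oak.plugins.document.cache.CacheChangesTracker;
import org.apache.jackrabbit.oak.plugins.document.cache.NodeDocumentCache;

class TrackerLifecycleSketch {

    private final NodeDocumentCache nodesCache;

    TrackerLifecycleSketch(NodeDocumentCache nodesCache) {
        this.nodesCache = nodesCache;
    }

    void cacheAfterWrite(Set<String> keys, List<NodeDocument> updatedDocs) {
        // register the tracker before the write so that concurrent cache
        // changes to any of the keys are recorded while the write runs
        CacheChangesTracker tracker = nodesCache.registerTracker(keys);
        try {
            // ... perform the backend write for the keys here ...

            // documents whose keys saw a concurrent cache change are
            // skipped; the remaining ones are safe to cache
            nodesCache.putNonConflictingDocs(tracker, updatedDocs);
        } finally {
            // always deregister the tracker, mirroring the finally block above
            tracker.close();
        }
    }
}

Closing the tracker is not optional: until close() is called it stays registered with the cache and keeps recording changes, which is why all three examples release it in a finally block.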

Example 2 with CacheChangesTracker

Use of org.apache.jackrabbit.oak.plugins.document.cache.CacheChangesTracker in project jackrabbit-oak by Apache.

Class RDBDocumentStore, method internalQuery:

private <T extends Document> List<T> internalQuery(Collection<T> collection, String fromKey, String toKey, List<String> excludeKeyPatterns, List<QueryCondition> conditions, int limit) {
    Connection connection = null;
    RDBTableMetaData tmd = getTable(collection);
    for (QueryCondition cond : conditions) {
        if (!INDEXEDPROPERTIES.contains(cond.getPropertyName())) {
            String message = "indexed property " + cond.getPropertyName() + " not supported, query was '" + cond.getOperator() + " " + cond.getValue() + "'; supported properties are " + INDEXEDPROPERTIES;
            LOG.info(message);
            throw new DocumentStoreException(message);
        }
    }
    final Stopwatch watch = startWatch();
    int resultSize = 0;
    CacheChangesTracker tracker = null;
    try {
        if (collection == Collection.NODES) {
            tracker = nodesCache.registerTracker(fromKey, toKey);
        }
        long now = System.currentTimeMillis();
        connection = this.ch.getROConnection();
        String from = collection == Collection.NODES && NodeDocument.MIN_ID_VALUE.equals(fromKey) ? null : fromKey;
        String to = collection == Collection.NODES && NodeDocument.MAX_ID_VALUE.equals(toKey) ? null : toKey;
        List<RDBRow> dbresult = db.query(connection, tmd, from, to, excludeKeyPatterns, conditions, limit);
        connection.commit();
        int size = dbresult.size();
        List<T> result = new ArrayList<T>(size);
        for (int i = 0; i < size; i++) {
            // free RDBRow as early as possible
            RDBRow row = dbresult.set(i, null);
            T doc = getIfCached(collection, row.getId(), row.getModcount());
            if (doc == null) {
                // parse DB contents into document if and only if it's not
                // already in the cache
                doc = convertFromDBObject(collection, row);
            } else {
                // we got a document from the cache, thus collection is NODES
                // and a tracker is present
                long lastmodified = modifiedOf(doc);
                if (lastmodified == row.getModified() && lastmodified >= 1) {
                    Lock lock = locks.acquire(row.getId());
                    try {
                        if (!tracker.mightBeenAffected(row.getId())) {
                            // the cache entry was not concurrently changed while
                            // the query ran, so the cached doc can be marked fresh
                            ((NodeDocument) doc).markUpToDate(now);
                        }
                    } finally {
                        lock.unlock();
                    }
                } else {
                    // we need a fresh document instance
                    doc = convertFromDBObject(collection, row);
                }
            }
            result.add(doc);
        }
        if (collection == Collection.NODES) {
            nodesCache.putNonConflictingDocs(tracker, castAsNodeDocumentList(result));
        }
        resultSize = result.size();
        return result;
    } catch (Exception ex) {
        LOG.error("SQL exception on query", ex);
        throw new DocumentStoreException(ex);
    } finally {
        if (tracker != null) {
            tracker.close();
        }
        this.ch.closeConnection(connection);
        stats.doneQuery(watch.elapsed(TimeUnit.NANOSECONDS), collection, fromKey, toKey, !conditions.isEmpty(), resultSize, -1, false);
    }
}
Also used: DocumentStoreException (org.apache.jackrabbit.oak.plugins.document.DocumentStoreException), Connection (java.sql.Connection), Stopwatch (com.google.common.base.Stopwatch), Lists.newArrayList (com.google.common.collect.Lists.newArrayList), ArrayList (java.util.ArrayList), NodeDocument (org.apache.jackrabbit.oak.plugins.document.NodeDocument), UnsupportedEncodingException (java.io.UnsupportedEncodingException), SQLException (java.sql.SQLException), IOException (java.io.IOException), ExecutionException (java.util.concurrent.ExecutionException), Lock (java.util.concurrent.locks.Lock), CacheChangesTracker (org.apache.jackrabbit.oak.plugins.document.cache.CacheChangesTracker)
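
For context, callers normally reach internalQuery through the public DocumentStore.query API rather than directly. A short usage sketch, assuming an initialized DocumentStore named store and the String-based Utils.getKeyLowerLimit/getKeyUpperLimit helpers of the Oak version these snippets come from; the path /content and the limit of 100 are only illustrations.

import java.util.List;

import org.apache.jackrabbit.oak.plugins.document.Collection;
import org.apache.jackrabbit.oak.plugins.document.DocumentStore;
import org.apache.jackrabbit.oak.plugins.document.NodeDocument;
import org.apache.jackrabbit.oak.plugins.document.util.Utils;

class ChildQuerySketch {

    // fetch up to 100 child documents of /content; RDBDocumentStore routes
    // this call to the internalQuery method shown above
    static List<NodeDocument> childrenOfContent(DocumentStore store) {
        String fromKey = Utils.getKeyLowerLimit("/content");
        String toKey = Utils.getKeyUpperLimit("/content");
        return store.query(Collection.NODES, fromKey, toKey, 100);
    }
}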

Example 3 with CacheChangesTracker

Use of org.apache.jackrabbit.oak.plugins.document.cache.CacheChangesTracker in project jackrabbit-oak by Apache.

Class MongoDocumentStore, method queryInternal:

@SuppressWarnings("unchecked")
@Nonnull
<T extends Document> List<T> queryInternal(Collection<T> collection, String fromKey, String toKey, String indexedProperty, long startValue, int limit, long maxQueryTime) {
    log("query", fromKey, toKey, indexedProperty, startValue, limit);
    DBCollection dbCollection = getDBCollection(collection);
    QueryBuilder queryBuilder = QueryBuilder.start(Document.ID);
    queryBuilder.greaterThan(fromKey);
    queryBuilder.lessThan(toKey);
    DBObject hint = new BasicDBObject(NodeDocument.ID, 1);
    if (indexedProperty != null) {
        if (NodeDocument.DELETED_ONCE.equals(indexedProperty)) {
            if (startValue != 1) {
                throw new DocumentStoreException("unsupported value for property " + NodeDocument.DELETED_ONCE);
            }
            queryBuilder.and(indexedProperty);
            queryBuilder.is(true);
        } else {
            queryBuilder.and(indexedProperty);
            queryBuilder.greaterThanEquals(startValue);
            if (NodeDocument.MODIFIED_IN_SECS.equals(indexedProperty) && canUseModifiedTimeIdx(startValue)) {
                hint = new BasicDBObject(NodeDocument.MODIFIED_IN_SECS, -1);
            }
        }
    }
    DBObject query = queryBuilder.get();
    String parentId = Utils.getParentIdFromLowerLimit(fromKey);
    long lockTime = -1;
    final Stopwatch watch = startWatch();
    boolean isSlaveOk = false;
    int resultSize = 0;
    CacheChangesTracker cacheChangesTracker = null;
    // track cache changes for the key range only when reading the
    // children of a single parent
    if (parentId != null && collection == Collection.NODES) {
        cacheChangesTracker = nodesCache.registerTracker(fromKey, toKey);
    }
    try {
        DBCursor cursor = dbCollection.find(query).sort(BY_ID_ASC);
        if (!disableIndexHint && !hasModifiedIdCompoundIndex) {
            cursor.hint(hint);
        }
        if (maxQueryTime > 0) {
            // OAK-2614: set maxTime if maxQueryTimeMS > 0
            cursor.maxTime(maxQueryTime, TimeUnit.MILLISECONDS);
        }
        ReadPreference readPreference = getMongoReadPreference(collection, parentId, null, getDefaultReadPreference(collection));
        if (readPreference.isSlaveOk()) {
            isSlaveOk = true;
            LOG.trace("Routing call to secondary for fetching children from [{}] to [{}]", fromKey, toKey);
        }
        cursor.setReadPreference(readPreference);
        List<T> list;
        try {
            list = new ArrayList<T>();
            for (int i = 0; i < limit && cursor.hasNext(); i++) {
                DBObject o = cursor.next();
                T doc = convertFromDBObject(collection, o);
                list.add(doc);
            }
            resultSize = list.size();
        } finally {
            cursor.close();
        }
        if (cacheChangesTracker != null) {
            nodesCache.putNonConflictingDocs(cacheChangesTracker, (List<NodeDocument>) list);
        }
        return list;
    } finally {
        if (cacheChangesTracker != null) {
            cacheChangesTracker.close();
        }
        stats.doneQuery(watch.elapsed(TimeUnit.NANOSECONDS), collection, fromKey, toKey, indexedProperty != null, resultSize, lockTime, isSlaveOk);
    }
}
Also used: DocumentStoreException (org.apache.jackrabbit.oak.plugins.document.DocumentStoreException), ReadPreference (com.mongodb.ReadPreference), Stopwatch (com.google.common.base.Stopwatch), QueryBuilder (com.mongodb.QueryBuilder), NodeDocument (org.apache.jackrabbit.oak.plugins.document.NodeDocument), DBObject (com.mongodb.DBObject), BasicDBObject (com.mongodb.BasicDBObject), DBCollection (com.mongodb.DBCollection), DBCursor (com.mongodb.DBCursor), CacheChangesTracker (org.apache.jackrabbit.oak.plugins.document.cache.CacheChangesTracker), Nonnull (javax.annotation.Nonnull)
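
The indexedProperty branch above is exercised, for example, by the revision garbage collection support when it scans for documents that were deleted at least once: for _deletedOnce, only startValue == 1 is accepted and the condition is translated into a boolean equality. A hedged sketch of such a call through the public API, assuming an initialized DocumentStore named store; the batch size of 1000 is arbitrary.

import java.util.List;

import org.apache.jackrabbit.oak.plugins.document.Collection;
import org.apache.jackrabbit.oak.plugins.document.DocumentStore;
import org.apache.jackrabbit.oak.plugins.document.NodeDocument;

class DeletedOnceQuerySketch {

    // scan the full id range for documents with _deletedOnce == true;
    // queryInternal rejects any startValue other than 1 for this property
    static List<NodeDocument> deletedOnceCandidates(DocumentStore store) {
        return store.query(Collection.NODES,
                NodeDocument.MIN_ID_VALUE, NodeDocument.MAX_ID_VALUE,
                NodeDocument.DELETED_ONCE, 1, 1000);
    }
}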

Aggregations

NodeDocument (org.apache.jackrabbit.oak.plugins.document.NodeDocument): 3 usages
CacheChangesTracker (org.apache.jackrabbit.oak.plugins.document.cache.CacheChangesTracker): 3 usages
Stopwatch (com.google.common.base.Stopwatch): 2 usages
ArrayList (java.util.ArrayList): 2 usages
DocumentStoreException (org.apache.jackrabbit.oak.plugins.document.DocumentStoreException): 2 usages
Lists.newArrayList (com.google.common.collect.Lists.newArrayList): 1 usage
BasicDBObject (com.mongodb.BasicDBObject): 1 usage
DBCollection (com.mongodb.DBCollection): 1 usage
DBCursor (com.mongodb.DBCursor): 1 usage
DBObject (com.mongodb.DBObject): 1 usage
QueryBuilder (com.mongodb.QueryBuilder): 1 usage
ReadPreference (com.mongodb.ReadPreference): 1 usage
IOException (java.io.IOException): 1 usage
UnsupportedEncodingException (java.io.UnsupportedEncodingException): 1 usage
Connection (java.sql.Connection): 1 usage
SQLException (java.sql.SQLException): 1 usage
HashMap (java.util.HashMap): 1 usage
LinkedHashMap (java.util.LinkedHashMap): 1 usage
ExecutionException (java.util.concurrent.ExecutionException): 1 usage
Lock (java.util.concurrent.locks.Lock): 1 usage