Search in sources :

Example 16 with UpdateOp

use of org.apache.jackrabbit.oak.plugins.document.UpdateOp in project jackrabbit-oak by apache.

The class MemoryDocumentStore, method create.

/**
 * Creates all given documents atomically: if any of the supplied ids
 * already exists in the collection, nothing is created and {@code false}
 * is returned; otherwise every op is applied and {@code true} is returned.
 * The whole operation runs under the store's write lock.
 */
@Override
public <T extends Document> boolean create(Collection<T> collection, List<UpdateOp> updateOps) {
    Lock writeLock = rwLock.writeLock();
    writeLock.lock();
    try {
        ConcurrentSkipListMap<String, T> docs = getMap(collection);
        // first pass: reject the entire batch if any id is already taken
        if (updateOps.stream().anyMatch(op -> docs.containsKey(op.getId()))) {
            return false;
        }
        // second pass: all ids are free, apply each op as a new document
        for (UpdateOp candidate : updateOps) {
            assertUnconditional(candidate);
            internalCreateOrUpdate(collection, candidate, false);
        }
        return true;
    } finally {
        writeLock.unlock();
    }
}
Also used : UpdateOp(org.apache.jackrabbit.oak.plugins.document.UpdateOp) ReentrantReadWriteLock(java.util.concurrent.locks.ReentrantReadWriteLock) ReadWriteLock(java.util.concurrent.locks.ReadWriteLock) Lock(java.util.concurrent.locks.Lock)

Example 17 with UpdateOp

use of org.apache.jackrabbit.oak.plugins.document.UpdateOp in project jackrabbit-oak by apache.

The class MongoDocumentStore, method create.

/**
 * Creates all given documents in one Mongo insert. For each UpdateOp a raw
 * insert payload (DBObject) and an in-memory target document are built in
 * parallel; on successful insert the targets are put into the nodes cache.
 * Returns {@code false} if the insert raises a MongoException (e.g. when a
 * document with one of the ids already exists).
 */
@Override
public <T extends Document> boolean create(Collection<T> collection, List<UpdateOp> updateOps) {
    log("create", updateOps);
    // in-memory documents mirroring the inserts, used to seed the cache below
    List<T> docs = new ArrayList<T>();
    // one raw insert payload per update op, same index as updateOps
    DBObject[] inserts = new DBObject[updateOps.size()];
    // ids collected for the stats callback in the finally block
    List<String> ids = Lists.newArrayListWithCapacity(updateOps.size());
    for (int i = 0; i < updateOps.size(); i++) {
        inserts[i] = new BasicDBObject();
        UpdateOp update = updateOps.get(i);
        inserts[i].put(Document.ID, update.getId());
        // create must not carry conditions
        UpdateUtils.assertUnconditional(update);
        T target = collection.newDocument(this);
        // apply the op to the in-memory document...
        UpdateUtils.applyChanges(target, update);
        docs.add(target);
        ids.add(updateOps.get(i).getId());
        // ...and translate the same changes into the raw insert payload
        for (Entry<Key, Operation> entry : update.getChanges().entrySet()) {
            Key k = entry.getKey();
            Operation op = entry.getValue();
            switch(op.type) {
                case SET:
                case MAX:
                case INCREMENT:
                    {
                        // on a fresh document these all reduce to a plain set
                        inserts[i].put(k.toString(), op.value);
                        break;
                    }
                case SET_MAP_ENTRY:
                    {
                        Revision r = k.getRevision();
                        if (r == null) {
                            throw new IllegalStateException("SET_MAP_ENTRY must not have null revision");
                        }
                        // sub-document keyed by revision under the property name
                        DBObject value = (DBObject) inserts[i].get(k.getName());
                        if (value == null) {
                            // first entry for this property
                            value = new RevisionEntry(r, op.value);
                            inserts[i].put(k.getName(), value);
                        } else if (value.keySet().size() == 1) {
                            // single existing entry (presumably a RevisionEntry,
                            // which appears to hold only one mapping): copy it
                            // into a mutable BasicDBObject before adding
                            String key = value.keySet().iterator().next();
                            Object val = value.get(key);
                            value = new BasicDBObject(key, val);
                            value.put(r.toString(), op.value);
                            inserts[i].put(k.getName(), value);
                        } else {
                            // already a multi-entry BasicDBObject: mutate in place
                            value.put(r.toString(), op.value);
                        }
                        break;
                    }
                case REMOVE:
                case REMOVE_MAP_ENTRY:
                    // nothing to do for new entries
                    break;
            }
        }
        // seed _modCount to 1 on both payload and in-memory doc if the op
        // did not set it explicitly
        if (!inserts[i].containsField(Document.MOD_COUNT)) {
            inserts[i].put(Document.MOD_COUNT, 1L);
            target.put(Document.MOD_COUNT, 1L);
        }
    }
    DBCollection dbCollection = getDBCollection(collection);
    final Stopwatch watch = startWatch();
    boolean insertSuccess = false;
    try {
        try {
            dbCollection.insert(inserts);
            if (collection == Collection.NODES) {
                // populate caches with the freshly created node documents
                for (T doc : docs) {
                    nodesCache.putIfAbsent((NodeDocument) doc);
                    updateLocalChanges((NodeDocument) doc);
                }
            }
            insertSuccess = true;
            return true;
        } catch (MongoException e) {
            // any insert failure (e.g. duplicate id) is reported as "not created"
            return false;
        }
    } finally {
        // record timing and outcome regardless of success or failure
        stats.doneCreate(watch.elapsed(TimeUnit.NANOSECONDS), collection, ids, insertSuccess);
    }
}
Also used : MongoException(com.mongodb.MongoException) UpdateOp(org.apache.jackrabbit.oak.plugins.document.UpdateOp) ArrayList(java.util.ArrayList) Stopwatch(com.google.common.base.Stopwatch) BulkWriteOperation(com.mongodb.BulkWriteOperation) Operation(org.apache.jackrabbit.oak.plugins.document.UpdateOp.Operation) DBObject(com.mongodb.DBObject) BasicDBObject(com.mongodb.BasicDBObject) BasicDBObject(com.mongodb.BasicDBObject) DBCollection(com.mongodb.DBCollection) Revision(org.apache.jackrabbit.oak.plugins.document.Revision) DBObject(com.mongodb.DBObject) BasicDBObject(com.mongodb.BasicDBObject) Key(org.apache.jackrabbit.oak.plugins.document.UpdateOp.Key)

Example 18 with UpdateOp

use of org.apache.jackrabbit.oak.plugins.document.UpdateOp in project jackrabbit-oak by apache.

The class MongoMissingLastRevSeekerTest, method completeResult.

@Test
public void completeResult() throws Exception {
    final int DOC_COUNT = 200;
    // fill the store with /node-0 .. /node-199, _modified spread 5000 apart
    List<UpdateOp> creates = Lists.newArrayList();
    for (int n = 0; n < DOC_COUNT; n++) {
        UpdateOp create = new UpdateOp(getIdFromPath("/node-" + n), true);
        NodeDocument.setModified(create, new Revision(n * 5000, 0, 1));
        creates.add(create);
    }
    assertTrue(store.create(NODES, creates));
    Set<String> seen = Sets.newHashSet();
    boolean bumped = false;
    MissingLastRevSeeker seeker = builder.createMissingLastRevSeeker();
    for (NodeDocument candidate : seeker.getCandidates(0)) {
        if (!bumped) {
            // as soon as the first candidate arrives, push /node-0's
            // _modified past all other documents
            UpdateOp bump = new UpdateOp(getIdFromPath("/node-0"), false);
            NodeDocument.setModified(bump, new Revision(DOC_COUNT * 5000, 0, 1));
            // even after the update the document matches the query
            assertNotNull(store.findAndUpdate(NODES, bump));
            bumped = true;
        }
        if (candidate.getPath().startsWith("/node-")) {
            seen.add(candidate.getId());
        }
    }
    // the seeker must not have skipped any document
    assertEquals(DOC_COUNT, seen.size());
}
Also used : Revision(org.apache.jackrabbit.oak.plugins.document.Revision) UpdateOp(org.apache.jackrabbit.oak.plugins.document.UpdateOp) MissingLastRevSeeker(org.apache.jackrabbit.oak.plugins.document.MissingLastRevSeeker) NodeDocument(org.apache.jackrabbit.oak.plugins.document.NodeDocument) Test(org.junit.Test)

Example 19 with UpdateOp

use of org.apache.jackrabbit.oak.plugins.document.UpdateOp in project jackrabbit-oak by apache.

The class RDBDocumentStoreJDBCTest, method conditionalRead.

@Test
public void conditionalRead() throws SQLException {
    // create a fresh document with a known _modified value
    String id = this.getClass().getName() + ".conditionalRead";
    super.ds.remove(Collection.NODES, id);
    removeMe.add(id);
    UpdateOp create = new UpdateOp(id, true);
    create.set("_modified", 1L);
    assertTrue(super.ds.create(Collection.NODES, Collections.singletonList(create)));
    NodeDocument doc = super.ds.find(Collection.NODES, id, 0);
    assertNotNull(doc);
    Long modCount = doc.getModCount();
    Long modified = doc.getModified();
    assertNotNull(modCount);
    assertNotNull(modified);
    RDBTableMetaData meta = ((RDBDocumentStore) super.ds).getTable(Collection.NODES);
    Connection connection = super.rdbDataSource.getConnection();
    connection.setReadOnly(true);
    try {
        // modcount mismatch: row data is returned
        RDBRow rMcNotMatch = jdbc.read(connection, meta, id, modCount + 1, modified);
        assertNotNull(rMcNotMatch.getData());
        // no modcount supplied: row data is returned
        RDBRow rMcNotGiven = jdbc.read(connection, meta, id, -1, modified);
        assertNotNull(rMcNotGiven.getData());
        // modcount and modified both match: data is omitted
        RDBRow rMcMatch = jdbc.read(connection, meta, id, modCount, modified);
        assertNull(rMcMatch.getData());
        // modcount matches but modified differs: data is returned
        RDBRow rMcMatchModNonmatch = jdbc.read(connection, meta, id, modCount, modified + 2);
        assertNotNull(rMcMatchModNonmatch.getData());
    } finally {
        connection.close();
    }
}
Also used : RDBTableMetaData(org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentStore.RDBTableMetaData) UpdateOp(org.apache.jackrabbit.oak.plugins.document.UpdateOp) Connection(java.sql.Connection) NodeDocument(org.apache.jackrabbit.oak.plugins.document.NodeDocument) Test(org.junit.Test) AbstractDocumentStoreTest(org.apache.jackrabbit.oak.plugins.document.AbstractDocumentStoreTest)

Example 20 with UpdateOp

use of org.apache.jackrabbit.oak.plugins.document.UpdateOp in project jackrabbit-oak by apache.

The class RDBDocumentStoreJDBCTest, method queryMinLastModifiedTest.

/**
 * Verifies min(_modified) aggregate queries: once over all documents
 * carrying this run's marker (expecting 5, the document without a
 * _modified value being ignored by the aggregate), and once restricted
 * to _deletedOnce documents (expecting 10). Also asserts that the first
 * query produces the expected DEBUG log entry.
 */
@Test
public void queryMinLastModifiedTest() throws SQLException {
    String baseName = this.getClass().getName() + "." + name.getMethodName();
    // random marker value so queries only see this run's documents
    long magicValue = (long) (Math.random() * 100000);
    // four marked documents: (-1) no _modified, deletedOnce;
    // (-2) _modified=10, deletedOnce; (-3) _modified=20, deletedOnce;
    // (-4) _modified=5, NOT deletedOnce
    createTestDocument(baseName + "-1", null, true, magicValue);
    createTestDocument(baseName + "-2", 10L, true, magicValue);
    createTestDocument(baseName + "-3", 20L, true, magicValue);
    createTestDocument(baseName + "-4", 5L, false, magicValue);
    LogCustomizer customLogs = LogCustomizer.forLogger(RDBDocumentStoreJDBC.class.getName()).enable(Level.DEBUG).contains("Aggregate query").contains("min(MODIFIED)").create();
    customLogs.starting();
    Connection con = super.rdbDataSource.getConnection();
    try {
        con.setReadOnly(true);
        RDBTableMetaData tmd = ((RDBDocumentStore) super.ds).getTable(Collection.NODES);
        List<QueryCondition> conditions = new ArrayList<QueryCondition>();
        conditions.add(new QueryCondition(RDBDocumentStore.COLLISIONSMODCOUNT, "=", magicValue));
        // across all four documents the smallest _modified is 5
        long min = jdbc.getLong(con, tmd, "min", "_modified", null, null, RDBDocumentStore.EMPTY_KEY_PATTERN, conditions);
        assertEquals(5, min);
        con.commit();
    } finally {
        con.close();
        assertEquals("should have a DEBUG level log entry", 1, customLogs.getLogs().size());
        customLogs.finished();
    }
    con = super.rdbDataSource.getConnection();
    try {
        con.setReadOnly(true);
        RDBTableMetaData tmd = ((RDBDocumentStore) super.ds).getTable(Collection.NODES);
        List<QueryCondition> conditions = new ArrayList<QueryCondition>();
        conditions.add(new QueryCondition(RDBDocumentStore.COLLISIONSMODCOUNT, "=", magicValue));
        conditions.add(new QueryCondition(NodeDocument.DELETED_ONCE, "=", 1));
        // restricted to deletedOnce documents the smallest _modified is 10
        long min = jdbc.getLong(con, tmd, "min", "_modified", null, null, RDBDocumentStore.EMPTY_KEY_PATTERN, conditions);
        assertEquals(10, min);
        con.commit();
    } finally {
        con.close();
    }
}

/**
 * Creates a NODES document for this test, removing any leftover from a
 * previous run first and registering the id for cleanup.
 *
 * @param id document id
 * @param modifiedSecs value for _modified in seconds, or {@code null} to omit it
 * @param deletedOnce whether to flag the document _deletedOnce
 * @param magicValue marker stored in the collisions modcount column
 */
private void createTestDocument(String id, Long modifiedSecs, boolean deletedOnce, long magicValue) {
    super.ds.remove(Collection.NODES, id);
    UpdateOp op = new UpdateOp(id, true);
    op.set(RDBDocumentStore.COLLISIONSMODCOUNT, magicValue);
    if (modifiedSecs != null) {
        op.set(NodeDocument.MODIFIED_IN_SECS, modifiedSecs.longValue());
    }
    if (deletedOnce) {
        op.set(NodeDocument.DELETED_ONCE, true);
    }
    assertTrue(super.ds.create(Collection.NODES, Collections.singletonList(op)));
    removeMe.add(id);
}
Also used : RDBTableMetaData(org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentStore.RDBTableMetaData) UpdateOp(org.apache.jackrabbit.oak.plugins.document.UpdateOp) LogCustomizer(org.apache.jackrabbit.oak.commons.junit.LogCustomizer) Connection(java.sql.Connection) ArrayList(java.util.ArrayList) QueryCondition(org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentStore.QueryCondition) Test(org.junit.Test) AbstractDocumentStoreTest(org.apache.jackrabbit.oak.plugins.document.AbstractDocumentStoreTest)

Aggregations

UpdateOp (org.apache.jackrabbit.oak.plugins.document.UpdateOp)21 ArrayList (java.util.ArrayList)12 NodeDocument (org.apache.jackrabbit.oak.plugins.document.NodeDocument)8 Test (org.junit.Test)8 Stopwatch (com.google.common.base.Stopwatch)5 Connection (java.sql.Connection)4 HashMap (java.util.HashMap)4 LinkedHashMap (java.util.LinkedHashMap)4 AbstractDocumentStoreTest (org.apache.jackrabbit.oak.plugins.document.AbstractDocumentStoreTest)4 Revision (org.apache.jackrabbit.oak.plugins.document.Revision)4 Lists.newArrayList (com.google.common.collect.Lists.newArrayList)3 HashSet (java.util.HashSet)3 CheckForNull (javax.annotation.CheckForNull)3 DocumentStoreException (org.apache.jackrabbit.oak.plugins.document.DocumentStoreException)3 QueryCondition (org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentStore.QueryCondition)3 Function (com.google.common.base.Function)2 BasicDBObject (com.mongodb.BasicDBObject)2 BulkWriteOperation (com.mongodb.BulkWriteOperation)2 DBCollection (com.mongodb.DBCollection)2 DBObject (com.mongodb.DBObject)2