Example 11 with WriteResult

Use of com.mongodb.WriteResult in project graylog2-server by Graylog2.

In class V20161125161400_AlertReceiversMigrationTest, method doMigrateSingleQualifyingStream:

@Test
public void doMigrateSingleQualifyingStream() throws Exception {
    final String matchingStreamId = new ObjectId().toHexString();
    final Stream stream1 = mock(Stream.class);
    when(stream1.getAlertReceivers()).thenReturn(Collections.emptyMap());
    final Stream stream2 = mock(Stream.class);
    when(stream2.getAlertReceivers()).thenReturn(ImmutableMap.of("users", ImmutableList.of("foouser"), "emails", ImmutableList.of("foo@bar.com")));
    when(stream2.getId()).thenReturn(matchingStreamId);
    when(this.streamService.loadAll()).thenReturn(ImmutableList.of(stream1, stream2));
    final AlertCondition alertCondition = mock(AlertCondition.class);
    when(this.streamService.getAlertConditions(eq(stream2))).thenReturn(ImmutableList.of(alertCondition));
    final String alarmCallbackId = new ObjectId().toHexString();
    final AlarmCallbackConfiguration alarmCallback = AlarmCallbackConfigurationImpl.create(alarmCallbackId, matchingStreamId, EmailAlarmCallback.class.getCanonicalName(), "Email Alert Notification", new HashMap<>(), new Date(), "admin");
    when(alarmCallbackConfigurationService.getForStream(eq(stream2))).thenReturn(ImmutableList.of(alarmCallback));
    when(alarmCallbackConfigurationService.save(eq(alarmCallback))).thenReturn(alarmCallbackId);
    when(this.dbCollection.update(any(BasicDBObject.class), any(BasicDBObject.class))).thenReturn(new WriteResult(1, true, matchingStreamId));
    this.alertReceiversMigration.upgrade();
    final ArgumentCaptor<AlarmCallbackConfiguration> configurationArgumentCaptor = ArgumentCaptor.forClass(AlarmCallbackConfiguration.class);
    verify(this.alarmCallbackConfigurationService, times(1)).save(configurationArgumentCaptor.capture());
    final AlarmCallbackConfiguration updatedConfiguration = configurationArgumentCaptor.getValue();
    assertThat(updatedConfiguration).isEqualTo(alarmCallback);
    assertThat(updatedConfiguration.getType()).isEqualTo(EmailAlarmCallback.class.getCanonicalName());
    assertThat(((List) updatedConfiguration.getConfiguration().get(EmailAlarmCallback.CK_EMAIL_RECEIVERS)).size()).isEqualTo(1);
    assertThat(((List) updatedConfiguration.getConfiguration().get(EmailAlarmCallback.CK_EMAIL_RECEIVERS)).get(0)).isEqualTo("foo@bar.com");
    assertThat(((List) updatedConfiguration.getConfiguration().get(EmailAlarmCallback.CK_USER_RECEIVERS)).size()).isEqualTo(1);
    assertThat(((List) updatedConfiguration.getConfiguration().get(EmailAlarmCallback.CK_USER_RECEIVERS)).get(0)).isEqualTo("foouser");
    final ArgumentCaptor<BasicDBObject> queryCaptor = ArgumentCaptor.forClass(BasicDBObject.class);
    final ArgumentCaptor<BasicDBObject> updateCaptor = ArgumentCaptor.forClass(BasicDBObject.class);
    verify(this.dbCollection, times(1)).update(queryCaptor.capture(), updateCaptor.capture());
    assertThat(queryCaptor.getValue().toJson()).isEqualTo("{ \"_id\" : { \"$oid\" : \"" + matchingStreamId + "\" } }");
    assertThat(updateCaptor.getValue().toJson()).isEqualTo("{ \"$unset\" : { \"" + StreamImpl.FIELD_ALERT_RECEIVERS + "\" : \"\" } }");
    verifyMigrationCompletedWasPosted(ImmutableMap.of(matchingStreamId, Optional.of(alarmCallbackId)));
}
Also used : ObjectId(org.bson.types.ObjectId) Date(java.util.Date) BasicDBObject(com.mongodb.BasicDBObject) WriteResult(com.mongodb.WriteResult) AlertCondition(org.graylog2.plugin.alarms.AlertCondition) Stream(org.graylog2.plugin.streams.Stream) ImmutableList(com.google.common.collect.ImmutableList) List(java.util.List) EmailAlarmCallback(org.graylog2.alarmcallbacks.EmailAlarmCallback) AlarmCallbackConfiguration(org.graylog2.alarmcallbacks.AlarmCallbackConfiguration) Test(org.junit.Test)
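
The stubbed update above works because the legacy 3.x Java driver exposes a public WriteResult(int, boolean, Object) constructor. As a minimal standalone sketch of the same idea, assuming Mockito and AssertJ as in the test above (the class name and field values below are made up for illustration):

import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.WriteResult;
import org.junit.Test;

public class WriteResultStubSketchTest {

    @Test
    public void stubbedUpdateReturnsCannedWriteResult() {
        // Stub DBCollection.update(...) with a canned, acknowledged WriteResult,
        // then read its fields back the way production code would.
        final DBCollection collection = mock(DBCollection.class);
        final WriteResult stubbed = new WriteResult(1 /* n */, true /* updateOfExisting */, null /* upsertedId */);
        when(collection.update(any(BasicDBObject.class), any(BasicDBObject.class))).thenReturn(stubbed);

        final WriteResult result = collection.update(
                new BasicDBObject("_id", "someId"),
                new BasicDBObject("$set", new BasicDBObject("field", "value")));

        assertThat(result.wasAcknowledged()).isTrue();    // counts are only defined for acknowledged writes
        assertThat(result.getN()).isEqualTo(1);           // number of documents affected
        assertThat(result.isUpdateOfExisting()).isTrue(); // an existing document was modified
        assertThat(result.getUpsertedId()).isNull();      // nothing was upserted in this stub
    }
}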

Example 12 with WriteResult

Use of com.mongodb.WriteResult in project jackrabbit-oak by apache.

In class MongoCacheConsistencyTest, method getFixture:

@Override
public DocumentStoreFixture getFixture() throws Exception {
    Fongo fongo = new OakFongo("fongo") {

        private String suppressedEx = null;

        @Override
        protected void afterInsert(WriteResult result) {
            maybeThrow();
        }

        @Override
        protected void afterFindAndModify(DBObject result) {
            maybeThrow();
        }

        @Override
        protected void afterUpdate(WriteResult result) {
            maybeThrow();
        }

        @Override
        protected void afterRemove(WriteResult result) {
            maybeThrow();
        }

        @Override
        protected void beforeExecuteBulkWriteOperation(boolean ordered, Boolean bypassDocumentValidation, List<?> writeRequests, WriteConcern aWriteConcern) {
            // suppress potentially set exception message because
            // fongo bulk writes call other update methods
            suppressedEx = exceptionMsg;
            exceptionMsg = null;
        }

        @Override
        protected void afterExecuteBulkWriteOperation(BulkWriteResult result) {
            exceptionMsg = suppressedEx;
            suppressedEx = null;
            maybeThrow();
        }

        private void maybeThrow() {
            if (exceptionMsg != null) {
                throw new MongoException(exceptionMsg);
            }
        }
    };
    DocumentMK.Builder builder = provider.newBuilder().setAsyncDelay(0);
    final DocumentStore store = new MongoDocumentStore(fongo.getDB("oak"), builder);
    return new DocumentStoreFixture() {

        @Override
        public String getName() {
            return "MongoDB";
        }

        @Override
        public DocumentStore createDocumentStore(int clusterId) {
            return store;
        }
    };
}
Also used : Fongo(com.github.fakemongo.Fongo) OakFongo(com.mongodb.OakFongo) MongoException(com.mongodb.MongoException) DocumentMK(org.apache.jackrabbit.oak.plugins.document.DocumentMK) OakFongo(com.mongodb.OakFongo) DBObject(com.mongodb.DBObject) BulkWriteResult(com.mongodb.BulkWriteResult) DocumentStore(org.apache.jackrabbit.oak.plugins.document.DocumentStore) WriteResult(com.mongodb.WriteResult) BulkWriteResult(com.mongodb.BulkWriteResult) DocumentStoreFixture(org.apache.jackrabbit.oak.plugins.document.DocumentStoreFixture) WriteConcern(com.mongodb.WriteConcern) List(java.util.List)

Example 13 with WriteResult

Use of com.mongodb.WriteResult in project jackrabbit-oak by apache.

In class MongoBlobStore, method countDeleteChunks:

@Override
public long countDeleteChunks(List<String> chunkIds, long maxLastModifiedTime) throws Exception {
    DBCollection collection = getBlobCollection();
    QueryBuilder queryBuilder = new QueryBuilder();
    if (chunkIds != null) {
        queryBuilder = queryBuilder.and(MongoBlob.KEY_ID).in(chunkIds.toArray(new String[0]));
        if (maxLastModifiedTime > 0) {
            queryBuilder = queryBuilder.and(MongoBlob.KEY_LAST_MOD).lessThan(maxLastModifiedTime);
        }
    }
    WriteResult result = collection.remove(queryBuilder.get());
    return result.getN();
}
Also used : DBCollection(com.mongodb.DBCollection) WriteResult(com.mongodb.WriteResult) QueryBuilder(com.mongodb.QueryBuilder)
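
Since countDeleteChunks simply reports WriteResult.getN() after the remove, the count is only meaningful for an acknowledged write; the legacy driver will not return getN() for an unacknowledged result. A small illustrative helper along the same lines (the collection wiring and the "_id" key are assumptions for the sketch, not Oak code):

import com.mongodb.DBCollection;
import com.mongodb.DBObject;
import com.mongodb.QueryBuilder;
import com.mongodb.WriteResult;
import java.util.List;

public final class ChunkRemover {

    private ChunkRemover() {
    }

    /**
     * Removes the documents whose _id is in the given list and returns how many
     * the server reports as deleted. Returns -1 if the write was unacknowledged,
     * because WriteResult.getN() is only defined for acknowledged writes.
     */
    public static long remove(DBCollection collection, List<String> ids) {
        DBObject query = QueryBuilder.start("_id").in(ids.toArray(new String[0])).get();
        WriteResult result = collection.remove(query);
        return result.wasAcknowledged() ? result.getN() : -1L;
    }
}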

Example 14 with WriteResult

Use of com.mongodb.WriteResult in project jackrabbit-oak by apache.

In class MongoDocumentStore, method findAndModify:

@SuppressWarnings("unchecked")
@CheckForNull
private <T extends Document> T findAndModify(Collection<T> collection, UpdateOp updateOp, boolean upsert, boolean checkConditions) {
    DBCollection dbCollection = getDBCollection(collection);
    // make sure we don't modify the original updateOp
    updateOp = updateOp.copy();
    DBObject update = createUpdate(updateOp, false);
    Lock lock = null;
    if (collection == Collection.NODES) {
        lock = nodeLocks.acquire(updateOp.getId());
    }
    final Stopwatch watch = startWatch();
    boolean newEntry = false;
    try {
        // get modCount of cached document
        Long modCount = null;
        T cachedDoc = null;
        if (collection == Collection.NODES) {
            cachedDoc = (T) nodesCache.getIfPresent(updateOp.getId());
            if (cachedDoc != null) {
                modCount = cachedDoc.getModCount();
            }
        }
        // if we have a matching modCount
        if (modCount != null) {
            QueryBuilder query = createQueryForUpdate(updateOp.getId(), updateOp.getConditions());
            query.and(Document.MOD_COUNT).is(modCount);
            WriteResult result = dbCollection.update(query.get(), update);
            if (result.getN() > 0) {
                // success, update cached document
                if (collection == Collection.NODES) {
                    NodeDocument newDoc = (NodeDocument) applyChanges(collection, cachedDoc, updateOp);
                    nodesCache.put(newDoc);
                }
                // return previously cached document
                return cachedDoc;
            }
        }
        // conditional update failed or not possible
        // perform operation and get complete document
        QueryBuilder query = createQueryForUpdate(updateOp.getId(), updateOp.getConditions());
        DBObject oldNode = dbCollection.findAndModify(query.get(), null /* fields */, null /* sort */,
                false /* remove */, update, false /* returnNew */, upsert);
        if (oldNode == null) {
            newEntry = true;
        }
        if (checkConditions && oldNode == null) {
            return null;
        }
        T oldDoc = convertFromDBObject(collection, oldNode);
        if (oldDoc != null) {
            if (collection == Collection.NODES) {
                NodeDocument newDoc = (NodeDocument) applyChanges(collection, oldDoc, updateOp);
                nodesCache.put(newDoc);
                updateLocalChanges(newDoc);
            }
            oldDoc.seal();
        } else if (upsert) {
            if (collection == Collection.NODES) {
                NodeDocument doc = (NodeDocument) collection.newDocument(this);
                UpdateUtils.applyChanges(doc, updateOp);
                nodesCache.putIfAbsent(doc);
                updateLocalChanges(doc);
            }
        } else {
            // updateOp without conditions and not an upsert;
            // this means the document does not exist
        }
        return oldDoc;
    } catch (Exception e) {
        throw handleException(e, collection, updateOp.getId());
    } finally {
        if (lock != null) {
            lock.unlock();
        }
        stats.doneFindAndModify(watch.elapsed(TimeUnit.NANOSECONDS), collection, updateOp.getId(), newEntry, true, 0);
    }
}
Also used : DBCollection(com.mongodb.DBCollection) WriteResult(com.mongodb.WriteResult) BulkWriteResult(com.mongodb.BulkWriteResult) Stopwatch(com.google.common.base.Stopwatch) QueryBuilder(com.mongodb.QueryBuilder) NodeDocument(org.apache.jackrabbit.oak.plugins.document.NodeDocument) DBObject(com.mongodb.DBObject) BasicDBObject(com.mongodb.BasicDBObject) MongoException(com.mongodb.MongoException) DocumentStoreException(org.apache.jackrabbit.oak.plugins.document.DocumentStoreException) UncheckedExecutionException(com.google.common.util.concurrent.UncheckedExecutionException) BulkWriteException(com.mongodb.BulkWriteException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) Lock(java.util.concurrent.locks.Lock) CheckForNull(javax.annotation.CheckForNull)
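
The WriteResult here backs an optimistic concurrency check: the update is sent with a query that also matches the cached modCount, and getN() == 0 means another cluster node modified the document first, so the code falls back to findAndModify. A condensed sketch of that pattern, using the Oak-style field names "_id" and "_modCount" but an otherwise made-up helper:

import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.DBObject;
import com.mongodb.QueryBuilder;
import com.mongodb.WriteResult;

public final class OptimisticUpdate {

    private OptimisticUpdate() {
    }

    /**
     * Applies the given $set changes only if the document still carries the
     * modCount we read earlier. Returns true if we won the race, false if
     * another writer already bumped _modCount (WriteResult.getN() == 0).
     */
    public static boolean updateIfUnchanged(DBCollection nodes, String id,
                                            long expectedModCount, DBObject changes) {
        DBObject query = QueryBuilder.start("_id").is(id)
                .and("_modCount").is(expectedModCount).get();
        DBObject update = new BasicDBObject("$set", changes)
                .append("$inc", new BasicDBObject("_modCount", 1L));
        WriteResult result = nodes.update(query, update);
        return result.getN() > 0;
    }
}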

Example 15 with WriteResult

Use of com.mongodb.WriteResult in project jackrabbit-oak by apache.

In class MongoDocumentStoreHelper, method repair:

public static void repair(MongoDocumentStore store, String path) {
    DBCollection col = store.getDBCollection(NODES);
    String id = Utils.getIdFromPath(path);
    NodeDocument doc = store.find(NODES, id);
    if (doc == null) {
        System.out.println("No document for path " + path);
        return;
    }
    Set<Revision> changes = Sets.newHashSet();
    for (String key : doc.keySet()) {
        if (Utils.isPropertyName(key) || NodeDocument.isDeletedEntry(key)) {
            changes.addAll(NodeDocumentHelper.getLocalMap(doc, key).keySet());
        }
    }
    SortedMap<Revision, String> commitRoot = Maps.newTreeMap(NodeDocumentHelper.getLocalCommitRoot(doc));
    if (!commitRoot.keySet().retainAll(changes)) {
        System.out.println("Nothing to repair on " + path);
        return;
    }
    Number modCount = doc.getModCount();
    if (modCount == null) {
        System.err.println("Document does not have a modCount " + path);
        return;
    }
    DBObject query = QueryBuilder.start(Document.ID).is(id).and(Document.MOD_COUNT).is(modCount).get();
    DBObject cr = new BasicDBObject();
    for (Map.Entry<Revision, String> entry : commitRoot.entrySet()) {
        cr.put(entry.getKey().toString(), entry.getValue());
    }
    DBObject update = new BasicDBObject();
    update.put("$set", new BasicDBObject(NodeDocumentHelper.commitRoot(), cr));
    update.put("$inc", new BasicDBObject(Document.MOD_COUNT, 1L));
    WriteResult result = col.update(query, update);
    if (result.getN() == 1) {
        int num = NodeDocumentHelper.getLocalCommitRoot(doc).size() - commitRoot.size();
        System.out.println("Removed " + num + " _commitRoot entries on " + path);
    } else {
        System.out.println("Unable to repair " + path + " (concurrent update).");
    }
}
Also used : NodeDocument(org.apache.jackrabbit.oak.plugins.document.NodeDocument) BasicDBObject(com.mongodb.BasicDBObject) DBObject(com.mongodb.DBObject) DBCollection(com.mongodb.DBCollection) BasicDBObject(com.mongodb.BasicDBObject) WriteResult(com.mongodb.WriteResult) Revision(org.apache.jackrabbit.oak.plugins.document.Revision) Map(java.util.Map) SortedMap(java.util.SortedMap)

Aggregations

WriteResult (com.mongodb.WriteResult): 27
BasicDBObject (com.mongodb.BasicDBObject): 17
DBObject (com.mongodb.DBObject): 14
DBCollection (com.mongodb.DBCollection): 9
Test (org.junit.Test): 5
BulkWriteResult (com.mongodb.BulkWriteResult): 3
DB (com.mongodb.DB): 3
Date (java.util.Date): 3
List (java.util.List): 3
ImmutableList (com.google.common.collect.ImmutableList): 2
MongoException (com.mongodb.MongoException): 2
QueryBuilder (com.mongodb.QueryBuilder): 2
IOException (java.io.IOException): 2
LinkedHashMap (java.util.LinkedHashMap): 2
NodeDocument (org.apache.jackrabbit.oak.plugins.document.NodeDocument): 2
ObjectId (org.bson.types.ObjectId): 2
AlarmCallbackConfiguration (org.graylog2.alarmcallbacks.AlarmCallbackConfiguration): 2
EmailAlarmCallback (org.graylog2.alarmcallbacks.EmailAlarmCallback): 2
AlertCondition (org.graylog2.plugin.alarms.AlertCondition): 2
Stream (org.graylog2.plugin.streams.Stream): 2