Example usage of com.mongodb.BasicDBObject in the mongodb/morphia project,
taken from class DatastoreImpl, method merge().
// Merges the state of 'entity' into the already-persisted document that has
// the same _id: either via a versioned update or a plain $set of all mapped
// fields. Throws MappingException when the entity has no id, and
// UpdateException when no stored document was updated.
@Override
@SuppressWarnings("unchecked")
public <T> Key<T> merge(final T entity, final WriteConcern wc) {
T unwrapped = entity;
// Map the (possibly proxied) entity to its DBObject form; involvedObjects
// collects every object touched during mapping so post-save lifecycle
// operations can be applied to them afterwards.
final LinkedHashMap<Object, DBObject> involvedObjects = new LinkedHashMap<Object, DBObject>();
final DBObject dbObj = mapper.toDBObject(unwrapped, involvedObjects);
final Key<T> key = mapper.getKey(unwrapped);
unwrapped = ProxyHelper.unwrap(unwrapped);
final Object id = mapper.getId(unwrapped);
// A merge targets an existing document, so the entity must carry an id.
if (id == null) {
throw new MappingException("Could not get id for " + unwrapped.getClass().getName());
}
// remove (immutable) _id field for update.
final Object idValue = dbObj.get(Mapper.ID_KEY);
dbObj.removeField(Mapper.ID_KEY);
WriteResult wr;
final MappedClass mc = mapper.getMappedClass(unwrapped);
final DBCollection dbColl = getCollection(unwrapped);
// try to do an update if there is a @Version field
wr = tryVersionedUpdate(dbColl, unwrapped, dbObj, idValue, new InsertOptions().writeConcern(wc), mc);
// Null result presumably means the class has no @Version field — TODO confirm
// against tryVersionedUpdate; fall back to a plain $set update keyed on _id.
if (wr == null) {
final Query<T> query = (Query<T>) createQuery(unwrapped.getClass()).filter(Mapper.ID_KEY, id);
wr = update(query, new BasicDBObject("$set", dbObj), false, false, wc).getWriteResult();
}
final UpdateResults res = new UpdateResults(wr);
// Nothing matched: the document to merge into does not exist (or a
// versioned update lost a concurrent-modification race).
if (res.getUpdatedCount() == 0) {
throw new UpdateException("Nothing updated");
}
// Restore _id so the in-memory DBObject mirrors the stored document again
// before post-save hooks run on the involved objects.
dbObj.put(Mapper.ID_KEY, idValue);
postSaveOperations(Collections.<Object>singletonList(entity), involvedObjects, dbColl, false);
return key;
}
Example usage of com.mongodb.BasicDBObject in the mongodb/morphia project,
taken from class DatastoreImpl, public method update().
// Executes the given UpdateOperations against all documents matched by the
// query, bumping any @Version field and honoring the $isolated flag.
@Override
public <T> UpdateResults update(final Query<T> query, final UpdateOperations<T> operations, final UpdateOptions options) {
// Resolve the target collection, falling back to the entity's mapped collection.
DBCollection collection = query.getCollection();
// TODO remove this after testing.
if (collection == null) {
collection = getCollection(query.getEntityClass());
}
final MappedClass mappedClass = getMapper().getMappedClass(query.getEntityClass());
final List<MappedField> versionFields = mappedClass.getFieldsAnnotatedWith(Version.class);
final DBObject queryObject = query.getQueryObject();
// Propagate the $isolated flag onto the query document when requested.
if (operations.isIsolated()) {
queryObject.put("$isolated", true);
}
// Versioned entities get an automatic increment of the version field.
if (!versionFields.isEmpty()) {
operations.inc(versionFields.get(0).getNameToStore(), 1);
}
// Extract the raw update document after the version bump has been added.
final BasicDBObject updateDocument = (BasicDBObject) ((UpdateOpsImpl) operations).getOps();
if (LOG.isTraceEnabled()) {
LOG.trace(format("Executing update(%s) for query: %s, ops: %s, multi: %s, upsert: %s", collection.getName(), queryObject, updateDocument, options.isMulti(), options.isUpsert()));
}
return new UpdateResults(collection.update(queryObject, updateDocument, enforceWriteConcern(options, query.getEntityClass()).getOptions()));
}
Example usage of com.mongodb.BasicDBObject in the mongodb/morphia project,
taken from class DatastoreImpl, method process().
// Applies the @Validation annotation of a mapped class to its collection:
// first tries a collMod on the existing collection, and creates the
// collection with the validation options when it does not exist yet.
void process(final MappedClass mc, final Validation validation) {
// Nothing to do when the class carries no @Validation annotation.
if (validation == null) {
return;
}
final String collectionName = mc.getCollectionName();
// Attempt to attach the validator to an existing collection via collMod.
final CommandResult result = getDB().command(new BasicDBObject("collMod", collectionName)
.append("validator", parse(validation.value()))
.append("validationLevel", validation.level().getValue())
.append("validationAction", validation.action().getValue()));
if (result.ok()) {
return;
}
if (result.getInt("code") == 26) {
// Error code 26 (NamespaceNotFound): the collection does not exist,
// so create it with the validation options instead.
final ValidationOptions options = new ValidationOptions()
.validator(parse(validation.value()))
.validationLevel(validation.level())
.validationAction(validation.action());
getDatabase().createCollection(collectionName, new CreateCollectionOptions().validationOptions(options));
} else {
// Any other failure is surfaced to the caller.
result.throwOnError();
}
}
Example usage of com.mongodb.BasicDBObject in the mongodb/morphia project,
taken from class DatastoreImpl, private method update().
// Low-level update: runs a raw update document against the query's
// collection, rejecting cursor modifiers (sort/offset/limit) and injecting
// an automatic $inc for any @Version field not already set by the caller.
@SuppressWarnings("unchecked")
private <T> UpdateResults update(final Query<T> query, final DBObject update, final UpdateOptions options) {
DBCollection collection = query.getCollection();
// TODO remove this after testing.
if (collection == null) {
collection = getCollection(query.getEntityClass());
}
// Updates cannot carry cursor modifiers: reject sort, offset and limit up front.
final DBObject sortObject = query.getSortObject();
if (sortObject != null && sortObject.keySet() != null && !sortObject.keySet().isEmpty()) {
throw new QueryException("sorting is not allowed for updates.");
}
if (query.getOffset() > 0) {
throw new QueryException("a query offset is not allowed for updates.");
}
if (query.getLimit() > 0) {
throw new QueryException("a query limit is not allowed for updates.");
}
final DBObject queryObject = query.getQueryObject();
final MappedClass mappedClass = getMapper().getMappedClass(query.getEntityClass());
final List<MappedField> versionFields = mappedClass.getFieldsAnnotatedWith(Version.class);
if (!versionFields.isEmpty()) {
final String versionName = versionFields.get(0).getNameToStore();
// Only add the automatic version bump when the caller did not set the
// version field explicitly in the update document.
if (update.get(versionName) == null) {
if (update.containsField("$inc")) {
// Merge the bump into the caller's existing $inc clause.
((Map<String, Object>) (update.get("$inc"))).put(versionName, 1);
} else {
update.put("$inc", new BasicDBObject(versionName, 1));
}
}
}
if (LOG.isTraceEnabled()) {
LOG.trace(format("Executing update(%s) for query: %s, ops: %s, multi: %s, upsert: %s", collection.getName(), queryObject, update, options.isMulti(), options.isUpsert()));
}
return new UpdateResults(collection.update(queryObject, update, enforceWriteConcern(options, query.getEntityClass()).getOptions()));
}
Example usage of com.mongodb.BasicDBObject in the mongodb/mongo-java-driver project,
taken from class GridFSTest, method testInputStreamSkipping().
// Verifies that GridFS input streams implement skip() correctly across and
// within chunk boundaries: forward skips within a chunk, across multiple
// chunks, to an exact chunk boundary, non-positive skips, and skipping past
// the end of the file.
@Test
public void testInputStreamSkipping() throws Exception {
//int chunkSize = 5;
int chunkSize = GridFS.DEFAULT_CHUNKSIZE;
// Non-integer multiple of the chunk size so the last chunk is partial.
int fileSize = (int) (7.25 * chunkSize);
byte[] fileBytes = new byte[fileSize];
// Fill with a byte pattern (idx mod 251, a prime) so every position has a
// predictable, non-repeating-per-chunk value we can assert on after a skip.
for (int idx = 0; idx < fileSize; ++idx) {
fileBytes[idx] = (byte) (idx % 251);
}
//Don't want chunks to be aligned at byte position 0
GridFSInputFile inputFile = gridFS.createFile(fileBytes);
inputFile.setFilename("input_stream_skipping.bin");
inputFile.save(chunkSize);
GridFSDBFile savedFile = gridFS.findOne(new BasicDBObject("_id", inputFile.getId()));
InputStream inputStream = savedFile.getInputStream();
//Quick run-through, make sure the file is as expected
for (int idx = 0; idx < fileSize; ++idx) {
assertEquals((byte) (idx % 251), (byte) inputStream.read());
}
// Re-open the stream and exercise skip() from the beginning.
inputStream = savedFile.getInputStream();
// Small skip within the first chunk.
long skipped = inputStream.skip(1);
assertEquals(1, skipped);
int position = 1;
assertEquals((byte) (position++ % 251), (byte) inputStream.read());
// Skip exactly one chunk's worth of bytes (crosses a chunk boundary).
skipped = inputStream.skip(chunkSize);
assertEquals(chunkSize, skipped);
position += chunkSize;
assertEquals((byte) (position++ % 251), (byte) inputStream.read());
// Negative and zero skips must be no-ops that report 0 bytes skipped.
skipped = inputStream.skip(-1);
assertEquals(0, skipped);
skipped = inputStream.skip(0);
assertEquals(0, skipped);
// Skip spanning several chunks at once.
skipped = inputStream.skip(3 * chunkSize);
assertEquals(3 * chunkSize, skipped);
position += 3 * chunkSize;
assertEquals((byte) (position++ % 251), (byte) inputStream.read());
//Make sure skipping works when we skip to an exact chunk boundary
long toSkip = inputStream.available();
skipped = inputStream.skip(toSkip);
assertEquals(toSkip, skipped);
position += toSkip;
assertEquals((byte) (position++ % 251), (byte) inputStream.read());
// Skipping past EOF only skips the remaining bytes, and a subsequent
// read() reports end-of-stream.
skipped = inputStream.skip(2 * fileSize);
assertEquals(fileSize - position, skipped);
assertEquals(-1, inputStream.read());
}
Aggregations