Use of com.mongodb.BulkWriteOperation in project GNS by MobilityFirst: the class MongoRecords, method bulkUpdate.
/**
 * Performs a bulk update on the given collection. A null value for a key
 * means the corresponding record is removed.
 *
 * @param collectionName the name of the collection
 * @param values a map from primary key to the new record value (null to remove)
 * @throws FailedDBOperationException
 * @throws RecordExistsException
 */
public void bulkUpdate(String collectionName, Map<String, JSONObject> values)
        throws FailedDBOperationException, RecordExistsException {
    DBCollection collection = db.getCollection(collectionName);
    String primaryKey = mongoCollectionSpecs.getCollectionSpec(collectionName).getPrimaryKey().getName();
    db.requestEnsureConnection();
    BulkWriteOperation unordered = collection.initializeUnorderedBulkOperation();
    for (Map.Entry<String, JSONObject> entry : values.entrySet()) {
        BasicDBObject query = new BasicDBObject(primaryKey, entry.getKey());
        JSONObject value = entry.getValue();
        if (value != null) {
            DBObject document;
            try {
                document = (DBObject) JSON.parse(value.toString());
            } catch (Exception e) {
                throw new FailedDBOperationException(collectionName, "bulkUpdate",
                        "Unable to parse JSON: " + e.getMessage());
            }
            // Upsert: replace the existing record, or insert it if absent.
            unordered.find(query).upsert().replaceOne(document);
        } else {
            // A null value signals removal of the record.
            unordered.find(query).removeOne();
        }
    }
    // Maybe check the result?
    BulkWriteResult result = unordered.execute();
}
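For context, the unordered upsert-or-remove pattern above can be exercised on its own. The following is a minimal, self-contained sketch against the legacy DBCollection API; the database, collection, and field names are illustrative, not taken from GNS:

import com.mongodb.BasicDBObject;
import com.mongodb.BulkWriteOperation;
import com.mongodb.BulkWriteResult;
import com.mongodb.DBCollection;
import com.mongodb.MongoClient;

public class BulkUpsertRemoveSketch {
    public static void main(String[] args) {
        MongoClient client = new MongoClient("localhost", 27017);
        try {
            // "test" and "records" are illustrative names only.
            DBCollection records = client.getDB("test").getCollection("records");
            BulkWriteOperation unordered = records.initializeUnorderedBulkOperation();
            // Upsert: replace the matching document, inserting it if absent.
            unordered.find(new BasicDBObject("name", "alice"))
                    .upsert()
                    .replaceOne(new BasicDBObject("name", "alice").append("value", 1));
            // Remove at most one matching document.
            unordered.find(new BasicDBObject("name", "bob")).removeOne();
            BulkWriteResult result = unordered.execute();
            System.out.println("matched=" + result.getMatchedCount()
                    + ", removed=" + result.getRemovedCount());
        } finally {
            client.close();
        }
    }
}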
Use of com.mongodb.BulkWriteOperation in project GeoGig by boundlessgeo: the class MongoObjectDatabase, method putAll.
@Override
public void putAll(Iterator<? extends RevObject> objects, BulkOpListener listener) {
    Preconditions.checkNotNull(executor, "executor service not set");
    if (!objects.hasNext()) {
        return;
    }
    final int bulkSize = 1000;
    final int maxRunningTasks = 10;
    final AtomicBoolean cancelCondition = new AtomicBoolean();
    List<ObjectId> ids = Lists.newArrayListWithCapacity(bulkSize);
    List<Future<?>> runningTasks = new ArrayList<Future<?>>(maxRunningTasks);
    BulkWriteOperation bulkOperation = collection.initializeOrderedBulkOperation();
    try {
        while (objects.hasNext()) {
            RevObject object = objects.next();
            bulkOperation.insert(toDocument(object));
            ids.add(object.getId());
            // Hand off a full (or final) batch to the executor and start a new one.
            if (ids.size() == bulkSize || !objects.hasNext()) {
                InsertTask task = new InsertTask(bulkOperation, listener, ids, cancelCondition);
                runningTasks.add(executor.submit(task));
                if (objects.hasNext()) {
                    bulkOperation = collection.initializeOrderedBulkOperation();
                    ids = Lists.newArrayListWithCapacity(bulkSize);
                }
            }
            // Bound the number of concurrently running insert tasks.
            if (runningTasks.size() == maxRunningTasks) {
                waitForTasks(runningTasks);
            }
        }
        waitForTasks(runningTasks);
    } catch (RuntimeException e) {
        cancelCondition.set(true);
        throw e;
    }
}
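The notable pattern here is bounded batching: fill an ordered bulk operation to a fixed size, hand it to an executor, and cap the number of in-flight tasks so memory stays bounded. Below is a stripped-down sketch of that idea; it omits the GeoGig-specific pieces (RevObject conversion, ObjectId tracking, BulkOpListener, cancellation), and all names are illustrative:

import com.mongodb.BasicDBObject;
import com.mongodb.BulkWriteOperation;
import com.mongodb.DBCollection;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

public class BatchedBulkInsertSketch {
    static final int BULK_SIZE = 1000;
    static final int MAX_RUNNING_TASKS = 10;

    static void putAll(DBCollection collection, Iterator<BasicDBObject> docs,
            ExecutorService executor) throws InterruptedException, ExecutionException {
        List<Future<?>> running = new ArrayList<Future<?>>();
        BulkWriteOperation bulk = collection.initializeOrderedBulkOperation();
        int size = 0;
        while (docs.hasNext()) {
            bulk.insert(docs.next());
            size++;
            if (size == BULK_SIZE || !docs.hasNext()) {
                // Hand the full (or final) batch to the executor.
                final BulkWriteOperation batch = bulk;
                running.add(executor.submit(new Runnable() {
                    public void run() {
                        batch.execute();
                    }
                }));
                bulk = collection.initializeOrderedBulkOperation();
                size = 0;
            }
            if (running.size() == MAX_RUNNING_TASKS) {
                // Cap the number of in-flight batches before queueing more.
                for (Future<?> f : running) {
                    f.get();
                }
                running.clear();
            }
        }
        for (Future<?> f : running) {
            f.get();
        }
    }
}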
Use of com.mongodb.BulkWriteOperation in project mongo-hadoop by mongodb: the class MongoOutputCommitter, method commitTask.
public void commitTask(final CompatUtils.TaskAttemptContext taskContext) throws IOException {
    LOG.info("Committing task.");
    collection = MongoConfigUtil.getOutputCollection(taskContext.getConfiguration());
    // Get temporary file.
    Path tempFilePath = getTaskAttemptPath(taskContext);
    LOG.info("Committing from temporary file: " + tempFilePath.toString());
    long filePos = 0, fileLen;
    FSDataInputStream inputStream = null;
    try {
        FileSystem fs = FileSystem.get(taskContext.getConfiguration());
        inputStream = fs.open(tempFilePath);
        fileLen = fs.getFileStatus(tempFilePath).getLen();
    } catch (IOException e) {
        LOG.error("Could not open temporary file for committing", e);
        cleanupAfterCommit(inputStream, taskContext);
        throw e;
    }
    int maxDocs = MongoConfigUtil.getBatchSize(taskContext.getConfiguration());
    int curBatchSize = 0;
    boolean ordered = MongoConfigUtil.isBulkOrdered(taskContext.getConfiguration());
    BulkWriteOperation bulkOp;
    if (ordered) {
        bulkOp = collection.initializeOrderedBulkOperation();
    } else {
        bulkOp = collection.initializeUnorderedBulkOperation();
    }
    // Read Writables out of the temporary file.
    BSONWritable bw = new BSONWritable();
    MongoUpdateWritable muw = new MongoUpdateWritable();
    while (filePos < fileLen) {
        try {
            // Determine the writable type, and perform the corresponding
            // operation on MongoDB.
            int mwType = inputStream.readInt();
            if (MongoWritableTypes.BSON_WRITABLE == mwType) {
                bw.readFields(inputStream);
                bulkOp.insert(new BasicDBObject(bw.getDoc().toMap()));
            } else if (MongoWritableTypes.MONGO_UPDATE_WRITABLE == mwType) {
                muw.readFields(inputStream);
                DBObject query = new BasicDBObject(muw.getQuery().toMap());
                DBObject modifiers = new BasicDBObject(muw.getModifiers().toMap());
                BulkWriteRequestBuilder writeBuilder = bulkOp.find(query);
                if (muw.isReplace()) {
                    writeBuilder.replaceOne(modifiers);
                } else if (muw.isUpsert()) {
                    BulkUpdateRequestBuilder updateBuilder = writeBuilder.upsert();
                    if (muw.isMultiUpdate()) {
                        updateBuilder.update(modifiers);
                    } else {
                        updateBuilder.updateOne(modifiers);
                    }
                } else {
                    // No-upsert update.
                    if (muw.isMultiUpdate()) {
                        writeBuilder.update(modifiers);
                    } else {
                        writeBuilder.updateOne(modifiers);
                    }
                }
            } else {
                throw new IOException("Unrecognized type: " + mwType);
            }
            filePos = inputStream.getPos();
            // Write to MongoDB if the batch is full, or if this is the last
            // operation to be performed for the Task.
            if (++curBatchSize >= maxDocs || filePos >= fileLen) {
                try {
                    bulkOp.execute();
                } catch (MongoException e) {
                    LOG.error("Could not write to MongoDB", e);
                    throw e;
                }
                // Start a new batch, preserving the configured ordering.
                bulkOp = ordered
                        ? collection.initializeOrderedBulkOperation()
                        : collection.initializeUnorderedBulkOperation();
                curBatchSize = 0;
                // Signal progress back to Hadoop framework so that we
                // don't time out.
                taskContext.progress();
            }
        } catch (IOException e) {
            LOG.error("Error reading from temporary file", e);
            throw e;
        }
    }
    cleanupAfterCommit(inputStream, taskContext);
}
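The batch-flush discipline used above (queue up to maxDocs operations, execute the batch, then reinitialize the builder) stands on its own. A minimal sketch follows, with a plain document iterable standing in for the temporary-file replay; the class and method names are illustrative:

import com.mongodb.BasicDBObject;
import com.mongodb.BulkWriteOperation;
import com.mongodb.DBCollection;

public class BatchFlushSketch {
    // Queue writes on a BulkWriteOperation and flush every maxDocs operations,
    // starting a fresh builder after each flush.
    static void writeAll(DBCollection collection, Iterable<BasicDBObject> docs,
            int maxDocs, boolean ordered) {
        BulkWriteOperation bulkOp = ordered
                ? collection.initializeOrderedBulkOperation()
                : collection.initializeUnorderedBulkOperation();
        int curBatchSize = 0;
        for (BasicDBObject doc : docs) {
            bulkOp.insert(doc);
            if (++curBatchSize >= maxDocs) {
                bulkOp.execute();
                bulkOp = ordered
                        ? collection.initializeOrderedBulkOperation()
                        : collection.initializeUnorderedBulkOperation();
                curBatchSize = 0;
            }
        }
        if (curBatchSize > 0) {
            // Flush the final partial batch.
            bulkOp.execute();
        }
    }
}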