use of com.mongodb.MongoBulkWriteException in project zeppelin by apache.
the class MongoNotebookRepo method insertFileSystemNotes.
/**
 * If the environment variable ZEPPELIN_NOTEBOOK_MONGO_AUTOIMPORT is true,
 * this method inserts local notes into MongoDB on startup.
 * If a note already exists in MongoDB, it is skipped.
 */
private void insertFileSystemNotes() throws IOException {
  // docs to be imported
  LinkedList<Document> docs = new LinkedList<>();
  NotebookRepo vfsRepo = new VFSNotebookRepo(this.conf);
  List<NoteInfo> infos = vfsRepo.list(null);
  // collect notes to be imported
  for (NoteInfo info : infos) {
    Note note = vfsRepo.get(info.getId(), null);
    Document doc = noteToDocument(note);
    docs.add(doc);
  }
  /*
   * The 'ordered(false)' option lets the bulk insert proceed even when
   * duplicated documents are encountered. The duplicates are skipped
   * and reported with a WARN log.
   */
  try {
    coll.insertMany(docs, new InsertManyOptions().ordered(false));
  } catch (MongoBulkWriteException e) {
    // print a warning for each duplicated document
    printDuplicatedException(e);
  }
  // currently a no-op, but may do cleanup in the future
  vfsRepo.close();
}
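The printDuplicatedException helper is not shown on this page. A minimal sketch of what such a handler could look like, assuming an SLF4J logger and treating duplicate-key write errors (ErrorCategory.DUPLICATE_KEY, server code 11000) as the "note already exists" case; the actual Zeppelin implementation may differ:

import com.mongodb.ErrorCategory;
import com.mongodb.MongoBulkWriteException;
import com.mongodb.bulk.BulkWriteError;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class DuplicateNoteLogger {
  private static final Logger LOG = LoggerFactory.getLogger(DuplicateNoteLogger.class);

  // Log a WARN for each duplicate-key write error; with ordered(false)
  // the remaining documents in the batch have still been inserted.
  static void printDuplicatedException(MongoBulkWriteException e) {
    for (BulkWriteError error : e.getWriteErrors()) {
      if (ErrorCategory.fromErrorCode(error.getCode()) == ErrorCategory.DUPLICATE_KEY) {
        LOG.warn("Duplicated document skipped at index {}: {}", error.getIndex(), error.getMessage());
      }
    }
  }
}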
use of com.mongodb.MongoBulkWriteException in project mongo-java-driver by mongodb.
the class WriteCommandProtocol method execute.
@Override
public BulkWriteResult execute(final InternalConnection connection) {
  BaseWriteCommandMessage message = createRequestMessage(getMessageSettings(connection.getDescription()));
  long startTimeNanos = System.nanoTime();
  try {
    // combines per-batch results and errors into a single BulkWriteResult
    BulkWriteBatchCombiner bulkWriteBatchCombiner =
        new BulkWriteBatchCombiner(connection.getDescription().getServerAddress(), ordered, writeConcern);
    int batchNum = 0;
    int currentRangeStartIndex = 0;
    do {
      batchNum++;
      startTimeNanos = System.nanoTime();
      // send as many items as fit in one message; the remainder comes back as nextMessage
      BaseWriteCommandMessage nextMessage = sendMessage(connection, message, batchNum);
      int itemCount = nextMessage != null
          ? message.getItemCount() - nextMessage.getItemCount()
          : message.getItemCount();
      // maps indexes within this batch back to indexes in the original bulk request
      IndexMap indexMap = IndexMap.create(currentRangeStartIndex, itemCount);
      BsonDocument result = receiveMessage(connection, message);
      if (nextMessage != null || batchNum > 1) {
        if (getLogger().isDebugEnabled()) {
          getLogger().debug(format("Received response for batch %d", batchNum));
        }
      }
      if (WriteCommandResultHelper.hasError(result)) {
        MongoBulkWriteException bulkWriteException =
            getBulkWriteException(getType(), result, connection.getDescription().getServerAddress());
        bulkWriteBatchCombiner.addErrorResult(bulkWriteException, indexMap);
      } else {
        bulkWriteBatchCombiner.addResult(getBulkWriteResult(getType(), result), indexMap);
      }
      sendSucceededEvent(connection, message, startTimeNanos, result);
      currentRangeStartIndex += itemCount;
      message = nextMessage;
    } while (message != null && !bulkWriteBatchCombiner.shouldStopSendingMoreBatches());
    return bulkWriteBatchCombiner.getResult();
  } catch (MongoBulkWriteException e) {
    // bulk write errors are rethrown directly; no failed event is sent for them
    throw e;
  } catch (RuntimeException e) {
    sendFailedEvent(connection, message, startTimeNanos, e);
    throw e;
  }
}
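For context (not part of the driver source), a small caller-level sketch of how an unordered insertMany surfaces MongoBulkWriteException, with the partial result and per-item errors available on the exception; the connection string, database, and collection names are placeholders:

import com.mongodb.MongoBulkWriteException;
import com.mongodb.bulk.BulkWriteError;
import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.model.InsertManyOptions;
import org.bson.Document;

import java.util.Arrays;
import java.util.List;

public class UnorderedInsertExample {
  public static void main(String[] args) {
    try (MongoClient client = MongoClients.create("mongodb://localhost:27017")) {
      MongoCollection<Document> coll = client.getDatabase("test").getCollection("notes");
      List<Document> docs = Arrays.asList(
          new Document("_id", "note-1").append("name", "first"),
          new Document("_id", "note-1").append("name", "duplicate"), // duplicate _id triggers a write error
          new Document("_id", "note-2").append("name", "second"));
      try {
        // ordered(false): keep inserting after a failure instead of aborting the batch
        coll.insertMany(docs, new InsertManyOptions().ordered(false));
      } catch (MongoBulkWriteException e) {
        // the exception carries both the partial result and the per-document errors
        System.out.println("Inserted: " + e.getWriteResult().getInsertedCount());
        for (BulkWriteError error : e.getWriteErrors()) {
          System.out.println("Failed at index " + error.getIndex() + ": " + error.getMessage());
        }
      }
    }
  }
}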