Use of com.eightkdata.mongowp.exceptions.MongoException in project torodb by torodb.
The class RecoveryService, method cloneDatabases.
private void cloneDatabases(@Nonnull MongoClient remoteClient)
    throws CloningException, MongoException, UserException {
  enableDataImportMode();
  try {
    Stream<String> dbNames;
    try (MongoConnection remoteConnection = remoteClient.openConnection()) {
      RemoteCommandResponse<ListDatabasesReply> remoteResponse = remoteConnection.execute(
          ListDatabasesCommand.INSTANCE, "admin", true, Empty.getInstance());
      if (!remoteResponse.isOk()) {
        throw remoteResponse.asMongoException();
      }
      dbNames = remoteResponse.getCommandReply().get().getDatabases().stream()
          .map(db -> db.getName());
    }
    dbNames.filter(this::isReplicable).forEach(databaseName -> {
      MyWritePermissionSupplier writePermissionSupplier =
          new MyWritePermissionSupplier(databaseName);
      CloneOptions options = new CloneOptions(
          true, true, true, false, databaseName,
          Collections.<String>emptySet(),
          writePermissionSupplier,
          (colName) -> replFilters.getCollectionPredicate().test(databaseName, colName),
          (collection, indexName, unique, keys) -> replFilters.getIndexPredicate()
              .test(databaseName, collection, indexName, unique, keys));
      try {
        cloner.cloneDatabase(databaseName, remoteClient, server, options);
      } catch (MongoException ex) {
        throw new CloningException(ex);
      }
    });
  } finally {
    disableDataImportMode();
  }
}
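The CloneOptions above wires per-collection and per-index predicates straight from replFilters. As an illustration only, here is a minimal sketch of how such database/collection filters can be composed with standard java.util.function types; the SimpleReplFilters name and the exclusion-set rules are assumptions for the sketch, not torodb's actual ReplicationFilters implementation.

import java.util.Set;
import java.util.function.BiPredicate;
import java.util.function.Predicate;

// Hypothetical filter holder; torodb's real ReplicationFilters class differs.
final class SimpleReplFilters {

  private final Set<String> excludedDatabases;
  private final Set<String> excludedCollections;

  SimpleReplFilters(Set<String> excludedDatabases, Set<String> excludedCollections) {
    this.excludedDatabases = excludedDatabases;
    this.excludedCollections = excludedCollections;
  }

  /** Databases that should be replicated at all. */
  Predicate<String> getDatabasePredicate() {
    return dbName -> !excludedDatabases.contains(dbName);
  }

  /** (database, collection) pairs that should be cloned. */
  BiPredicate<String, String> getCollectionPredicate() {
    return (dbName, colName) ->
        getDatabasePredicate().test(dbName) && !excludedCollections.contains(colName);
  }
}

A filter shaped like this can back the (colName) -> ...test(databaseName, colName) lambda that cloneDatabases passes to CloneOptions.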
Use of com.eightkdata.mongowp.exceptions.MongoException in project torodb by torodb.
The class ReplSyncFetcher, method fetch.
/**
 * Fetches operations from the given reader and delivers them to the callback.
 *
 * @param reader the oplog reader used to fetch batches
 * @return true iff a rollback is needed
 * @throws com.torodb.torod.mongodb.repl.ReplSyncFetcher.StopFetchException
 * @throws com.torodb.torod.mongodb.repl.ReplSyncFetcher.RestartFetchException
 */
private boolean fetch(OplogReader reader) throws StopFetchException, RestartFetchException {
  try {
    MongoCursor<OplogOperation> cursor = reader.queryGte(lastFetchedOpTime);
    Batch<OplogOperation> batch = cursor.fetchBatch();
    postBatchChecks(reader, cursor, batch);
    try {
      if (isRollbackNeeded(reader, batch, lastFetchedOpTime, lastFetchedHash)) {
        return true;
      }
      while (fetchIterationCanContinue()) {
        if (!batch.hasNext()) {
          preBatchChecks(batch);
          batch = cursor.fetchBatch();
          postBatchChecks(reader, cursor, batch);
          continue;
        }
        OplogOperation nextOp = batch.next();
        assert nextOp != null;
        boolean delivered = false;
        while (!delivered) {
          try {
            LOGGER.debug("Delivered op: {}", nextOp);
            callback.deliver(nextOp);
            delivered = true;
            opsReadCounter++;
          } catch (InterruptedException ex) {
            LOGGER.warn(serviceName() + " interrupted while a message was being delivered. "
                + "Retrying", ex);
          }
        }
        lastFetchedHash = nextOp.getHash();
        lastFetchedOpTime = nextOp.getOpTime();
        metrics.getLastOpTimeFetched().setValue(lastFetchedOpTime.toString());
      }
    } finally {
      cursor.close();
    }
  } catch (MongoException ex) {
    throw new RestartFetchException();
  }
  return false;
}
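The inner while (!delivered) loop is a retry-on-interrupt delivery pattern: an oplog operation that has already been read is never dropped just because the delivering thread was interrupted. A minimal, hypothetical sketch of the same pattern against a standard java.util.concurrent.BlockingQueue (the RetryingDeliverer name is illustrative, not part of torodb):

import java.util.concurrent.BlockingQueue;

// Hypothetical deliverer: keeps retrying an interrupted put so no fetched item is lost.
final class RetryingDeliverer<T> {

  private final BlockingQueue<T> queue;

  RetryingDeliverer(BlockingQueue<T> queue) {
    this.queue = queue;
  }

  /** Blocks until the element is enqueued, retrying if the thread is interrupted. */
  void deliver(T element) {
    boolean delivered = false;
    boolean interrupted = false;
    while (!delivered) {
      try {
        queue.put(element);
        delivered = true;
      } catch (InterruptedException ex) {
        // Remember the interrupt and retry instead of losing the element.
        interrupted = true;
      }
    }
    if (interrupted) {
      // Restore the interrupt status once delivery has succeeded.
      Thread.currentThread().interrupt();
    }
  }
}

Restoring the interrupt status after the element has finally been enqueued is the usual convention, so that callers further up the stack can still observe the interruption.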
Use of com.eightkdata.mongowp.exceptions.MongoException in project torodb by torodb.
The class OplogOperationApplier, method insertIndex.
private void insertIndex(BsonDocument indexDoc, String database,
    ExclusiveWriteMongodTransaction trans) throws OplogApplyingException {
  try {
    CreateIndexesCommand command = CreateIndexesCommand.INSTANCE;
    IndexOptions indexOptions = IndexOptions.unmarshall(indexDoc);
    CreateIndexesArgument arg = new CreateIndexesArgument(
        indexOptions.getCollection(), Arrays.asList(indexOptions));
    Status executionResult = executeReplCommand(database, command, arg,
        trans.getTorodTransaction());
    if (!executionResult.isOk()) {
      throw new OplogApplyingException(new MongoException(executionResult));
    }
  } catch (MongoException ex) {
    throw new OplogApplyingException(ex);
  }
}
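insertIndex shows the bridging pattern used throughout OplogOperationApplier: a non-OK Status is wrapped in a MongoException, which is in turn wrapped in an OplogApplyingException. A hypothetical, simplified sketch of that status-to-exception bridge with illustrative types (OpStatus, ApplyException and StatusChecks are assumptions, not mongowp or torodb classes):

// Hypothetical status type mirroring the isOk()/error pattern used above.
interface OpStatus {
  boolean isOk();
  String errorMessage();
}

// Hypothetical domain exception corresponding to OplogApplyingException.
class ApplyException extends Exception {
  ApplyException(String message) { super(message); }
  ApplyException(Throwable cause) { super(cause); }
}

final class StatusChecks {

  private StatusChecks() {}

  /** Turns a non-OK status into a domain exception, as insertIndex does with MongoException. */
  static void checkOk(OpStatus status) throws ApplyException {
    if (!status.isOk()) {
      throw new ApplyException(status.errorMessage());
    }
  }
}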
Use of com.eightkdata.mongowp.exceptions.MongoException in project torodb by torodb.
The class OplogOperationApplier, method applyCmd.
@SuppressWarnings("unchecked")
private void applyCmd(DbCmdOplogOperation op, ExclusiveWriteMongodTransaction trans,
    ApplierContext applierContext) throws OplogApplyingException {
  LibraryEntry libraryEntry = library.find(op.getRequest());
  if (libraryEntry == null) {
    throw new OplogApplyingException(new CommandNotFoundException(
        op.getRequest().isEmpty() ? "?" : op.getRequest().getFirstEntry().getKey()));
  }
  Command command = libraryEntry.getCommand();
  if (command == null) {
    BsonDocument document = op.getRequest();
    if (document.isEmpty()) {
      throw new OplogApplyingException(new CommandNotFoundException("Empty document query"));
    }
    String firstKey = document.getFirstEntry().getKey();
    throw new OplogApplyingException(new CommandNotFoundException(firstKey));
  }
  Object arg;
  try {
    arg = command.unmarshallArg(op.getRequest(), libraryEntry.getAlias());
  } catch (MongoException ex) {
    throw new OplogApplyingException(ex);
  }
  Status executionResult = executeReplCommand(op.getDatabase(), command, arg,
      trans.getTorodTransaction());
  if (!executionResult.isOk()) {
    throw new OplogApplyingException(new MongoException(executionResult));
  }
}
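applyCmd resolves the command implementation from the first key of the oplog request document and fails with CommandNotFoundException when no entry matches. A hypothetical, simplified sketch of that lookup-and-dispatch shape using a plain Map (CommandRegistry is illustrative and not torodb's command library API):

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

// Hypothetical command registry keyed by the command name (the request's first key).
final class CommandRegistry {

  private final Map<String, Function<Map<String, Object>, Boolean>> handlers = new HashMap<>();

  void register(String name, Function<Map<String, Object>, Boolean> handler) {
    handlers.put(name, handler);
  }

  /** Looks up the handler by the request's first key and applies it, or fails. */
  boolean apply(Map<String, Object> request) {
    if (request.isEmpty()) {
      throw new IllegalArgumentException("Empty document query");
    }
    String commandName = request.keySet().iterator().next();
    Function<Map<String, Object>, Boolean> handler = handlers.get(commandName);
    if (handler == null) {
      throw new IllegalArgumentException("Command not found: " + commandName);
    }
    return handler.apply(request);
  }
}

The request map should preserve insertion order (for example a LinkedHashMap), mirroring how a BSON document keeps its first key first.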
Use of com.eightkdata.mongowp.exceptions.MongoException in project torodb by torodb.
The class OplogOperationApplier, method applyUpdate.
private void applyUpdate(UpdateOplogOperation op, ExclusiveWriteMongodTransaction trans,
    ApplierContext applierContext) throws OplogApplyingException {
  boolean upsert = op.isUpsert() || applierContext.treatUpdateAsUpsert();
  Status<UpdateResult> status;
  try {
    status = executeTorodCommand(
        op.getDatabase(),
        UpdateCommand.INSTANCE,
        new UpdateArgument(
            op.getCollection(),
            Collections.singletonList(
                new UpdateStatement(op.getFilter(), op.getModification(), upsert, true)),
            true,
            WriteConcern.fsync()),
        trans);
  } catch (MongoException ex) {
    throw new OplogApplyingException(ex);
  }
  if (!status.isOk()) {
    //TODO: improve error code
    throw new OplogApplyingException(new MongoException(status));
  }
  UpdateResult updateResult = status.getResult();
  assert updateResult != null;
  if (!updateResult.isOk()) {
    throw new OplogApplyingException(
        new MongoException(updateResult.getErrorMessage(), ErrorCode.UNKNOWN_ERROR));
  }
  if (!upsert && updateResult.getModifiedCounter() == 0) {
    // A plain (non-upsert) update that modified nothing means the target doc was not found.
    LOGGER.info("Oplog update operation with optime {} and hash {} did not find the doc to "
        + "modify. Filter is {}", op.getOpTime(), op.getHash(), op.getFilter());
  }
  if (upsert && !updateResult.getUpserts().isEmpty()) {
    LOGGER.warn("Replication couldn't find doc for op " + op);
  }
}
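The two trailing checks classify the outcome of the replayed update: a plain update that modified nothing (the target document was missing on this node) and an upsert that had to insert. A hypothetical sketch of that classification as a small helper, with illustrative names rather than torodb's types:

// Hypothetical summary of an applied oplog update, mirroring the two log branches above.
enum UpdateOutcome {
  APPLIED,          // the update matched and modified an existing document
  DOC_NOT_FOUND,    // non-upsert update that modified nothing: the target doc was missing
  UPSERTED          // upsert that had to insert because no document matched
}

final class UpdateOutcomes {

  private UpdateOutcomes() {}

  /**
   * Classifies a replayed update from its counters.
   *
   * @param upsert        whether the update was applied as an upsert
   * @param modifiedCount how many documents were modified
   * @param upsertedCount how many documents were inserted by the upsert
   */
  static UpdateOutcome classify(boolean upsert, long modifiedCount, long upsertedCount) {
    if (upsert && upsertedCount > 0) {
      return UpdateOutcome.UPSERTED;
    }
    if (!upsert && modifiedCount == 0) {
      return UpdateOutcome.DOC_NOT_FOUND;
    }
    return UpdateOutcome.APPLIED;
  }
}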