Use of com.torodb.mongodb.repl.oplogreplier.fetcher.OplogFetcher in project torodb by torodb.
The class RecoveryService, method applyOplog.
/**
 * Applies all the oplog operations stored on the remote server whose optime is higher than
 * <em>from</em> but lower than or equal to <em>to</em>.
 *
 * @param remoteOplog the reader used to fetch operations from the sync source
 * @param from        the optime of the last operation that has already been applied locally
 * @param to          the optime of the last operation that should be applied
 */
private void applyOplog(OplogReader remoteOplog, OpTime from, OpTime to)
    throws TryAgainException, MongoException, FatalErrorException {
  MongoCursor<OplogOperation> oplogCursor = remoteOplog.between(from, true, to, true);
  if (!oplogCursor.hasNext()) {
    throw new OplogStartMissingException(remoteOplog.getSyncSource());
  }
  OplogOperation firstOp = oplogCursor.next();
  if (!firstOp.getOpTime().equals(from)) {
    throw new TryAgainException("Remote oplog does not contain our last operation");
  }
  OplogFetcher fetcher = new LimitedOplogFetcher(oplogCursor);
  ApplierContext context = new ApplierContext.Builder()
      .setReapplying(true)
      .setUpdatesAsUpserts(true)
      .build();
  try {
    oplogApplier.apply(fetcher, context).waitUntilFinished();
  } catch (StopReplicationException | RollbackReplicationException
      | CancellationException | UnexpectedOplogApplierException ex) {
    throw new FatalErrorException(ex);
  }
  OpTime lastAppliedOptime;
  try (ReadOplogTransaction oplogTrans = oplogManager.createReadTransaction()) {
    lastAppliedOptime = oplogTrans.getLastAppliedOptime();
  }
  if (!lastAppliedOptime.equals(to)) {
    LOGGER.warn("Unexpected optime for last operation to apply. "
        + "Expected " + to + ", but " + lastAppliedOptime + " found");
  }
}
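
A minimal sketch of how such a bounded replay might be driven. It assumes only the RecoveryService collaborators visible above (oplogManager and the OplogReader); the helper name catchUpTo is hypothetical and the caller is expected to supply the target optime.

// Hypothetical helper (not torodb source): replay everything after the last
// locally applied operation, up to a target optime chosen by the caller.
private void catchUpTo(OplogReader remoteOplog, OpTime to)
    throws TryAgainException, MongoException, FatalErrorException {
  OpTime from;
  try (ReadOplogTransaction oplogTrans = oplogManager.createReadTransaction()) {
    // the last operation already applied locally is the lower bound of the replay
    from = oplogTrans.getLastAppliedOptime();
  }
  applyOplog(remoteOplog, from, to);
}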
Use of com.torodb.mongodb.repl.oplogreplier.fetcher.OplogFetcher in project torodb by torodb.
The class DefaultOplogApplier, method apply.
@Override
public ApplyingJob apply(OplogFetcher fetcher, ApplierContext applierContext) {
  Materializer materializer = ActorMaterializer.create(actorSystem);
  RunnableGraph<Pair<UniqueKillSwitch, CompletionStage<Done>>> graph = createOplogSource(fetcher)
      .async()
      .via(createBatcherFlow(applierContext))
      .viaMat(KillSwitches.single(), Keep.right())
      .async()
      .map(analyzedElem -> {
        for (AnalyzedOplogBatch analyzedOplogBatch : analyzedElem.analyzedBatch) {
          batchExecutor.apply(analyzedOplogBatch, applierContext);
        }
        return analyzedElem;
      })
      .map(this::metricExecution)
      .toMat(
          Sink.foreach(this::storeLastAppliedOp),
          (killSwitch, completionStage) -> new Pair<>(killSwitch, completionStage));
  Pair<UniqueKillSwitch, CompletionStage<Done>> pair = graph.run(materializer);
  UniqueKillSwitch killSwitch = pair.first();
  CompletableFuture<Empty> whenComplete = pair.second()
      .toCompletableFuture()
      .thenApply(done -> Empty.getInstance())
      .whenComplete((done, t) -> {
        fetcher.close();
        if (done != null) {
          LOGGER.trace("Oplog replication stream finished normally");
        } else {
          Throwable cause;
          if (t instanceof CompletionException) {
            cause = t.getCause();
          } else {
            cause = t;
          }
          //the completable future has been cancelled
          if (cause instanceof CancellationException) {
            LOGGER.debug("Oplog replication stream has been cancelled");
            killSwitch.shutdown();
          } else {
            //in this case the exception should have come from the stream
            cause = Throwables.getRootCause(cause);
            LOGGER.error("Oplog replication stream finished exceptionally: "
                + cause.getLocalizedMessage(), cause);
            //the stream should already have finished exceptionally, but just in case we
            //notify the kill switch to stop the stream.
            killSwitch.shutdown();
          }
        }
      });
  return new DefaultApplyingJob(killSwitch, whenComplete);
}
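
The ApplyingJob returned here is what RecoveryService.applyOplog (above) blocks on. A minimal caller sketch, reusing only names that appear in these snippets; the exception handling simply mirrors what RecoveryService does and is not mandated by the applier itself.

// Illustrative caller sketch, not torodb source: wait for the replication
// stream to finish and collapse every termination cause into a single fatal
// error, as RecoveryService.applyOplog does above.
ApplyingJob job = oplogApplier.apply(fetcher, applierContext);
try {
  job.waitUntilFinished();
} catch (StopReplicationException | RollbackReplicationException
    | CancellationException | UnexpectedOplogApplierException ex) {
  throw new FatalErrorException(ex);
}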