Use of com.eightkdata.mongowp.OpTime in project torodb (by torodb).
Class OplogOperationParser, method fromBson:
/**
 * Parses a BSON value into its {@link OplogOperation} representation.
 *
 * <p>The value must be a document containing at least the standard oplog entry
 * fields: {@code op} (operation type), {@code ns} (namespace), {@code h} (hash),
 * {@code v} (oplog version) and {@code o} (operation body).
 *
 * @param uncastedOp the BSON value that should represent an oplog entry
 * @return the parsed oplog operation
 * @throws BadValueException if the value is not a document, the op type is
 *         unknown, or the namespace is empty on a non-noop entry
 * @throws TypesMismatchException if a field does not have the expected BSON type
 * @throws NoSuchKeyException if a required field is missing
 */
public static OplogOperation fromBson(@Nonnull BsonValue uncastedOp) throws BadValueException, TypesMismatchException, NoSuchKeyException {
    if (!uncastedOp.isDocument()) {
        throw new BadValueException("found a "
            + uncastedOp.getType().toString().toLowerCase(Locale.ROOT)
            + " where a document that represents a oplog operation was expected");
    }
    BsonDocument opDoc = uncastedOp.asDocument();

    String rawOpName = BsonReaderTool.getString(opDoc, "op");
    OplogOperationType operationType;
    try {
        operationType = OplogOperationType.fromOplogName(rawOpName);
    } catch (IllegalArgumentException ex) {
        throw new BadValueException("Unknown oplog operation with type '" + rawOpName + "'");
    }

    String namespace;
    try {
        namespace = BsonReaderTool.getString(opDoc, "ns");
    } catch (NoSuchKeyException ex) {
        throw new NoSuchKeyException("ns", "op does not contain required \"ns\" field: " + uncastedOp);
    } catch (TypesMismatchException ex) {
        throw ex.newWithMessage("\"ns\" field is not a string: " + uncastedOp);
    }
    // Only noop entries may carry an empty namespace.
    if (namespace.isEmpty() && !operationType.equals(OplogOperationType.NOOP)) {
        throw new BadValueException("\"ns\" field value cannot be empty when op type is not 'n': " + opDoc);
    }

    // Split "db.collection"; a namespace without a dot, or ending in a dot,
    // has no collection part.
    int dotPos = namespace.indexOf('.');
    boolean hasCollection = dotPos != -1 && dotPos + 1 != namespace.length();
    String database = hasCollection ? namespace.substring(0, dotPos) : namespace;
    String col = hasCollection ? namespace.substring(dotPos + 1) : null;

    OpTime opTime = OpTime.fromOplogEntry(opDoc);
    long hash = BsonReaderTool.getLong(opDoc, "h");
    OplogVersion oplogVersion = OplogVersion.valueOf(BsonReaderTool.getInteger(opDoc, "v"));
    //Note: Mongodb v3 checks if the key exists or not, but doesn't check the value
    boolean isFromMigrate = opDoc.containsKey("fromMigrate");
    BsonDocument body = BsonReaderTool.getDocument(opDoc, "o");

    switch (operationType) {
        case DB:
            return new DbOplogOperation(database, opTime, hash, oplogVersion, isFromMigrate);
        case DB_CMD:
            return new DbCmdOplogOperation(body, database, opTime, hash, oplogVersion, isFromMigrate);
        case DELETE:
            return new DeleteOplogOperation(body, database, col, opTime, hash, oplogVersion, isFromMigrate, BsonReaderTool.getBoolean(opDoc, "b", false));
        case INSERT:
            //TODO: parse b
            return new InsertOplogOperation(body, database, col, opTime, hash, oplogVersion, isFromMigrate);
        case NOOP:
            return new NoopOplogOperation(body, database, opTime, hash, oplogVersion, isFromMigrate);
        case UPDATE:
            return new UpdateOplogOperation(BsonReaderTool.getDocument(opDoc, "o2"), database, col, opTime, hash, oplogVersion, isFromMigrate, body, BsonReaderTool.getBoolean(opDoc, "b", false));
        default:
            throw new AssertionError(OplogOperationParser.class + " is not prepared to work with oplog operations of type " + operationType);
    }
}
Use of com.eightkdata.mongowp.OpTime in project torodb (by torodb).
Class RecoveryService, method initialSync:
/**
 * Performs a full initial synchronization from a remote sync source.
 *
 * <p>Marks local data as inconsistent, truncates the local oplog, drops the
 * local databases, clones the remote databases, applies the remote oplog in
 * two passes to catch up with writes that happened during the clone, rebuilds
 * indexes, and finally marks local data as consistent again.
 *
 * @return {@code true} if the initial sync completed; {@code false} if the
 *         service was stopped before it could finish
 * @throws TryAgainException on transient failures (no sync source, unreachable
 *         server, missing oplog start, rollback, ...) that warrant a retry
 * @throws FatalErrorException on unrecoverable failures
 */
private boolean initialSync() throws TryAgainException, FatalErrorException {
    /*
     * 1. store that data is inconsistent 2. decide a sync source 3. lastRemoteOptime1 = get the
     * last optime of the sync source 4. clone all databases except local 5. lastRemoteOptime2 = get
     * the last optime of the sync source 6. apply remote oplog from lastRemoteOptime1 to
     * lastRemoteOptime2 7. lastRemoteOptime3 = get the last optime of the sync source 8. apply
     * remote oplog from lastRemoteOptime2 to lastRemoteOptime3 9. rebuild indexes 10. store
     * lastRemoteOptime3 as the last applied operation optime 11. store that data is consistent 12.
     * change replication state to SECONDARY
     */
    //TODO: Support fastsync (used to restore a node by copying the data from other up-to-date node)
    LOGGER.info("Starting initial sync");
    callback.setConsistentState(false);
    HostAndPort syncSource;
    try {
        syncSource = syncSourceProvider.newSyncSource();
        LOGGER.info("Using node " + syncSource + " to replicate from");
    } catch (NoSyncSourceFoundException ex) {
        throw new TryAgainException("No sync source");
    }
    MongoClient remoteClient;
    try {
        remoteClient = remoteClientFactory.createClient(syncSource);
    } catch (UnreachableMongoServerException ex) {
        throw new TryAgainException(ex);
    }
    try {
        LOGGER.debug("Remote client obtained");
        MongoConnection remoteConnection = remoteClient.openConnection();
        try (OplogReader reader = oplogReaderProvider.newReader(remoteConnection)) {
            OplogOperation lastClonedOp = reader.getLastOp();
            OpTime lastRemoteOptime1 = lastClonedOp.getOpTime();
            try (WriteOplogTransaction oplogTransaction = oplogManager.createWriteTransaction()) {
                // Fixed misleading log: this phase truncates the local oplog and drops
                // local databases; remote cloning is logged further below.
                LOGGER.info("Local oplog truncation started");
                oplogTransaction.truncate();
                LOGGER.info("Local databases dropping started");
                Status<?> status = dropDatabases();
                if (!status.isOk()) {
                    throw new TryAgainException("Error while trying to drop collections: " + status);
                }
                LOGGER.info("Local databases dropping finished");
                if (!isRunning()) {
                    LOGGER.warn("Recovery stopped before it can finish");
                    return false;
                }
                LOGGER.info("Remote database cloning started");
                cloneDatabases(remoteClient);
                LOGGER.info("Remote database cloning finished");
                oplogTransaction.forceNewValue(lastClonedOp.getHash(), lastClonedOp.getOpTime());
            }
            if (!isRunning()) {
                LOGGER.warn("Recovery stopped before it can finish");
                return false;
            }
            TorodServer torodServer = server.getTorodServer();
            try (TorodConnection connection = torodServer.openConnection();
                 SharedWriteTorodTransaction trans = connection.openWriteTransaction(false)) {
                // First pass: catch up with remote writes that happened while cloning.
                OpTime lastRemoteOptime2 = reader.getLastOp().getOpTime();
                LOGGER.info("First oplog application started");
                applyOplog(reader, lastRemoteOptime1, lastRemoteOptime2);
                trans.commit();
                LOGGER.info("First oplog application finished");
                if (!isRunning()) {
                    LOGGER.warn("Recovery stopped before it can finish");
                    return false;
                }
                // Second pass: catch up with writes that happened during the first pass.
                OplogOperation lastOperation = reader.getLastOp();
                OpTime lastRemoteOptime3 = lastOperation.getOpTime();
                LOGGER.info("Second oplog application started");
                applyOplog(reader, lastRemoteOptime2, lastRemoteOptime3);
                trans.commit();
                LOGGER.info("Second oplog application finished");
                if (!isRunning()) {
                    LOGGER.warn("Recovery stopped before it can finish");
                    return false;
                }
                LOGGER.info("Index rebuild started");
                rebuildIndexes();
                trans.commit();
                LOGGER.info("Index rebuild finished");
                if (!isRunning()) {
                    LOGGER.warn("Recovery stopped before it can finish");
                    return false;
                }
                trans.commit();
            }
        } catch (OplogStartMissingException ex) {
            throw new TryAgainException(ex);
        } catch (OplogOperationUnsupported ex) {
            throw new TryAgainException(ex);
        } catch (MongoException | RollbackException ex) {
            throw new TryAgainException(ex);
        } catch (OplogManagerPersistException ex) {
            // Preserve the cause (the original threw FatalErrorException() without it).
            throw new FatalErrorException(ex);
        } catch (UserException ex) {
            throw new FatalErrorException(ex);
        }
        callback.setConsistentState(true);
        LOGGER.info("Initial sync finished");
    } finally {
        remoteClient.close();
    }
    return true;
}
Use of com.eightkdata.mongowp.OpTime in project torodb (by torodb).
Class RecoveryService, method applyOplog:
/**
 * Applies all the oplog operations stored on the remote server whose optime is higher than
 * <em>from</em> but lower or equal than <em>to</em>.
 *
 * @param remoteOplog the reader used to fetch operations from the sync source
 * @param from optime of the last operation already applied locally; it must still be
 *        present on the remote oplog
 * @param to optime of the last remote operation that should end up applied
 * @throws TryAgainException if the remote oplog no longer contains our last applied
 *         operation (the remote oplog rolled over past our position)
 * @throws FatalErrorException if the oplog application fails, is cancelled or stopped
 */
private void applyOplog(OplogReader remoteOplog, OpTime from, OpTime to) throws TryAgainException, MongoException, FatalErrorException {
    MongoCursor<OplogOperation> oplogCursor = remoteOplog.between(from, true, to, true);
    if (!oplogCursor.hasNext()) {
        throw new OplogStartMissingException(remoteOplog.getSyncSource());
    }
    // The first returned operation must be exactly the one we already applied;
    // otherwise we cannot resume from the remote oplog.
    OplogOperation firstOp = oplogCursor.next();
    if (!firstOp.getOpTime().equals(from)) {
        // Fixed typo in the original message ("cointain").
        throw new TryAgainException("Remote oplog does not contain our last operation");
    }
    OplogFetcher fetcher = new LimitedOplogFetcher(oplogCursor);
    // Reapply with updates-as-upserts so the application is idempotent.
    ApplierContext context = new ApplierContext.Builder().setReapplying(true).setUpdatesAsUpserts(true).build();
    try {
        oplogApplier.apply(fetcher, context).waitUntilFinished();
    } catch (StopReplicationException | RollbackReplicationException | CancellationException | UnexpectedOplogApplierException ex) {
        throw new FatalErrorException(ex);
    }
    OpTime lastAppliedOptime;
    try (ReadOplogTransaction oplogTrans = oplogManager.createReadTransaction()) {
        lastAppliedOptime = oplogTrans.getLastAppliedOptime();
    }
    // Not fatal: the applier may have legitimately stopped at a different optime,
    // but it is worth surfacing in the logs.
    if (!lastAppliedOptime.equals(to)) {
        LOGGER.warn("Unexpected optime for last operation to apply. " + "Expected " + to + ", but " + lastAppliedOptime + " found");
    }
}
Use of com.eightkdata.mongowp.OpTime in project torodb (by torodb).
Class SequentialOplogApplierService, method startUp:
/**
 * Starts the SECONDARY replication machinery: reads the last applied oplog
 * position and launches the fetcher and applier sub-services, blocking until
 * both are running.
 */
@Override
protected void startUp() {
    callback.waitUntilStartPermision();
    LOGGER.info("Starting SECONDARY service");

    // Reset pause bookkeeping before the sub-services come up.
    paused = false;
    fetcherIsPaused = false;
    pauseRequested = false;

    // Resume fetching from the last operation we know was applied locally.
    final long appliedHash;
    final OpTime appliedOpTime;
    try (OplogManager.ReadOplogTransaction readTrans = oplogManager.createReadTransaction()) {
        appliedHash = readTrans.getLastAppliedHash();
        appliedOpTime = readTrans.getLastAppliedOptime();
    }

    fetcherService = new ReplSyncFetcher(threadFactory, new FetcherView(), oplogFetcherFactory.createFetcher(appliedHash, appliedOpTime));
    fetcherService.startAsync();

    applierService = new ReplSyncApplier(threadFactory, oplogOpApplier, server, oplogManager, new ApplierView());
    applierService.startAsync();

    // Block until both sub-services are effectively running.
    fetcherService.awaitRunning();
    applierService.awaitRunning();
    LOGGER.info("Started SECONDARY service");
}
Use of com.eightkdata.mongowp.OpTime in project torodb (by torodb).
Class StaticOplogReader, method getBetweenIterator:
/**
 * Returns an iterator over the stored oplog operations whose optime lies between
 * {@code from} and {@code to}, with configurable inclusiveness on both bounds.
 *
 * @param from lower bound of the range
 * @param includeFrom whether an operation at exactly {@code from} is included
 * @param to upper bound of the range
 * @param includeTo whether an operation at exactly {@code to} is included
 * @return an iterator over the selected operations, possibly empty
 */
private Iterator<OplogOperation> getBetweenIterator(OpTime from, boolean includeFrom, OpTime to, boolean includeTo) {
    // Removed unused local 'excludedTo' from the original.
    OpTime includedFrom;
    if (includeFrom || !oplog.containsKey(from)) {
        includedFrom = from;
    } else {
        //_from_ is excluded, but subMap includes it!
        SortedMap<OpTime, OplogOperation> tailMap = oplog.tailMap(from);
        if (tailMap.size() > 1) {
            // BUGFIX: tailMap(from) is inclusive of 'from', so its first key IS 'from'.
            // The original used iterator().next(), which returned 'from' itself and
            // failed to exclude it. Skip the first key and take the next one.
            Iterator<OpTime> keyIt = tailMap.keySet().iterator();
            keyIt.next(); // skip 'from' itself
            includedFrom = keyIt.next(); // safe: tailMap.size() > 1
        } else {
            //the _from_ key is the only key greater or equal than _from_ and we want to exclude it
            return Collections.emptyIterator();
        }
    }
    // subMap is exclusive of its upper bound; append the operation at 'to'
    // separately when it must be included.
    Iterator<OplogOperation> excludingIt = oplog.subMap(includedFrom, to).values().iterator();
    if (includeTo) {
        OplogOperation toOp = oplog.get(to);
        if (toOp != null) {
            return Iterators.concat(excludingIt, Collections.singleton(toOp).iterator());
        }
    }
    return excludingIt;
}
Aggregations