Use of com.eightkdata.mongowp.client.core.UnreachableMongoServerException in project torodb by torodb.
The class TopologyHeartbeatHandler, method handleHeartbeatError:
@Nonnull
private RemoteCommandResponse<ReplSetHeartbeatReply> handleHeartbeatError(Throwable t, Instant start) {
  Duration d = Duration.between(clock.instant(), start);
  ErrorCode errorCode;
  if (t instanceof MongoException) {
    return new FromExceptionRemoteCommandRequest((MongoException) t, d);
  } else if (t instanceof UnreachableMongoServerException) {
    errorCode = ErrorCode.HOST_UNREACHABLE;
  } else {
    if (!(t instanceof MongoRuntimeException) && !(t instanceof UnreachableMongoServerException)) {
      LOGGER.warn("Unexpected exception {} catched by the topology "
          + "heartbeat handler", t.getClass().getSimpleName());
    }
    errorCode = ErrorCode.UNKNOWN_ERROR;
  }
  return new ErroneousRemoteCommandResponse<>(errorCode, t.getLocalizedMessage(), d);
}
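The pattern above boils down to classifying the failure before building a response: a MongoException already carries an error code, an UnreachableMongoServerException maps to a host-unreachable code, and anything else is treated as unknown. Below is a minimal, hypothetical sketch of that classification; only UnreachableMongoServerException is a real type here, and the String codes merely stand in for mongowp's ErrorCode enum.

import com.eightkdata.mongowp.client.core.UnreachableMongoServerException;

// Hypothetical helper, not torodb code: maps a heartbeat failure to a coarse
// error category, mirroring the branches of handleHeartbeatError.
final class HeartbeatFailureClassifier {

  static String classify(Throwable t) {
    if (t instanceof UnreachableMongoServerException) {
      // Transport-level failure: the remote member could not be contacted.
      return "HostUnreachable";
    }
    // Anything else is unexpected and worth logging, as the handler does.
    return "UnknownError";
  }

  public static void main(String[] args) {
    System.out.println(classify(new RuntimeException("boom"))); // prints UnknownError
  }
}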
Use of com.eightkdata.mongowp.client.core.UnreachableMongoServerException in project torodb by torodb.
The class RecoveryService, method initialSync:
private boolean initialSync() throws TryAgainException, FatalErrorException {
  /*
   * 1.  store that data is inconsistent
   * 2.  decide a sync source
   * 3.  lastRemoteOptime1 = get the last optime of the sync source
   * 4.  clone all databases except local
   * 5.  lastRemoteOptime2 = get the last optime of the sync source
   * 6.  apply remote oplog from lastRemoteOptime1 to lastRemoteOptime2
   * 7.  lastRemoteOptime3 = get the last optime of the sync source
   * 8.  apply remote oplog from lastRemoteOptime2 to lastRemoteOptime3
   * 9.  rebuild indexes
   * 10. store lastRemoteOptime3 as the last applied operation optime
   * 11. store that data is consistent
   * 12. change replication state to SECONDARY
   */
  //TODO: Support fastsync (used to restore a node by copying the data from other up-to-date node)
  LOGGER.info("Starting initial sync");
  callback.setConsistentState(false);
  HostAndPort syncSource;
  try {
    syncSource = syncSourceProvider.newSyncSource();
    LOGGER.info("Using node " + syncSource + " to replicate from");
  } catch (NoSyncSourceFoundException ex) {
    throw new TryAgainException("No sync source");
  }
  MongoClient remoteClient;
  try {
    remoteClient = remoteClientFactory.createClient(syncSource);
  } catch (UnreachableMongoServerException ex) {
    throw new TryAgainException(ex);
  }
  try {
    LOGGER.debug("Remote client obtained");
    MongoConnection remoteConnection = remoteClient.openConnection();
    try (OplogReader reader = oplogReaderProvider.newReader(remoteConnection)) {
      OplogOperation lastClonedOp = reader.getLastOp();
      OpTime lastRemoteOptime1 = lastClonedOp.getOpTime();
      try (WriteOplogTransaction oplogTransaction = oplogManager.createWriteTransaction()) {
        LOGGER.info("Remote database cloning started");
        oplogTransaction.truncate();
        LOGGER.info("Local databases dropping started");
        Status<?> status = dropDatabases();
        if (!status.isOk()) {
          throw new TryAgainException("Error while trying to drop collections: " + status);
        }
        LOGGER.info("Local databases dropping finished");
        if (!isRunning()) {
          LOGGER.warn("Recovery stopped before it can finish");
          return false;
        }
        LOGGER.info("Remote database cloning started");
        cloneDatabases(remoteClient);
        LOGGER.info("Remote database cloning finished");
        oplogTransaction.forceNewValue(lastClonedOp.getHash(), lastClonedOp.getOpTime());
      }
      if (!isRunning()) {
        LOGGER.warn("Recovery stopped before it can finish");
        return false;
      }
      TorodServer torodServer = server.getTorodServer();
      try (TorodConnection connection = torodServer.openConnection();
          SharedWriteTorodTransaction trans = connection.openWriteTransaction(false)) {
        OpTime lastRemoteOptime2 = reader.getLastOp().getOpTime();
        LOGGER.info("First oplog application started");
        applyOplog(reader, lastRemoteOptime1, lastRemoteOptime2);
        trans.commit();
        LOGGER.info("First oplog application finished");
        if (!isRunning()) {
          LOGGER.warn("Recovery stopped before it can finish");
          return false;
        }
        OplogOperation lastOperation = reader.getLastOp();
        OpTime lastRemoteOptime3 = lastOperation.getOpTime();
        LOGGER.info("Second oplog application started");
        applyOplog(reader, lastRemoteOptime2, lastRemoteOptime3);
        trans.commit();
        LOGGER.info("Second oplog application finished");
        if (!isRunning()) {
          LOGGER.warn("Recovery stopped before it can finish");
          return false;
        }
        LOGGER.info("Index rebuild started");
        rebuildIndexes();
        trans.commit();
        LOGGER.info("Index rebuild finished");
        if (!isRunning()) {
          LOGGER.warn("Recovery stopped before it can finish");
          return false;
        }
        trans.commit();
      }
    } catch (OplogStartMissingException ex) {
      throw new TryAgainException(ex);
    } catch (OplogOperationUnsupported ex) {
      throw new TryAgainException(ex);
    } catch (MongoException | RollbackException ex) {
      throw new TryAgainException(ex);
    } catch (OplogManagerPersistException ex) {
      throw new FatalErrorException();
    } catch (UserException ex) {
      throw new FatalErrorException(ex);
    }
    callback.setConsistentState(true);
    LOGGER.info("Initial sync finished");
  } finally {
    remoteClient.close();
  }
  return true;
}
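The signature above suggests the calling contract: transient problems (no sync source, an unreachable server, an oplog race) surface as TryAgainException and are worth retrying, while FatalErrorException means the recovery must abort. A rough, hypothetical driver illustrating that contract is sketched below; every name in it is a placeholder, not a torodb API, and the real service wires retries through its own lifecycle rather than a bare loop.

import java.util.concurrent.TimeUnit;

// Illustrative driver, not torodb code: retries an initial sync that reports
// transient failures with a "try again"-style exception, as initialSync does.
final class InitialSyncDriver {

  interface InitialSync {
    /** @return true when the sync completed, false if it was stopped early. */
    boolean run() throws Exception;   // stands in for TryAgainException / FatalErrorException
  }

  static boolean runWithRetries(InitialSync sync, int maxAttempts) throws InterruptedException {
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        return sync.run();            // finished (true) or stopped by request (false)
      } catch (InterruptedException ex) {
        throw ex;                     // propagate cancellation
      } catch (Exception transientFailure) {
        // This branch corresponds to a TryAgainException; a fatal error would
        // instead be allowed to abort the whole recovery.
        TimeUnit.SECONDS.sleep(1);    // back off before the next attempt
      }
    }
    return false;                     // gave up after maxAttempts transient failures
  }
}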
Use of com.eightkdata.mongowp.client.core.UnreachableMongoServerException in project torodb by torodb.
The class ReplSyncFetcher, method runProtected:
@Override
public void runProtected() {
  runThread = Thread.currentThread();
  boolean rollbackNeeded = false;
  try {
    OplogReader oplogReader = null;
    while (!rollbackNeeded && isRunning()) {
      try {
        if (callback.shouldPause()) {
          callback.awaitUntilUnpaused();
        }
        callback.awaitUntilAllFetchedAreApplied();
        HostAndPort syncSource = null;
        try {
          syncSource = syncSourceProvider.newSyncSource(lastFetchedOpTime);
          oplogReader = readerProvider.newReader(syncSource);
        } catch (NoSyncSourceFoundException ex) {
          LOGGER.warn("There is no source to sync from");
          Thread.sleep(1000);
          continue;
        } catch (UnreachableMongoServerException ex) {
          assert syncSource != null;
          LOGGER.warn("It was impossible to reach the sync source " + syncSource);
          Thread.sleep(1000);
          continue;
        }
        rollbackNeeded = fetch(oplogReader);
      } catch (InterruptedException ex) {
        LOGGER.info("Interrupted fetch process", ex);
      } catch (RestartFetchException ex) {
        LOGGER.info("Restarting fetch process", ex);
      } catch (Throwable ex) {
        throw new StopFetchException(ex);
      } finally {
        if (oplogReader != null) {
          oplogReader.close();
        }
      }
    }
    if (rollbackNeeded) {
      LOGGER.info("Requesting rollback");
      callback.rollback(oplogReader);
    } else {
      LOGGER.info(serviceName() + " ending by external request");
      callback.fetchFinished();
    }
  } catch (StopFetchException ex) {
    LOGGER.info(serviceName() + " stopped by self request");
    callback.fetchAborted(ex);
  }
  LOGGER.info(serviceName() + " stopped");
}
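The fetch loop above backs off for a second and picks a new sync source whenever none is available or the chosen one turns out to be unreachable. The sketch below isolates that retry pattern under stated assumptions: the provider and reader interfaces, and the exception NoSourceAvailableException, are placeholders standing in for torodb's SyncSourceProvider, OplogReaderProvider, and NoSyncSourceFoundException; only UnreachableMongoServerException and Guava's HostAndPort are assumed to be the real types used in the snippet.

import com.eightkdata.mongowp.client.core.UnreachableMongoServerException;
import com.google.common.net.HostAndPort;

// Illustrative only, not torodb code: "pick a sync source, back off when it is
// missing or unreachable, then try again".
final class SyncSourceRetrySketch {

  static class NoSourceAvailableException extends Exception { }   // placeholder exception

  interface SourceProvider {
    HostAndPort newSyncSource() throws NoSourceAvailableException;
  }

  interface ReaderFactory<R> {
    R newReader(HostAndPort source) throws UnreachableMongoServerException;
  }

  /** Keeps trying until a reader is open, sleeping one second between attempts. */
  static <R> R openReader(SourceProvider provider, ReaderFactory<R> factory)
      throws InterruptedException {
    while (true) {
      HostAndPort source;
      try {
        source = provider.newSyncSource();
      } catch (NoSourceAvailableException ex) {
        Thread.sleep(1000);                 // no candidate right now; wait and retry
        continue;
      }
      try {
        return factory.newReader(source);
      } catch (UnreachableMongoServerException ex) {
        Thread.sleep(1000);                 // candidate is down; wait and pick again
      }
    }
  }
}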