Use of org.syncany.plugins.transfer.RemoteTransaction in project syncany by syncany.
Class CleanupOperation, method execute():
@Override
public CleanupOperationResult execute() throws Exception {
    logger.log(Level.INFO, "");
    logger.log(Level.INFO, "Running 'Cleanup' at client " + config.getMachineName() + " ...");
    logger.log(Level.INFO, "--------------------------------------------");

    // Do initial check of remote repository preconditions
    CleanupResultCode preconditionResult = checkPreconditions();
    fireStartEvent();

    if (preconditionResult != CleanupResultCode.OK) {
        fireEndEvent();
        return new CleanupOperationResult(preconditionResult);
    }

    fireCleanupNeededEvent();

    // At this point, the operation will lock the repository
    startOperation();

    // If other clients have unfinished transactions with deletions, do not proceed.
    try {
        transferManager.cleanTransactions();
    } catch (BlockingTransfersException ignored) {
        finishOperation();
        fireEndEvent();
        return new CleanupOperationResult(CleanupResultCode.NOK_REPO_BLOCKED);
    }

    // Wait two seconds (conservative cleanup, see #104)
    logger.log(Level.INFO, "Cleanup: Waiting a while to be sure that no other actions are running ...");
    Thread.sleep(BEFORE_DOUBLE_CHECK_TIME);

    // Check again. No other clients should be busy, because we waited BEFORE_DOUBLE_CHECK_TIME
    preconditionResult = checkPreconditions();

    if (preconditionResult != CleanupResultCode.OK) {
        finishOperation();
        fireEndEvent();
        return new CleanupOperationResult(preconditionResult);
    }

    // If we do cleanup, we are no longer allowed to resume a transaction
    transferManager.clearResumableTransactions();
    transferManager.clearPendingTransactions();

    // Now do the actual work!
    logger.log(Level.INFO, "Cleanup: Starting transaction.");
    remoteTransaction = new RemoteTransaction(config, transferManager);

    removeOldVersions();

    if (options.isRemoveUnreferencedTemporaryFiles()) {
        transferManager.removeUnreferencedTemporaryFiles();
    }

    mergeRemoteFiles();

    // We went successfully through the entire operation and checked everything. Hence we update the last cleanup time.
    updateLastCleanupTime();

    finishOperation();
    fireEndEvent();

    return updateResultCode(result);
}
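Note on how remoteTransaction is used here: it is a field of CleanupOperation, and removeOldVersions() and mergeRemoteFiles() presumably queue their remote changes on it so that everything is applied in a single atomic commit. The following is a minimal sketch of that pattern, not the actual CleanupOperation code. It assumes RemoteTransaction exposes upload(File, RemoteFile) and delete(RemoteFile) alongside the commit() call visible in the UpOperation snippets below; the mergeAndDelete helper, the class name, and the parameter list are hypothetical, and the import paths follow the usual syncany package layout.

import java.io.File;
import java.util.List;

import org.syncany.config.Config;
import org.syncany.plugins.transfer.RemoteTransaction;
import org.syncany.plugins.transfer.TransferManager;
import org.syncany.plugins.transfer.files.DatabaseRemoteFile;

public class CleanupTransactionSketch {
    // Hypothetical helper: stage one upload and several deletions, then apply them in one commit.
    public static void mergeAndDelete(Config config, TransferManager transferManager, File mergedDatabaseFile,
            DatabaseRemoteFile mergedRemoteFile, List<DatabaseRemoteFile> oldDatabaseFiles) throws Exception {

        RemoteTransaction remoteTransaction = new RemoteTransaction(config, transferManager);

        // Stage the merged database file for upload (upload(...) is an assumed method name).
        remoteTransaction.upload(mergedDatabaseFile, mergedRemoteFile);

        // Stage the now-redundant per-version database files for deletion (delete(...) is assumed).
        for (DatabaseRemoteFile oldDatabaseFile : oldDatabaseFiles) {
            remoteTransaction.delete(oldDatabaseFile);
        }

        // Nothing becomes visible on the remote until commit(); leftovers of a transaction interrupted
        // before this point are the kind of state transferManager.cleanTransactions() above deals with.
        remoteTransaction.commit();
    }
}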
Use of org.syncany.plugins.transfer.RemoteTransaction in project syncany by syncany.
Class UpOperation, method attemptResumeTransactions():
private Collection<RemoteTransaction> attemptResumeTransactions(Collection<Long> versions) {
    try {
        Collection<RemoteTransaction> remoteTransactions = new ArrayList<>();

        for (Long version : versions) {
            File transactionFile = config.getTransactionFile(version);

            // If a single transaction file is missing, we should restart
            if (!transactionFile.exists()) {
                return null;
            }

            TransactionTO transactionTO = TransactionTO.load(null, transactionFile);

            // Verify that all needed files are in the cache.
            for (ActionTO action : transactionTO.getActions()) {
                if (action.getType() == ActionType.UPLOAD) {
                    if (action.getStatus() == ActionStatus.UNSTARTED) {
                        if (!action.getLocalTempLocation().exists()) {
                            // Unstarted upload has no cached local copy, abort
                            return null;
                        }
                    }
                }
            }

            remoteTransactions.add(new RemoteTransaction(config, transferManager, transactionTO));
        }

        return remoteTransactions;
    } catch (Exception e) {
        logger.log(Level.WARNING, "Invalid transaction file. Cannot resume!");
        return null;
    }
}
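A null return value here is the caller's signal that resuming is impossible and the upload has to start from scratch. Below is a minimal sketch of what that decision could look like at the call site, assuming it sits in UpOperation next to the method above; prepareResume and resumedTransactions are hypothetical names, and only attemptResumeTransactions() and clearPendingTransactions() are taken from this page.

// Hypothetical call-site sketch: fall back to a clean start when resuming is not possible.
private boolean prepareResume(Collection<Long> versions) throws Exception {
    Collection<RemoteTransaction> resumedTransactions = attemptResumeTransactions(versions);

    boolean resuming = resumedTransactions != null && !resumedTransactions.isEmpty();

    if (!resuming) {
        // A missing transaction file or a missing cached upload makes the old transactions unusable,
        // so drop the pending transaction files and let the next upload start fresh.
        transferManager.clearPendingTransactions();
    }

    return resuming;
}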
Use of org.syncany.plugins.transfer.RemoteTransaction in project syncany by syncany.
Class UpOperation, method executeTransactions():
/**
* Transfers the given {@link DatabaseVersion} objects to the remote.
* Each {@link DatabaseVersion} will be transferred in its own {@link RemoteTransaction} object.
*
* This method resumes an interrupted sequence of earlier transactions.
* It expects the {@link DatabaseVersion} and {@link RemoteTransaction} files to be in the same order as they were originally generated.
* The first {@link DatabaseVersion} and {@link RemoteTransaction} objects should match the interrupted transaction.
*
* The assumption is that the given {@link RemoteTransaction} objects match the given {@link DatabaseVersion} objects.
* The given {@link TransactionRemoteFile} corresponds to the file on the remote from the interrupted transaction.
*
* @param databaseVersionQueue The {@link DatabaseVersion} objects to send to the remote.
* @param remoteTransactionsToResume {@link RemoteTransaction} objects that correspond to the given {@link DatabaseVersion} objects.
* @param transactionRemoteFileToResume The file on the remote that was used for the specific transaction that was interrupted.
*/
private int executeTransactions() throws Exception {
    Iterator<RemoteTransaction> remoteTransactionsToResumeIterator = (resuming) ? remoteTransactionsToResume.iterator() : null;

    // At this point, if a failure occurs from which we can resume, new transaction files will be written.
    // Delete any old transaction files.
    transferManager.clearPendingTransactions();

    boolean detectedFailure = false;
    Exception caughtFailure = null;
    List<RemoteTransaction> remainingRemoteTransactions = new ArrayList<>();
    List<DatabaseVersion> remainingDatabaseVersions = new ArrayList<>();

    DatabaseVersion databaseVersion = databaseVersionQueue.take();
    boolean noDatabaseVersions = databaseVersion.isEmpty();

    // Add dirty data to first database
    addDirtyData(databaseVersion);

    while (!databaseVersion.isEmpty()) {
        RemoteTransaction remoteTransaction = null;

        if (!resuming) {
            VectorClock newVectorClock = findNewVectorClock();
            databaseVersion.setVectorClock(newVectorClock);
            databaseVersion.setTimestamp(new Date());
            databaseVersion.setClient(config.getMachineName());

            remoteTransaction = new RemoteTransaction(config, transferManager);

            // Add multichunks to transaction
            logger.log(Level.INFO, "Uploading new multichunks ...");

            // This call adds newly changed chunks to a "RemoteTransaction", so they can be uploaded later.
            addMultiChunksToTransaction(remoteTransaction, databaseVersion.getMultiChunks());
        } else {
            remoteTransaction = remoteTransactionsToResumeIterator.next();
        }

        logger.log(Level.INFO, "Uploading database: " + databaseVersion);

        // Create delta database and commit transaction.
        // The information about file changes is written to disk to locally "commit" the transaction. This
        // enables Syncany to later resume the transaction if it is interrupted before completion.
        writeAndAddDeltaDatabase(remoteTransaction, databaseVersion, resuming);

        // This thread is to be run when the transaction is interrupted for connectivity reasons. It will serialize
        // the transaction and metadata in memory such that the transaction can be resumed later.
        Thread writeResumeFilesShutDownHook = createAndAddShutdownHook(remoteTransaction, databaseVersion);

        // Only after the changes are confirmed to have been safely pushed to the remote, will the transaction be marked as complete.
        if (!detectedFailure) {
            boolean committingFailed = true;

            try {
                if (transactionRemoteFileToResume == null) {
                    remoteTransaction.commit();
                } else {
                    remoteTransaction.commit(config.getTransactionFile(), transactionRemoteFileToResume);
                    transactionRemoteFileToResume = null;
                }

                logger.log(Level.INFO, "Persisting local SQL database (new database version {0}) ...", databaseVersion.getHeader().toString());
                long newDatabaseVersionId = localDatabase.writeDatabaseVersion(databaseVersion);

                logger.log(Level.INFO, "Removing DIRTY database versions from database ...");
                localDatabase.removeDirtyDatabaseVersions(newDatabaseVersionId);

                logger.log(Level.INFO, "Adding database version to result changes:" + databaseVersion);
                addNewDatabaseChangesToResultChanges(databaseVersion, result.getChangeSet());

                result.incrementTransactionsCompleted();

                logger.log(Level.INFO, "Committing local database.");
                localDatabase.commit();

                committingFailed = false;
            } catch (Exception e) {
                detectedFailure = true;
                caughtFailure = e;
            } finally {
                // The JVM has not shut down, so we can remove the shutdown hook.
                // If it turns out that committing has failed, we run it explicitly.
                removeShutdownHook(writeResumeFilesShutDownHook);

                if (committingFailed) {
                    remainingRemoteTransactions.add(remoteTransaction);
                    remainingDatabaseVersions.add(databaseVersion);
                }
            }
        } else {
            remainingRemoteTransactions.add(remoteTransaction);
            remainingDatabaseVersions.add(databaseVersion);
        }

        if (!noDatabaseVersions) {
            logger.log(Level.FINE, "Waiting for new database version.");
            databaseVersion = databaseVersionQueue.take();
            logger.log(Level.FINE, "Took new database version: " + databaseVersion);
        } else {
            logger.log(Level.FINE, "Not waiting for new database version, last one has been taken.");
            break;
        }
    }

    if (detectedFailure) {
        localDatabase.rollback();
        serializeRemoteTransactionsAndMetadata(remainingRemoteTransactions, remainingDatabaseVersions);
        throw caughtFailure;
    }

    return (int) result.getTransactionsCompleted();
}
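The shutdown hook registered per transaction (createAndAddShutdownHook above) is what connects the two UpOperation methods on this page: if the JVM dies while a commit is in flight, the hook persists the transaction and its database version, which is the state attemptResumeTransactions() later reloads via config.getTransactionFile(version). The body of that hook is not shown on this page; the following is a speculative sketch only, reusing the serializeRemoteTransactionsAndMetadata() call from the failure branch above and assuming a java.util.Arrays import.

// Speculative sketch of the shutdown hook: persist resume state if the JVM is killed mid-commit.
private Thread createAndAddShutdownHook(final RemoteTransaction remoteTransaction, final DatabaseVersion databaseVersion) {
    Thread writeResumeFilesShutDownHook = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                // Write the transaction file and delta database so a later 'up' can resume.
                serializeRemoteTransactionsAndMetadata(Arrays.asList(remoteTransaction), Arrays.asList(databaseVersion));
            } catch (Exception e) {
                logger.log(Level.WARNING, "Could not persist resume state during shutdown.", e);
            }
        }
    });

    Runtime.getRuntime().addShutdownHook(writeResumeFilesShutDownHook);
    return writeResumeFilesShutDownHook;
}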