Use of org.syncany.plugins.transfer.files.DatabaseRemoteFile in project syncany by syncany.
Class ManySyncUpsAndDatabaseFileCleanupScenarioTest, method testManySyncUpsAndDatabaseFileCleanup:
@Test
public void testManySyncUpsAndDatabaseFileCleanup() throws Exception {
	// Setup
	LocalTransferSettings testConnection = (LocalTransferSettings) TestConfigUtil.createTestLocalConnection();
	TestClient clientA = new TestClient("A", testConnection);

	// ROUND 1: many sync up (no cleanup expected here)
	for (int fileNumber = 1; fileNumber <= 15; fileNumber++) {
		clientA.createNewFile("file" + fileNumber, 1);
		clientA.up();
	}

	assertDatabaseVersionsExist(testConnection, 1, 15);

	// ROUND 2: 1x sync up (cleanup expected!)
	clientA.createNewFile("file16", 1);
	clientA.up();

	// Force cleanup
	clientA.cleanup();

	assertDatabaseVersionsAbsent(testConnection, 1, 15);
	assertDatabaseVersionsExist(testConnection, 17, 17);

	// ROUND 3: many sync up (no cleanup expected here)
	for (int fileNumber = 17; fileNumber <= 30; fileNumber++) {
		clientA.createNewFile("file" + fileNumber, 1);
		clientA.up();
	}

	assertDatabaseVersionsAbsent(testConnection, 1, 16);
	assertDatabaseVersionsExist(testConnection, 17, 31);

	// ROUND 4: 1x sync up (cleanup expected!)
	clientA.createNewFile("file31", 1);
	clientA.up();

	CleanupOperationOptions options = new CleanupOperationOptions();
	options.setForce(true);

	// Force cleanup
	clientA.cleanup(options);

	assertDatabaseVersionsAbsent(testConnection, 1, 32);
	assertDatabaseVersionsExist(testConnection, 33, 33);

	// Tear down
	TestClient clientB = new TestClient("B", testConnection);
	clientB.down();

	assertSqlDatabaseEquals(clientA.getDatabaseFile(), clientB.getDatabaseFile());

	clientA.deleteTestData();
	clientB.deleteTestData();
}

// Asserts that client "A"'s remote database files for versions fromVersion..toVersion (inclusive) exist in the repo.
private void assertDatabaseVersionsExist(LocalTransferSettings testConnection, int fromVersion, int toVersion) throws Exception {
	for (int version = fromVersion; version <= toVersion; version++) {
		File expectedDatabaseFile = remoteDatabaseFileOfClientA(testConnection, version);
		assertTrue("Database file SHOULD exist: " + expectedDatabaseFile, expectedDatabaseFile.exists());
	}
}

// Asserts that client "A"'s remote database files for versions fromVersion..toVersion (inclusive) do NOT exist in the repo.
private void assertDatabaseVersionsAbsent(LocalTransferSettings testConnection, int fromVersion, int toVersion) throws Exception {
	for (int version = fromVersion; version <= toVersion; version++) {
		File expectedDatabaseFile = remoteDatabaseFileOfClientA(testConnection, version);
		assertTrue("Database file should NOT exist: " + expectedDatabaseFile, !expectedDatabaseFile.exists());
	}
}

// Builds the local path of client "A"'s remote database file for the given version in the repo's databases folder.
private File remoteDatabaseFileOfClientA(LocalTransferSettings testConnection, int version) throws Exception {
	DatabaseRemoteFile expectedDatabaseRemoteFile = new DatabaseRemoteFile("A", version);
	return new File(testConnection.getPath() + "/databases/" + expectedDatabaseRemoteFile.getName());
}
Use of org.syncany.plugins.transfer.files.DatabaseRemoteFile in project syncany by syncany.
Class FailedSplitSyncUpScenarioTest, method testUpFailsOnFirstTransaction:
@Test
public void testUpFailsOnFirstTransaction() throws Exception {
	// Inject failure for the second multichunk
	UnreliableLocalTransferSettings testConnection = TestConfigUtil.createTestUnreliableLocalConnection(Arrays.asList("rel=[4567].+upload.+multichunk"));
	TestClient clientA = new TestClient("A", testConnection);

	UpOperationOptions upOptions = new UpOperationOptions();
	upOptions.setTransactionSizeLimit(0L);

	// Write three files (three transactions), with the first file spanning two multichunks
	clientA.createNewFile("file1", 5 * 1024 * 1024);
	clientA.createNewFile("file2", 1024);
	clientA.createNewFile("file3", 1024);

	// 1. Attempt upload, should fail
	boolean upThrewException = false;

	try {
		clientA.up(upOptions);
	}
	catch (Exception ignored) {
		// Failure is expected here; the injected unreliable connection aborts the upload
		upThrewException = true;
	}

	assertTrue(upThrewException);

	// 2. Verify local state
	File localStateDir = clientA.getConfig().getStateDir();
	File localCacheDir = clientA.getConfig().getCacheDir();

	// Expecting: 3 transactions + 3 databases + transaction list + in-progress transaction
	assertEquals(8, localStateDir.listFiles().length);

	// Expecting: 3 databases + 4 multichunks + in-progress transaction
	assertEquals(8, localCacheDir.listFiles().length);

	// 3. Verify remote state
	String repoPath = testConnection.getPath();
	File repoActionsDir = new File(repoPath + "/actions");
	File repoDatabasesDir = new File(repoPath + "/databases");
	File repoMultichunksDir = new File(repoPath + "/multichunks");
	File repoTemporaryDir = new File(repoPath + "/temporary");
	File repoTransactionsDir = new File(repoPath + "/transactions");

	// Expecting that no databases/multichunks have been committed, 1 multichunk is temporary, 1 action and transaction are pending
	assertEquals("One pending action should exist in repo", 1, repoActionsDir.listFiles().length);
	assertEquals("No database should be committed in repo", 0, repoDatabasesDir.listFiles().length);
	assertEquals("No multichunk should be committed in repo", 0, repoMultichunksDir.listFiles().length);
	assertEquals("One multichunk should exist in repo as temporary", 1, repoTemporaryDir.listFiles().length);
	assertEquals("One pending transaction should exist in repo", 1, repoTransactionsDir.listFiles().length);

	// 4. Resume operation
	clientA.up();

	// 5. Final state should be as if no failure occurred; three database versions, three complete files
	assertEquals("Three databases should be committed in repo", 3, repoDatabasesDir.listFiles().length);

	for (int databaseVersion = 1; databaseVersion <= 3; databaseVersion++) {
		DatabaseRemoteFile databaseRemoteFile = new DatabaseRemoteFile("A", databaseVersion);
		File databaseFile = new File(repoPath + "/databases/" + databaseRemoteFile.getName());
		assertTrue("Database file should exist: " + databaseFile, databaseFile.exists());
	}

	assertEquals("Four multichunks should be committed in repo", 4, repoMultichunksDir.listFiles().length);

	// Tear down
	clientA.deleteTestData();
}
Use of org.syncany.plugins.transfer.files.DatabaseRemoteFile in project syncany by syncany.
Class DownOperation, method downloadUnknownRemoteDatabases:
/**
 * Downloads the given new/unknown remote database files into the local cache and
 * returns a map from each local cache file to its corresponding remote database file.
 * A {@link DownDownloadFileSyncExternalEvent} is fired for every file downloaded, and
 * each downloaded file name is recorded in the operation result.
 */
private SortedMap<File, DatabaseRemoteFile> downloadUnknownRemoteDatabases(List<DatabaseRemoteFile> unknownRemoteDatabases) throws StorageException {
	logger.log(Level.INFO, "Downloading unknown databases.");

	SortedMap<File, DatabaseRemoteFile> unknownRemoteDatabasesInCache = new TreeMap<File, DatabaseRemoteFile>();
	int downloadedFileCount = 0;

	for (DatabaseRemoteFile unknownRemoteDatabase : unknownRemoteDatabases) {
		File localCacheFile = config.getCache().getDatabaseFile(unknownRemoteDatabase.getName());
		DatabaseRemoteFile remoteDatabaseFile = new DatabaseRemoteFile(unknownRemoteDatabase.getName());

		logger.log(Level.INFO, "- Downloading {0} to local cache at {1}", new Object[] { unknownRemoteDatabase.getName(), localCacheFile });

		downloadedFileCount++;
		eventBus.post(new DownDownloadFileSyncExternalEvent(config.getLocalDir().getAbsolutePath(), "database", downloadedFileCount, unknownRemoteDatabases.size()));

		transferManager.download(remoteDatabaseFile, localCacheFile);

		unknownRemoteDatabasesInCache.put(localCacheFile, remoteDatabaseFile);
		result.getDownloadedUnknownDatabases().add(unknownRemoteDatabase.getName());
	}

	return unknownRemoteDatabasesInCache;
}
Use of org.syncany.plugins.transfer.files.DatabaseRemoteFile in project syncany by syncany.
Class CleanupOperation, method writeMergeFile:
/**
 * Writes the merged database file for a single client and registers it in the map of
 * all merged database files. The merge file is produced by loading all of that client's
 * {@link DatabaseVersion}s from the local database and serializing them into one file.
 *
 * @param clientName Client for which the merged database file is written.
 * @param allMergedDatabaseFiles Map to which the newly written merge file is added.
 */
private void writeMergeFile(String clientName, Map<File, DatabaseRemoteFile> allMergedDatabaseFiles) throws StorageException, IOException {
	// Increment the version by 1, to signal cleanup has occurred
	long lastClientVersion = getNewestDatabaseFileVersion(clientName, localDatabase.getKnownDatabases());
	DatabaseRemoteFile mergeRemoteFile = new DatabaseRemoteFile(clientName, lastClientVersion + 1);
	File mergeLocalFile = config.getCache().getDatabaseFile(mergeRemoteFile.getName());

	logger.log(Level.INFO, " + Writing new merge file (all files up to {0}) to {1} ...", new Object[] { lastClientVersion, mergeLocalFile });

	Iterator<DatabaseVersion> clientDatabaseVersions = localDatabase.getDatabaseVersionsTo(clientName, lastClientVersion);

	DatabaseXmlSerializer serializer = new DatabaseXmlSerializer(config.getTransformer());
	serializer.save(clientDatabaseVersions, mergeLocalFile);

	allMergedDatabaseFiles.put(mergeLocalFile, mergeRemoteFile);
}
Use of org.syncany.plugins.transfer.files.DatabaseRemoteFile in project syncany by syncany.
Class DownOperation, method populateDatabaseBranches:
/**
 * Takes a map of {@link DatabaseVersion} headers (keyed by the {@link DatabaseRemoteFile}
 * they were read from, which carries the originating client name) and loads them into
 * {@link DatabaseBranches}. The local branch is added as well, so the resulting branches
 * contain every known header exactly once, filed under the client that created it.
 *
 * @param localBranch {@link DatabaseBranch} containing the locally known headers.
 * @param remoteDatabaseHeaders Map from {@link DatabaseRemoteFile}s (important for client names) to the {@link DatabaseVersion}s that are
 *        contained in these files.
 *
 * @return DatabaseBranches filled with all the headers that originated from either of the parameters.
 */
private DatabaseBranches populateDatabaseBranches(DatabaseBranch localBranch, SortedMap<DatabaseRemoteFile, List<DatabaseVersion>> remoteDatabaseHeaders) {
	DatabaseBranches allBranches = new DatabaseBranches();

	// Clone so the local branch is not mutated by later branch operations
	allBranches.put(config.getMachineName(), localBranch.clone());

	// Iterate entries directly instead of keySet()+get(): avoids a second O(log n)
	// tree lookup per key on the sorted map
	for (java.util.Map.Entry<DatabaseRemoteFile, List<DatabaseVersion>> remoteDatabaseHeadersEntry : remoteDatabaseHeaders.entrySet()) {
		// Populate branches; the remote file name determines which client's branch receives the headers
		DatabaseBranch remoteClientBranch = allBranches.getBranch(remoteDatabaseHeadersEntry.getKey().getClientName(), true);

		for (DatabaseVersion remoteDatabaseVersion : remoteDatabaseHeadersEntry.getValue()) {
			remoteClientBranch.add(remoteDatabaseVersion.getHeader());
		}
	}

	// Parameterized logging (consistent with the rest of this class); avoids building
	// the message string when INFO is disabled
	logger.log(Level.INFO, "Populated unknown branches: {0}", allBranches);
	return allBranches;
}
Aggregations