Use of org.syncany.database.MultiChunkEntry in project syncany by syncany.
In class CleanupOperation, method removeOldVersions:
/**
 * Checks for {@link FileVersion}s that must be deleted because the history they belong
 * to has grown too long. Collects them, removes them from the local database and adds
 * the corresponding remote deletions to the {@link RemoteTransaction}.
 */
private void removeOldVersions() throws Exception {
	Map<FileHistoryId, List<FileVersion>> purgeFileVersions = new TreeMap<FileHistoryId, List<FileVersion>>();
	Map<FileHistoryId, FileVersion> purgeBeforeFileVersions = new TreeMap<FileHistoryId, FileVersion>();

	if (options.isRemoveVersionsByInterval()) {
		// Time-based selection: file versions purgable according to the configured intervals.
		purgeFileVersions = collectPurgableFileVersions();
	}

	if (options.isRemoveOldVersions()) {
		// Non-final file versions plus deleted (final) file versions to delete entirely.
		// NOTE: this call also modifies purgeFileVersions!
		purgeBeforeFileVersions = collectPurgeBeforeFileVersions(purgeFileVersions);
	}

	boolean nothingToPurge = purgeFileVersions.isEmpty() && purgeBeforeFileVersions.isEmpty();

	if (nothingToPurge) {
		logger.log(Level.INFO, "- Old version removal: Not necessary.");
		return;
	}

	logger.log(Level.INFO, "- Old version removal: Found {0} file histories and {1} file versions that need cleaning.",
			new Object[] { purgeFileVersions.size(), purgeBeforeFileVersions.size() });

	// Local: drop the file versions that are no longer needed
	localDatabase.removeSmallerOrEqualFileVersions(purgeBeforeFileVersions);
	localDatabase.removeFileVersions(purgeFileVersions);

	// Local: capture what has become unreferenced, remove it locally, then remotely.
	// Order matters: the unused multichunks must be read BEFORE the unreferenced
	// entities are removed from the local database.
	Map<MultiChunkId, MultiChunkEntry> unusedMultiChunks = localDatabase.getUnusedMultiChunks();
	localDatabase.removeUnreferencedDatabaseEntities();
	deleteUnusedRemoteMultiChunks(unusedMultiChunks);

	// Update stats
	long unusedMultiChunkSize = 0;

	for (MultiChunkEntry unusedMultiChunk : unusedMultiChunks.values()) {
		unusedMultiChunkSize += unusedMultiChunk.getSize();
	}

	result.setRemovedOldVersionsCount(purgeBeforeFileVersions.size() + purgeFileVersions.size());
	result.setRemovedMultiChunksCount(unusedMultiChunks.size());
	result.setRemovedMultiChunksSize(unusedMultiChunkSize);
}
Use of org.syncany.database.MultiChunkEntry in project syncany by syncany.
In class MultiChunkDaoTest, method testGetMultiChunksByDatabaseVersion2:
@Test
public void testGetMultiChunksByDatabaseVersion2() throws Exception {
	// Setup
	Config testConfig = TestConfigUtil.createTestLocalConfig();
	Connection databaseConnection = testConfig.createDatabaseConnection();

	try {
		// Run
		TestSqlUtil.runSqlFromResource(databaseConnection, "test.insert.set1.sql");

		MultiChunkSqlDao multiChunkDao = new MultiChunkSqlDao(databaseConnection);
		Map<MultiChunkId, MultiChunkEntry> multiChunksA4 = multiChunkDao.getMultiChunks(TestDatabaseUtil.createVectorClock("A4"));
		Map<MultiChunkId, MultiChunkEntry> multiChunksA5 = multiChunkDao.getMultiChunks(TestDatabaseUtil.createVectorClock("A5"));

		// Test
		// - Database version "A4": contains no multichunks
		assertNotNull(multiChunksA4);
		assertEquals(0, multiChunksA4.size());

		// - Database version "A5": contains exactly one multichunk, with one chunk ref
		assertNotNull(multiChunksA5);
		assertEquals(1, multiChunksA5.size());

		MultiChunkEntry multiChunkInA5 = multiChunksA5.get(MultiChunkId.parseMultiChunkId("dddddddddddddddddddddddddddddddddddddddd"));

		assertNotNull(multiChunkInA5);
		assertEquals("dddddddddddddddddddddddddddddddddddddddd", multiChunkInA5.getId().toString());
		assertTrue(CollectionUtil.containsExactly(multiChunkInA5.getChunks(), ChunkChecksum.parseChunkChecksum("ffffffffffffffffffffffffffffffffffffffff")));
	}
	finally {
		// Tear down in a finally block so it also runs when an assertion above fails;
		// otherwise the connection and the temporary local config/data would leak
		// into subsequent tests.
		databaseConnection.close();
		TestConfigUtil.deleteTestLocalConfigAndData(testConfig);
	}
}
Use of org.syncany.database.MultiChunkEntry in project syncany by syncany.
In class DatabaseXmlWriter, method writeMultiChunks:
/**
 * Serializes the given multichunks to the XML stream as a {@code <multiChunks>}
 * element, one {@code <multiChunk>} child per entry (with its chunk references).
 * Writes nothing at all if the collection is empty.
 */
private void writeMultiChunks(IndentXmlStreamWriter xmlOut, Collection<MultiChunkEntry> multiChunks) throws XMLStreamException {
	if (multiChunks.isEmpty()) {
		return;
	}

	xmlOut.writeStartElement("multiChunks");

	for (MultiChunkEntry multiChunkEntry : multiChunks) {
		xmlOut.writeStartElement("multiChunk");
		xmlOut.writeAttribute("id", multiChunkEntry.getId().toString());
		xmlOut.writeAttribute("size", multiChunkEntry.getSize());

		xmlOut.writeStartElement("chunkRefs");

		for (ChunkChecksum chunkRef : multiChunkEntry.getChunks()) {
			xmlOut.writeEmptyElement("chunkRef");
			xmlOut.writeAttribute("ref", chunkRef.toString());
		}

		xmlOut.writeEndElement(); // </chunkRefs>
		xmlOut.writeEndElement(); // </multiChunk>
	}

	xmlOut.writeEndElement(); // </multiChunks>
}
Use of org.syncany.database.MultiChunkEntry in project syncany by syncany.
In class UpOperation, method addDirtyData:
/**
 * Copies all data from DIRTY {@link DatabaseVersion}s (versions that are not part of
 * the winning branch) into the given new database version, so that it is included in
 * the new 'up'. Only metadata is re-uploaded this way; the actual multichunks are
 * already present in the repository.
 *
 * @param newDatabaseVersion {@link DatabaseVersion} to which dirty data should be added.
 */
private void addDirtyData(DatabaseVersion newDatabaseVersion) {
	Iterator<DatabaseVersion> dirtyDatabaseVersions = localDatabase.getDirtyDatabaseVersions();

	if (!dirtyDatabaseVersions.hasNext()) {
		logger.log(Level.INFO, "No DIRTY data found in database (no dirty databases); Nothing to do here.");
		return;
	}

	logger.log(Level.INFO, "Adding DIRTY data to new database version: ");

	while (dirtyDatabaseVersions.hasNext()) {
		DatabaseVersion dirtyDatabaseVersion = dirtyDatabaseVersions.next();

		logger.log(Level.INFO, "- Adding chunks/multichunks/filecontents from database version " + dirtyDatabaseVersion.getHeader());

		// Carry over every chunk, multichunk and file content of the dirty version.
		for (ChunkEntry dirtyChunk : dirtyDatabaseVersion.getChunks()) {
			newDatabaseVersion.addChunk(dirtyChunk);
		}

		for (MultiChunkEntry dirtyMultiChunk : dirtyDatabaseVersion.getMultiChunks()) {
			newDatabaseVersion.addMultiChunk(dirtyMultiChunk);
		}

		for (FileContent dirtyFileContent : dirtyDatabaseVersion.getFileContents()) {
			newDatabaseVersion.addFileContent(dirtyFileContent);
		}
	}
}
Use of org.syncany.database.MultiChunkEntry in project syncany by syncany.
In class UpOperation, method addMultiChunksToTransaction:
/**
 * Adds the multichunks that are not yet present in the remote repository to the
 * {@link RemoteTransaction} for uploading. Multichunks are not uploaded if they are
 * dirty, because dirty multichunks have already been uploaded.
 *
 * @param remoteTransaction Transaction to which the multichunk uploads are added.
 * @param multiChunksEntries Collection of multichunk entries that are included in the new {@link DatabaseVersion}
 */
private void addMultiChunksToTransaction(RemoteTransaction remoteTransaction, Collection<MultiChunkEntry> multiChunksEntries) throws InterruptedException, StorageException {
	// Copy the dirty ids into a set: List.contains() is O(n), which made the loop below
	// O(n*m); a hash-set lookup is O(1). Fully qualified so no import change is needed.
	java.util.Set<MultiChunkId> dirtyMultiChunkIds = new java.util.HashSet<>(localDatabase.getDirtyMultiChunkIds());

	for (MultiChunkEntry multiChunkEntry : multiChunksEntries) {
		if (dirtyMultiChunkIds.contains(multiChunkEntry.getId())) {
			logger.log(Level.INFO, "- Ignoring multichunk (from dirty database, already uploaded), " + multiChunkEntry.getId() + " ...");
		}
		else {
			File localMultiChunkFile = config.getCache().getEncryptedMultiChunkFile(multiChunkEntry.getId());
			MultichunkRemoteFile remoteMultiChunkFile = new MultichunkRemoteFile(multiChunkEntry.getId());

			logger.log(Level.INFO, "- Uploading multichunk {0} from {1} to {2} ...",
					new Object[] { multiChunkEntry.getId(), localMultiChunkFile, remoteMultiChunkFile });

			remoteTransaction.upload(localMultiChunkFile, remoteMultiChunkFile);
		}
	}
}
Aggregations