Use of org.syncany.database.FileVersion in project syncany by syncany.
Class FileVersionSqlDao, method getAllVersionsInQuery:
/**
 * Runs the given prepared statement and groups every resulting {@link FileVersion}
 * by the {@link FileHistoryId} it belongs to.
 *
 * <p>The result set must contain a {@code filehistory_id} column as well as the
 * columns consumed by {@code createFileVersionFromRow}.
 *
 * @param preparedStatement ready-to-execute query producing file version rows
 * @return mapping of each file history identifier to its file versions (in row order)
 * @throws SQLException if executing the query or reading the result set fails
 */
private Map<FileHistoryId, List<FileVersion>> getAllVersionsInQuery(PreparedStatement preparedStatement) throws SQLException {
	try (ResultSet resultSet = preparedStatement.executeQuery()) {
		Map<FileHistoryId, List<FileVersion>> versionsByHistory = new HashMap<FileHistoryId, List<FileVersion>>();

		while (resultSet.next()) {
			FileHistoryId historyId = FileHistoryId.parseFileId(resultSet.getString("filehistory_id"));

			// Lazily create the per-history bucket on first sight of this history ID
			List<FileVersion> versionsForHistory = versionsByHistory.get(historyId);

			if (versionsForHistory == null) {
				versionsForHistory = new ArrayList<FileVersion>();
				versionsByHistory.put(historyId, versionsForHistory);
			}

			versionsForHistory.add(createFileVersionFromRow(resultSet));
		}

		return versionsByHistory;
	}
}
Use of org.syncany.database.FileVersion in project syncany by syncany.
Class FileVersionSqlDao, method removeFileVersions:
/**
 * Removes all file versions with versions <b>lower or equal</b> than the given file version.
 *
 * <p>Note that this method does not just delete the given file version, but also all of its
 * previous versions, using a single batched SQL delete statement.
 *
 * @param purgeFileVersions for each file history, the highest file version to delete
 *        (that version and everything older is removed)
 * @throws SQLException if preparing or executing the batch delete fails
 */
public void removeFileVersions(Map<FileHistoryId, FileVersion> purgeFileVersions) throws SQLException {
	// isEmpty() is the idiomatic (and potentially cheaper) emptiness check
	if (!purgeFileVersions.isEmpty()) {
		try (PreparedStatement preparedStatement = getStatement(connection, "fileversion.delete.all.removeFileVersionsByIds.sql")) {
			for (Map.Entry<FileHistoryId, FileVersion> purgeFileVersionEntry : purgeFileVersions.entrySet()) {
				FileHistoryId purgeFileHistoryId = purgeFileVersionEntry.getKey();
				FileVersion purgeFileVersion = purgeFileVersionEntry.getValue();

				// One batch entry per history: (history ID, max version to delete)
				preparedStatement.setString(1, purgeFileHistoryId.toString());
				preparedStatement.setLong(2, purgeFileVersion.getVersion());

				preparedStatement.addBatch();
			}

			preparedStatement.executeBatch();
		}
	}
}
Use of org.syncany.database.FileVersion in project syncany by syncany.
Class CleanupOperation, method removeOldVersions:
/**
 * This method checks if there exist {@link FileVersion}s which are to be deleted because the history they are a part
 * of is too long. It will collect these, remove them locally and add them to the {@link RemoteTransaction} for deletion.
 */
private void removeOldVersions() throws Exception {
	Map<FileHistoryId, List<FileVersion>> purgeFileVersions = new TreeMap<FileHistoryId, List<FileVersion>>();
	Map<FileHistoryId, FileVersion> purgeBeforeFileVersions = new TreeMap<FileHistoryId, FileVersion>();

	// Time-based selection: versions that are purgable according to the configured interval
	if (options.isRemoveVersionsByInterval()) {
		purgeFileVersions = collectPurgableFileVersions();
	}

	// Collect all non-final file versions and deleted (final) file versions to fully delete.
	// Careful: collectPurgeBeforeFileVersions also modifies purgeFileVersions as a side effect!
	if (options.isRemoveOldVersions()) {
		purgeBeforeFileVersions = collectPurgeBeforeFileVersions(purgeFileVersions);
	}

	boolean nothingToPurge = purgeFileVersions.isEmpty() && purgeBeforeFileVersions.isEmpty();

	if (nothingToPurge) {
		logger.log(Level.INFO, "- Old version removal: Not necessary.");
		return;
	}

	logger.log(Level.INFO, "- Old version removal: Found {0} file histories and {1} file versions that need cleaning.", new Object[] { purgeFileVersions.size(), purgeBeforeFileVersions.size() });

	// Local: first, drop the file versions that are no longer needed ...
	localDatabase.removeSmallerOrEqualFileVersions(purgeBeforeFileVersions);
	localDatabase.removeFileVersions(purgeFileVersions);

	// ... then determine what must be changed remotely, and remove it locally
	Map<MultiChunkId, MultiChunkEntry> unusedMultiChunks = localDatabase.getUnusedMultiChunks();

	localDatabase.removeUnreferencedDatabaseEntities();
	deleteUnusedRemoteMultiChunks(unusedMultiChunks);

	// Update the operation's result statistics
	long unusedMultiChunkSize = 0;

	for (MultiChunkEntry unusedMultiChunk : unusedMultiChunks.values()) {
		unusedMultiChunkSize += unusedMultiChunk.getSize();
	}

	result.setRemovedOldVersionsCount(purgeBeforeFileVersions.size() + purgeFileVersions.size());
	result.setRemovedMultiChunksCount(unusedMultiChunks.size());
	result.setRemovedMultiChunksSize(unusedMultiChunkSize);
}
Use of org.syncany.database.FileVersion in project syncany by syncany.
Class Indexer, method removeDeletedFiles:
/**
 * Walks the given list of locally deleted files and, for each one whose file
 * history is not already updated in {@code newDatabaseVersion} and whose last
 * known version is not already DELETED, adds a new DELETED file version to
 * {@code newDatabaseVersion}.
 *
 * @param newDatabaseVersion database version being built; DELETED histories are added to it
 * @param deletedFiles       files detected as deleted on disk during indexing
 */
private void removeDeletedFiles(DatabaseVersion newDatabaseVersion, List<File> deletedFiles) {
logger.log(Level.FINER, "- Looking for deleted files ...");
for (File deletedFile : deletedFiles) {
String path = FileUtil.getRelativeDatabasePath(config.getLocalDir(), deletedFile);
// NOTE(review): presumably never null for a file we indexed before — confirm;
// a null return here would NPE on the getFileHistoryId() call below
PartialFileHistory fileHistory = localDatabase.getFileHistoriesWithLastVersionByPath(path);
// Ignore this file history if it has been updated in this database version before (file probably renamed!)
if (newDatabaseVersion.getFileHistory(fileHistory.getFileHistoryId()) != null) {
continue;
}
// Check if file exists, remove if it doesn't
FileVersion lastLocalVersion = fileHistory.getLastVersion();
File lastLocalVersionOnDisk = new File(config.getLocalDir() + File.separator + lastLocalVersion.getPath());
// Ignore this file history if the last version is marked "DELETED" (nothing left to do)
if (lastLocalVersion.getStatus() == FileStatus.DELETED) {
continue;
}
// Look up whether a new file with this path was added in this database version (file type change)
PartialFileHistory newFileWithSameName = getFileHistoryByPathFromDatabaseVersion(newDatabaseVersion, fileHistory.getLastVersion().getPath());
// If the file has vanished from disk, or its path was taken over by a new file,
// record a DELETED version based on the last known local version
if (!FileUtil.exists(lastLocalVersionOnDisk) || newFileWithSameName != null) {
PartialFileHistory fileHistoryForDeletion = createFileHistoryForDeletion(fileHistory, lastLocalVersion);
newDatabaseVersion.addFileHistory(fileHistoryForDeletion);
logger.log(Level.FINER, " + Deleted: Adding DELETED version: {0}", fileHistoryForDeletion.getLastVersion());
logger.log(Level.FINER, " based on: {0}", lastLocalVersion);
}
}
}
Use of org.syncany.database.FileVersion in project syncany by syncany.
Class GetFileFolderRequestHandler, method handleRequest:
/**
 * Reassembles the requested file version into a cache file and publishes the
 * result on the event bus.
 *
 * <p>On success this returns {@code null}: the actual response (including the
 * temp-file token) is delivered asynchronously via the posted
 * {@link GetFileFolderResponseInternal}. On any failure a
 * {@link BadRequestResponse} is returned instead.
 *
 * @param request must be a {@link GetFileFolderRequest}
 * @return {@code null} on success, or a {@link BadRequestResponse} on failure
 */
@Override
public Response handleRequest(FolderRequest request) {
	GetFileFolderRequest fileRequest = (GetFileFolderRequest) request;

	try {
		// Resolve the requested file version and its content/chunk metadata
		FileHistoryId fileHistoryId = FileHistoryId.parseFileId(fileRequest.getFileHistoryId());
		long version = fileRequest.getVersion();

		FileVersion fileVersion = localDatabase.getFileVersion(fileHistoryId, version);
		FileContent fileContent = localDatabase.getFileContent(fileVersion.getChecksum(), true);
		Map<ChunkChecksum, MultiChunkId> multiChunks = localDatabase.getMultiChunkIdsByChecksums(fileContent.getChunks());

		// Fetch and decrypt the required multichunks, then assemble the file into the cache
		TransferManager transferManager = config.getTransferPlugin().createTransferManager(config.getConnection(), config);
		Downloader downloader = new Downloader(config, transferManager);
		Assembler assembler = new Assembler(config, localDatabase);

		downloader.downloadAndDecryptMultiChunks(new HashSet<MultiChunkId>(multiChunks.values()));

		File tempFile = assembler.assembleToCache(fileVersion);
		String tempFileToken = StringUtil.toHex(ObjectId.secureRandomBytes(40));

		// Hand the assembled file off via the event bus; the caller gets the token-bearing response there
		GetFileFolderResponse fileResponse = new GetFileFolderResponse(fileRequest.getId(), fileRequest.getRoot(), tempFileToken);
		GetFileFolderResponseInternal fileResponseInternal = new GetFileFolderResponseInternal(fileResponse, tempFile);

		eventBus.post(fileResponseInternal);
		return null;
	}
	catch (Exception e) {
		logger.log(Level.WARNING, "Cannot reassemble file.", e);
		return new BadRequestResponse(fileRequest.getId(), "Cannot reassemble file.");
	}
}
Aggregations