Example usage of org.syncany.chunk.Deduper in the syncany project (syncany/syncany).
From class UpOperation, method startIndexerThread:
private void startIndexerThread(BlockingQueue<DatabaseVersion> databaseVersionQueue) {
	// Determine which files were updated and which were deleted locally
	ChangeSet changeSet = result.getStatusResult().getChangeSet();
	List<File> updatedFiles = extractLocallyUpdatedFiles(changeSet);
	List<File> deletedFiles = extractLocallyDeletedFiles(changeSet);

	// Build a deduper from the configured chunking pipeline, then hand the
	// change lists to an asynchronous indexer that feeds DatabaseVersions
	// into the supplied queue from a background thread.
	Deduper deduper = new Deduper(config.getChunker(), config.getMultiChunker(), config.getTransformer(),
			options.getTransactionSizeLimit(), options.getTransactionFileLimit());

	AsyncIndexer indexer = new AsyncIndexer(config, deduper, updatedFiles, deletedFiles, databaseVersionQueue);
	new Thread(indexer, "AsyncI/" + config.getLocalDir().getName()).start();
}
Another example usage of org.syncany.chunk.Deduper in the syncany project (syncany/syncany).
From class FrameworkCombinationTest, method deduplicateAndCreateChunkIndex:
/**
 * Deduplicates the given input files with the chunker/multichunker/transformer of the
 * supplied {@link FrameworkCombination} and records the results in a {@link ChunkIndex}.
 *
 * <p>The returned index maps chunk checksums to the multichunk they were written to,
 * maps each input file to the list of chunk checksums it consists of, and collects the
 * multichunk files written to the temporary directory.
 *
 * @param inputFiles files to deduplicate (directories and symlinks are skipped)
 * @param combination chunking framework combination under test
 * @return the populated chunk index
 * @throws IOException if reading input files or writing multichunks fails
 */
private ChunkIndex deduplicateAndCreateChunkIndex(final List<File> inputFiles, FrameworkCombination combination) throws IOException {
	logger.log(Level.INFO, "- Deduplicate and create chunk index ...");

	final ChunkIndex chunkIndex = new ChunkIndex();

	// No transaction limits in this test: deduplicate everything in one pass.
	Deduper deduper = new Deduper(combination.chunker, combination.multiChunker, combination.transformer, Long.MAX_VALUE, Long.MAX_VALUE);

	deduper.deduplicate(inputFiles, new DeduperListener() {
		@Override
		public void onMultiChunkWrite(MultiChunk multiChunk, Chunk chunk) {
			logger.log(Level.INFO, "  - Adding chunk " + StringUtil.toHex(chunk.getChecksum()) + " to multichunk " + multiChunk.getId() + " ...");
			chunkIndex.chunkIDToMultiChunkID.put(new ChunkChecksum(chunk.getChecksum()), multiChunk.getId());
		}

		@Override
		public void onFileAddChunk(File file, Chunk chunk) {
			logger.log(Level.INFO, "  - Adding chunk " + StringUtil.toHex(chunk.getChecksum()) + " to inputFileToChunkIDs-map for file " + file + " ...");

			// computeIfAbsent replaces the manual get/null-check/put pattern and
			// avoids the redundant re-put of an already-present list.
			chunkIndex.inputFileToChunkIDs
					.computeIfAbsent(file, f -> new ArrayList<ChunkChecksum>())
					.add(new ChunkChecksum(chunk.getChecksum()));
		}

		@Override
		public boolean onChunk(Chunk chunk) {
			// Returning false tells the deduper the chunk is already known
			// and must not be written to a multichunk again.
			if (chunkIndex.chunkIDToMultiChunkID.containsKey(new ChunkChecksum(chunk.getChecksum()))) {
				logger.log(Level.INFO, "  + Known chunk " + StringUtil.toHex(chunk.getChecksum()));
				return false;
			}
			else {
				logger.log(Level.INFO, "  + New chunk " + StringUtil.toHex(chunk.getChecksum()));
				return true;
			}
		}

		@Override
		public File getMultiChunkFile(MultiChunkId multiChunkId) {
			// Use the (parent, child) constructor instead of string concatenation
			// with a hard-coded '/' so the path is built portably.
			File outputMultiChunk = new File(tempDir, "multichunk-" + multiChunkId);
			chunkIndex.outputMultiChunkFiles.add(outputMultiChunk);

			return outputMultiChunk;
		}

		@Override
		public MultiChunkId createNewMultiChunkId(Chunk firstChunk) {
			// Note: In the real implementation, this should be random
			return new MultiChunkId(firstChunk.getChecksum());
		}

		@Override
		public boolean onFileFilter(File file) {
			return true;
		}

		@Override
		public boolean onFileStart(File file) {
			// Only deduplicate regular files; skip directories and symlinks.
			return file.isFile() && !FileUtil.isSymlink(file);
		}

		@Override
		public void onFileEnd(File file, byte[] checksum) {
			// Empty
		}

		@Override
		public void onMultiChunkOpen(MultiChunk multiChunk) {
			// Empty
		}

		@Override
		public void onMultiChunkClose(MultiChunk multiChunk) {
			// Empty
		}

		@Override
		public void onStart(int fileCount) {
			// Empty
		}

		@Override
		public void onFinish() {
			// Empty
		}
	});

	return chunkIndex;
}
Aggregations