Example usage of org.syncany.database.MultiChunkEntry.MultiChunkId in the syncany project:
class MultiChunkDaoTest, method testGetMultiChunksByDatabaseVersion1.
/**
 * Tests {@code MultiChunkSqlDao.getMultiChunks(VectorClock)} against the
 * {@code test.insert.set3.sql} fixture for two database versions: "A6"
 * (one multichunk with a single chunk) and "A7,B2" (one multichunk with
 * fourteen chunks).
 *
 * @throws Exception If the test config or database cannot be set up
 */
@Test
public void testGetMultiChunksByDatabaseVersion1() throws Exception {
	// Setup
	Config testConfig = TestConfigUtil.createTestLocalConfig();
	Connection databaseConnection = testConfig.createDatabaseConnection();

	// Run
	TestSqlUtil.runSqlFromResource(databaseConnection, "test.insert.set3.sql");

	MultiChunkSqlDao multiChunkDao = new MultiChunkSqlDao(databaseConnection);
	Map<MultiChunkId, MultiChunkEntry> multiChunksA6 = multiChunkDao.getMultiChunks(TestDatabaseUtil.createVectorClock("A6"));
	Map<MultiChunkId, MultiChunkEntry> multiChunksA7B2 = multiChunkDao.getMultiChunks(TestDatabaseUtil.createVectorClock("A7,B2"));

	// Test
	// - Database version "A6"
	assertNotNull(multiChunksA6);
	assertEquals(1, multiChunksA6.size());

	MultiChunkEntry multiChunkInA6 = multiChunksA6.get(MultiChunkId.parseMultiChunkId("9302d8b104023627f655fa7745927fdeb3df674b"));
	assertNotNull(multiChunkInA6);
	assertEquals("9302d8b104023627f655fa7745927fdeb3df674b", multiChunkInA6.getId().toString());
	assertTrue(CollectionUtil.containsExactly(multiChunkInA6.getChunks(), ChunkChecksum.parseChunkChecksum("24a39e00d6156804e27f7c0987d00903da8e6682")));

	// - Database version "A7,B2" (comment previously said "A8,B3" — the code queries "A7,B2")
	assertNotNull(multiChunksA7B2);
	assertEquals(1, multiChunksA7B2.size());

	MultiChunkEntry multiChunkInA7B2 = multiChunksA7B2.get(MultiChunkId.parseMultiChunkId("51aaca5c1280b1cf95cff8a3266a6bb44b482ad4"));
	assertNotNull(multiChunkInA7B2); // fail with an assertion, not an NPE, if the lookup misses
	assertEquals("51aaca5c1280b1cf95cff8a3266a6bb44b482ad4", multiChunkInA7B2.getId().toString());
	assertTrue(CollectionUtil.containsExactly(multiChunkInA7B2.getChunks(), ChunkChecksum.parseChunkChecksum("0fecbac8ac8a5f8b7aa12b2741a4ef5db88c5dea"), ChunkChecksum.parseChunkChecksum("38a18897e94a901b833e750e8604d9616a02ca84"), ChunkChecksum.parseChunkChecksum("47dded182d31799267f12eb9864cdc11127b3352"), ChunkChecksum.parseChunkChecksum("5abe80d7dd96369a3e53993cd69279400ec740bd"), ChunkChecksum.parseChunkChecksum("5f0b34374821423f69bf2231210245ccf0302df0"), ChunkChecksum.parseChunkChecksum("615fba8c2281d5bee891eb092a252d235c237457"), ChunkChecksum.parseChunkChecksum("8ed8d50a6e9da3197bd665bc3a1f229ebcde9b42"), ChunkChecksum.parseChunkChecksum("9974b55a79994b4bfe007983539ca21b2679ba35"), ChunkChecksum.parseChunkChecksum("a301a81d5a4f427d04791b89bfd7798eda6bd013"), ChunkChecksum.parseChunkChecksum("a7405a0bada0035ed52a1a44a4d381b78dc59d19"), ChunkChecksum.parseChunkChecksum("ab85720d3f31bd08ca1cd25dcd8a490e5f00783b"), ChunkChecksum.parseChunkChecksum("b0223d9770a5c0d7e22ac3d2706c4c9858cf42a9"), ChunkChecksum.parseChunkChecksum("b310c0eedcd03238888c6abb3e3398633139ecc5"), ChunkChecksum.parseChunkChecksum("f15eace568ea3c324ecd3d01b67e692bbf8a2f1b")));

	// Tear down
	databaseConnection.close();
	TestConfigUtil.deleteTestLocalConfigAndData(testConfig);
}
Example usage of org.syncany.database.MultiChunkEntry.MultiChunkId in the syncany project:
class MultiChunkDaoTest, method testGetMultiChunkId.
/**
 * Tests {@code MultiChunkSqlDao.getMultiChunkId(ChunkChecksum)}: chunks belonging
 * to the same multichunk must resolve to the same multichunk identifier, and a
 * checksum that is not in the database must resolve to {@code null}.
 *
 * @throws Exception If the test config or database cannot be set up
 */
@Test
public void testGetMultiChunkId() throws Exception {
	// Setup
	Config testConfig = TestConfigUtil.createTestLocalConfig();
	Connection databaseConnection = testConfig.createDatabaseConnection();

	// Run
	TestSqlUtil.runSqlFromResource(databaseConnection, "test.insert.set3.sql");

	MultiChunkSqlDao multiChunkDao = new MultiChunkSqlDao(databaseConnection);

	MultiChunkId idOfSingleChunk = multiChunkDao.getMultiChunkId(ChunkChecksum.parseChunkChecksum("eba69a8e359ce3258520138a50ed9860127ab6e0"));
	MultiChunkId idOfFirstSibling = multiChunkDao.getMultiChunkId(ChunkChecksum.parseChunkChecksum("0fecbac8ac8a5f8b7aa12b2741a4ef5db88c5dea"));
	MultiChunkId idOfSecondSibling = multiChunkDao.getMultiChunkId(ChunkChecksum.parseChunkChecksum("38a18897e94a901b833e750e8604d9616a02ca84"));
	MultiChunkId idOfUnknownChunk = multiChunkDao.getMultiChunkId(ChunkChecksum.parseChunkChecksum("beefbeefbeefbeefbeefbeefbeefbeefbeefbeef"));

	// Test
	// - A chunk stored alone resolves to its own multichunk
	assertNotNull(idOfSingleChunk);
	assertEquals("0d79eed3fd8ac866b5872ea3f3f079c46dd15ac9", idOfSingleChunk.toString());

	// - Two chunks in the same multichunk resolve to the same id
	assertNotNull(idOfFirstSibling);
	assertEquals("51aaca5c1280b1cf95cff8a3266a6bb44b482ad4", idOfFirstSibling.toString());
	assertNotNull(idOfSecondSibling);
	assertEquals("51aaca5c1280b1cf95cff8a3266a6bb44b482ad4", idOfSecondSibling.toString());
	assertEquals(idOfFirstSibling, idOfSecondSibling);

	// - Unknown checksum yields null
	assertNull(idOfUnknownChunk);

	// Tear down
	databaseConnection.close();
	TestConfigUtil.deleteTestLocalConfigAndData(testConfig);
}
Example usage of org.syncany.database.MultiChunkEntry.MultiChunkId in the syncany project:
class RestoreOperation, method downloadMultiChunks.
/**
 * Downloads and decrypts all multichunks required to restore the given file version.
 * If the version carries no checksum (no content to fetch), the method does nothing.
 *
 * @param restoreFileVersion File version whose content multichunks should be fetched
 * @throws StorageException If the remote storage cannot be accessed
 * @throws IOException If downloading or decrypting a multichunk fails
 */
private void downloadMultiChunks(FileVersion restoreFileVersion) throws StorageException, IOException {
	FileChecksum restoreFileChecksum = restoreFileVersion.getChecksum();

	if (restoreFileChecksum == null) {
		return; // no checksum -> nothing to download
	}

	Set<MultiChunkId> multiChunksToDownload = new HashSet<MultiChunkId>(localDatabase.getMultiChunkIds(restoreFileChecksum));

	logger.log(Level.INFO, "Downloading " + multiChunksToDownload.size() + " multichunk(s) to restore file ...");
	downloader.downloadAndDecryptMultiChunks(multiChunksToDownload);
}
Example usage of org.syncany.database.MultiChunkEntry.MultiChunkId in the syncany project:
class MemoryDatabaseCacheTest, method testMultiChunkCache.
/**
 * Tests that {@code MemoryDatabase} resolves chunks and multichunks added across
 * two successive database versions: entries from round 1 must remain visible
 * after round 2 is added.
 *
 * @throws IOException If adding a database version fails
 */
@Test
public void testMultiChunkCache() throws IOException {
	MemoryDatabase database = new MemoryDatabase();

	// Round 1: one multichunk holding one chunk
	DatabaseVersion firstVersion = TestDatabaseUtil.createDatabaseVersion();

	MultiChunkEntry multiChunkP1 = new MultiChunkEntry(new MultiChunkId(new byte[] { 8, 8, 8, 8, 8, 8, 8, 8 }), 10);
	ChunkEntry chunkA1 = new ChunkEntry(new ChunkChecksum(new byte[] { 1, 2, 3, 4, 5, 7, 8, 9, 0 }), 12);

	multiChunkP1.addChunk(chunkA1.getChecksum());
	firstVersion.addChunk(chunkA1);
	firstVersion.addMultiChunk(multiChunkP1);
	database.addDatabaseVersion(firstVersion);

	// Look up by equal value objects taken from the entries themselves
	assertEquals(chunkA1, database.getChunk(chunkA1.getChecksum()));
	assertEquals(multiChunkP1, database.getMultiChunk(multiChunkP1.getId()));

	// Round 2: two more multichunks holding three more chunks
	DatabaseVersion secondVersion = TestDatabaseUtil.createDatabaseVersion(firstVersion);

	MultiChunkEntry multiChunkP2 = new MultiChunkEntry(new MultiChunkId(new byte[] { 7, 7, 7, 7, 7, 7, 7, 7, 7 }), 11);
	MultiChunkEntry multiChunkP3 = new MultiChunkEntry(new MultiChunkId(new byte[] { 5, 5, 5, 5, 5, 5, 5, 5, 5 }), 12);

	ChunkEntry chunkA2 = new ChunkEntry(new ChunkChecksum(new byte[] { 9, 2, 3, 4, 5, 7, 8, 9, 0 }), 912);
	ChunkEntry chunkA3 = new ChunkEntry(new ChunkChecksum(new byte[] { 8, 2, 3, 4, 5, 7, 8, 9, 0 }), 812);
	ChunkEntry chunkA4 = new ChunkEntry(new ChunkChecksum(new byte[] { 7, 2, 3, 4, 5, 7, 8, 9, 0 }), 712);

	multiChunkP2.addChunk(chunkA2.getChecksum());
	multiChunkP2.addChunk(chunkA3.getChecksum());
	multiChunkP3.addChunk(chunkA4.getChecksum());

	secondVersion.addChunk(chunkA2);
	secondVersion.addChunk(chunkA3);
	secondVersion.addChunk(chunkA4);
	secondVersion.addMultiChunk(multiChunkP2);
	secondVersion.addMultiChunk(multiChunkP3);
	database.addDatabaseVersion(secondVersion);

	// Round-1 entries must still resolve; round-2 entries must now resolve too
	assertEquals(chunkA1, database.getChunk(chunkA1.getChecksum()));
	assertEquals(chunkA2, database.getChunk(chunkA2.getChecksum()));
	assertEquals(chunkA3, database.getChunk(chunkA3.getChecksum()));
	assertEquals(chunkA4, database.getChunk(chunkA4.getChecksum()));

	assertEquals(multiChunkP1, database.getMultiChunk(multiChunkP1.getId()));
	assertEquals(multiChunkP2, database.getMultiChunk(multiChunkP2.getId()));
	assertEquals(multiChunkP3, database.getMultiChunk(multiChunkP3.getId()));
}
Example usage of org.syncany.database.MultiChunkEntry.MultiChunkId in the syncany project:
class Deduper, method deduplicate.
/**
 * Deduplicates the given list of files according to the Syncany chunk algorithm.
 *
 * <p>A brief description of the algorithm (and further links to a detailed description)
 * are given in the {@link Deduper}.
 *
 * <p>Files are consumed from the head of {@code files}, so the list shrinks as the
 * method runs. NOTE(review): when the transaction limits ({@code maxTotalSize} /
 * {@code maxNumberOfFiles}) are reached, the method returns early WITHOUT calling
 * {@code listener.onFinish()} — presumably the caller re-invokes it with the
 * remaining files; confirm against callers.
 *
 * @param files List of files to be deduplicated (will be modified!)
 * @param listener Listener to react of file/chunk/multichunk events, and to implement the chunk index
 * @throws IOException If a file cannot be read or an unexpected exception occurs
 */
public void deduplicate(List<File> files, DeduperListener listener) throws IOException {
// Current chunk (last one emitted by the chunker) and currently open multichunk;
// the multichunk is carried across files until it is full or a limit is hit.
Chunk chunk = null;
MultiChunk multiChunk = null;
// Running totals used to enforce the transaction limits below.
long totalMultiChunkSize = 0L;
long totalNumFiles = 0L;
while (!files.isEmpty()) {
// Consume from the head: the parameter list is deliberately mutated.
File file = files.remove(0);
totalNumFiles++;
// Filter ignored files
boolean fileAccepted = listener.onFileFilter(file);
if (!fileAccepted) {
continue;
}
// Decide whether to index the contents
boolean dedupContents = listener.onFileStart(file);
if (dedupContents) {
// Create chunks from file
ChunkEnumeration chunksEnum = chunker.createChunks(file);
while (chunksEnum.hasMoreElements()) {
chunk = chunksEnum.nextElement();
// old chunk: already known to the index, just record it for this file
if (!listener.onChunk(chunk)) {
listener.onFileAddChunk(file, chunk);
continue;
} else // new chunk
{
// - Check if multichunk full
if (multiChunk != null && multiChunk.isFull()) {
totalMultiChunkSize += multiChunk.getSize();
multiChunk.close();
listener.onMultiChunkClose(multiChunk);
multiChunk = null;
}
// - Open new multichunk if non-existent
if (multiChunk == null) {
MultiChunkId newMultiChunkId = listener.createNewMultiChunkId(chunk);
File multiChunkFile = listener.getMultiChunkFile(newMultiChunkId);
multiChunk = multiChunker.createMultiChunk(newMultiChunkId, transformer.createOutputStream(new FileOutputStream(multiChunkFile)));
listener.onMultiChunkOpen(multiChunk);
}
// - Add chunk data
multiChunk.write(chunk);
listener.onMultiChunkWrite(multiChunk, chunk);
}
listener.onFileAddChunk(file, chunk);
}
// Closing file is necessary!
chunksEnum.close();
}
// chunk still holds the file's last chunk here (if any); its checksum is the file checksum
if (chunk != null) {
listener.onFileEnd(file, chunk.getFileChecksum());
} else {
listener.onFileEnd(file, null);
}
// Reset chunk (if folder after chunk, the folder would have a checksum b/c of chunk.getFileChecksum())
chunk = null;
// Check if we have reached the transaction limit
if (multiChunk != null) {
// An open multichunk counts toward the size limit even though it has not
// been added to totalMultiChunkSize yet; close it before returning early.
if (totalMultiChunkSize + multiChunk.getSize() >= maxTotalSize || totalNumFiles >= maxNumberOfFiles) {
multiChunk.close();
listener.onMultiChunkClose(multiChunk);
return;
}
} else if (totalMultiChunkSize >= maxTotalSize || totalNumFiles >= maxNumberOfFiles) {
// No multichunk open, nothing to close — return early once a limit is hit.
return;
}
}
// Close and add last multichunk
if (multiChunk != null) {
// Data
multiChunk.close();
listener.onMultiChunkClose(multiChunk);
multiChunk = null;
}
// Only the "processed everything" path signals completion.
listener.onFinish();
return;
}
Aggregations — end of collected usage examples.