Use of org.ethereum.db.ByteArrayWrapper in project rskj by rsksmart.
The class LevelDbDataSource, method mergeDataSources.
public static void mergeDataSources(Path destinationPath, List<Path> originPaths) {
    Map<ByteArrayWrapper, byte[]> mergedStores = new HashMap<>();

    // Collect every key/value pair from each origin store into memory.
    for (Path originPath : originPaths) {
        KeyValueDataSource singleOriginDataSource = makeDataSource(originPath);
        singleOriginDataSource.keys().forEach(kw -> mergedStores.put(kw, singleOriginDataSource.get(kw.getData())));
        singleOriginDataSource.close();
    }

    // Write the merged contents into the destination in a single batch; nothing to delete.
    KeyValueDataSource destinationDataSource = makeDataSource(destinationPath);
    destinationDataSource.updateBatch(mergedStores, Collections.emptySet());
    destinationDataSource.close();
}
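A minimal usage sketch, assuming LevelDB stores already exist at the origin paths and the usual java.nio.file and java.util imports; the path names below are hypothetical:

// Hypothetical paths. Both origins are loaded fully into memory before the
// batch write, so this approach suits moderately sized stores.
List<Path> origins = Arrays.asList(Paths.get("unitrie-a"), Paths.get("unitrie-b"));
LevelDbDataSource.mergeDataSources(Paths.get("unitrie-merged"), origins);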
Use of org.ethereum.db.ByteArrayWrapper in project rskj by rsksmart.
The class LevelDbDataSource, method updateBatchInternal.
private void updateBatchInternal(Map<ByteArrayWrapper, byte[]> rows, Set<ByteArrayWrapper> deleteKeys) throws IOException {
    Metric metric = profiler.start(Profiler.PROFILING_TYPE.DB_WRITE);

    // Reject null keys and null values up front.
    if (rows.containsKey(null) || rows.containsValue(null)) {
        profiler.stop(metric);
        throw new IllegalArgumentException("Cannot update null values");
    }

    // Note that this is not atomic.
    try (WriteBatch batch = db.createWriteBatch()) {
        for (Map.Entry<ByteArrayWrapper, byte[]> entry : rows.entrySet()) {
            batch.put(entry.getKey().getData(), entry.getValue());
        }
        for (ByteArrayWrapper deleteKey : deleteKeys) {
            batch.delete(deleteKey.getData());
        }
        db.write(batch);
        profiler.stop(metric);
    }
}
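A sketch of how a caller inside the class might assemble the batch arguments; the keys and values here are made up for illustration:

// Illustrative only: insert one entry and delete another in the same batch.
Map<ByteArrayWrapper, byte[]> rows = new HashMap<>();
rows.put(ByteUtil.wrap(new byte[] { 0x01 }), new byte[] { 0x0a, 0x0b });
Set<ByteArrayWrapper> deleteKeys = Collections.singleton(ByteUtil.wrap(new byte[] { 0x02 }));
updateBatchInternal(rows, deleteKeys);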
Use of org.ethereum.db.ByteArrayWrapper in project rskj by rsksmart.
The class LevelDbDataSource, method keys.
@Override
public Set<ByteArrayWrapper> keys() {
    Metric metric = profiler.start(Profiler.PROFILING_TYPE.DB_READ);
    resetDbLock.readLock().lock();
    try {
        if (logger.isTraceEnabled()) {
            logger.trace("~> LevelDbDataSource.keys(): {}", name);
        }
        try (DBIterator iterator = db.iterator()) {
            Set<ByteArrayWrapper> result = new HashSet<>();
            // Walk the whole key space, wrapping each raw key so it can live in a Set.
            for (iterator.seekToFirst(); iterator.hasNext(); iterator.next()) {
                byte[] key = iterator.peekNext().getKey();
                result.add(ByteUtil.wrap(key));
            }
            if (logger.isTraceEnabled()) {
                logger.trace("<~ LevelDbDataSource.keys(): {}, {}", name, result.size());
            }
            return result;
        } catch (IOException e) {
            logger.error("Unexpected", e);
            panicProcessor.panic("leveldb", String.format("Unexpected %s", e.getMessage()));
            throw new RuntimeException(e);
        }
    } finally {
        resetDbLock.readLock().unlock();
        profiler.stop(metric);
    }
}
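A hedged usage sketch, assuming dataSource is an open LevelDbDataSource instance:

// Sum the size of all stored values. keys() materializes the whole key set
// in memory, so avoid this on very large databases.
long totalValueBytes = 0;
Set<ByteArrayWrapper> allKeys = dataSource.keys();
for (ByteArrayWrapper key : allKeys) {
    totalValueBytes += dataSource.get(key.getData()).length;
}
System.out.println(allKeys.size() + " keys, " + totalValueBytes + " value bytes");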
Use of org.ethereum.db.ByteArrayWrapper in project rskj by rsksmart.
The class CacheSnapshotHandlerTest, method save_MapCannotBeSaved_CacheFileShouldNotBeModified.
@Test
public void save_MapCannotBeSaved_CacheFileShouldNotBeModified() throws IOException {
    // given: an empty cache file and a snapshot writer that always fails
    assertTrue(cacheSnapshotPath.toFile().createNewFile());
    assertEquals(0, cacheSnapshotPath.toFile().length());

    MapSnapshot.Out outSnapshot = mock(MapSnapshot.Out.class);
    doThrow(new IOException()).when(outSnapshot).write(anyMap());
    doReturn(outSnapshot).when(mapSnapshotFactory).makeOutputSnapshot(any());

    File tempFile = spy(tempFileCreator.createTempFile("cache", ".tmp"));
    doReturn(tempFile).when(tempFileCreator).createTempFile(anyString(), anyString());

    Map<ByteArrayWrapper, byte[]> cache = new HashMap<>();
    cache.put(ByteUtil.wrap(new byte[] { 1, 2, 3 }), new byte[] { 4, 5, 6 });

    // when: the save fails because the writer throws
    cacheSnapshotHandler.save(cache);

    // then: the original cache file is untouched and the temp file is scheduled for cleanup
    assertTrue(cacheSnapshotPath.toFile().exists());
    assertEquals(0, cacheSnapshotPath.toFile().length());
    verify(tempFile, atLeastOnce()).deleteOnExit();
}
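The behavior verified here suggests a write-to-temp-then-promote save. The following is a minimal sketch of that idea under the usual java.io/java.nio imports; writeSnapshot and the surrounding structure are illustrative assumptions, not the actual CacheSnapshotHandler API:

// Illustrative save flow: serialize into a temp file first and promote it only
// on success, so a failed write leaves the existing snapshot file untouched.
File tmp = tempFileCreator.createTempFile("cache", ".tmp");
try {
    try (OutputStream out = new FileOutputStream(tmp)) {
        writeSnapshot(out, cache); // hypothetical serializer; may throw IOException
    }
    Files.move(tmp.toPath(), cacheSnapshotPath, StandardCopyOption.REPLACE_EXISTING);
} catch (IOException e) {
    tmp.deleteOnExit(); // clean up later; the previous snapshot stays as-is
}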
Use of org.ethereum.db.ByteArrayWrapper in project rskj by rsksmart.
The class Wallet, method getAccountAddresses.
public List<byte[]> getAccountAddresses() {
    List<byte[]> addresses = new ArrayList<>();
    Set<RskAddress> keys = new HashSet<>();

    synchronized (accessLock) {
        // Initial accounts always come first, in their original order.
        for (RskAddress addr : this.initialAccounts) {
            addresses.add(addr.getBytes());
        }

        // Collect persisted and in-memory accounts, deduplicated via the set.
        for (ByteArrayWrapper addressWrapped : keyDS.keys()) {
            keys.add(new RskAddress(addressWrapped.getData()));
        }
        keys.addAll(accounts.keySet());

        // Avoid listing the initial accounts twice.
        keys.removeAll(this.initialAccounts);

        for (RskAddress addr : keys) {
            addresses.add(addr.getBytes());
        }
    }

    return addresses;
}
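A hedged usage sketch, assuming wallet is an initialized Wallet and ByteUtil.toHexString from org.ethereum.util.ByteUtil is available:

// Print every known account address as hex; initial accounts come first.
for (byte[] addressBytes : wallet.getAccountAddresses()) {
    System.out.println(ByteUtil.toHexString(addressBytes));
}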