Use of com.hedera.services.state.virtual.ContractValue in project hedera-services by hashgraph.
Class CliffClickMapBench, method putIfAbsent:
@Benchmark
public void putIfAbsent() throws Exception {
    map.putIfAbsent(nextIndex, new ContractValue(nextIndex));
    nextIndex++;
}
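These benchmark methods rely on JMH state fields that are not shown in the snippets. The sketch below is a hedged reconstruction of how such a benchmark class could be declared, based only on the names used above (map, nextIndex, numEntities); the @State scope, the @Param value, and the high-scale-lib import are assumptions, not verified against the project.

import org.cliffc.high_scale_lib.NonBlockingHashMapLong; // assumed origin of NonBlockingHashMapLong
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;

import com.hedera.services.state.virtual.ContractValue;

@State(Scope.Thread) // assumed scope; the real benchmark may use Scope.Benchmark
public class CliffClickMapBenchSketch {

    @Param({"1000000"}) // illustrative entity count, not the project's actual parameter
    public long numEntities;

    // the concurrent long-keyed map exercised by putIfAbsent() and setup()
    public NonBlockingHashMapLong<ContractValue> map;

    // next key to insert; initialized to numEntities in setup()
    public long nextIndex;
}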
Use of com.hedera.services.state.virtual.ContractValue in project hedera-services by hashgraph.
Class CliffClickMapBench, method setup:
@Setup(Level.Trial)
public void setup() {
    nextIndex = numEntities;
    map = new NonBlockingHashMapLong<>(false);
    // fill with some data
    for (int i = 0; i < numEntities; i++) {
        map.put(i, new ContractValue(i));
    }
    // print memory usage
    System.out.printf("Memory for initial %,d accounts:\n", numEntities);
    printMemoryUsage();
}
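printMemoryUsage() is referenced but not shown in these snippets. A minimal sketch of what such a helper could do, assuming it simply reports the used heap via Runtime (this is a hypothetical implementation, not the project's code):

private void printMemoryUsage() {
    // Hypothetical helper: encourage a GC, then report the currently used heap.
    System.gc();
    final Runtime runtime = Runtime.getRuntime();
    final long usedBytes = runtime.totalMemory() - runtime.freeMemory();
    System.out.printf("    used heap = %,d MB%n", usedBytes / (1024 * 1024));
}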
Use of com.hedera.services.state.virtual.ContractValue in project hedera-services by hashgraph.
Class ConcurrentMapBench, method add:
@Benchmark
public void add() throws Exception {
    map.put(nextIndex, new ContractValue(nextIndex));
    nextIndex++;
}
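The setup method below switches on an impl field, which is presumably a JMH @Param. The declarations here are a hedged sketch of the state this benchmark assumes; the parameter values are inferred from the switch cases, and the map's generic type is inferred from how it is populated:

// Sketch of the state fields assumed by add() and setup(); values inferred from the switch below.
@Param({"ConcurrentHashMap", "ConcurrentSkipListMap", "SynchronizedHashMap"})
public String impl;

public Map<Long, ContractValue> map;
public long nextIndex;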
Use of com.hedera.services.state.virtual.ContractValue in project hedera-services by hashgraph.
Class ConcurrentMapBench, method setup:
@Setup(Level.Trial)
public void setup() {
    nextIndex = numEntities;
    switch (impl) {
        case "SynchronizedHashMap":
            map = Collections.synchronizedMap(new HashMap<>());
            break;
        case "ConcurrentSkipListMap":
            map = new ConcurrentSkipListMap<>();
            break;
        default:
            map = new ConcurrentHashMap<>();
    }
    // fill with some data
    for (long i = 0; i < numEntities; i++) {
        map.put(i, new ContractValue(i));
    }
    // print memory usage
    System.out.printf("Memory for initial %,d accounts:\n", numEntities);
    printMemoryUsage();
}
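Either benchmark can be launched with the standard JMH runner API. The following sketch is illustrative only; the include pattern, fork count, and iteration settings are assumptions rather than the project's actual JMH configuration:

import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;

public class MapBenchRunner {
    public static void main(String[] args) throws RunnerException {
        Options options = new OptionsBuilder()
                .include(ConcurrentMapBench.class.getSimpleName()) // or CliffClickMapBench
                .forks(1)                     // illustrative settings
                .warmupIterations(3)
                .measurementIterations(5)
                .build();
        new Runner(options).run();
    }
}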
Use of com.hedera.services.state.virtual.ContractValue in project hedera-services by hashgraph.
Class ThorsFileHammer, method updateAllValues:
public void updateAllValues() {
    try {
        final int batchSize = 1000;
        final int numOfBatches =
                (int) ((fileCollection.getMaximumValidKey() - fileCollection.getMinimumValidKey()) / batchSize);
        System.out.println("ThorsHammer.updateAllValues numOfBatches=" + numOfBatches);
        for (int batchIndex = 0; batchIndex < numOfBatches; batchIndex++) {
            final int firstLeaf = (int) (fileCollection.getMinimumValidKey() + (batchIndex * batchSize));
            final int lastLeaf = firstLeaf + batchSize;
            // build a batch of leaf records with fresh random values, recording the expected
            // value for each path in compareToMe
            final var leafRecordStream = LongStream.range(firstLeaf, lastLeaf)
                    .mapToObj(path -> new VirtualLeafRecord<>(
                            path,
                            hash((int) path),
                            new ContractKey(path / 1000, path),
                            new ContractValue(RANDOM.nextLong())))
                    .peek(leaf -> compareToMe.set((int) leaf.getPath(), (int) leaf.getValue().asLong()));
            try {
                readWriteLock.writeLock().lock();
                fileCollection.startWriting();
                final Map<Long, Long> indexUpdates = new HashMap<>();
                leafRecordStream.forEach(leaf -> {
                    try {
                        indexUpdates.put(leaf.getPath(), fileCollection.storeDataItem(leaf));
                    } catch (IOException e) {
                        e.printStackTrace();
                    }
                });
                final var dataFile = fileCollection.endWriting(
                        fileCollection.getMinimumValidKey(), fileCollection.getMaximumValidKey());
                // apply the new data locations to the in-memory index, then allow the file to be merged
                for (var update : indexUpdates.entrySet()) {
                    index.put(update.getKey(), update.getValue());
                }
                dataFile.setFileAvailableForMerging(true);
            } finally {
                readWriteLock.writeLock().unlock();
            }
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
}
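updateAllValues() uses several ThorsFileHammer fields that are not part of this snippet. The declarations below are a hedged reconstruction based solely on how each field is used above; the concrete types chosen for compareToMe and index, and the NUM_LEAVES constant, are assumptions rather than the project's actual declarations.

// Hedged reconstruction of supporting fields, inferred from their usage in updateAllValues().
// (imports: java.util.Random, java.util.concurrent.ConcurrentHashMap,
//  java.util.concurrent.atomic.AtomicIntegerArray, java.util.concurrent.locks.ReentrantReadWriteLock)
private static final int NUM_LEAVES = 1_000_000;                 // hypothetical size constant
private final Random RANDOM = new Random();                      // RANDOM.nextLong() for new values
private final ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock();
private final AtomicIntegerArray compareToMe = new AtomicIntegerArray(NUM_LEAVES); // expected values; type assumed
private final ConcurrentHashMap<Long, Long> index = new ConcurrentHashMap<>();     // path -> data location; type assumed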