Use of com.hedera.services.state.virtual.ContractKeySupplier in project hedera-services by hashgraph.
The class ContractBench, method prepare().
@Setup
public void prepare() throws Exception {
    pipeline = new Pipeline<>();
    final long estimatedNumKeyValuePairs =
            (long) (numContracts * (1 - bigPercent - hugePercent) * ((kbPerContract * 1024L) / ESTIMATED_KEY_VALUE_SIZE))
            + (long) (numContracts * bigPercent * ((kbPerBigContract * 1024L) / ESTIMATED_KEY_VALUE_SIZE))
            + (long) (numContracts * hugePercent * ((kbPerHugeContract * 1024L) / ESTIMATED_KEY_VALUE_SIZE));
    System.out.println("estimatedNumKeyValuePairs = " + estimatedNumKeyValuePairs);
    VirtualLeafRecordSerializer<ContractKey, ContractValue> virtualLeafRecordSerializer =
            new VirtualLeafRecordSerializer<>(
                    (short) 1, DigestType.SHA_384,
                    (short) 1, DataFileCommon.VARIABLE_DATA_SIZE, new ContractKeySupplier(),
                    (short) 1, ContractValue.SERIALIZED_SIZE, new ContractValueSupplier(),
                    true);
    Path dataSourcePath = getDataSourcePath(dsType);
    boolean dataSourceDirExisted = Files.exists(dataSourcePath);
    virtualMap = createMap(dsType, virtualLeafRecordSerializer, new ContractKeySerializer(),
            estimatedNumKeyValuePairs, dataSourcePath, preferDiskBasedIndexes);
    txProcessor = new TransactionProcessor<>(
            preFetchEventHandlers,
            (Transaction<Data> tx) -> {
                // preFetch logic
                VirtualMap<ContractKey, ContractValue> map = getVirtualMap();
                final Data data = tx.getData();
                data.value1 = map.getForModify(data.key1);
                data.value2 = map.getForModify(data.key2);
            },
            (Transaction<Data> tx) -> {
                // handleTransaction logic
                final Data data = tx.getData();
                data.value1.setValue(data.value1.asLong() - data.transferAmount);
                data.value2.setValue(data.value2.asLong() + data.transferAmount);
            });
    // We generate a different number of key/value pairs depending on whether it is
    // a huge contract, big contract, or normal contract
    int numBigContracts = (int) (numContracts * bigPercent);
    System.out.println("numBigContracts = " + numBigContracts);
    int numHugeContracts = (int) (numContracts * hugePercent);
    System.out.println("numHugeContracts = " + numHugeContracts);
    keyValuePairsPerContract = new int[numContracts];
    for (int i = 0; i < numContracts; i++) {
        final int kb;
        if (i > 0 && (i % 100) == 0 && numHugeContracts > 0) {
            kb = kbPerHugeContract;
            numHugeContracts--;
        } else if (i > 0 && (i % 10) == 0 && numBigContracts > 0) {
            kb = kbPerBigContract;
            numBigContracts--;
        } else {
            kb = kbPerContract;
        }
        final var numKeyValuePairs = (kb * 1024L) / ESTIMATED_KEY_VALUE_SIZE;
        keyValuePairsPerContract[i] = (int) numKeyValuePairs;
    }
    if (!dataSourceDirExisted && preFill) {
        long countOfKeyValuePairs = 0;
        long lastCountOfKeyValuePairs = 0;
        for (int i = 0; i < numContracts; i++) {
            if ((countOfKeyValuePairs - lastCountOfKeyValuePairs) > 100_000) {
                lastCountOfKeyValuePairs = countOfKeyValuePairs;
                System.out.printf("Completed: %,d contracts and %,d key/value pairs\n", i, countOfKeyValuePairs);
                virtualMap = pipeline.endRound(virtualMap);
            }
            if (i > 0 && i % 10_000 == 0) {
                System.out.println("=============== GC =======================");
                // loading is really intense, so give the GC a chance to catch up
                System.gc();
                Thread.sleep(1000);
            }
            final int numKeyValuePairs = keyValuePairsPerContract[i];
            for (int j = 0; j < numKeyValuePairs; j++) {
                final var key = asContractKey(i, j);
                final var value = new ContractValue(j);
                try {
                    virtualMap.put(key, value);
                } catch (Exception e) {
                    e.printStackTrace();
                    System.err.println(i + ":" + j);
                    throw e;
                }
            }
            countOfKeyValuePairs += numKeyValuePairs;
        }
        // During setup, we perform the full hashing and release the old copy. This way,
        // during the tests, we don't have an initial slow hash.
        System.out.printf("Completed: %,d contracts and %,d key/value pairs\n", numContracts, countOfKeyValuePairs);
        virtualMap = pipeline.endRound(virtualMap);
    } else {
        System.out.println("NOT PRE_FILLING AS LOADED FROM FILES OR TURNED OFF WITH FLAG!");
    }
    printDataStoreSize();
    // create a snapshot every 5 minutes
    DateFormat df = new SimpleDateFormat("yyyy-MM-dd--HH-mm");
    ScheduledExecutorService snapshotting =
            Executors.newScheduledThreadPool(1, runnable -> new Thread(runnable, "Snapshot"));
    snapshotting.scheduleWithFixedDelay(() -> {
        final Path snapshotDir = Path.of("jasperdb_snapshot_" + df.format(new Date()));
        System.out.println("************ STARTING SNAPSHOT [" + snapshotDir.toAbsolutePath() + "] ***********");
        long start = System.currentTimeMillis();
        try {
            virtualMap.getDataSource().snapshot(snapshotDir);
        } catch (IOException e) {
            e.printStackTrace();
        }
        double tookSeconds = (System.currentTimeMillis() - start) * Units.MILLISECONDS_TO_SECONDS;
        System.out.printf("************ SNAPSHOT FINISHED took %,.3f seconds [%s] ***********\n",
                tookSeconds, snapshotDir.toAbsolutePath());
    }, 0, 5, TimeUnit.MINUTES);
}
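prepare() calls an asContractKey(i, j) helper that this excerpt does not show. A minimal sketch of what it plausibly does, assuming the two-argument ContractKey(contractId, key) constructor that other snippets on this page call directly; the project's actual helper may differ:

// Hypothetical reconstruction of the asContractKey helper referenced above.
// It maps a contract index and a storage-slot index to a ContractKey, using
// the ContractKey(long, long) constructor seen elsewhere on this page.
private static ContractKey asContractKey(long contractId, long keyIndex) {
    return new ContractKey(contractId, keyIndex);
}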
Use of com.hedera.services.state.virtual.ContractKeySupplier in project hedera-services by hashgraph.
The class VirtualDataSourceBench, method setup().
@Setup(Level.Trial)
public void setup() throws IOException {
    random = new Random(1234);
    System.out.println("dataSourcePath = " + dataSourcePath);
    if (Files.exists(dataSourcePath)) {
        System.err.println("!!!!!!!!!!!!! Deleting old db.");
        deleteDirectoryAndContents(dataSourcePath);
    }
    if (Files.exists(dataSourceSnapshotPath)) {
        System.err.println("!!!!!!!!!!!!! Deleting old db snapshot.");
        deleteDirectoryAndContents(dataSourceSnapshotPath);
    }
    // create data source
    VirtualLeafRecordSerializer<ContractKey, ContractValue> virtualLeafRecordSerializer =
            new VirtualLeafRecordSerializer<>(
                    (short) 1, DigestType.SHA_384,
                    (short) 1, DataFileCommon.VARIABLE_DATA_SIZE, new ContractKeySupplier(),
                    (short) 1, ContractValue.SERIALIZED_SIZE, new ContractValueSupplier(),
                    true);
    JasperDbBuilder<ContractKey, ContractValue> dbBuilder = new JasperDbBuilder<>();
    dbBuilder.virtualLeafRecordSerializer(virtualLeafRecordSerializer)
            .virtualInternalRecordSerializer(new VirtualInternalRecordSerializer())
            .keySerializer(new ContractKeySerializer())
            .storageDir(dataSourcePath)
            .maxNumOfKeys(500_000_000)
            .preferDiskBasedIndexes(false)
            .internalHashesRamToDiskThreshold(0)
            .mergingEnabled(false);
    dataSource = dbBuilder.build("jdb", "4dsBench");
    // populate with initial data
    System.out.printf("Creating initial data set of %,d leaves\n", initialDataSize);
    progressPercentage = 0;
    final long firstLeafPath = initialDataSize;
    final long lastLeafPath = firstLeafPath + initialDataSize;
    var internalRecordStream = LongStream.range(0, firstLeafPath)
            .mapToObj(path -> new VirtualInternalRecord(path, hash((int) path)));
    var leafRecordStream = LongStream.range(firstLeafPath, lastLeafPath + 1)
            .mapToObj(path -> new VirtualLeafRecord<>(path, hash((int) path), new ContractKey(path, path), new ContractValue(path)))
            .peek(leaf -> printProgress(leaf.getPath(), lastLeafPath));
    dataSource.saveRecords(firstLeafPath, lastLeafPath, internalRecordStream, leafRecordStream, Stream.empty());
    System.out.printf("Done creating initial data set of %,d leaves\n", initialDataSize);
}
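This setup, and the other benchmark setups below, rely on a hash(int) helper to give every path a deterministic digest. A minimal sketch, assuming Hash and DigestType from com.swirlds.common.crypto and pseudo-random bytes seeded by the path; the benchmarks' real helper may derive the bytes differently:

import com.swirlds.common.crypto.DigestType;
import com.swirlds.common.crypto.Hash;
import java.util.Random;

final class BenchHashes {
    // Hypothetical hash(int) helper: deterministic, SHA-384-sized pseudo-random
    // bytes keyed by the seed; good enough for benchmark data, not a real
    // digest of anything.
    static Hash hash(int seed) {
        final byte[] bytes = new byte[DigestType.SHA_384.digestLength()];
        final Random random = new Random(seed);
        random.nextBytes(bytes);
        return new Hash(bytes, DigestType.SHA_384);
    }
}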
Use of com.hedera.services.state.virtual.ContractKeySupplier in project hedera-services by hashgraph.
The class ContractDataSourceValidator, method main().
public static void main(String[] args) throws IOException {
    Path dataSourcePath = Path.of("").toAbsolutePath();
    System.out.println("dataSourcePath = " + dataSourcePath + " exists " + Files.exists(dataSourcePath));
    VirtualLeafRecordSerializer<ContractKey, ContractValue> virtualLeafRecordSerializer =
            new VirtualLeafRecordSerializer<>(
                    (short) 1, DigestType.SHA_384,
                    (short) 1, DataFileCommon.VARIABLE_DATA_SIZE, new ContractKeySupplier(),
                    (short) 1, ContractValue.SERIALIZED_SIZE, new ContractValueSupplier(),
                    true);
    JasperDbBuilder<ContractKey, ContractValue> dbBuilder = new JasperDbBuilder<>();
    dbBuilder.virtualLeafRecordSerializer(virtualLeafRecordSerializer)
            .virtualInternalRecordSerializer(new VirtualInternalRecordSerializer())
            .keySerializer(new ContractKeySerializer())
            .storageDir(dataSourcePath)
            .maxNumOfKeys(500_000_000)
            .preferDiskBasedIndexes(false)
            .internalHashesRamToDiskThreshold(0)
            .mergingEnabled(true);
    DataSourceValidator<ContractKey, ContractValue> dataSourceValidator =
            new DataSourceValidator<>(dbBuilder.build("jdb", "4validator"));
    dataSourceValidator.validate();
}
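The validate() implementation is not part of this excerpt. A sketch of the kind of round-trip check such a validator can perform, assuming the VirtualDataSource API of this era offers loadLeafRecord overloads by path and by key; this is an illustration, not the project's actual validator:

// Illustrative check only: a leaf loaded by path should be found again by
// its key, at the same path. The method names used here are assumptions.
static void checkLeaf(VirtualDataSource<ContractKey, ContractValue> ds, long path) throws IOException {
    final VirtualLeafRecord<ContractKey, ContractValue> byPath = ds.loadLeafRecord(path);
    final VirtualLeafRecord<ContractKey, ContractValue> byKey = ds.loadLeafRecord(byPath.getKey());
    if (byKey == null || byKey.getPath() != path) {
        throw new IllegalStateException("key/path index mismatch at path " + path);
    }
}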
Use of com.hedera.services.state.virtual.ContractKeySupplier in project hedera-services by hashgraph.
The class VirtualDataSourceNewAPIBench, method setup().
@Setup(Level.Trial)
public void setup() {
    System.out.println("------- Setup -----------------------------");
    storePath = Path.of("store-" + impl);
    try {
        final boolean storeExists = Files.exists(storePath);
        // get slot index suppliers
        switch (impl) {
            case "lmdb":
                dataSource = new VFCDataSourceLmdb<>(
                        1 + 8 + 32, // max serialized key size
                        ContractKey::new,
                        ContractValue.SERIALIZED_SIZE,
                        ContractValue::new,
                        storePath);
                break;
            case "jasperdb":
                VirtualLeafRecordSerializer<ContractKey, ContractValue> virtualLeafRecordSerializer =
                        new VirtualLeafRecordSerializer<>(
                                (short) 1, DigestType.SHA_384,
                                (short) 1, DataFileCommon.VARIABLE_DATA_SIZE, new ContractKeySupplier(),
                                (short) 1, ContractValue.SERIALIZED_SIZE, new ContractValueSupplier(),
                                true);
                JasperDbBuilder<ContractKey, ContractValue> dbBuilder = new JasperDbBuilder<>();
                dbBuilder.virtualLeafRecordSerializer(virtualLeafRecordSerializer)
                        .virtualInternalRecordSerializer(new VirtualInternalRecordSerializer())
                        .keySerializer(new ContractKeySerializer())
                        .storageDir(storePath)
                        .maxNumOfKeys(numEntities + 10_000_000)
                        .preferDiskBasedIndexes(false)
                        .internalHashesRamToDiskThreshold(Long.MAX_VALUE)
                        .mergingEnabled(true);
                dataSource = dbBuilder.build("jdb", "4ApiBench");
                break;
            default:
                throw new IllegalStateException("Unexpected value: " + impl);
        }
        // create data
        if (!storeExists) {
            System.out.println("================================================================================");
            System.out.println("Creating data ...");
            // create internal nodes and leaves
            long iHaveWritten = 0;
            while (iHaveWritten < numEntities) {
                final long start = System.currentTimeMillis();
                final long batchSize = Math.min(WRITE_BATCH_SIZE, numEntities - iHaveWritten);
                dataSource.saveRecords(
                        numEntities, numEntities * 2,
                        LongStream.range(iHaveWritten, iHaveWritten + batchSize)
                                .mapToObj(i -> new VirtualInternalRecord(i, hash((int) i))),
                        LongStream.range(iHaveWritten, iHaveWritten + batchSize)
                                .mapToObj(i -> new VirtualLeafRecord<>(i + numEntities, hash((int) i), new ContractKey(i, i), new ContractValue(i))),
                        Stream.empty());
                iHaveWritten += batchSize;
                printUpdate(start, batchSize, ContractValue.SERIALIZED_SIZE, "Created " + iHaveWritten + " Nodes");
            }
            System.out.println("================================================================================");
            // set nextPath
            nextPath = numEntities;
            // let merge catch up
            try {
                System.out.println("Waiting for merge");
                Thread.sleep(30_000);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        } else {
            System.out.println("Loaded existing data");
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
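The write loop reports progress through a printUpdate helper that the excerpt omits. A plausible sketch that turns the batch start time, record count, and per-record size into records/s and MB/s; the name, signature, and output format are assumptions:

// Hypothetical printUpdate helper: derives throughput from the batch timing
// values passed in the loop above.
private static void printUpdate(long startMillis, long count, int recordSizeBytes, String message) {
    final double seconds = (System.currentTimeMillis() - startMillis) / 1000.0;
    final double recordsPerSecond = count / seconds;
    final double mbPerSecond = (recordsPerSecond * recordSizeBytes) / (1024.0 * 1024.0);
    System.out.printf("%s, at %,.0f records/s (%,.1f MB/s)%n", message, recordsPerSecond, mbPerSecond);
}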
Use of com.hedera.services.state.virtual.ContractKeySupplier in project hedera-services by hashgraph.
The class DatabaseState, method setupDatabase().
@Setup(Level.Trial)
public void setupDatabase() throws IOException {
    System.out.println("dataSourcePath = " + dataSourcePath + " mergingEnabled=" + mergingEnabled);
    if (Files.exists(dataSourcePath)) {
        System.err.println("!!!!!!!!!!!!! Deleting old db.");
        deleteDirectoryAndContents(dataSourcePath);
    }
    if (Files.exists(dataSourceSnapshotPath)) {
        System.err.println("!!!!!!!!!!!!! Deleting old db snapshot.");
        deleteDirectoryAndContents(dataSourceSnapshotPath);
    }
    // create data source
    VirtualLeafRecordSerializer<ContractKey, ContractValue> virtualLeafRecordSerializer =
            new VirtualLeafRecordSerializer<>(
                    (short) 1, DigestType.SHA_384,
                    (short) 1, DataFileCommon.VARIABLE_DATA_SIZE, new ContractKeySupplier(),
                    (short) 1, ContractValue.SERIALIZED_SIZE, new ContractValueSupplier(),
                    true);
    JasperDbBuilder<ContractKey, ContractValue> dbBuilder = new JasperDbBuilder<>();
    dbBuilder.virtualLeafRecordSerializer(virtualLeafRecordSerializer)
            .virtualInternalRecordSerializer(new VirtualInternalRecordSerializer())
            .keySerializer(new ContractKeySerializer())
            .storageDir(dataSourcePath)
            .maxNumOfKeys(500_000_000)
            .preferDiskBasedIndexes(false)
            .internalHashesRamToDiskThreshold(0)
            .mergingEnabled(mergingEnabled);
    dataSource = dbBuilder.build("jdb", "4dbState");
    // populate with initial data
    System.out.printf("Creating initial data set of %,d leaves\n", initialDataSize);
    progressPercentage = 0;
    final long firstLeafPath = initialDataSize;
    final long lastLeafPath = firstLeafPath + initialDataSize;
    var internalRecordStream = LongStream.range(0, firstLeafPath)
            .mapToObj(path -> new VirtualInternalRecord(path, hash((int) path)))
            .peek(internalRecord -> printProgress(internalRecord.getPath(), lastLeafPath));
    var leafRecordStream = LongStream.range(firstLeafPath, lastLeafPath + 1)
            .mapToObj(path -> new VirtualLeafRecord<>(path, hash((int) path), new ContractKey(path, path), new ContractValue(path)))
            .peek(leaf -> printProgress(leaf.getPath(), lastLeafPath));
    dataSource.saveRecords(firstLeafPath, lastLeafPath, internalRecordStream, leafRecordStream, Stream.empty());
    System.out.printf("Done creating initial data set of %,d leaves\n", initialDataSize);
}
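Both stream pipelines above peek into a printProgress helper, paired with the progressPercentage field that setup resets to 0. A minimal sketch of the likely behavior, printing each whole-percent step at most once; this is an assumption, not the project's implementation:

// Hypothetical printProgress helper: tracks the last percentage printed in
// the progressPercentage field and reports each new whole percent once.
private void printProgress(long position, long total) {
    final int percent = (int) ((position * 100) / total);
    if (percent > progressPercentage) {
        progressPercentage = percent;
        System.out.printf("%d%% complete%n", percent);
    }
}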