Use of com.swirlds.virtualmap.VirtualMap in project hedera-services by hashgraph.
Class ContractBench, method prepare().
@Setup
public void prepare() throws Exception {
    pipeline = new Pipeline<>();
    final long estimatedNumKeyValuePairs =
            (long) (numContracts * (1 - bigPercent - hugePercent) * ((kbPerContract * 1024L) / ESTIMATED_KEY_VALUE_SIZE))
                    + (long) (numContracts * bigPercent * ((kbPerBigContract * 1024L) / ESTIMATED_KEY_VALUE_SIZE))
                    + (long) (numContracts * hugePercent * ((kbPerHugeContract * 1024L) / ESTIMATED_KEY_VALUE_SIZE));
    System.out.println("estimatedNumKeyValuePairs = " + estimatedNumKeyValuePairs);
    VirtualLeafRecordSerializer<ContractKey, ContractValue> virtualLeafRecordSerializer =
            new VirtualLeafRecordSerializer<>(
                    (short) 1, DigestType.SHA_384,
                    (short) 1, DataFileCommon.VARIABLE_DATA_SIZE, new ContractKeySupplier(),
                    (short) 1, ContractValue.SERIALIZED_SIZE, new ContractValueSupplier(),
                    true);
    Path dataSourcePath = getDataSourcePath(dsType);
    boolean dataSourceDirExisted = Files.exists(dataSourcePath);
    virtualMap = createMap(
            dsType,
            virtualLeafRecordSerializer,
            new ContractKeySerializer(),
            estimatedNumKeyValuePairs,
            dataSourcePath,
            preferDiskBasedIndexes);
    txProcessor = new TransactionProcessor<>(
            preFetchEventHandlers,
            (Transaction<Data> tx) -> {
                // preFetch logic
                VirtualMap<ContractKey, ContractValue> map = getVirtualMap();
                final Data data = tx.getData();
                data.value1 = map.getForModify(data.key1);
                data.value2 = map.getForModify(data.key2);
            },
            (Transaction<Data> tx) -> {
                // handleTransaction logic
                final Data data = tx.getData();
                data.value1.setValue(data.value1.asLong() - data.transferAmount);
                data.value2.setValue(data.value2.asLong() + data.transferAmount);
            });
    // We generate a different number of key/value pairs depending on whether it is
    // a huge contract, a big contract, or a normal contract
    int numBigContracts = (int) (numContracts * bigPercent);
    System.out.println("numBigContracts = " + numBigContracts);
    int numHugeContracts = (int) (numContracts * hugePercent);
    System.out.println("numHugeContracts = " + numHugeContracts);
    keyValuePairsPerContract = new int[numContracts];
    for (int i = 0; i < numContracts; i++) {
        final int kb;
        if (i > 0 && (i % 100) == 0 && numHugeContracts > 0) {
            kb = kbPerHugeContract;
            numHugeContracts--;
        } else if (i > 0 && (i % 10) == 0 && numBigContracts > 0) {
            kb = kbPerBigContract;
            numBigContracts--;
        } else {
            kb = kbPerContract;
        }
        final var numKeyValuePairs = (kb * 1024L) / ESTIMATED_KEY_VALUE_SIZE;
        keyValuePairsPerContract[i] = (int) numKeyValuePairs;
    }
    if (!dataSourceDirExisted && preFill) {
        long countOfKeyValuePairs = 0;
        long lastCountOfKeyValuePairs = 0;
        for (int i = 0; i < numContracts; i++) {
            if ((countOfKeyValuePairs - lastCountOfKeyValuePairs) > 100_000) {
                lastCountOfKeyValuePairs = countOfKeyValuePairs;
                System.out.printf("Completed: %,d contracts and %,d key/value pairs\n", i, countOfKeyValuePairs);
                virtualMap = pipeline.endRound(virtualMap);
            }
            if (i > 0 && i % 10_000 == 0) {
                System.out.println("=============== GC =======================");
                // loading is really intense, so give the GC a chance to catch up
                System.gc();
                Thread.sleep(1000);
            }
            final int numKeyValuePairs = keyValuePairsPerContract[i];
            for (int j = 0; j < numKeyValuePairs; j++) {
                final var key = asContractKey(i, j);
                final var value = new ContractValue(j);
                try {
                    virtualMap.put(key, value);
                } catch (Exception e) {
                    e.printStackTrace();
                    System.err.println(i + ":" + j);
                    throw e;
                }
            }
            countOfKeyValuePairs += numKeyValuePairs;
        }
        // During setup, we perform the full hashing and release the old copy. This way,
        // during the tests, we don't start with an initial slow hash.
        System.out.printf("Completed: %,d contracts and %,d key/value pairs\n", numContracts, countOfKeyValuePairs);
        virtualMap = pipeline.endRound(virtualMap);
    } else {
        System.out.println("NOT PRE-FILLING: LOADED FROM FILES OR TURNED OFF BY FLAG!");
    }
    printDataStoreSize();
    // create a snapshot every 5 minutes
    DateFormat df = new SimpleDateFormat("yyyy-MM-dd--HH-mm");
    ScheduledExecutorService snapshotting =
            Executors.newScheduledThreadPool(1, runnable -> new Thread(runnable, "Snapshot"));
    snapshotting.scheduleWithFixedDelay(
            () -> {
                final Path snapshotDir = Path.of("jasperdb_snapshot_" + df.format(new Date()));
                System.out.println("************ STARTING SNAPSHOT [" + snapshotDir.toAbsolutePath() + "] ***********");
                long START = System.currentTimeMillis();
                try {
                    virtualMap.getDataSource().snapshot(snapshotDir);
                } catch (IOException e) {
                    e.printStackTrace();
                }
                double tookSeconds = (System.currentTimeMillis() - START) * Units.MILLISECONDS_TO_SECONDS;
                System.out.printf(
                        "************ SNAPSHOT FINISHED took %,.3f seconds [%s] ***********\n",
                        tookSeconds, snapshotDir.toAbsolutePath());
            },
            0, 5, TimeUnit.MINUTES);
}
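Pipeline and its endRound method are benchmark-local helpers whose internals are not shown here. Assuming endRound drives the standard VirtualMap fast-copy lifecycle, a minimal sketch of that round cycle might look like this (the method body is an assumption, not the benchmark's actual implementation):

static VirtualMap<ContractKey, ContractValue> endRound(final VirtualMap<ContractKey, ContractValue> current) {
    // Only the newest copy of a VirtualMap is mutable; copy() creates and returns it.
    final VirtualMap<ContractKey, ContractValue> next = current.copy();
    // Releasing the older copy lets it be hashed, merged, and flushed in the background.
    current.release();
    // All further put/getForModify calls must go through the new copy.
    return next;
}

This is why the loop above reassigns virtualMap from the return value: continuing to use the released copy would be an error.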
Use of com.swirlds.virtualmap.VirtualMap in project hedera-services by hashgraph.
Class CryptoHbarBench, method prepare().
@Setup
public void prepare() throws Exception {
    pipeline = new Pipeline<>();
    VirtualLeafRecordSerializer<Id, Account> virtualLeafRecordSerializer =
            new VirtualLeafRecordSerializer<>(
                    (short) 1, DigestType.SHA_384,
                    (short) 1, Id.SERIALIZED_SIZE, new IdSupplier(),
                    (short) 1, Account.SERIALIZED_SIZE, new AccountSupplier(),
                    false);
    Path dataSourcePath = getDataSourcePath(dsType);
    boolean dataSourceDirExisted = Files.exists(dataSourcePath);
    virtualMap = createMap(dsType, virtualLeafRecordSerializer, new Id.IdKeySerializer(), numEntities, dataSourcePath, false);
    final var rand = new Random();
    txProcessor = new TransactionProcessor<>(
            preFetchEventHandlers,
            (Transaction<Data> tx) -> {
                // preFetch logic
                VirtualMap<Id, Account> map = getVirtualMap();
                final Data data = tx.getData();
                final Account sender = map.getForModify(data.getSenderId());
                if (sender == null) {
                    System.out.println("NULL SENDER " + data.getSenderId() + ", last = " + tx.isLast());
                }
                final Account receiver = map.getForModify(data.getReceiverId());
                if (receiver == null) {
                    System.out.println("NULL RECEIVER " + data.getReceiverId() + ", last = " + tx.isLast());
                }
                data.setSender(sender);
                data.setReceiver(receiver);
            },
            (Transaction<Data> tx) -> {
                // handleTransaction logic
                final var tinyBars = rand.nextInt(10);
                final Data data = tx.getData();
                final Account sender = data.getSender();
                final Account receiver = data.getReceiver();
                sender.setHbarBalance(sender.getHbarBalance() - tinyBars);
                receiver.setHbarBalance(receiver.getHbarBalance() + tinyBars);
            });
    long START = System.currentTimeMillis();
    if (!dataSourceDirExisted && preFill) {
        for (int i = 0; i < numEntities; i++) {
            if (i % 100_000 == 0 && i > 0) {
                final long END = System.currentTimeMillis();
                double tookSeconds = (END - START) / 1000d;
                START = END;
                System.out.printf("Completed: %,d in %,.2f seconds\n", i, tookSeconds);
                virtualMap = pipeline.endRound(virtualMap);
            }
            if (numEntities > 100_000_000) {
                // for large data loads, give the GC a chance as we create objects at a furious rate
                if (i % 1_000_000 == 0 && i > 0) {
                    System.gc();
                    // noinspection BusyWait
                    Thread.sleep(2000);
                    System.gc();
                    // noinspection BusyWait
                    Thread.sleep(2000);
                }
            }
            final var key = asId(i);
            final var value = asAccount(i);
            try {
                virtualMap.put(key, value);
            } catch (Exception e) {
                e.printStackTrace();
                System.err.println(i);
                throw e;
            }
        }
        System.out.printf("Completed: %,d\n", numEntities);
        virtualMap = pipeline.endRound(virtualMap);
    }
    printDataStoreSize();
    // prepNextRound();
}
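The preFetch handler above leans on the distinction between get and getForModify. A minimal sketch of that contract, reusing the benchmark's own asId helper (the read-only nature of values returned by get is the documented VirtualMap behavior):

final Id key = asId(42);
final Account readOnly = virtualMap.get(key);          // value must be treated as immutable
final Account mutable = virtualMap.getForModify(key);  // leaf registered for modification in the current copy
mutable.setHbarBalance(mutable.getHbarBalance() + 10); // safe: this copy owns the mutable leaf

Calling getForModify in the pre-fetch phase presumably moves the expensive disk reads off the transaction-handling path, so handleTransaction only touches values already in memory.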
Use of com.swirlds.virtualmap.VirtualMap in project hedera-services by hashgraph.
Class VirtualMapFactory, method newVirtualizedBlobs().
public VirtualMap<VirtualBlobKey, VirtualBlobValue> newVirtualizedBlobs() {
    final var blobKeySerializer = new VirtualBlobKeySerializer();
    final VirtualLeafRecordSerializer<VirtualBlobKey, VirtualBlobValue> blobLeafRecordSerializer =
            new VirtualLeafRecordSerializer<>(
                    CURRENT_SERIALIZATION_VERSION, DigestType.SHA_384,
                    CURRENT_SERIALIZATION_VERSION, VirtualBlobKey.sizeInBytes(), new VirtualBlobKeySupplier(),
                    CURRENT_SERIALIZATION_VERSION, VirtualBlobValue.sizeInBytes(), new VirtualBlobValueSupplier(),
                    false);
    final JasperDbBuilder<VirtualBlobKey, VirtualBlobValue> dsBuilder = jdbBuilderFactory.newJdbBuilder();
    dsBuilder.virtualLeafRecordSerializer(blobLeafRecordSerializer)
            .virtualInternalRecordSerializer(new VirtualInternalRecordSerializer())
            .keySerializer(blobKeySerializer)
            .maxNumOfKeys(MAX_BLOBS)
            .preferDiskBasedIndexes(false)
            .internalHashesRamToDiskThreshold(MAX_IN_MEMORY_INTERNAL_HASHES);
    return new VirtualMap<>(BLOBS_VM_NAME, dsBuilder);
}
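A hedged usage sketch follows. The VirtualBlobKey(Type, int) constructor, the Type.FILE_DATA constant, and the VirtualBlobValue(byte[]) constructor are assumptions based on the key's role as a typed blob identifier, not confirmed signatures:

static void blobRoundTrip(final VirtualMapFactory factory) {
    final VirtualMap<VirtualBlobKey, VirtualBlobValue> blobs = factory.newVirtualizedBlobs();
    // Hypothetical key for the data of file 0.0.1001:
    final var key = new VirtualBlobKey(VirtualBlobKey.Type.FILE_DATA, 1001);
    blobs.put(key, new VirtualBlobValue(new byte[] { 1, 2, 3 }));
    final VirtualBlobValue stored = blobs.get(key); // read back through the same mutable copy
}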
Use of com.swirlds.virtualmap.VirtualMap in project hedera-services by hashgraph.
Class VirtualMapFactory, method newVirtualizedStorage().
public VirtualMap<ContractKey, ContractValue> newVirtualizedStorage() {
    final var storageKeySerializer = new ContractKeySerializer();
    final VirtualLeafRecordSerializer<ContractKey, ContractValue> storageLeafRecordSerializer =
            new VirtualLeafRecordSerializer<>(
                    CURRENT_SERIALIZATION_VERSION, DigestType.SHA_384,
                    CURRENT_SERIALIZATION_VERSION, storageKeySerializer.getSerializedSize(), new ContractKeySupplier(),
                    CURRENT_SERIALIZATION_VERSION, ContractValue.SERIALIZED_SIZE, new ContractValueSupplier(),
                    true);
    final JasperDbBuilder<ContractKey, ContractValue> dsBuilder = jdbBuilderFactory.newJdbBuilder();
    dsBuilder.virtualLeafRecordSerializer(storageLeafRecordSerializer)
            .virtualInternalRecordSerializer(new VirtualInternalRecordSerializer())
            .keySerializer(storageKeySerializer)
            .maxNumOfKeys(MAX_STORAGE_ENTRIES)
            .preferDiskBasedIndexes(false)
            .internalHashesRamToDiskThreshold(MAX_IN_MEMORY_INTERNAL_HASHES);
    return new VirtualMap<>(STORAGE_VM_NAME, dsBuilder);
}
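Note that the trailing boolean passed to VirtualLeafRecordSerializer is true here and in the ContractBench example, but false for the fixed-size blob and account types, suggesting it flags variable-size serialization for ContractKey. A short usage sketch; the ContractKey(long contractId, long key) constructor is an assumption, while ContractValue(long) matches its use in the ContractBench example above:

static void storageRoundTrip(final VirtualMapFactory factory) {
    final VirtualMap<ContractKey, ContractValue> storage = factory.newVirtualizedStorage();
    // Hypothetical storage slot 0 of contract 0.0.1001:
    storage.put(new ContractKey(1001L, 0L), new ContractValue(42L));
    final ContractValue slot = storage.get(new ContractKey(1001L, 0L));
}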
Use of com.swirlds.virtualmap.VirtualMap in project hedera-services by hashgraph.
Class StateViewTest, method setup().
@BeforeEach
@SuppressWarnings("unchecked")
void setup() throws Throwable {
    metadata = new HFileMeta(false, TxnHandlingScenario.MISC_FILE_WACL_KT.asJKey(), expiry, fileMemo);
    immutableMetadata = new HFileMeta(false, StateView.EMPTY_WACL, expiry);
    expectedImmutable = FileGetInfoResponse.FileInfo.newBuilder()
            .setLedgerId(ledgerId)
            .setDeleted(false)
            .setExpirationTime(Timestamp.newBuilder().setSeconds(expiry))
            .setFileID(target)
            .setSize(data.length)
            .build();
    expected = expectedImmutable.toBuilder()
            .setKeys(TxnHandlingScenario.MISC_FILE_WACL_KT.asKey().getKeyList())
            .setMemo(fileMemo)
            .build();
    tokenAccount = MerkleAccountFactory.newAccount().isSmartContract(false).tokens(tokenId).get();
    tokenAccount.setNftsOwned(10);
    tokenAccount.setMaxAutomaticAssociations(123);
    tokenAccount.setAlias(TxnHandlingScenario.TOKEN_ADMIN_KT.asKey().getEd25519());
    contract = MerkleAccountFactory.newAccount()
            .alias(create2Address)
            .memo("Stay cold...")
            .numKvPairs(wellKnownNumKvPairs)
            .isSmartContract(true)
            .accountKeys(COMPLEX_KEY_ACCOUNT_KT)
            .proxy(asAccount("0.0.3"))
            .senderThreshold(1_234L)
            .receiverThreshold(4_321L)
            .receiverSigRequired(true)
            .balance(555L)
            .autoRenewPeriod(1_000_000L)
            .deleted(true)
            .expirationTime(9_999_999L)
            .get();
    contracts = (MerkleMap<EntityNum, MerkleAccount>) mock(MerkleMap.class);
    topics = (MerkleMap<EntityNum, MerkleTopic>) mock(MerkleMap.class);
    tokenRels = new MerkleMap<>();
    tokenRels.put(
            EntityNumPair.fromLongs(tokenAccountId.getAccountNum(), tokenId.getTokenNum()),
            new MerkleTokenRelStatus(123L, false, true, true));
    tokenStore = mock(TokenStore.class);
    token = new MerkleToken(Long.MAX_VALUE, 100, 1, "UnfrozenToken", "UnfrozenTokenName", true, true, new EntityId(0, 0, 3));
    token.setMemo(tokenMemo);
    token.setAdminKey(TxnHandlingScenario.TOKEN_ADMIN_KT.asJKey());
    token.setFreezeKey(TxnHandlingScenario.TOKEN_FREEZE_KT.asJKey());
    token.setKycKey(TxnHandlingScenario.TOKEN_KYC_KT.asJKey());
    token.setSupplyKey(COMPLEX_KEY_ACCOUNT_KT.asJKey());
    token.setWipeKey(MISC_ACCOUNT_KT.asJKey());
    token.setFeeScheduleKey(MISC_ACCOUNT_KT.asJKey());
    token.setPauseKey(TxnHandlingScenario.TOKEN_PAUSE_KT.asJKey());
    token.setAutoRenewAccount(EntityId.fromGrpcAccountId(autoRenew));
    token.setExpiry(expiry);
    token.setAutoRenewPeriod(autoRenewPeriod);
    token.setDeleted(true);
    token.setPaused(true);
    token.setTokenType(TokenType.FUNGIBLE_COMMON);
    token.setSupplyType(TokenSupplyType.FINITE);
    token.setFeeScheduleFrom(grpcCustomFees);
    scheduleStore = mock(ScheduleStore.class);
    final var scheduleMemo = "For what but eye and ear";
    parentScheduleCreate = scheduleCreateTxnWith(
            SCHEDULE_ADMIN_KT.asKey(),
            scheduleMemo,
            payerAccountId,
            creatorAccountID,
            MiscUtils.asTimestamp(now.toJava()));
    schedule = MerkleSchedule.from(parentScheduleCreate.toByteArray(), expiry);
    schedule.witnessValidSignature("01234567890123456789012345678901".getBytes());
    schedule.witnessValidSignature("_123456789_123456789_123456789_1".getBytes());
    schedule.witnessValidSignature("_o23456789_o23456789_o23456789_o".getBytes());
    contents = mock(Map.class);
    attrs = mock(Map.class);
    bytecode = mock(Map.class);
    specialFiles = mock(MerkleSpecialFiles.class);
    mockTokenRelsFn = (BiFunction<StateView, EntityNum, List<TokenRelationship>>) mock(BiFunction.class);
    StateView.tokenRelsFn = mockTokenRelsFn;
    final var uniqueTokens = new MerkleMap<EntityNumPair, MerkleUniqueToken>();
    uniqueTokens.put(targetNftKey, targetNft);
    uniqueTokens.put(treasuryNftKey, treasuryNft);
    storage = (VirtualMap<VirtualBlobKey, VirtualBlobValue>) mock(VirtualMap.class);
    contractStorage = (VirtualMap<ContractKey, ContractValue>) mock(VirtualMap.class);
    children = new MutableStateChildren();
    children.setUniqueTokens(uniqueTokens);
    children.setAccounts(contracts);
    children.setTokenAssociations(tokenRels);
    children.setSpecialFiles(specialFiles);
    networkInfo = mock(NetworkInfo.class);
    subject = new StateView(tokenStore, scheduleStore, children, networkInfo);
    subject.fileAttrs = attrs;
    subject.fileContents = contents;
    subject.contractBytecode = bytecode;
}
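With storage and contractStorage mocked rather than backed by a data source, individual tests can stub virtual-map reads directly. A minimal sketch in the BDDMockito given/willReturn style common to Mockito-based tests (the VirtualBlobKey constructor shown is an assumption, the file number 1234 is hypothetical, and wiring the map into children is elided):

final var blobKey = new VirtualBlobKey(VirtualBlobKey.Type.FILE_DATA, 1234);
given(storage.get(blobKey)).willReturn(new VirtualBlobValue(data)); // data is the test fixture's byte array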