Use of com.hedera.services.bdd.suites.perf.PerfTestLoadSettings in project hedera-services by hashgraph.
From the class FileUpdateLoadTest, method runFileUpdates:
private HapiApiSpec runFileUpdates() {
    PerfTestLoadSettings settings = new PerfTestLoadSettings();
    final AtomicInteger submittedSoFar = new AtomicInteger(0);
    final byte[] NEW_CONTENTS = TxnUtils.randomUtf8Bytes(TxnUtils.BYTES_4K);

    Supplier<HapiSpecOperation[]> fileUpdateBurst = () -> new HapiSpecOperation[] {
            inParallel(IntStream.range(0, settings.getBurstSize())
                    .mapToObj(i -> TxnVerbs.fileUpdate("target")
                            .fee(Integer.MAX_VALUE)
                            .contents(NEW_CONTENTS)
                            .noLogging()
                            .hasPrecheckFrom(OK, BUSY, DUPLICATE_TRANSACTION, PLATFORM_TRANSACTION_NOT_CREATED)
                            .deferStatusResolution())
                    .toArray(n -> new HapiSpecOperation[n])),
            logIt(ignore -> String.format("Now a total of %d file updates submitted.",
                    submittedSoFar.addAndGet(settings.getBurstSize())))
    };

    return defaultHapiSpec("RunFileUpdates")
            .given(
                    withOpContext((spec, ignore) -> settings.setFrom(spec.setup().ciPropertiesMap())),
                    logIt(ignore -> settings.toString())
            ).when(
                    fileCreate("target").contents("The initial contents!")
            ).then(
                    runLoadTest(fileUpdateBurst)
                            .tps(settings::getTps)
                            .tolerance(settings::getTolerancePercentage)
                            .allowedSecsBelow(settings::getAllowedSecsBelow)
                            .lasting(settings::getMins, () -> MINUTES)
            );
}
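All of these perf suites share the same setup pattern: a PerfTestLoadSettings instance starts from its built-in defaults and is then overridden from the spec's CI properties map inside the given() clause, after which its getters (getTps, getBurstSize, getMins, and so on) drive the load-test parameters. A minimal, self-contained sketch of that pattern is below; the SimpleLoadSettings class and the property keys "tps", "mins", and "burstSize" are hypothetical stand-ins for illustration, not the actual PerfTestLoadSettings implementation.

import java.util.Map;

// Hypothetical stand-in for PerfTestLoadSettings: defaults that a CI properties map may override.
class SimpleLoadSettings {
    private int tps = 500;       // default transactions per second
    private int mins = 1;        // default duration in minutes
    private int burstSize = 5;   // default operations submitted per burst

    // Mirrors the settings.setFrom(spec.setup().ciPropertiesMap()) call used in the suites above.
    void setFrom(Map<String, String> ciProps) {
        if (ciProps.containsKey("tps")) {
            tps = Integer.parseInt(ciProps.get("tps"));
        }
        if (ciProps.containsKey("mins")) {
            mins = Integer.parseInt(ciProps.get("mins"));
        }
        if (ciProps.containsKey("burstSize")) {
            burstSize = Integer.parseInt(ciProps.get("burstSize"));
        }
    }

    int getTps() { return tps; }
    int getMins() { return mins; }
    int getBurstSize() { return burstSize; }
}

With a holder like this, a driver can call settings.setFrom(Map.of("tps", "1000", "mins", "5")) and every getter afterwards reflects the CI override rather than the default.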
Use of com.hedera.services.bdd.suites.perf.PerfTestLoadSettings in project hedera-services by hashgraph.
From the class MixedFileOpsLoadTest, method runMixedFileOps:
protected HapiApiSpec runMixedFileOps() {
    PerfTestLoadSettings settings = new PerfTestLoadSettings();
    final AtomicInteger submittedSoFar = new AtomicInteger(0);
    String initialContent = "The initial contents!";
    String targetFile = "targetFile";

    Supplier<HapiSpecOperation[]> mixedFileOpsBurst = () -> new HapiSpecOperation[] {
            fileCreate(targetFile + submittedSoFar.getAndIncrement())
                    .contents(initialContent)
                    .hasKnownStatusFrom(SUCCESS, UNKNOWN),
            fileUpdate(targetFile)
                    .fee(ONE_HUNDRED_HBARS)
                    .contents(TxnUtils.randomUtf8Bytes(TxnUtils.BYTES_4K))
                    .noLogging()
                    .payingWith(GENESIS)
                    .hasAnyPrecheck()
                    .hasKnownStatusFrom(SUCCESS, UNKNOWN)
                    .deferStatusResolution(),
            fileAppend(targetFile)
                    .content("dummy")
                    .hasAnyPrecheck()
                    .payingWith(GENESIS)
                    .fee(ONE_HUNDRED_HBARS)
                    .hasKnownStatusFrom(SUCCESS, UNKNOWN)
                    .deferStatusResolution()
    };

    return defaultHapiSpec("runMixedFileOps")
            .given(
                    withOpContext((spec, ignore) -> settings.setFrom(spec.setup().ciPropertiesMap())),
                    logIt(ignore -> settings.toString())
            ).when(
                    fileCreate(targetFile).contents(initialContent).hasAnyPrecheck().payingWith(GENESIS),
                    getFileInfo(targetFile).logging().payingWith(GENESIS)
            ).then(
                    defaultLoadTest(mixedFileOpsBurst, settings)
            );
}
Use of com.hedera.services.bdd.suites.perf.PerfTestLoadSettings in project hedera-services by hashgraph.
From the class MixedOpsLoadTest, method runMixedOps:
protected HapiApiSpec runMixedOps() {
    PerfTestLoadSettings settings = new PerfTestLoadSettings();
    Random r = new Random();
    AtomicInteger tokenId = new AtomicInteger(0);
    AtomicInteger scheduleId = new AtomicInteger(0);

    Supplier<HapiSpecOperation[]> mixedOpsBurst = () -> new HapiSpecOperation[] {
            cryptoTransfer(tinyBarsFromTo(sender, receiver, 1L))
                    .noLogging()
                    .payingWith(sender)
                    .signedBy(GENESIS)
                    .suppressStats(true)
                    .fee(ONE_HBAR)
                    .hasKnownStatusFrom(SUCCESS, OK, INSUFFICIENT_PAYER_BALANCE, UNKNOWN, TRANSACTION_EXPIRED)
                    .hasRetryPrecheckFrom(BUSY, DUPLICATE_TRANSACTION, PLATFORM_TRANSACTION_NOT_CREATED, PAYER_ACCOUNT_NOT_FOUND)
                    .deferStatusResolution(),
            submitMessageTo(topic)
                    .message(ArrayUtils.addAll(
                            ByteBuffer.allocate(8).putLong(Instant.now().toEpochMilli()).array(),
                            randomUtf8Bytes(messageSize - 8)))
                    .noLogging()
                    .payingWith(GENESIS)
                    .signedBy(sender, submitKey)
                    .fee(ONE_HBAR)
                    .suppressStats(true)
                    .hasRetryPrecheckFrom(BUSY, DUPLICATE_TRANSACTION, PLATFORM_TRANSACTION_NOT_CREATED, TOPIC_EXPIRED, INVALID_TOPIC_ID, INSUFFICIENT_PAYER_BALANCE)
                    .hasKnownStatusFrom(SUCCESS, OK, INVALID_TOPIC_ID, INSUFFICIENT_PAYER_BALANCE, UNKNOWN, TRANSACTION_EXPIRED)
                    .deferStatusResolution(),
            r.nextInt(100) > 5
                    ? cryptoTransfer(moving(1, token + r.nextInt(NUM_SUBMISSIONS)).between(sender, receiver))
                            .payingWith(sender)
                            .signedBy(GENESIS)
                            .fee(ONE_HUNDRED_HBARS)
                            .noLogging()
                            .suppressStats(true)
                            .hasPrecheckFrom(OK, INSUFFICIENT_PAYER_BALANCE, EMPTY_TOKEN_TRANSFER_ACCOUNT_AMOUNTS, DUPLICATE_TRANSACTION)
                            .hasRetryPrecheckFrom(permissiblePrechecks)
                            .hasKnownStatusFrom(SUCCESS, OK, INSUFFICIENT_TOKEN_BALANCE, TRANSACTION_EXPIRED, INVALID_TOKEN_ID, UNKNOWN, TOKEN_NOT_ASSOCIATED_TO_ACCOUNT)
                            .deferStatusResolution()
                    : scheduleSign(schedule + "-" + getHostName() + "-" + r.nextInt(NUM_SUBMISSIONS))
                            .ignoreIfMissing()
                            .noLogging()
                            .alsoSigningWith(receiver)
                            .hasPrecheckFrom(OK, INVALID_SCHEDULE_ID)
                            .hasKnownStatusFrom(SUCCESS, OK, TRANSACTION_EXPIRED, INVALID_SCHEDULE_ID, UNKNOWN, SCHEDULE_ALREADY_EXECUTED)
                            .fee(ONE_HBAR)
                            .deferStatusResolution()
    };

    return defaultHapiSpec("RunMixedOps")
            .given(
                    withOpContext((spec, ignore) -> settings.setFrom(spec.setup().ciPropertiesMap())),
                    logIt(ignore -> settings.toString()),
                    newKeyNamed("submitKey"),
                    tokenOpsEnablement(),
                    scheduleOpsEnablement(),
                    cryptoCreate("treasury").hasRetryPrecheckFrom(permissiblePrechecks).key(GENESIS)
            ).when(
                    fileUpdate(APP_PROPERTIES)
                            .payingWith(GENESIS)
                            .overridingProps(Map.of(
                                    "hapi.throttling.buckets.fastOpBucket.capacity", "1300000.0",
                                    "hapi.throttling.ops.consensusUpdateTopic.capacityRequired", "1.0",
                                    "hapi.throttling.ops.consensusGetTopicInfo.capacityRequired", "1.0",
                                    "hapi.throttling.ops.consensusSubmitMessage.capacityRequired", "1.0",
                                    "tokens.maxPerAccount", "10000000")),
                    cryptoCreate(sender)
                            .balance(initialBalance.getAsLong())
                            .withRecharging()
                            .key(GENESIS)
                            .rechargeWindow(3)
                            .hasRetryPrecheckFrom(permissiblePrechecks),
                    cryptoCreate(receiver).hasRetryPrecheckFrom(permissiblePrechecks).key(GENESIS),
                    createTopic(topic).submitKeyName("submitKey"),
                    inParallel(IntStream.range(0, NUM_SUBMISSIONS)
                            .mapToObj(ignore -> tokenCreate("token" + tokenId.getAndIncrement())
                                    .payingWith(GENESIS)
                                    .signedBy(GENESIS)
                                    .fee(ONE_HUNDRED_HBARS)
                                    .initialSupply(ONE_HUNDRED_HBARS)
                                    .treasury("treasury")
                                    .hasRetryPrecheckFrom(permissiblePrechecks)
                                    .hasPrecheckFrom(DUPLICATE_TRANSACTION, OK)
                                    .deferStatusResolution()
                                    .noLogging())
                            .toArray(n -> new HapiSpecOperation[n])),
                    sleepFor(10000),
                    inParallel(IntStream.range(0, NUM_SUBMISSIONS)
                            .mapToObj(ignore -> scheduleCreate(
                                    "schedule-" + getHostName() + "-" + scheduleId.getAndIncrement(),
                                    cryptoTransfer(tinyBarsFromTo(sender, receiver, 1)))
                                    .signedBy(DEFAULT_PAYER)
                                    .fee(ONE_HUNDRED_HBARS)
                                    .alsoSigningWith(sender)
                                    .hasPrecheckFrom(STANDARD_PERMISSIBLE_PRECHECKS)
                                    .hasAnyKnownStatus()
                                    .deferStatusResolution()
                                    .adminKey(DEFAULT_PAYER)
                                    .noLogging())
                            .toArray(n -> new HapiSpecOperation[n])),
                    sleepFor(10000),
                    inParallel(IntStream.range(0, NUM_SUBMISSIONS)
                            .mapToObj(i -> tokenAssociate(sender, "token" + i)
                                    .payingWith(GENESIS)
                                    .signedBy(GENESIS)
                                    .hasRetryPrecheckFrom(permissiblePrechecks)
                                    .hasPrecheckFrom(DUPLICATE_TRANSACTION, OK)
                                    .hasKnownStatusFrom(SUCCESS, TOKEN_ALREADY_ASSOCIATED_TO_ACCOUNT, INVALID_TOKEN_ID, TRANSACTION_EXPIRED, TOKENS_PER_ACCOUNT_LIMIT_EXCEEDED, OK)
                                    .fee(ONE_HUNDRED_HBARS)
                                    .suppressStats(true)
                                    .deferStatusResolution()
                                    .noLogging())
                            .toArray(n -> new HapiSpecOperation[n])),
                    sleepFor(10000)
            ).then(
                    defaultLoadTest(mixedOpsBurst, settings)
            );
}
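In this burst the r.nextInt(100) > 5 check is the mix knob: nextInt(100) is uniform over 0 to 99, so the 94 values from 6 to 99 select the token transfer and the remaining 6 values select the scheduleSign, i.e. roughly a 94%/6% split. A tiny stand-alone simulation (purely illustrative, not part of the suite) makes the split visible:

import java.util.Random;

public class BurstMixDemo {
    public static void main(String[] args) {
        // Same branch condition as mixedOpsBurst: nextInt(100) > 5 picks the token transfer,
        // otherwise the scheduleSign path.
        Random r = new Random();
        int transfers = 0;
        int signs = 0;
        for (int i = 0; i < 1_000_000; i++) {
            if (r.nextInt(100) > 5) {
                transfers++;
            } else {
                signs++;
            }
        }
        System.out.printf("token transfers: %.1f%%, schedule signs: %.1f%%%n",
                100.0 * transfers / 1_000_000, 100.0 * signs / 1_000_000);
    }
}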
Use of com.hedera.services.bdd.suites.perf.PerfTestLoadSettings in project hedera-services by hashgraph.
From the class MixedOpsMemoPerfSuite, method runMixedMemoOps:
// perform cryptoCreate, cryptoUpdate, TokenCreate, TokenUpdate, FileCreate, FileUpdate txs with entity memo set.
protected HapiApiSpec runMixedMemoOps() {
    PerfTestLoadSettings settings = new PerfTestLoadSettings();
    final AtomicInteger createdSoFar = new AtomicInteger(0);

    Supplier<HapiSpecOperation[]> mixedOpsBurst = () -> new HapiSpecOperation[] {
            cryptoCreate("testAccount" + createdSoFar.getAndIncrement())
                    .balance(1L)
                    .fee(100_000_000L)
                    .payingWith(GENESIS)
                    .entityMemo(new String(TxnUtils.randomUtf8Bytes(memoLength.getAsInt()), StandardCharsets.UTF_8))
                    .noLogging()
                    .hasPrecheckFrom(permissiblePrechecks)
                    .deferStatusResolution(),
            getAccountInfo(TARGET_ACCOUNT + "Info")
                    .payingWith(GENESIS)
                    .has(accountWith().memo(ACCOUNT_MEMO))
                    .hasAnswerOnlyPrecheckFrom(permissiblePrechecks)
                    .hasCostAnswerPrecheckFrom(permissiblePrechecks)
                    .noLogging(),
            cryptoUpdate(TARGET_ACCOUNT)
                    .payingWith(GENESIS)
                    .entityMemo(new String(TxnUtils.randomUtf8Bytes(memoLength.getAsInt()), StandardCharsets.UTF_8))
                    .noLogging()
                    .hasPrecheckFrom(permissiblePrechecks)
                    .deferStatusResolution(),
            tokenCreate("testToken" + createdSoFar.getAndIncrement())
                    .payingWith(GENESIS)
                    .entityMemo(new String(TxnUtils.randomUtf8Bytes(memoLength.getAsInt()), StandardCharsets.UTF_8))
                    .noLogging()
                    .hasPrecheckFrom(permissiblePrechecks)
                    .deferStatusResolution(),
            getTokenInfo(TARGET_TOKEN + "Info")
                    .payingWith(GENESIS)
                    .hasEntityMemo(TOKEN_MEMO)
                    .hasAnswerOnlyPrecheckFrom(permissiblePrechecks)
                    .hasCostAnswerPrecheckFrom(permissiblePrechecks)
                    .noLogging(),
            tokenUpdate(TARGET_TOKEN)
                    .payingWith(GENESIS)
                    .entityMemo(new String(TxnUtils.randomUtf8Bytes(memoLength.getAsInt()), StandardCharsets.UTF_8))
                    .noLogging()
                    .hasPrecheckFrom(permissiblePrechecks)
                    .deferStatusResolution(),
            createTopic("testTopic" + createdSoFar.getAndIncrement())
                    .topicMemo(new String(TxnUtils.randomUtf8Bytes(memoLength.getAsInt()), StandardCharsets.UTF_8))
                    .payingWith(GENESIS)
                    .adminKeyName("adminKey")
                    .noLogging()
                    .hasPrecheckFrom(permissiblePrechecks)
                    .deferStatusResolution(),
            getTopicInfo(TARGET_TOPIC + "Info")
                    .payingWith(GENESIS)
                    .hasMemo(TOPIC_MEMO)
                    .hasAnswerOnlyPrecheckFrom(permissiblePrechecks)
                    .hasCostAnswerPrecheckFrom(permissiblePrechecks)
                    .noLogging(),
            updateTopic(TARGET_TOPIC)
                    .topicMemo(new String(TxnUtils.randomUtf8Bytes(memoLength.getAsInt()), StandardCharsets.UTF_8))
                    .payingWith(GENESIS)
                    .adminKey("adminKey")
                    .noLogging()
                    .hasPrecheckFrom(permissiblePrechecks)
                    .deferStatusResolution()
    };

    return defaultHapiSpec("RunMixedMemoOps")
            .given(
                    withOpContext((spec, ignore) -> settings.setFrom(spec.setup().ciPropertiesMap())),
                    logIt(ignore -> settings.toString()),
                    tokenOpsEnablement()
            ).when(
                    fileUpdate(APP_PROPERTIES)
                            .payingWith(GENESIS)
                            .overridingProps(Map.of(
                                    "hapi.throttling.buckets.fastOpBucket.capacity", "1300000.0",
                                    "hapi.throttling.ops.consensusUpdateTopic.capacityRequired", "1.0",
                                    "hapi.throttling.ops.consensusGetTopicInfo.capacityRequired", "1.0",
                                    "hapi.throttling.ops.consensusSubmitMessage.capacityRequired", "1.0",
                                    "tokens.maxPerAccount", "10000000")),
                    sleepFor(5000),
                    newKeyNamed("adminKey"),
                    logIt(ignore -> settings.toString()),
                    cryptoCreate(TARGET_ACCOUNT)
                            .fee(100_000_000L)
                            .payingWith(GENESIS)
                            .entityMemo("Memo Length :" + settings.getMemoLength())
                            .logged(),
                    getAccountInfo(TARGET_ACCOUNT).logged(),
                    cryptoCreate(TARGET_ACCOUNT + "Info")
                            .fee(100_000_000L)
                            .payingWith(GENESIS)
                            .entityMemo(ACCOUNT_MEMO)
                            .logged(),
                    createTopic(TARGET_TOPIC)
                            .topicMemo(TOPIC_MEMO)
                            .adminKeyName("adminKey")
                            .payingWith(GENESIS)
                            .logged(),
                    createTopic(TARGET_TOPIC + "Info")
                            .payingWith(GENESIS)
                            .adminKeyName("adminKey")
                            .topicMemo(TOPIC_MEMO)
                            .logged(),
                    tokenCreate(TARGET_TOKEN)
                            .entityMemo(TOKEN_MEMO)
                            .payingWith(GENESIS)
                            .logged(),
                    tokenCreate(TARGET_TOKEN + "Info")
                            .entityMemo(TOKEN_MEMO)
                            .payingWith(GENESIS)
                            .logged()
            ).then(
                    defaultLoadTest(mixedOpsBurst, settings)
            );
}
Use of com.hedera.services.bdd.suites.perf.PerfTestLoadSettings in project hedera-services by hashgraph.
From the class TokenTransferBasicLoadTest, method tokenCreatesFactory:
private Function<HapiApiSpec, OpProvider> tokenCreatesFactory(PerfTestLoadSettings settings) {
    int numTotalTokens = settings.getTotalTokens();
    int totalClients = settings.getTotalClients();
    int numActiveTokens = (totalClients >= 1) ? numTotalTokens / totalClients : numTotalTokens;
    AtomicInteger remaining = new AtomicInteger(numActiveTokens - 1);

    return spec -> new OpProvider() {
        @Override
        public List<HapiSpecOperation> suggestedInitializers() {
            return Collections.emptyList();
        }

        @Override
        public Optional<HapiSpecOperation> get() {
            int next;
            if ((next = remaining.getAndDecrement()) < 0) {
                return Optional.empty();
            }
            var payingTreasury = String.format("0.0.%d", settings.getTestTreasureStartAccount() + next);
            var op = tokenCreate(tokenRegistryName(next))
                    .payingWith(DEFAULT_PAYER)
                    .signedBy(DEFAULT_PAYER)
                    .fee(ONE_HUNDRED_HBARS)
                    .initialSupply(100_000_000_000L)
                    .treasury(payingTreasury)
                    .hasRetryPrecheckFrom(BUSY, PLATFORM_TRANSACTION_NOT_CREATED, DUPLICATE_TRANSACTION, INSUFFICIENT_PAYER_BALANCE)
                    .hasPrecheckFrom(DUPLICATE_TRANSACTION, OK)
                    .hasKnownStatusFrom(SUCCESS, TOKEN_ALREADY_ASSOCIATED_TO_ACCOUNT, TOKENS_PER_ACCOUNT_LIMIT_EXCEEDED, FAIL_INVALID)
                    .suppressStats(true)
                    .noLogging();
            return Optional.of(op);
        }
    };
}
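The factory above returns an OpProvider whose get() hands out one tokenCreate per call and returns Optional.empty() once the per-client share of tokens (numActiveTokens) has been exhausted; the negative countdown on the remaining counter is what ends the stream of operations. A toy driver loop showing the same countdown idiom, using plain JDK types rather than the real OpProvider machinery, might look like:

import java.util.Optional;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;

public class CountdownProviderDemo {
    public static void main(String[] args) {
        // Same idiom as tokenCreatesFactory: hand out work until the counter goes negative.
        AtomicInteger remaining = new AtomicInteger(4);
        Supplier<Optional<String>> provider = () -> {
            int next = remaining.getAndDecrement();
            return next < 0 ? Optional.empty() : Optional.of("tokenCreate #" + next);
        };

        // A hypothetical driver: keep pulling operations until the provider signals exhaustion.
        Optional<String> op;
        while ((op = provider.get()).isPresent()) {
            System.out.println("submitting " + op.get());
        }
    }
}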