Example usage of com.hedera.services.bdd.spec.HapiSpecOperation in the hashgraph/hedera-services project: the updateSpecialFile method of the UtilVerbs class.
/**
 * Uploads {@code contents} to a special file via an initial {@code FileUpdate} carrying the
 * first {@code bytesPerOp} bytes, followed by bursts of {@code FileAppend}s of at most
 * {@code bytesPerOp} bytes each, with up to {@code appendsPerBurst} appends per burst.
 * Each burst is awaited (via a latch released by the status callback of the burst's final
 * append) before the next burst is submitted.
 *
 * @param payer the account paying for (and signing) every transaction
 * @param fileName the registry name of the target file
 * @param contents the full bytes to upload
 * @param bytesPerOp the maximum bytes carried by the initial update and by each append
 * @param appendsPerBurst the maximum number of appends submitted in one burst
 * @return the composite operation performing the upload
 */
public static HapiSpecOperation updateSpecialFile(final String payer, final String fileName, final ByteString contents, final int bytesPerOp, final int appendsPerBurst) {
    return withOpContext((spec, opLog) -> {
        final var bytesToUpload = contents.size();
        final var bytesToAppend = bytesToUpload - bytesPerOp;
        // Ceiling division: a partial trailing chunk costs one more append
        final var appendsRequired = bytesToAppend / bytesPerOp + Math.min(1, bytesToAppend % bytesPerOp);
        COMMON_MESSAGES.info("Beginning update for " + fileName + " (" + appendsRequired + " appends required)");
        final var numBursts = appendsRequired / appendsPerBurst + Math.min(1, appendsRequired % appendsPerBurst);
        // The initial FileUpdate carries the first (up to) bytesPerOp bytes
        int position = Math.min(bytesPerOp, bytesToUpload);
        final var updateSubOp = fileUpdate(fileName).fee(ONE_HUNDRED_HBARS).contents(contents.substring(0, position)).alertingPre(fid -> COMMON_MESSAGES.info("Submitting initial update for file 0.0." + fid.getFileNum())).alertingPost(code -> COMMON_MESSAGES.info("Finished initial update with " + code)).noLogging().payingWith(payer).signedBy(payer);
        allRunFor(spec, updateSubOp);
        final AtomicInteger burstNo = new AtomicInteger(1);
        while (position < bytesToUpload) {
            final var totalBytesLeft = bytesToUpload - position;
            final var appendsLeft = totalBytesLeft / bytesPerOp + Math.min(1, totalBytesLeft % bytesPerOp);
            final var appendsHere = new AtomicInteger(Math.min(appendsPerBurst, appendsLeft));
            boolean isFirstAppend = true;
            final List<HapiSpecOperation> theBurst = new ArrayList<>();
            final CountDownLatch burstLatch = new CountDownLatch(1);
            final AtomicReference<Instant> burstStart = new AtomicReference<>();
            // FIX: loop only while the counter is still positive; the previous ">= 0"
            // condition ran one extra iteration per burst, submitting a zero-byte
            // FileAppend and attaching the latch-release callback to that empty op
            while (appendsHere.getAndDecrement() > 0) {
                final var bytesLeft = bytesToUpload - position;
                final var bytesThisAppend = Math.min(bytesLeft, bytesPerOp);
                final var newPosition = position + bytesThisAppend;
                final var appendSubOp = fileAppend(fileName).content(contents.substring(position, newPosition).toByteArray()).fee(ONE_HUNDRED_HBARS).noLogging().payingWith(payer).signedBy(payer).deferStatusResolution();
                if (isFirstAppend) {
                    // Snapshot the counters so the lambda logs the values at creation time
                    final var fixedBurstNo = burstNo.get();
                    final var fixedAppendsHere = appendsHere.get() + 1;
                    appendSubOp.alertingPre(fid -> {
                        burstStart.set(Instant.now());
                        COMMON_MESSAGES.info("Starting burst " + fixedBurstNo + "/" + numBursts + " (" + fixedAppendsHere + " ops)");
                    });
                    isFirstAppend = false;
                }
                // The counter reaches exactly 0 on the burst's final append; its status
                // callback releases the latch so the next burst can begin
                if (appendsHere.get() == 0) {
                    final var fixedBurstNo = burstNo.get();
                    appendSubOp.alertingPost(code -> {
                        final var burstSecs = Duration.between(burstStart.get(), Instant.now()).getSeconds();
                        COMMON_MESSAGES.info("Completed burst #" + fixedBurstNo + "/" + numBursts + " in " + burstSecs + "s with " + code);
                        burstLatch.countDown();
                    });
                }
                theBurst.add(appendSubOp);
                position = newPosition;
            }
            allRunFor(spec, theBurst);
            // Block until the final append of this burst has resolved its status
            burstLatch.await();
            burstNo.getAndIncrement();
        }
    });
}
Example usage of com.hedera.services.bdd.spec.HapiSpecOperation in the hashgraph/hedera-services project: the updateLargeFile method of the UtilVerbs class.
/**
 * Replaces a file's contents in chunks: an initial {@code FileUpdate} with the first
 * (up to) 4K bytes, then one {@code FileAppend} of up to 4K bytes per remaining chunk.
 * All operations are collected first and then run in order.
 *
 * @param payer the account paying for every transaction
 * @param fileName the registry name of the target file
 * @param byteString the full new contents of the file
 * @param signOnlyWithPayer if true, each transaction is signed only by the payer
 * @param tinyBarsToOffer if present, the fee to offer on each transaction
 * @param updateCustomizer hook applied to the initial update before fee/signature setup
 * @param appendCustomizer hook applied to each append, given the number of appends remaining
 * @return the composite operation performing the upload
 */
public static HapiSpecOperation updateLargeFile(String payer, String fileName, ByteString byteString, boolean signOnlyWithPayer, OptionalLong tinyBarsToOffer, Consumer<HapiFileUpdate> updateCustomizer, ObjIntConsumer<HapiFileAppend> appendCustomizer) {
    return withOpContext((spec, ctxLog) -> {
        final List<HapiSpecOperation> ops = new ArrayList<>();
        final int totalBytes = byteString.size();
        // The initial update carries the first (up to) 4K bytes
        int uploaded = Math.min(BYTES_4K, totalBytes);
        HapiFileUpdate update = fileUpdate(fileName).contents(byteString.substring(0, uploaded)).hasKnownStatusFrom(SUCCESS, FEE_SCHEDULE_FILE_PART_UPLOADED).noLogging().payingWith(payer);
        updateCustomizer.accept(update);
        if (tinyBarsToOffer.isPresent()) {
            update = update.fee(tinyBarsToOffer.getAsLong());
        }
        if (signOnlyWithPayer) {
            update = update.signedBy(payer);
        }
        ops.add(update);
        // Remaining bytes go out in 4K appends; ceiling division for the total count
        final int remaining = totalBytes - uploaded;
        final int appendsRequired = remaining / BYTES_4K + Math.min(1, remaining % BYTES_4K);
        int appendsDone = 0;
        while (uploaded < totalBytes) {
            final int next = Math.min(totalBytes, uploaded + BYTES_4K);
            HapiFileAppend append = fileAppend(fileName).content(byteString.substring(uploaded, next).toByteArray()).hasKnownStatusFrom(SUCCESS, FEE_SCHEDULE_FILE_PART_UPLOADED).noLogging().payingWith(payer);
            appendCustomizer.accept(append, appendsRequired - appendsDone);
            if (tinyBarsToOffer.isPresent()) {
                append = append.fee(tinyBarsToOffer.getAsLong());
            }
            if (signOnlyWithPayer) {
                append = append.signedBy(payer);
            }
            ops.add(append);
            uploaded = next;
            appendsDone++;
        }
        CustomSpecAssert.allRunFor(spec, ops);
    });
}
Example usage of com.hedera.services.bdd.spec.HapiSpecOperation in the hashgraph/hedera-services project: the contractListWithPropertiesInheritedFrom method of the UtilVerbs class.
/**
 * Verifies a registered contract list: asserts it has the expected size, that its entries
 * share shard/realm and are strictly increasing by contract number, and that each entry's
 * info reports the properties inherited from {@code parent}.
 *
 * @param contractList the registry name prefix of the contract list
 * @param expectedSize the size the list is required to have
 * @param parent the registry name of the parent whose properties should be inherited
 * @return the composite verification operation
 */
public static HapiSpecOperation contractListWithPropertiesInheritedFrom(final String contractList, final long expectedSize, final String parent) {
    return withOpContext((spec, ctxLog) -> {
        final long actualSize = spec.registry().getAmount(contractList + "Size");
        Assertions.assertEquals(expectedSize, actualSize, contractList + " has bad size!");
        // Entries must agree on shard/realm and be strictly ordered by contract number
        if (actualSize > 1) {
            ContractID prior = spec.registry().getContractId(contractList + "0");
            for (long i = 1; i < actualSize; i++) {
                final ContractID next = spec.registry().getContractId(contractList + i);
                Assertions.assertEquals(prior.getShardNum(), next.getShardNum());
                Assertions.assertEquals(prior.getRealmNum(), next.getRealmNum());
                Assertions.assertTrue(prior.getContractNum() < next.getContractNum());
                prior = next;
            }
        }
        // Each entry's contract info must show properties inherited from the parent
        final List<HapiSpecOperation> checks = new ArrayList<>();
        for (long i = 0; i < actualSize; i++) {
            checks.add(getContractInfo(contractList + i).has(contractWith().propertiesInheritedFrom(parent)).logged());
        }
        CustomSpecAssert.allRunFor(spec, checks);
    });
}
Example usage of com.hedera.services.bdd.spec.HapiSpecOperation in the hashgraph/hedera-services project: the submitOp method of the ProviderRun class.
/**
 * Drives a load test: repeatedly asks the {@code OpProvider} for operations and submits
 * them in parallel bursts, throttling to at most {@code maxOpsPerSec} submissions per
 * second and at most {@code maxPendingOps} unresolved operations, until either the
 * configured duration elapses or (when a non-negative total was configured) all requested
 * operations have been submitted and resolved.
 *
 * @param spec the spec this operation runs within
 * @return false, since this operation never requires a follow-up submission
 */
@Override
protected boolean submitOp(HapiApiSpec spec) {
    // Locals renamed from SHOUT_CASE to lowerCamelCase per Java convention
    final int maxParallelism = Runtime.getRuntime().availableProcessors();
    final int maxOpsPerSec = maxOpsPerSecSupplier.getAsInt();
    final int maxPendingOps = maxPendingOpsSupplier.getAsInt();
    final int backoffSleepSecs = backoffSleepSecsSupplier.getAsInt();
    final long duration = durationSupplier.getAsLong();
    final OpProvider provider = providerFn.apply(spec);
    allRunFor(spec, provider.suggestedInitializers().toArray(new HapiSpecOperation[0]));
    log.info("Finished initialization for provider run...");
    final TimeUnit unit = unitSupplier.get();
    final Stopwatch stopwatch = Stopwatch.createStarted();
    final var remainingOpsToSubmit = new AtomicInteger(totalOpsToSubmit.getAsInt());
    // A negative configured total means "no fixed cap; run for the full duration"
    final boolean fixedOpSubmission = remainingOpsToSubmit.get() >= 0;
    int submittedSoFar = 0;
    final long durationMs = unit.toMillis(duration);
    final long logIncrementMs = durationMs / 100;
    long nextLogTargetMs = logIncrementMs;
    long lastDeltaLogged = -1;
    final var opsThisSecond = new AtomicInteger(0);
    final var submissionBoundaryMs = new AtomicLong(stopwatch.elapsed(MILLISECONDS) + 1_000);
    while (stopwatch.elapsed(unit) < duration) {
        final long elapsedMs = stopwatch.elapsed(MILLISECONDS);
        // Reset the per-second throttle window when the boundary is crossed
        if (elapsedMs > submissionBoundaryMs.get()) {
            submissionBoundaryMs.getAndAdd(1_000);
            opsThisSecond.set(0);
        }
        final int numPending = spec.numPendingOps();
        // Log progress roughly once per 1% of the configured duration
        if (elapsedMs > nextLogTargetMs) {
            nextLogTargetMs += logIncrementMs;
            final long delta = duration - stopwatch.elapsed(unit);
            if (delta != lastDeltaLogged) {
                log.info(delta + " " + unit.toString().toLowerCase() + (fixedOpSubmission ? (" or " + remainingOpsToSubmit + " ops ") : "") + " left in test - " + submittedSoFar + " ops submitted so far (" + numPending + " pending).");
                log.info("Precheck txn status counts :: " + spec.precheckStatusCounts());
                log.info("Resolved txn status counts :: " + spec.finalizedStatusCounts());
                log.info("\n------------------------------\n");
                lastDeltaLogged = delta;
            }
        }
        if (fixedOpSubmission && remainingOpsToSubmit.get() <= 0) {
            // All capped ops submitted; spin until outstanding ops resolve, then finish
            // NOTE(review): this spins without sleeping — confirm that is acceptable here
            if (numPending > 0) {
                continue;
            }
            log.info("Finished submission of total {} operations", totalOpsToSubmit.getAsInt());
            break;
        }
        if (numPending < maxPendingOps) {
            // Burst size is bounded by available processors, the per-second budget,
            // and (when capped) the number of ops still owed
            final int secondBudget = maxOpsPerSec - opsThisSecond.get();
            final int burstCap = Math.min(maxParallelism, fixedOpSubmission ? Math.min(remainingOpsToSubmit.get(), secondBudget) : secondBudget);
            final HapiSpecOperation[] burst = IntStream.range(0, burstCap)
                    .mapToObj(ignore -> provider.get())
                    .flatMap(Optional::stream)
                    .peek(op -> counts.get(op.type()).getAndIncrement())
                    .toArray(HapiSpecOperation[]::new);
            if (burst.length > 0) {
                allRunFor(spec, inParallel(burst));
                submittedSoFar += burst.length;
                if (fixedOpSubmission) {
                    remainingOpsToSubmit.getAndAdd(-burst.length);
                }
                opsThisSecond.getAndAdd(burst.length);
            }
        } else {
            log.warn("Now " + numPending + " ops pending; backing off for " + backoffSleepSecs + "s!");
            try {
                Thread.sleep(backoffSleepSecs * 1_000L);
            } catch (InterruptedException e) {
                // FIX: restore the interrupt status instead of silently swallowing it,
                // and end the run promptly when interrupted
                Thread.currentThread().interrupt();
                break;
            }
        }
    }
    final Map<HederaFunctionality, Integer> finalCounts = counts.entrySet().stream()
            .filter(entry -> entry.getValue().get() > 0)
            .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().get()));
    log.info("Final breakdown of *provided* ops: " + finalCounts);
    log.info("Final breakdown of *resolved* statuses: " + spec.finalizedStatusCounts());
    return false;
}
Example usage of com.hedera.services.bdd.spec.HapiSpecOperation in the hashgraph/hedera-services project: the runCreateFiles method of the CreateFilesBeforeReconnect class.
/**
 * Builds the file-creation load spec run before a reconnect test, using the
 * reconnect-specific TPS, duration, and thread settings.
 *
 * @return the configured load-test spec
 */
private HapiApiSpec runCreateFiles() {
    final PerfTestLoadSettings settings = new PerfTestLoadSettings(
            FILE_CREATION_RECONNECT_TPS,
            DEFAULT_MINS_FOR_RECONNECT_TESTS,
            DEFAULT_THREADS_FOR_RECONNECT_TESTS);
    // Each burst is a single file-creation operation
    final Supplier<HapiSpecOperation[]> createBurst =
            () -> new HapiSpecOperation[] { generateFileCreateOperation() };
    return defaultHapiSpec("RunCreateFiles")
            .given(logIt(ignore -> settings.toString()))
            .when()
            .then(defaultLoadTest(createBurst, settings));
}
Aggregations