Use of com.hedera.mirror.importer.parser.record.entity.EntityBatchSaveEvent in the hedera-mirror-node project by hashgraph.
From the class NotifyingEntityListenerTest, method onTopicMessagePayloadTooLong:
/**
 * Verifies that a topic message whose serialized payload is too large is not
 * delivered to subscribers.
 */
@Test
void onTopicMessagePayloadTooLong() throws InterruptedException {
    // given: 5824 raw bytes; presumably the base64/JSON-serialized form just
    // exceeds the 8000B notification payload limit — confirm against listener config
    TopicMessage oversized = topicMessage();
    oversized.setMessage(RandomUtils.nextBytes(5824));
    Flux<TopicMessage> subscription = subscribe(oversized.getTopicId().getId());

    // when: the message is submitted and the batch save event fires
    entityListener.onTopicMessage(oversized);
    entityListener.onSave(new EntityBatchSaveEvent(this));

    // then: no message reaches the subscriber within the verification window
    StepVerifier.create(subscription)
            .expectNextCount(0L)
            .thenCancel()
            .verify(Duration.ofMillis(500));
}
Use of com.hedera.mirror.importer.parser.record.entity.EntityBatchSaveEvent in the hedera-mirror-node project by hashgraph.
From the class RedisEntityListenerTest, method submitAndSave:
/**
 * Submits the topic message to the entity listener and triggers a batch save.
 * <p>
 * On {@link InterruptedException} the current thread's interrupt status is
 * restored; any other failure is rethrown so a broken save cannot pass
 * silently in tests.
 *
 * @param topicMessage the topic message to submit
 */
private void submitAndSave(TopicMessage topicMessage) {
    try {
        entityListener.onTopicMessage(topicMessage);
        entityListener.onSave(new EntityBatchSaveEvent(this));
    } catch (Exception e) {
        if (e instanceof InterruptedException) {
            // standard interruption handling: re-assert the interrupt flag
            Thread.currentThread().interrupt();
        } else {
            // the original code interrupted the thread and swallowed every
            // exception type, masking real persistence failures; surface them
            throw new RuntimeException(e);
        }
    }
}
Use of com.hedera.mirror.importer.parser.record.entity.EntityBatchSaveEvent in the hedera-mirror-node project by hashgraph.
From the class SqlEntityListener, method executeBatches:
/**
 * Persists all accumulated domain collections to the database in one pass.
 * <p>
 * The persistence order is significant: insert-only tables go first, then
 * tables with conflict (upsert) handling, and transfer tables last so the
 * entities they reference already exist. Accumulated state is cleared in the
 * finally block even on failure, so the next batch starts clean.
 *
 * @throws ParserException if any persist operation fails; non-ParserException
 *         causes are wrapped, preserving the original exception as the cause
 */
private void executeBatches() {
    try {
        // batch save action may run asynchronously, triggering it before other operations can reduce latency
        eventPublisher.publishEvent(new EntityBatchSaveEvent(this));
        Stopwatch stopwatch = Stopwatch.createStarted();
        // insert only operations
        batchPersister.persist(assessedCustomFees);
        batchPersister.persist(contractLogs);
        batchPersister.persist(contractResults);
        batchPersister.persist(contractStateChanges);
        batchPersister.persist(cryptoTransfers);
        batchPersister.persist(customFees);
        batchPersister.persist(ethereumTransactions);
        batchPersister.persist(fileData);
        batchPersister.persist(liveHashes);
        batchPersister.persist(nodeStakes);
        batchPersister.persist(topicMessages);
        batchPersister.persist(transactions);
        batchPersister.persist(transactionSignatures);
        // insert operations with conflict management
        batchPersister.persist(contracts);
        batchPersister.persist(cryptoAllowances);
        batchPersister.persist(entities);
        batchPersister.persist(nftAllowances);
        batchPersister.persist(tokens.values());
        // ingest tokenAccounts after tokens since some fields of token accounts depends on the associated token
        batchPersister.persist(tokenAccounts.values());
        batchPersister.persist(tokenAllowances);
        // persist nft after token entity
        batchPersister.persist(nfts.values());
        batchPersister.persist(schedules.values());
        // transfers operations should be last to ensure insert logic completeness, entities should already exist
        batchPersister.persist(nonFeeTransfers);
        batchPersister.persist(nftTransferState.values());
        batchPersister.persist(stakingRewardTransfers);
        batchPersister.persist(tokenTransfers);
        // handle the transfers from token dissociate transactions after nft is processed
        tokenDissociateTransferBatchPersister.persist(tokenDissociateTransfers);
        log.info("Completed batch inserts in {}", stopwatch);
    } catch (ParserException e) {
        // already the domain exception type; rethrow unchanged
        throw e;
    } catch (Exception e) {
        // wrap to the parser's exception type, preserving the cause
        throw new ParserException(e);
    } finally {
        // always reset accumulated per-batch state, even when persistence fails
        cleanup();
    }
}
Aggregations