Use of duckutil.MetricLog in the snowblossom project by snowblossomcoin.
From the class MemPool, method getTransactionsForBlock:
public synchronized List<Transaction> getTransactionsForBlock(ChainHash last_utxo, int max_size)
{
  try (MetricLog mlog = new MetricLog())
  {
    mlog.setOperation("get_transactions_for_block");
    mlog.setModule("mem_pool");
    mlog.set("max_size", max_size);
    mlog.set("shard_id", chain_state_source.getShardId());

    List<Transaction> block_list = new ArrayList<Transaction>();
    Set<ChainHash> included_txs = new HashSet<>();

    // Rebuild the priority map if the chain tip has moved since it was last built.
    if (!last_utxo.equals(utxo_for_pri_map))
    {
      mlog.set("priority_map_rebuild", 1);
      try (MetricLog sub_log = new MetricLog(mlog, "rebuild"))
      {
        sub_log.setOperation("priority_map_rebuild");
        sub_log.setModule("mem_pool");
        rebuildPriorityMap(last_utxo);
      }
    }
    else
    {
      mlog.set("priority_map_rebuild", 0);
    }

    int size = 0;
    int low_fee_size = 0;

    // Work from a copy so entries can be popped without mutating the shared map.
    TreeMultimap<Double, TXCluster> priority_map_copy = TreeMultimap.<Double, TXCluster>create();
    priority_map_copy.putAll(priority_map);

    // Take clusters from highest fee ratio to lowest until the block is full.
    while (priority_map_copy.size() > 0)
    {
      Map.Entry<Double, Collection<TXCluster>> last_entry = priority_map_copy.asMap().pollLastEntry();
      double ratio = last_entry.getKey();
      boolean low_fee = (ratio < Globals.LOW_FEE);

      for (TXCluster cluster : last_entry.getValue())
      {
        if (size + cluster.total_size <= max_size)
        {
          // Low-fee clusters only get a limited share of the block.
          if ((!low_fee) || (low_fee_size < low_fee_max))
          {
            for (Transaction tx : cluster.tx_list)
            {
              ChainHash tx_hash = new ChainHash(tx.getTxHash());
              // Clusters may overlap, so skip transactions already included.
              if (!included_txs.contains(tx_hash))
              {
                block_list.add(tx);
                included_txs.add(tx_hash);
                int sz = tx.toByteString().size();
                size += sz;
                if (low_fee)
                {
                  low_fee_size += sz;
                }
              }
            }
          }
        }
      }
    }
    mlog.set("size", size);
    mlog.set("tx_count", block_list.size());
    return block_list;
  }
}
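Both examples rely on MetricLog being used in try-with-resources: the record is written when the log closes at the end of the block, so every field set during the method lands on a single entry, and a child MetricLog(parent, label) ties a sub-operation to its parent. A minimal sketch of that pattern, using only the calls seen above (the class, method, and field names here are hypothetical):

import duckutil.MetricLog;

public class MetricLogSketch
{
  public void doLoggedOperation() throws Exception
  {
    // The record is emitted when close() runs at the end of this block.
    try (MetricLog mlog = new MetricLog())
    {
      mlog.setOperation("do_logged_operation"); // hypothetical operation name
      mlog.setModule("sketch");                 // hypothetical module name
      mlog.set("item_count", 42);               // fields set before close() are recorded

      // A sub-operation logged under the parent, as the priority-map rebuild above does.
      try (MetricLog sub_log = new MetricLog(mlog, "sub_step"))
      {
        sub_log.setOperation("sub_step");
        sub_log.setModule("sketch");
      }
    }
  }
}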
Use of duckutil.MetricLog in the snowblossom project by snowblossomcoin.
From the class MemPool, method addTransaction:
/**
* @return true iff this seems to be a new and valid tx
*/
public boolean addTransaction(Transaction tx, boolean p2p_source) throws ValidationException
{
  try (MetricLog mlog = new MetricLog())
  {
    long t1 = System.nanoTime();
    Validation.checkTransactionBasics(tx, false);
    mlog.set("basic_validation", 1);
    TimeRecord.record(t1, "mempool:tx_validation");

    long t_lock = System.nanoTime();
    synchronized (this)
    {
      TimeRecord.record(t_lock, "mempool:have_lock");
      mlog.setOperation("add_transaction");
      mlog.setModule("mem_pool");
      mlog.set("added", 0);

      // Reject transactions from the p2p network if this node does not accept them.
      if ((p2p_source) && (!accepts_p2p_tx))
      {
        mlog.set("reject_p2p", 1);
        return false;
      }

      ChainHash tx_hash = new ChainHash(tx.getTxHash());
      mlog.set("tx_id", tx_hash.toString());

      if (known_transactions.containsKey(tx_hash))
      {
        mlog.set("already_known", 1);
        return false;
      }
      if (known_transactions.size() >= MEM_POOL_MAX)
      {
        throw new ValidationException("mempool is full");
      }

      TransactionMempoolInfo info = new TransactionMempoolInfo(tx);
      TransactionInner inner = info.inner;

      double tx_ratio = (double) inner.getFee() / (double) tx.toByteString().size();
      mlog.set("fee", inner.getFee());
      mlog.set("fee_ratio", tx_ratio);

      // Low-fee transactions are held to a tighter pool-size limit.
      if (tx_ratio < Globals.LOW_FEE)
      {
        mlog.set("low_fee", 1);
        if (known_transactions.size() >= MEM_POOL_MAX_LOW)
        {
          throw new ValidationException("mempool is too full for low fee transactions");
        }
      }

      TreeSet<String> used_outputs = new TreeSet<>();
      TimeRecord.record(t1, "mempool:p1");

      long t3 = System.nanoTime();
      mlog.set("input_count", inner.getInputsCount());
      mlog.set("output_count", inner.getOutputsCount());

      // Check each input against outputs already claimed by other mempool transactions.
      for (TransactionInput in : inner.getInputsList())
      {
        String key = HexUtil.getHexString(in.getSrcTxId()) + ":" + in.getSrcTxOutIdx();
        used_outputs.add(key);
        if (claimed_outputs.containsKey(key))
        {
          if (!claimed_outputs.get(key).equals(tx_hash))
          {
            throw new ValidationException("Discarding as double-spend");
          }
        }
      }
      TimeRecord.record(t3, "mempool:input_proc");

      long output_total = inner.getFee();
      for (TransactionOutput out : inner.getOutputsList())
      {
        output_total += out.getValue();
      }
      mlog.set("total_output", output_total);

      if (utxo_for_pri_map != null)
      {
        long t2 = System.nanoTime();
        TXCluster cluster = buildTXCluster(tx);
        TimeRecord.record(t2, "mempool:build_cluster");
        // The null check must come before the cluster is dereferenced.
        if (cluster == null)
        {
          throw new ValidationException("Unable to find a tx cluster that makes this work");
        }
        mlog.set("cluster_tx_count", cluster.tx_list.size());
        mlog.set("cluster_tx_size", cluster.total_size);

        double ratio = (double) cluster.total_fee / (double) cluster.total_size;
        long t4 = System.nanoTime();
        priority_map.put(ratio, cluster);
        TimeRecord.record(t4, "mempool:primapput");
      }
      TimeRecord.record(t1, "mempool:p2");

      known_transactions.put(tx_hash, info);
      for (AddressSpecHash spec_hash : info.involved_addresses)
      {
        address_tx_map.put(spec_hash, tx_hash);
      }

      // Claim outputs used by inputs
      for (String key : used_outputs)
      {
        claimed_outputs.put(key, tx_hash);
      }
      TimeRecord.record(t1, "mempool:tx_add");
      TimeRecord.record(t1, "mempool:p3");

      // Notify listeners that a new transaction entered the pool.
      for (MemPoolTickleInterface listener : mempool_listener)
      {
        listener.tickleMemPool(tx, info.involved_addresses);
      }

      mlog.set("added", 1);
      return true;
    }
  }
}
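The double-spend guard in addTransaction keys claimed outputs by a flat string, the hex source transaction id joined to the output index, and maps it to the hash of the transaction claiming it. A self-contained sketch of that scheme under the same key format (the class and method names here are illustrative, not the project's):

import java.util.Map;
import java.util.TreeMap;

public class ClaimedOutputsSketch
{
  // Maps "hex_src_tx_id:out_idx" to the hex id of the mempool tx claiming that output.
  private final Map<String, String> claimed_outputs = new TreeMap<>();

  /** Returns false if a different transaction already claims this output. */
  public boolean tryClaim(String src_tx_id_hex, int out_idx, String claiming_tx_hex)
  {
    String key = src_tx_id_hex + ":" + out_idx;
    String existing = claimed_outputs.get(key);
    if ((existing != null) && (!existing.equals(claiming_tx_hex)))
    {
      return false; // double-spend: output already claimed by another tx
    }
    claimed_outputs.put(key, claiming_tx_hex);
    return true;
  }
}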