Use of snowblossom.proto.Transaction in project snowblossom by snowblossomcoin.
The class MonitorTool, method sendNotices.
private void sendNotices(AddressSpecHash hash, ByteString tx_hash) {
  ByteString key = hash.getBytes().concat(tx_hash);
  synchronized (processed_tx) {
    if (processed_tx.contains(key))
      return;
  }
  Transaction tx = stub_holder.getBlockingStub().getTransaction(RequestTransaction.newBuilder().setTxHash(tx_hash).build());
  TransactionInner inner = TransactionUtil.getInner(tx);
  int idx = 0;
  for (TransactionInput in : inner.getInputsList()) {
    if (hash.getBytes().equals(in.getSpecHash())) {
      monitor_interface.onOutbound(tx, idx);
    }
    idx++;
  }
  idx = 0;
  for (TransactionOutput out : inner.getOutputsList()) {
    if (hash.getBytes().equals(out.getRecipientSpecHash())) {
      monitor_interface.onInbound(tx, idx);
    }
    idx++;
  }
  synchronized (processed_tx) {
    processed_tx.add(key);
  }
}
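For reference, sendNotices feeds a callback interface; a minimal consumer might look like the sketch below. The class name and the exact MonitorInterface method signatures are assumptions inferred from the onInbound(tx, idx) and onOutbound(tx, idx) calls above, and the import paths are assumed rather than taken from the project.

// Hypothetical consumer sketch; the method shapes are inferred from the calls
// in sendNotices above and may not match the project's MonitorInterface.
import snowblossom.proto.Transaction;
import snowblossom.lib.ChainHash; // package path assumed

public class LoggingMonitor {
  // Called when an output of 'tx' at index 'idx' pays the watched address
  public void onInbound(Transaction tx, int idx) {
    System.out.println("Inbound: " + new ChainHash(tx.getTxHash()) + " output " + idx);
  }

  // Called when an input of 'tx' at index 'idx' spends from the watched address
  public void onOutbound(Transaction tx, int idx) {
    System.out.println("Outbound: " + new ChainHash(tx.getTxHash()) + " input " + idx);
  }
}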
Use of snowblossom.proto.Transaction in project snowblossom by snowblossomcoin.
The class BlockIngestor, method checkResummary.
private void checkResummary() {
  BlockSummary curr = chainhead;
  LinkedList<ChainHash> recalc_list = new LinkedList<>();
  while (curr.getSummaryVersion() < SUMMARY_VERSION) {
    recalc_list.addFirst(new ChainHash(curr.getHeader().getSnowHash()));
    ChainHash prevblock = new ChainHash(curr.getHeader().getPrevBlockHash());
    if (prevblock.equals(ChainHash.ZERO_HASH)) {
      curr = getStartSummary();
    } else {
      curr = db.getBlockSummaryMap().get(prevblock.getBytes());
    }
  }
  if (recalc_list.size() > 0) {
    node.setStatus(String.format("Need to recalcuate chain index of %d blocks now", recalc_list.size()));
    for (ChainHash hash : recalc_list) {
      BlockSummary summary = db.getBlockSummaryMap().get(hash.getBytes());
      Block blk = db.getBlockMap().get(hash.getBytes());
      node.setStatus("Reindexing: " + summary.getHeader().getBlockHeight() + " - " + hash + " - " + blk.getTransactionsCount());
      ChainHash prevblock = new ChainHash(summary.getHeader().getPrevBlockHash());
      BlockSummary prevsummary = null;
      if (prevblock.equals(ChainHash.ZERO_HASH)) {
        prevsummary = getStartSummary();
      } else {
        prevsummary = db.getBlockSummaryMap().get(prevblock.getBytes());
      }
      long tx_body_size = 0;
      for (Transaction tx : blk.getTransactionsList()) {
        tx_body_size += tx.getInnerData().size();
        tx_body_size += tx.getTxHash().size();
      }
      summary = BlockchainUtil.getNewSummary(blk.getHeader(), prevsummary, node.getParams(), blk.getTransactionsCount(), tx_body_size, blk.getImportedBlocksList());
      summary = saveOtherChainIndexBits(summary, prevsummary, blk);
      db.getBlockSummaryMap().put(hash.getBytes(), summary);
    }
    // Resave head
    chainhead = db.getBlockSummaryMap().get(chainhead.getHeader().getSnowHash());
    db.getBlockSummaryMap().put(HEAD, chainhead);
  }
}
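The per-block tx_body_size sum above (inner data bytes plus tx hash bytes for each transaction) also appears in ingestBlock below. As a reading aid, here is a minimal sketch of that sum factored into a standalone helper; the class and method names are hypothetical.

// Hypothetical helper mirroring the tx_body_size loop: the "body" size of a block
// is the serialized inner data plus the tx hash of every transaction it contains.
import snowblossom.proto.Block;
import snowblossom.proto.Transaction;

public class BlockSizeUtil {
  public static long getTxBodySize(Block blk) {
    long tx_body_size = 0;
    for (Transaction tx : blk.getTransactionsList()) {
      tx_body_size += tx.getInnerData().size();
      tx_body_size += tx.getTxHash().size();
    }
    return tx_body_size;
  }
}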
Use of snowblossom.proto.Transaction in project snowblossom by snowblossomcoin.
The class BlockIngestor, method ingestBlock.
public boolean ingestBlock(Block blk) throws ValidationException {
  ChainHash blockhash;
  try (TimeRecordAuto tra_blk = TimeRecord.openAuto("BlockIngestor.ingestBlock");
       MetricLog mlog = new MetricLog()) {
    mlog.setOperation("ingest_block");
    mlog.setModule("block_ingestor");
    Validation.checkBlockBasics(node.getParams(), blk, true, false);
    if (blk.getHeader().getShardId() != shard_id) {
      throw new ValidationException("Block for incorrect shard");
    }
    blockhash = new ChainHash(blk.getHeader().getSnowHash());
    mlog.set("hash", blockhash.toString());
    mlog.set("height", blk.getHeader().getBlockHeight());
    mlog.set("shard", blk.getHeader().getShardId());
    mlog.set("size", blk.toByteString().size());
    mlog.set("tx_count", blk.getTransactionsCount());
    if (db.getBlockSummaryMap().containsKey(blockhash.getBytes())) {
      return false;
    }
    ChainHash prevblock = new ChainHash(blk.getHeader().getPrevBlockHash());
    BlockSummary prev_summary;
    if (prevblock.equals(ChainHash.ZERO_HASH)) {
      prev_summary = getStartSummary();
    } else {
      try (TimeRecordAuto tra_prv = TimeRecord.openAuto("BlockIngestor.getPrevSummary")) {
        prev_summary = db.getBlockSummaryMap().get(prevblock.getBytes());
      }
    }
    if (prev_summary == null) {
      return false;
    }
    long tx_body_size = 0;
    for (Transaction tx : blk.getTransactionsList()) {
      tx_body_size += tx.getInnerData().size();
      tx_body_size += tx.getTxHash().size();
    }
    BlockSummary summary = BlockchainUtil.getNewSummary(blk.getHeader(), prev_summary, node.getParams(), blk.getTransactionsCount(), tx_body_size, blk.getImportedBlocksList());
    Validation.deepBlockValidation(node.getParams(), node.getUtxoHashedTrie(), blk, prev_summary);
    summary = saveOtherChainIndexBits(summary, prev_summary, blk);
    if (tx_index) {
      try (TimeRecordAuto tra_tx = TimeRecord.openAuto("BlockIngestor.saveTx")) {
        ByteString block_hash_str = blockhash.getBytes();
        HashMap<ByteString, Transaction> tx_map = new HashMap<>();
        for (Transaction tx : blk.getTransactionsList()) {
          tx_map.put(tx.getTxHash(), tx);
        }
        db.getTransactionMap().putAll(tx_map);
      }
    }
    try (TimeRecordAuto tra_tx = TimeRecord.openAuto("BlockIngestor.blockSave")) {
      db.getBlockMap().put(blockhash.getBytes(), blk);
      saveBlockChildMapping(blk.getHeader().getPrevBlockHash(), blockhash.getBytes());
      for (ImportedBlock ib : blk.getImportedBlocksList()) {
        // not positive we actually need this, but what the hell
        saveBlockChildMapping(ib.getHeader().getPrevBlockHash(), ib.getHeader().getSnowHash());
        node.getDB().getBlockHeaderMap().put(ib.getHeader().getSnowHash(), ib.getHeader());
      }
      db.setBestBlockAt(blk.getHeader().getShardId(), blk.getHeader().getBlockHeight(), BlockchainUtil.readInteger(summary.getWorkSum()));
      // THIS IS SUPER IMPORTANT!!!!
      // the summary being saved in the summary map acts as a signal that
      // - this block is fully stored
      // - we have the utxo saved
      // - we have the block itself saved
      // - if we are using tx_index, we have the transactions saved
      // - the previous block summary is also saved, which by induction means
      //   that we have every block from this one all the way back to block 0
      // In short, after the summary is written, things can depend on this being
      // a valid and correct block that goes all the way back to block 0.
      // It might not be in the main chain, but it can be counted on to be valid chain
      db.getBlockSummaryMap().put(blockhash.getBytes(), summary);
      mlog.set("saved", 1);
    }
    if (ShardUtil.shardSplit(summary, params)) {
      for (int child : ShardUtil.getShardChildIds(summary.getHeader().getShardId())) {
        mlog.set("shard_split", 1);
        try {
          node.openShard(child);
        } catch (Exception e) {
          logger.warning(" Unable to open shard: " + e);
        }
      }
    }
    ChainHash prev_hash = new ChainHash(blk.getHeader().getPrevBlockHash());
    logger.info(String.format("New block: Shard %d Height %d %s (tx:%d sz:%d) - from %s", shard_id, blk.getHeader().getBlockHeight(), blockhash, blk.getTransactionsCount(), blk.toByteString().size(), prev_hash));
    node.getBlockForge().tickle(summary);
    SnowUserService u = node.getUserService();
    if (u != null) {
      u.tickleBlocks();
    }
    if (BlockchainUtil.isBetter(chainhead, summary)) {
      mlog.set("head_update", 1);
      chainhead = summary;
      db.getBlockSummaryMap().put(HEAD, summary);
      // System.out.println("UTXO at new root: " + HexUtil.getHexString(summary.getHeader().getUtxoRootHash()));
      // node.getUtxoHashedTrie().printTree(summary.getHeader().getUtxoRootHash());
      updateHeights(summary);
      logger.info(String.format("New chain tip: Shard %d Height %d %s (tx:%d sz:%d)", shard_id, blk.getHeader().getBlockHeight(), blockhash, blk.getTransactionsCount(), blk.toByteString().size()));
      String age = MiscUtils.getAgeSummary(System.currentTimeMillis() - blk.getHeader().getTimestamp());
      logger.info(String.format(" The activated field is %d (%s). This block was %s ago.", chainhead.getActivatedField(), params.getSnowFieldInfo(chainhead.getActivatedField()).getName(), age));
      if (u != null) {
        u.tickleBlocks();
      }
      node.getMemPool(shard_id).tickleBlocks(new ChainHash(summary.getHeader().getUtxoRootHash()));
      node.getPeerage().sendAllTips(summary.getHeader().getShardId());
    }
  }
  return true;
}
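A hypothetical caller-side sketch follows, showing how the boolean result of ingestBlock might be handled: false covers both a duplicate block and a missing previous summary, while an invalid block surfaces as a ValidationException. The surrounding class, method name, and the import package paths are assumptions, not taken from the project.

// Hypothetical caller sketch for ingestBlock; only the ingestBlock signature and
// its return/throw behavior come from the code above.
import snowblossom.proto.Block;
import snowblossom.lib.ValidationException; // package path assumed
import snowblossom.node.BlockIngestor;      // package path assumed

public class IngestExample {
  public static void tryIngest(BlockIngestor ingestor, Block blk) {
    try {
      boolean accepted = ingestor.ingestBlock(blk);
      if (!accepted) {
        // Either the block is already known or its previous summary is not stored yet
        System.out.println("Block not ingested (duplicate or missing parent)");
      }
    } catch (ValidationException e) {
      System.out.println("Invalid block: " + e.getMessage());
    }
  }
}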
Use of snowblossom.proto.Transaction in project snowblossom by snowblossomcoin.
The class MemPool, method buildTXCluster.
/**
 * Attempts to build an ordered list of transactions
 * that can confirm. In the simple case, it is just
 * a single transaction that has all outputs already in utxo.
 * In the more complex case, a chain of transactions needs to go
 * in for the transaction in question to be confirmed.
 * TODO - make faster, this thing sucks out loud.
 * Probably need to actually build the graph and do graph
 * theory things.
 */
private TXCluster buildTXCluster(Transaction target_tx) throws ValidationException {
  HashMap<ChainHash, Transaction> working_map = new HashMap<>();
  HashMultimap<ChainHash, ChainHash> depends_on_map = HashMultimap.<ChainHash, ChainHash>create();
  LinkedList<TransactionInput> needed_inputs = new LinkedList<>();
  addInputRequirements(target_tx, depends_on_map, needed_inputs);
  working_map.put(new ChainHash(target_tx.getTxHash()), target_tx);
  long t1;
  while (needed_inputs.size() > 0) {
    TransactionInput in = needed_inputs.pop();
    ChainHash needed_tx = new ChainHash(in.getSrcTxId());
    if (!working_map.containsKey(needed_tx)) {
      ByteString key = UtxoUpdateBuffer.getKey(in);
      t1 = System.nanoTime();
      ByteString matching_output = utxo_hashed_trie.getLeafData(utxo_for_pri_map.getBytes(), key);
      TimeRecord.record(t1, "utxo_lookup");
      if (matching_output == null) {
        if (known_transactions.containsKey(needed_tx)) {
          t1 = System.nanoTime();
          // TODO Check shard IDs
          Transaction found_tx = known_transactions.get(needed_tx).tx;
          TransactionInner found_tx_inner = TransactionUtil.getInner(found_tx);
          TransactionOutput tx_out = found_tx_inner.getOutputs(in.getSrcTxOutIdx());
          if (!shard_cover_set.contains(tx_out.getTargetShard())) {
            throw new ValidationException(String.format("Transaction %s depends on %s which seems to be in other shard", new ChainHash(target_tx.getTxHash()), in.toString()));
          }
          working_map.put(needed_tx, found_tx);
          addInputRequirements(found_tx, depends_on_map, needed_inputs);
          TimeRecord.record(t1, "input_add");
        } else {
          throw new ValidationException(String.format("Unable to find source tx %s", needed_tx.toString()));
        }
      }
    }
  }
  // At this point we have all the inputs satisfied. Now to figure out ordering.
  t1 = System.nanoTime();
  LinkedList<Transaction> ordered_list = getOrderdTxList(working_map, depends_on_map, new ChainHash(target_tx.getTxHash()));
  TimeRecord.record(t1, "get_order");
  t1 = System.nanoTime();
  UtxoUpdateBuffer test_buffer = new UtxoUpdateBuffer(utxo_hashed_trie, utxo_for_pri_map);
  int header_version = 1;
  if (chain_state_source.getParams().getActivationHeightShards() <= chain_state_source.getHeight() + 1) {
    header_version = 2;
  }
  BlockHeader dummy_header = BlockHeader.newBuilder().setBlockHeight(chain_state_source.getHeight() + 1).setTimestamp(System.currentTimeMillis()).setVersion(header_version).build();
  // TODO - assign shard correctly
  Map<Integer, UtxoUpdateBuffer> export_utxo_buffer = new TreeMap<>();
  for (Transaction t : ordered_list) {
    Validation.deepTransactionCheck(t, test_buffer, dummy_header, chain_state_source.getParams(), shard_cover_set, export_utxo_buffer);
  }
  TimeRecord.record(t1, "utxo_sim");
  return new TXCluster(ordered_list);
}
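The project's getOrderdTxList implementation is not shown here. As an illustration only, a dependency-respecting ordering over working_map and depends_on_map could be produced with a depth-first walk such as the sketch below, assuming the dependency graph is acyclic. The class and method names are hypothetical and the ChainHash import path is assumed.

// Generic ordering sketch (not the project's getOrderdTxList): every transaction
// is emitted after the transactions it depends on, via a depth-first traversal.
import java.util.*;
import com.google.common.collect.HashMultimap;
import snowblossom.proto.Transaction;
import snowblossom.lib.ChainHash; // package path assumed

public class OrderSketch {
  public static LinkedList<Transaction> orderByDependency(
      Map<ChainHash, Transaction> working_map,
      HashMultimap<ChainHash, ChainHash> depends_on_map,
      ChainHash target) {
    LinkedList<Transaction> ordered = new LinkedList<>();
    HashSet<ChainHash> visited = new HashSet<>();
    visit(target, working_map, depends_on_map, visited, ordered);
    return ordered;
  }

  private static void visit(ChainHash tx_id,
      Map<ChainHash, Transaction> working_map,
      HashMultimap<ChainHash, ChainHash> depends_on_map,
      HashSet<ChainHash> visited,
      LinkedList<Transaction> ordered) {
    if (visited.contains(tx_id)) return;
    visited.add(tx_id);
    // Place dependencies in the list before the transaction that spends them
    for (ChainHash dep : depends_on_map.get(tx_id)) {
      visit(dep, working_map, depends_on_map, visited, ordered);
    }
    // Dependencies already settled in the utxo have no entry in working_map
    Transaction tx = working_map.get(tx_id);
    if (tx != null) ordered.add(tx);
  }
}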
Use of snowblossom.proto.Transaction in project snowblossom by snowblossomcoin.
The class MemPool, method getTransactionsForBlock.
public synchronized List<Transaction> getTransactionsForBlock(ChainHash last_utxo, int max_size) {
  try (MetricLog mlog = new MetricLog()) {
    mlog.setOperation("get_transactions_for_block");
    mlog.setModule("mem_pool");
    mlog.set("max_size", max_size);
    mlog.set("shard_id", chain_state_source.getShardId());
    List<Transaction> block_list = new ArrayList<Transaction>();
    Set<ChainHash> included_txs = new HashSet<>();
    if (!last_utxo.equals(utxo_for_pri_map)) {
      mlog.set("priority_map_rebuild", 1);
      try (MetricLog sub_log = new MetricLog(mlog, "rebuild")) {
        sub_log.setOperation("priority_map_rebuild");
        sub_log.setModule("mem_pool");
        rebuildPriorityMap(last_utxo);
      }
    } else {
      mlog.set("priority_map_rebuild", 0);
    }
    int size = 0;
    int low_fee_size = 0;
    TreeMultimap<Double, TXCluster> priority_map_copy = TreeMultimap.<Double, TXCluster>create();
    priority_map_copy.putAll(priority_map);
    while (priority_map_copy.size() > 0) {
      Map.Entry<Double, Collection<TXCluster>> last_entry = priority_map_copy.asMap().pollLastEntry();
      double ratio = last_entry.getKey();
      boolean low_fee = false;
      if (ratio < Globals.LOW_FEE)
        low_fee = true;
      Collection<TXCluster> list = last_entry.getValue();
      for (TXCluster cluster : list) {
        if (size + cluster.total_size <= max_size) {
          if ((!low_fee) || (low_fee_size < low_fee_max)) {
            for (Transaction tx : cluster.tx_list) {
              ChainHash tx_hash = new ChainHash(tx.getTxHash());
              if (!included_txs.contains(tx_hash)) {
                block_list.add(tx);
                included_txs.add(tx_hash);
                int sz = tx.toByteString().size();
                size += sz;
                if (low_fee) {
                  low_fee_size += sz;
                }
              }
            }
          }
        }
      }
    }
    mlog.set("size", size);
    mlog.set("tx_count", block_list.size());
    return block_list;
  }
}
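A hypothetical sketch of the caller side: a block template asks the mem pool for transactions under a byte budget, keyed off the UTXO root of the current head so the pool can rebuild its priority map when the root has moved. Only getTransactionsForBlock and the getUtxoRootHash accessor come from the code above; the other names and the import package paths are assumptions.

// Hypothetical block-template sketch built around getTransactionsForBlock.
import java.util.List;
import snowblossom.proto.Transaction;
import snowblossom.proto.BlockSummary;
import snowblossom.lib.ChainHash;  // package path assumed
import snowblossom.node.MemPool;   // package path assumed

public class TemplateSketch {
  public static List<Transaction> selectForTemplate(MemPool mem_pool, BlockSummary head, int max_block_size) {
    // The pool rebuilds its priority map if the UTXO root differs from the last call
    ChainHash utxo_root = new ChainHash(head.getHeader().getUtxoRootHash());
    return mem_pool.getTransactionsForBlock(utxo_root, max_block_size);
  }
}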