Example use of duckutil.MetricLog in the snowblossom project (snowblossomcoin): class PeerLink, method onNext.
/**
 * This peer syncing is ever so much fun. The basic contract is that each side sends its
 * PeerChainTip on connect, on each new block, and every 10 seconds. Then the other side has to
 * ask if it is interested in anything.
 *
 * So when a side receives a tip, it decides if it wants what the peer is selling.
 *
 * Dispatches on the message's oneof case: tx, tip, block/header requests and replies,
 * tx cluster requests, import blocks, and preview chains. Each branch records its type
 * and key identifiers into a MetricLog entry that is closed in the finally block.
 */
@Override
public void onNext(PeerMessage msg) {
  last_received_message_time = System.currentTimeMillis();
  // One metric entry per inbound message; always closed in finally.
  MetricLog mlog = new MetricLog();
  try {
    mlog.setOperation("peer_message");
    mlog.setModule("peer_link");
    mlog.set("peer", getLinkId());
    mlog.set("size", msg.toByteString().size());
    if (msg.hasTx()) {
      Transaction tx = msg.getTx();
      mlog.set("type", "tx");
      // logger.info("TX: " + new ChainHash(tx.getTxHash()));
      try {
        // addTransaction returns true if the tx was new and accepted; only then rebroadcast.
        if (node.getMemPool().addTransaction(tx, true)) {
          node.getTxBroadcaster().send(tx);
        } else {
          // logger.info("Chill false");
        }
      } catch (ValidationException e) {
        // Missing source tx: if we believe we are synced, the peer may have a cluster
        // of dependent transactions we lack - request it (at most once per tx id,
        // enforced by reserveTxCluster).
        if (e.toString().contains("Unable to find source tx")) {
          if (node.areWeSynced()) {
            ChainHash tx_id = new ChainHash(tx.getTxHash());
            if (node.getBlockIngestor().reserveTxCluster(tx_id)) {
              logger.fine("Requesting cluster for tx: " + tx_id);
              writeMessage(PeerMessage.newBuilder().setReqCluster(RequestTransaction.newBuilder().setTxHash(tx.getTxHash()).build()).build());
            }
          }
        }
      }
      // do not care about tx validation errors from peers
    } else if (msg.hasTip()) {
      mlog.set("type", "tip");
      PeerChainTip tip = msg.getTip();
      // Reject peers on a different network (e.g. testnet vs mainnet) immediately.
      if (!node.getParams().getNetworkName().equals(tip.getNetworkName())) {
        logger.log(Level.FINE, String.format("Peer has wrong name: %s", tip.getNetworkName()));
        close();
        return;
      }
      node.getPeerage().reportTip();
      // Check the tip signature/trust and request any import blocks it advertises.
      try (MetricLog mlog_sub = new MetricLog(mlog, "tip_trust")) {
        List<ChainHash> req_import_blocks = node.getShardUtxoImport().checkTipTrust(mlog_sub, msg.getTip());
        if (req_import_blocks != null) {
          for (ChainHash h : req_import_blocks) {
            logger.log(Level.FINE, "Requesting Import Block: " + h);
            writeMessage(PeerMessage.newBuilder().setReqImportBlock(RequestImportedBlock.newBuilder().setBlockHash(h.getBytes()).build()).build());
          }
          mlog.set("req_imp_block_count", req_import_blocks.size());
        }
      }
      checkTipForInterestThing(msg.getTip());
      // update the peer info showing the success in getting a tip
      if ((!got_first_tip) && (peer_info != null)) {
        logger.log(Level.FINE, "Saving last passed");
        got_first_tip = true;
        PeerInfo pi = PeerInfo.newBuilder().mergeFrom(peer_info).setLastPassed(System.currentTimeMillis()).build();
        node.getPeerage().learnPeer(pi);
      }
      // A tip may carry the peer's head block header; validate basics before considering it.
      BlockHeader header = tip.getHeader();
      if (header.getSnowHash().size() > 0) {
        Validation.checkBlockHeaderBasics(node.getParams(), header, false);
        mlog.set("head_hash", new ChainHash(header.getSnowHash()).toString());
        mlog.set("head_shard", header.getShardId());
        mlog.set("head_height", header.getBlockHeight());
        considerBlockHeader(header, header.getShardId());
        node.getPeerage().setHighestHeader(header);
      }
      // save first peer info as opposite side
      if (tip.getPeersCount() > 0) {
        // first entry is host we are talking to
        peer_info = tip.getPeers(0);
      }
      // Learn any sane gossiped peers from the tip.
      for (PeerInfo pi : tip.getPeersList()) {
        if (PeerUtil.isSane(pi, node.getParams())) {
          node.getPeerage().learnPeer(pi);
        }
      }
    } else if (msg.hasReqBlock()) {
      mlog.set("type", "req_block");
      // Other side is asking for a block
      ChainHash hash = new ChainHash(msg.getReqBlock().getBlockHash());
      mlog.set("hash", hash.toString());
      logger.fine("Got block request: " + hash);
      Block blk = node.getDB().getBlockMap().get(hash.getBytes());
      // Silently ignore requests for blocks we don't have.
      if (blk != null) {
        writeMessage(PeerMessage.newBuilder().setBlock(blk).build());
      }
    } else if (msg.hasBlock()) {
      mlog.set("type", "block");
      // Getting a block, we probably asked for it. See if we can eat it.
      Block blk = msg.getBlock();
      // Clear this block from the want-list regardless of whether it validates.
      synchronized (desire_block_map) {
        desire_block_map.remove(new ChainHash(blk.getHeader().getPrevBlockHash()), new ChainHash(blk.getHeader().getSnowHash()));
      }
      mlog.set("hash", new ChainHash(blk.getHeader().getSnowHash()).toString());
      try {
        logger.fine(String.format("Got block shard:%d height:%d %s ", blk.getHeader().getShardId(), blk.getHeader().getBlockHeight(), new ChainHash(blk.getHeader().getSnowHash()).toString()));
        // will only open if we are actually interested in this shard
        node.openShard(blk.getHeader().getShardId());
        if (node.getBlockIngestor(blk.getHeader().getShardId()).ingestBlock(blk)) {
          // we could eat it, think about getting more blocks
          scanForBlocksToRequest(new ChainHash(blk.getHeader().getSnowHash()));
          // Think about getting more blocks from desire map
        }
      } catch (ValidationException ve) {
        // Fix: the original passed a "%s" format string straight to Logger.info,
        // which does not apply java.util.Formatter conversions, so the hash was
        // never included in the log line.
        logger.info(String.format("Got a block %s that didn't validate - closing link", new ChainHash(blk.getHeader().getSnowHash())));
        close();
        throw (ve);
      }
    } else if (msg.hasReqHeader()) {
      mlog.set("type", "req_header");
      ChainHash hash;
      int shard = msg.getReqHeader().getShardId();
      mlog.set("shard", shard);
      // Request may be by explicit hash, or by (shard, height) lookup.
      if (msg.getReqHeader().getBlockHash().size() > 0) {
        hash = new ChainHash(msg.getReqHeader().getBlockHash());
      } else {
        // Peer is asking for a block header
        int height = msg.getReqHeader().getBlockHeight();
        mlog.set("height", height);
        hash = node.getDB().getBlockHashAtHeight(shard, height);
      }
      // since we are recording them all in the height map
      if (hash != null) {
        mlog.set("hash", hash.toString());
        BlockSummary summary = node.getDB().getBlockSummaryMap().get(hash.getBytes());
        if (summary == null) {
          mlog.set("missing_summary", 1);
        } else {
          writeMessage(PeerMessage.newBuilder().setHeader(summary.getHeader()).setReqHeaderShardId(shard).build());
        }
      }
    } else if (msg.hasHeader()) {
      mlog.set("type", "header");
      // We got a header, probably one we asked for
      BlockHeader header = msg.getHeader();
      mlog.set("hash", new ChainHash(header.getSnowHash()).toString());
      Validation.checkBlockHeaderBasics(node.getParams(), header, false);
      mlog.set("head_hash", new ChainHash(header.getSnowHash()).toString());
      mlog.set("head_shard", header.getShardId());
      mlog.set("head_height", header.getBlockHeight());
      considerBlockHeader(header, msg.getReqHeaderShardId());
    } else if (msg.hasReqCluster()) {
      mlog.set("type", "req_cluster");
      // Peer wants the dependency cluster for a transaction it could not validate.
      ChainHash tx_id = new ChainHash(msg.getReqCluster().getTxHash());
      mlog.set("hash", tx_id.toString());
      sendCluster(tx_id);
    } else if (msg.hasReqImportBlock()) {
      mlog.set("type", "req_import_block");
      ChainHash hash = new ChainHash(msg.getReqImportBlock().getBlockHash());
      mlog.set("hash", hash.toString());
      ImportedBlock b = node.getShardUtxoImport().getImportBlock(hash);
      if (b != null) {
        writeMessage(PeerMessage.newBuilder().setImportBlock(b).build());
      }
    } else if (msg.hasImportBlock()) {
      mlog.set("type", "import_block");
      BlockHeader header = msg.getImportBlock().getHeader();
      mlog.set("hash", new ChainHash(header.getSnowHash()).toString());
      mlog.set("shard_id", header.getShardId());
      mlog.set("height", header.getBlockHeight());
      node.getShardUtxoImport().addImportedBlock(msg.getImportBlock());
    } else if (msg.hasReqPreviewChain()) {
      RequestPreviewChain req = msg.getReqPreviewChain();
      PreviewChain chain = node.getForgeInfo().getPreviewChain(new ChainHash(req.getStartBlockHash()), req.getRequestedBlocksBack());
      writeMessage(PeerMessage.newBuilder().setPreviewChain(chain).build());
    } else if (msg.hasPreviewChain()) {
      PreviewChain pre_chain = msg.getPreviewChain();
      investigatePreviews(pre_chain.getPreviewsList());
    }
  } catch (ValidationException e) {
    // Validation failures from peers are expected noise; log and keep the link open
    // (unless a branch above already closed it).
    mlog.set("exception", e.toString());
    logger.log(Level.INFO, "Some validation error from " + getLinkId(), e);
  } catch (Throwable e) {
    // Anything else is unexpected - drop the link.
    mlog.set("exception", e.toString());
    logger.log(Level.INFO, "Some bs from " + getLinkId(), e);
    close();
  } finally {
    mlog.close();
  }
}
Example use of duckutil.MetricLog in the snowblossom project (snowblossomcoin): class ShardBlockForge, method exploreFromCoordinatorHead.
/**
 * Builds candidate block concepts reachable from the heads of the given coordinator shard.
 *
 * For each coordinator head, gathers the shard heads imported by that head, climbs each
 * imported shard for recent blocks, and expands each candidate previous-block hash into
 * block concepts via expandPrev.
 *
 * @param coord_shard the coordinator shard whose heads seed the exploration
 * @param mlog_parent parent metric log; a child entry scoped to this call is attached to it
 * @return sorted set of candidate BlockConcepts (TreeSet ordering)
 * @throws ValidationException propagated from header/summary validation in callees
 */
private Set<BlockConcept> exploreFromCoordinatorHead(int coord_shard, MetricLog mlog_parent) throws ValidationException {
  try (MetricLog mlog = new MetricLog(mlog_parent, "exploreFromCoordinatorHead")) {
    mlog.set("coord_shard", coord_shard);
    logger.fine("exploreFromCoordinatorHead(" + coord_shard + ")");
    TreeSet<BlockConcept> concepts = new TreeSet<>();
    // TODO - switch to get blocks around
    for (BlockHeader coord_head : node.getForgeInfo().getShardHeads(coord_shard)) // for(ChainHash coord_hash : coord_heads)
    {
      // BlockHeader coord_head = node.getForgeInfo().getHeader(coord_hash);
      if (coord_head != null) {
        logger.fine(String.format("Exploring from coord head: %s s:%d h:%d", new ChainHash(coord_head.getSnowHash()).toString(), coord_head.getShardId(), coord_head.getBlockHeight()));
        // Starting from the more recent coordinator head
        // Find all the imported shard heads
        // Note: the 3x is there because we might be at height X and some other shard is at X-skew-2 or something
        // We can still build a block by bringing in more recent blocks on that shard to bring it to within skew
        Map<Integer, BlockHeader> import_heads = node.getForgeInfo().getImportedShardHeads(coord_head, node.getParams().getMaxShardSkewHeight() * 3);
        // System.out.println("Import heads:");
        // System.out.println(getSummaryString(import_heads));
        // possible_prevs_map keys are (shard head height + random fraction) - this gives an
        // iteration order roughly sorted by height with random tie-breaking within a height.
        TreeMap<Double, ChainHash> possible_prevs_map = new TreeMap<>();
        HashSet<ChainHash> possible_prevs = new HashSet<>();
        // In case we need to expand into new shard
        // NOTE(review): possible_prevs (including this coordinator head hash) is only used
        // for the size log/metric below; the expansion loop iterates possible_prevs_map,
        // which never receives the coord head - confirm this is intended.
        possible_prevs.add(new ChainHash(coord_head.getSnowHash()));
        Random rnd = new Random();
        // For each imported shard head, get all the new blocks under each
        for (int src_shard : import_heads.keySet()) {
          // Only climb shards we are interested in, and skip a shard whose both
          // children are already imported (the parent shard is superseded).
          if (node.getInterestShards().contains(src_shard))
            if (!ShardUtil.containsBothChildren(src_shard, import_heads.keySet())) {
              ChainHash h = new ChainHash(import_heads.get(src_shard).getSnowHash());
              // climb gathers recent blocks within 2x the max skew of this shard head
              Set<ChainHash> set_from_src_shard = node.getForgeInfo().climb(h, -1, node.getParams().getMaxShardSkewHeight() * 2);
              logger.fine(String.format("Possible prevs from shard %d - %d - %s", src_shard, set_from_src_shard.size(), set_from_src_shard));
              possible_prevs.addAll(set_from_src_shard);
              BlockHeader last_header = import_heads.get(src_shard);
              for (ChainHash ch : set_from_src_shard) {
                possible_prevs_map.put(rnd.nextDouble() + last_header.getBlockHeight(), ch);
              }
            }
        }
        logger.info("Possible_prevs: " + possible_prevs.size());
        mlog.set("possible_prevs", possible_prevs.size());
        // Expand each candidate prev hash into zero or more block concepts (added to 'concepts').
        for (ChainHash prev_hash : possible_prevs_map.values()) {
          expandPrev(import_heads, prev_hash, coord_head, concepts);
          // if (concepts.size() > 20) break;
        }
      }
    }
    mlog.set("count", concepts.size());
    return concepts;
  }
}
Example use of duckutil.MetricLog in the snowblossom project (snowblossomcoin): class SnowUserService, method getUTXONode.
/**
 * Resolves the UTXO trie root implied by the request's oneof case and replies with the
 * node details at that root.
 *
 * Cases: explicit root hash; a shard id (that shard's head root, or EMPTY if no head);
 * ALL_SHARDS (unsupported here - caller gets an error); otherwise shard 0's head root.
 */
@Override
public void getUTXONode(GetUTXONodeRequest request, StreamObserver<GetUTXONodeReply> responseObserver) {
  try (MetricLog mlog = new MetricLog()) {
    mlog.setModule("SnowUserService");
    mlog.setOperation("GetUTXONode");
    ChainHash utxo_root;
    switch (request.getUtxoTypeCase()) {
      case UTXO_ROOT_HASH:
        // Caller named a root directly.
        utxo_root = new ChainHash(request.getUtxoRootHash());
        break;
      case SHARD_ID:
        utxo_root = headUtxoRootOrEmpty(request.getShardId());
        break;
      case ALL_SHARDS:
        // Multi-shard answers come from getUTXONodeMulti instead.
        responseObserver.onError(new Exception("Unsupported all_shards request - use other method"));
        return;
      default:
        // Root of shard 0 case
        utxo_root = headUtxoRootOrEmpty(0);
        break;
    }
    responseObserver.onNext(getUtxoNodeDetails(utxo_root, request));
    responseObserver.onCompleted();
  }
}

/** Returns the UTXO root of the given shard's head, or UtxoUpdateBuffer.EMPTY if there is no head. */
private ChainHash headUtxoRootOrEmpty(int shard_id) {
  BlockSummary summary = node.getBlockIngestor(shard_id).getHead();
  if (summary == null) {
    return UtxoUpdateBuffer.EMPTY;
  }
  return new ChainHash(summary.getHeader().getUtxoRootHash());
}
Example use of duckutil.MetricLog in the snowblossom project (snowblossomcoin): class SnowUserService, method getUTXONodeMulti.
/**
 * Answers an ALL_SHARDS UTXO node request with one reply per currently-building shard.
 * Shards without a head block are simply omitted from the reply map. Any other request
 * type is rejected with an error pointing at the single-shard method.
 */
@Override
public void getUTXONodeMulti(GetUTXONodeRequest request, StreamObserver<GetUTXOReplyList> responseObserver) {
  try (MetricLog mlog = new MetricLog()) {
    mlog.setModule("SnowUserService");
    mlog.setOperation("GetUTXONodeMulti");
    // Guard: this endpoint only serves the ALL_SHARDS form.
    if (request.getUtxoTypeCase() != GetUTXONodeRequest.UtxoTypeCase.ALL_SHARDS) {
      responseObserver.onError(new Exception("unsupported type request - use other method"));
      return;
    }
    GetUTXOReplyList.Builder reply_list = GetUTXOReplyList.newBuilder();
    for (int shard_id : node.getCurrentBuildingShards()) {
      BlockSummary head = node.getBlockIngestor(shard_id).getHead();
      if (head == null) {
        continue; // no head yet for this shard - skip it
      }
      ChainHash root = new ChainHash(head.getHeader().getUtxoRootHash());
      reply_list.putReplyMap(shard_id, getUtxoNodeDetails(root, request));
    }
    responseObserver.onNext(reply_list.build());
    responseObserver.onCompleted();
  }
}
Example use of duckutil.MetricLog in the snowblossom project (snowblossomcoin): class SnowUserService, method getNodeStatus.
/**
 * Reports overall node status: mempool size, peer counts, version map, network name,
 * head summary (if any), per-shard head hashes for both network-active and
 * currently-building shards, and the node's interest shards.
 */
@Override
public void getNodeStatus(NullRequest null_request, StreamObserver<NodeStatus> responseObserver) {
  try (MetricLog mlog = new MetricLog()) {
    mlog.setModule("SnowUserService");
    mlog.setOperation("GetNodeStatus");
    NodeStatus.Builder ns = NodeStatus.newBuilder();
    ns.setMemPoolSize(node.getMemPool().getMemPoolSize())
      .setConnectedPeers(node.getPeerage().getConnectedPeerCount())
      .setEstimatedNodes(node.getPeerage().getEstimateUniqueNodes())
      .setNodeVersion(Globals.VERSION)
      .putAllVersionMap(node.getPeerage().getVersionMap());
    ns.setNetwork(node.getParams().getNetworkName());
    if (node.getBlockIngestor().getHead() != null) {
      ns.setHeadSummary(node.getBlockIngestor().getHead());
    }
    for (Map.Entry<Integer, BlockHeader> me : node.getForgeInfo().getNetworkActiveShards().entrySet()) {
      ns.putNetShardHeadMap(me.getKey(), me.getValue().getSnowHash());
    }
    for (int s : node.getCurrentBuildingShards()) {
      // Fix: guard against a shard that has no head yet - the original dereferenced
      // getHead() unconditionally here, while the sibling methods (and the head-summary
      // check above) treat a null head as a normal condition.
      BlockSummary head = node.getBlockIngestor(s).getHead();
      if (head != null) {
        ns.putShardHeadMap(s, head.getHeader().getSnowHash());
      }
    }
    ns.addAllNetworkActiveShards(node.getForgeInfo().getNetworkActiveShards().keySet());
    ns.addAllInterestShards(node.getInterestShards());
    responseObserver.onNext(ns.build());
    responseObserver.onCompleted();
  }
}
Aggregations