Usage of snowblossom.proto.BlockHeader in the snowblossom project by snowblossomcoin.
From class BlockIngestorTest, method testFirstBlockSummary:
@Test
public void testFirstBlockSummary() {
  NetworkParams params = new NetworkParamsTestnet();
  // Minimal first-block header: easiest possible (max) target, current time.
  BlockHeader header = BlockHeader.newBuilder()
    .setTarget(BlockchainUtil.targetBigIntegerToBytes(params.getMaxTarget()))
    .setTimestamp(System.currentTimeMillis())
    .build();
  // An empty previous summary simulates summarizing the very first block.
  BlockSummary prev_summary = BlockSummary.newBuilder().build();
  // Diamond operator instead of the raw LinkedList; no imported blocks.
  BlockSummary s = BlockchainUtil.getNewSummary(header, prev_summary, params, 1L, 600L, new LinkedList<>());
  Assert.assertNotNull(s.getHeader());
  // Work for one block at max target is defined as 1024.
  Assert.assertEquals("1024", s.getWorkSum());
  // With no history, the averages start at the network defaults.
  Assert.assertEquals(params.getMaxTarget().toString(), s.getTargetAverage());
  Assert.assertEquals(params.getBlockTimeTarget(), s.getBlocktimeAverageMs());
}
Usage of snowblossom.proto.BlockHeader in the snowblossom project by snowblossomcoin.
From class GraphOutput, method getGraph:
/**
 * Builds a JSON graph of the given block headers: one node per block,
 * plus "parent" links to previous blocks and "import" links for shard
 * imports. Links are only emitted when both endpoints are in 'blocks'.
 */
public static JSONObject getGraph(Collection<BlockHeader> blocks) {
  JSONArray node_array = new JSONArray();
  JSONArray link_array = new JSONArray();
  // Membership map of hashes present in 'blocks' (value is an arbitrary
  // sequence id; only containsKey is used for link filtering).
  HashMap<ChainHash, Integer> id_map = new HashMap<>();
  int id = 1;
  // Emit one node object per block.
  for (BlockHeader bh : blocks) {
    ChainHash hash = new ChainHash(bh.getSnowHash());
    id_map.put(hash, id);
    JSONObject node = new JSONObject();
    node.put("hash", hash.toString());
    node.put("name", getName(bh));
    node.put("shard", bh.getShardId());
    node.put("timestamp", bh.getTimestamp());
    node.put("height", bh.getBlockHeight());
    node_array.add(node);
    id++;
  }
  // Emit links; skip any edge whose far endpoint is not in the input set.
  for (BlockHeader bh : blocks) {
    ChainHash hash = new ChainHash(bh.getSnowHash());
    ChainHash prev = new ChainHash(bh.getPrevBlockHash());
    if (id_map.containsKey(prev)) {
      JSONObject link = new JSONObject();
      link.put("source", hash.toString());
      link.put("target", prev.toString());
      link.put("parent", 1);
      link_array.add(link);
    }
    // Shard import edges: each imported block hash referenced by height.
    for (BlockImportList bil : bh.getShardImportMap().values()) {
      for (ByteString bs : bil.getHeightMap().values()) {
        ChainHash imp_hash = new ChainHash(bs);
        if (id_map.containsKey(imp_hash)) {
          JSONObject link = new JSONObject();
          link.put("source", hash.toString());
          link.put("target", imp_hash.toString());
          link.put("import", 1);
          link_array.add(link);
        }
      }
    }
  }
  JSONObject graph = new JSONObject();
  graph.put("nodes", node_array);
  graph.put("links", link_array);
  return graph;
}
Usage of snowblossom.proto.BlockHeader in the snowblossom project by snowblossomcoin.
From class DBMaintThread, method runPass:
@Override
public void runPass() throws Exception {
  // Height at which db maintenance last ran; 0 if never recorded.
  int maint_height = 0;
  ByteString db_maint_data = node.getDB().getSpecialMap().get("db_maint_height");
  if (db_maint_data != null) {
    // Stored as a decimal string; decode with an explicit charset
    // (toStringUtf8) rather than the platform default.
    maint_height = Integer.parseInt(db_maint_data.toStringUtf8());
  }
  int curr_height = 0;
  BlockHeader high = node.getPeerage().getHighestSeenHeader();
  if (high != null) {
    curr_height = high.getBlockHeight();
  }
  logger.fine(String.format("last maint: %d curr: %d", maint_height, curr_height));
  // Only run maintenance once the chain has advanced at least maint_gap
  // blocks past the last recorded maintenance height.
  if (curr_height >= maint_height + maint_gap) {
    logger.info(String.format("Running db maint - last maint: %d curr: %d", maint_height, curr_height));
    node.getDB().interactiveMaint();
    // Record the height we ran at, again as a UTF-8 decimal string.
    String curr_str = "" + curr_height;
    node.getDB().getSpecialMap().put("db_maint_height", ByteString.copyFromUtf8(curr_str));
  }
}
Usage of snowblossom.proto.BlockHeader in the snowblossom project by snowblossomcoin.
From class SnowBlossomNode, method calcCurrentBuildingShards:
/**
 * Determines which of the locally active shards this node should still be
 * building blocks for. A shard is excluded once both of its child shards
 * are considered active (i.e. the split has fully taken effect).
 */
private Set<Integer> calcCurrentBuildingShards() {
  logger.fine("Recalculating current building shards");
  TreeSet<Integer> res = new TreeSet<>();
  for (int s : getActiveShards()) {
    if (getBlockIngestor(s).getHead() != null) {
      BlockHeader s_head = getBlockIngestor(s).getHead().getHeader();
      int active_children = 0;
      for (int c : ShardUtil.getShardChildIds(s)) {
        // A child counts as active if the network reports it active...
        boolean active = getForgeInfo().getNetworkActiveShards().containsKey(c);
        // Hoisted: look up the child head once instead of twice.
        BlockHeader c_head = getForgeInfo().getShardHead(c);
        if (c_head != null) {
          active = true;
          // ...unless its head is not past the parent's head, meaning the
          // split into child shards got reorged out.
          if (c_head.getBlockHeight() <= s_head.getBlockHeight()) {
            active = false;
          }
        }
        if (active) {
          active_children++;
        }
      }
      // Keep building the parent shard until both children are active.
      if (active_children < 2) {
        res.add(s);
      }
    }
  }
  return res;
}
Usage of snowblossom.proto.BlockHeader in the snowblossom project by snowblossomcoin.
From class MemPool, method buildTXCluster:
/**
 * Attempts to build an ordered list of transactions
 * that can confirm. In the simple case, it is just
 * a single transaction that has all outputs already in utxo.
 * In the more complex case, a chain of transactions needs to go
 * in for the transaction in question to be confirmed.
 *
 * Works in three phases: (1) walk the inputs transitively, pulling
 * unconfirmed parent transactions from the mempool until every input is
 * satisfied by either the UTXO trie or a transaction in the cluster;
 * (2) topologically order the collected transactions; (3) simulate
 * applying them in order against a scratch UTXO buffer to validate.
 *
 * @throws ValidationException if a source transaction cannot be found,
 *         a dependency lives in another shard, or the simulated apply fails
 * TODO - make faster, this thing sucks out loud.
 * Probably need to actually build the graph and do graph
 * theory things.
 */
private TXCluster buildTXCluster(Transaction target_tx) throws ValidationException {
  // All transactions that will form the cluster, keyed by tx hash.
  HashMap<ChainHash, Transaction> working_map = new HashMap<>();
  // tx -> set of txs it depends on; used later for topological ordering.
  HashMultimap<ChainHash, ChainHash> depends_on_map = HashMultimap.<ChainHash, ChainHash>create();
  // Work queue of inputs not yet known to be satisfiable.
  LinkedList<TransactionInput> needed_inputs = new LinkedList<>();
  addInputRequirements(target_tx, depends_on_map, needed_inputs);
  working_map.put(new ChainHash(target_tx.getTxHash()), target_tx);
  long t1;
  // Phase 1: resolve every needed input, pulling in mempool parents as we go.
  while (needed_inputs.size() > 0) {
    TransactionInput in = needed_inputs.pop();
    ChainHash needed_tx = new ChainHash(in.getSrcTxId());
    // Skip inputs whose source tx is already part of the cluster.
    if (!working_map.containsKey(needed_tx)) {
      ByteString key = UtxoUpdateBuffer.getKey(in);
      t1 = System.nanoTime();
      // Check whether the output already exists in the confirmed UTXO set.
      ByteString matching_output = utxo_hashed_trie.getLeafData(utxo_for_pri_map.getBytes(), key);
      TimeRecord.record(t1, "utxo_lookup");
      if (matching_output == null) {
        // Not in UTXO - the source tx must be an unconfirmed mempool tx.
        if (known_transactions.containsKey(needed_tx)) {
          t1 = System.nanoTime();
          // TODO Check shard IDs
          Transaction found_tx = known_transactions.get(needed_tx).tx;
          TransactionInner found_tx_inner = TransactionUtil.getInner(found_tx);
          TransactionOutput tx_out = found_tx_inner.getOutputs(in.getSrcTxOutIdx());
          // Reject dependencies whose output lands outside the shards we cover.
          if (!shard_cover_set.contains(tx_out.getTargetShard())) {
            throw new ValidationException(String.format("Transaction %s depends on %s which seems to be in other shard", new ChainHash(target_tx.getTxHash()), in.toString()));
          }
          // Add the parent to the cluster and queue its own inputs.
          working_map.put(needed_tx, found_tx);
          addInputRequirements(found_tx, depends_on_map, needed_inputs);
          TimeRecord.record(t1, "input_add");
        } else {
          throw new ValidationException(String.format("Unable to find source tx %s", needed_tx.toString()));
        }
      }
    }
  }
  // At this point we have all the inputs satisfied. Now to figure out ordering.
  // Phase 2: topological sort so parents always precede spenders.
  t1 = System.nanoTime();
  LinkedList<Transaction> ordered_list = getOrderdTxList(working_map, depends_on_map, new ChainHash(target_tx.getTxHash()));
  TimeRecord.record(t1, "get_order");
  // Phase 3: dry-run the ordered cluster against a scratch UTXO buffer.
  t1 = System.nanoTime();
  UtxoUpdateBuffer test_buffer = new UtxoUpdateBuffer(utxo_hashed_trie, utxo_for_pri_map);
  // Header version 2 applies once the shard activation height is reached
  // for the next block.
  int header_version = 1;
  if (chain_state_source.getParams().getActivationHeightShards() <= chain_state_source.getHeight() + 1) {
    header_version = 2;
  }
  // Dummy header standing in for the (not yet mined) block these txs would
  // confirm in; deepTransactionCheck needs height/time/version context.
  BlockHeader dummy_header = BlockHeader.newBuilder().setBlockHeight(chain_state_source.getHeight() + 1).setTimestamp(System.currentTimeMillis()).setVersion(header_version).build();
  // TODO - assign shard correctly
  Map<Integer, UtxoUpdateBuffer> export_utxo_buffer = new TreeMap<>();
  for (Transaction t : ordered_list) {
    Validation.deepTransactionCheck(t, test_buffer, dummy_header, chain_state_source.getParams(), shard_cover_set, export_utxo_buffer);
  }
  TimeRecord.record(t1, "utxo_sim");
  return new TXCluster(ordered_list);
}
Aggregations