Use of com.jd.blockchain.crypto.HashDigest in project jdchain-core by blockchain-jd-com.
The class MerkleHashDataSetTest, method testDataReload.
@Test
public void testDataReload() {
    String keyPrefix = "";
    Random rand = new Random();

    CryptoProvider[] supportedProviders = new CryptoProvider[SUPPORTED_PROVIDERS.length];
    for (int i = 0; i < SUPPORTED_PROVIDERS.length; i++) {
        supportedProviders[i] = Crypto.getProvider(SUPPORTED_PROVIDERS[i]);
    }

    CryptoConfig cryptoConfig = new CryptoConfig();
    cryptoConfig.setSupportedProviders(supportedProviders);
    cryptoConfig.setHashAlgorithm(ClassicAlgorithm.SHA256);
    cryptoConfig.setAutoVerifyHash(true);

    MemoryKVStorage storage = new MemoryKVStorage();
    MerkleHashDataset mds = new MerkleHashDataset(cryptoConfig, keyPrefix, storage, storage);
    Dataset<String, byte[]> ds = DatasetHelper.map(mds);

    // Initially the dataset holds no data, so lookups always return null;
    DataEntry verKVEntry = ds.getDataEntry("NULL_KEY");
    byte[] vbytes = ds.getValue("NULL_KEY");
    assertNull(verKVEntry);
    assertNull(vbytes);

    Map<String, Long> dataVersions = new HashMap<>();
    Map<String, byte[]> dataValues = new HashMap<>();
    Map<HashDigest, Map<String, KeySnapshot>> history = new LinkedHashMap<>();
    HashDigest rootHash;

    // generate base data sample;
    // + rand.nextInt(1024);
    int count = 1024;
    String key;
    byte[] data = new byte[64];
    long v;
    MerkleProof proof;
    for (int i = 0; i < count; i++) {
        key = "data" + i;
        rand.nextBytes(data);
        v = ds.setValue(key, data, -1);
        dataVersions.put(key, v);
        dataValues.put(key + "_" + v, data);
        assertEquals(0, v);
    }
    mds.commit();

    // Take snapshot;
    {
        rootHash = mds.getRootHash();
        Map<String, KeySnapshot> snapshot = new HashMap<>();
        for (int i = 0; i < count; i++) {
            key = "data" + i;
            // TODO: Merkle-proof assertions are temporarily commented out;
            // proof = mds.getProof(key);
            // assertNotNull(proof);
            // assertEquals(rootHash, proof.getRootHash());
            KeySnapshot ks = new KeySnapshot();
            // ks.proof = proof;
            ks.rootHash = mds.getRootHash();
            ks.maxVersion = ds.getVersion(key);
            snapshot.put(key, ks);
        }
        history.put(rootHash, snapshot);
    }
    // verify;
    {
        // TODO: Merkle-proof assertions are temporarily commented out;
        // MerkleHashDataset mdsReload = new MerkleHashDataset(rootHash, cryptoConfig, keyPrefix, storage, storage, true);
        // // verify every key;
        // Map<String, KeySnapshot> snapshots = history.get(rootHash);
        // MerkleProof expProof;
        // for (int i = 0; i < count; i++) {
        //     key = "data" + i;
        //     proof = mdsReload.getProof(key);
        //     assertNotNull(proof);
        //     assertEquals(rootHash, proof.getRootHash());
        //     expProof = snapshots.get(key).rootHash;
        //     assertEquals(expProof.toString(), proof.toString());
        // }
    }

    // generate multi-version data sample;
    long expVer;
    long maxVer = 0;
    long minIdx = count;
    long maxIdx = 0;
    for (int t = 0; t < 100; t++) {
        int bound = rand.nextInt(500) + 1;
        for (int i = rand.nextInt(count); i < count; i = i + rand.nextInt(bound) + 1) {
            key = "data" + i;
            rand.nextBytes(data);
            expVer = dataVersions.get(key);
            v = ds.setValue(key, data, expVer);
            assertEquals(expVer + 1, v);
            dataVersions.put(key, v);
            dataValues.put(key + "_" + v, data);
            maxVer = v > maxVer ? v : maxVer;
            minIdx = i < minIdx ? i : minIdx;
            maxIdx = i > maxIdx ? i : maxIdx;
        }
        mds.commit();
        assertNotEquals(rootHash, mds.getRootHash());

        // Take snapshot;
        {
            // TODO: Merkle-proof assertions are temporarily commented out;
            rootHash = mds.getRootHash();
            Map<String, KeySnapshot> snapshot = new HashMap<>();
            for (int i = 0; i < count; i++) {
                key = "data" + i;
                // TODO: Merkle-proof assertions are temporarily commented out;
                // proof = mds.getProof(key);
                // assertNotNull(proof);
                // assertEquals(rootHash, proof.getRootHash());
                KeySnapshot ks = new KeySnapshot();
                ks.rootHash = rootHash;
                ks.maxVersion = ds.getVersion(key);
                snapshot.put(key, ks);
            }
            history.put(rootHash, snapshot);
        }
    }

    System.out.println(String.format("total count=%s; from %s to %s, max version=%s;", count, minIdx, maxIdx, maxVer));

    {
        for (HashDigest hisRootHash : history.keySet()) {
            Map<String, KeySnapshot> snapshot = history.get(hisRootHash);
            MerkleHashDataset mdsReload = new MerkleHashDataset(hisRootHash, cryptoConfig, keyPrefix, storage, storage, true);
            Dataset<String, byte[]> dsReload = DatasetHelper.map(mdsReload);
            assertEquals(hisRootHash, mdsReload.getRootHash());
            // verify every key;
            for (int i = 0; i < count; i++) {
                key = "data" + i;
                // The latest versions must match;
                long expLatestVersion = snapshot.get(key).maxVersion;
                long actualLatestVersion = dsReload.getVersion(key);
                assertEquals(expLatestVersion, actualLatestVersion);
                // TODO: Merkle-proof assertions are temporarily commented out;
                // The data proofs must match;
                // proof = mdsReload.getProof(key);
                // assertNotNull(proof);
                //
                // MerkleProof expProof = snapshot.get(key).rootHash;
                // assertEquals(expProof, proof);
                maxVer = dataVersions.get(key);
                assertTrue(actualLatestVersion > -1);
                assertTrue(actualLatestVersion <= maxVer);
                for (long j = 0; j < actualLatestVersion; j++) {
                    String keyver = key + "_" + j;
                    byte[] expValue = dataValues.get(keyver);
                    byte[] actualValue = dsReload.getValue(key, j);
                    assertTrue(BytesUtils.equals(expValue, actualValue));
                }
            }
        }
    }
}
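The KeySnapshot holder the test fills in is not shown on this page. A minimal sketch of what it plausibly looks like, inferred purely from the fields the test assigns above (rootHash, maxVersion, and the commented-out proof), not copied from the project source:

// Hypothetical reconstruction of the KeySnapshot helper used by the test;
// field types are inferred from usage in testDataReload() above.
private static class KeySnapshot {
    // root hash of the Merkle dataset at the moment the snapshot was taken;
    public HashDigest rootHash;
    // latest version of the key at the moment the snapshot was taken;
    public long maxVersion;
    // public MerkleProof proof; // proof capture is commented out in the test;
}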
Use of com.jd.blockchain.crypto.HashDigest in project jdchain-core by blockchain-jd-com.
The class AccountSetTest, method test.
@Test
public void test() {
    OpeningAccessPolicy accessPolicy = new OpeningAccessPolicy();
    MemoryKVStorage storage = new MemoryKVStorage();

    CryptoProvider[] supportedProviders = new CryptoProvider[SUPPORTED_PROVIDERS.length];
    for (int i = 0; i < SUPPORTED_PROVIDERS.length; i++) {
        supportedProviders[i] = Crypto.getProvider(SUPPORTED_PROVIDERS[i]);
    }

    CryptoConfig cryptoConf = new CryptoConfig();
    cryptoConf.setSupportedProviders(supportedProviders);
    cryptoConf.setAutoVerifyHash(true);
    cryptoConf.setHashAlgorithm(ClassicAlgorithm.SHA256);

    String keyPrefix = "";
    MerkleAccountSetEditor accset = new MerkleAccountSetEditor(cryptoConf, Bytes.fromString(keyPrefix), storage, storage, accessPolicy);

    BlockchainKeypair userKey = BlockchainKeyGenerator.getInstance().generate();
    accset.register(userKey.getAddress(), userKey.getPubKey());

    // Before the commit, the account can already be looked up, but its version is still -1;
    CompositeAccount userAcc = accset.getAccount(userKey.getAddress());
    assertNotNull(userAcc);
    assertTrue(accset.contains(userKey.getAddress()));

    accset.commit();
    HashDigest rootHash = accset.getRootHash();
    assertNotNull(rootHash);

    MerkleAccountSetEditor reloadAccSet = new MerkleAccountSetEditor(rootHash, cryptoConf, Bytes.fromString(keyPrefix), storage, storage, true, accessPolicy);
    CompositeAccount reloadUserAcc = reloadAccSet.getAccount(userKey.getAddress());
    assertNotNull(reloadUserAcc);
    assertTrue(reloadAccSet.contains(userKey.getAddress()));
    assertEquals(userAcc.getID().getAddress(), reloadUserAcc.getID().getAddress());
    assertEquals(userAcc.getID().getPubKey(), reloadUserAcc.getID().getPubKey());
}
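The test only obtains HashDigest values indirectly, via getRootHash(). For reference, a digest can also be produced directly from a hash function, the same way PathNode.update() does further down this page. A minimal sketch; Crypto.getHashFunction and HashFunction.verify are assumed from the jdchain crypto API, not taken from this page:

// Hedged sketch: producing and re-checking a HashDigest directly.
// Assumption: Crypto.getHashFunction(ClassicAlgorithm.SHA256) exists in the crypto API.
HashFunction hashFunc = Crypto.getHashFunction(ClassicAlgorithm.SHA256);
byte[] accountBytes = "some-account-state".getBytes();
HashDigest digest = hashFunc.hash(accountBytes);
// a digest can later be verified against the original bytes
// (verify(...) is assumed from the HashFunction interface);
assertTrue(hashFunc.verify(digest, accountBytes));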
Use of com.jd.blockchain.crypto.HashDigest in project jdchain-core by blockchain-jd-com.
The class BlockFullRollBackTest, method initLedger.
private HashDigest initLedger(MemoryKVStorage storage, BlockchainKeypair... partiKeys) {
    // Create the initialization settings;
    LedgerInitSetting initSetting = LedgerTestUtils.createLedgerInitSetting(partiKeys);
    // Create the ledger;
    LedgerEditor ldgEdt = LedgerTransactionalEditor.createEditor(initSetting, LEDGER_KEY_PREFIX, storage, storage, LedgerDataStructure.MERKLE_TREE);

    TransactionRequest genesisTxReq = LedgerTestUtils.createLedgerInitTxRequest_SHA256(partiKeys);
    LedgerTransactionContext genisisTxCtx = ldgEdt.newTransaction(genesisTxReq);
    LedgerDataSetEditor ldgDS = (LedgerDataSetEditor) genisisTxCtx.getDataset();

    for (int i = 0; i < partiKeys.length; i++) {
        UserAccount userAccount = ldgDS.getUserAccountSet().register(partiKeys[i].getAddress(), partiKeys[i].getPubKey());
        userAccount.setProperty("Name", "参与方-" + i, -1); // "参与方" = "Participant"
        userAccount.setProperty("Share", "" + (10 + i), -1);
    }

    TransactionResult tx = genisisTxCtx.commit(TransactionState.SUCCESS);
    assertEquals(genesisTxReq.getTransactionHash(), tx.getTransactionHash());
    assertEquals(0, tx.getBlockHeight());

    LedgerBlock block = ldgEdt.prepare();
    assertEquals(0, block.getHeight());
    assertNotNull(block.getHash());
    assertNull(block.getPreviousHash());
    // The ledger hash of the genesis block is null;
    assertNull(block.getLedgerHash());
    assertNotNull(block.getHash());

    // Commit the data and write it to storage;
    ldgEdt.commit();

    HashDigest ledgerHash = block.getHash();
    return ledgerHash;
}
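A hedged usage sketch of initLedger in a test body; the two-participant setup and the parti0/parti1 names are illustrative, not from the original test:

// Illustrative call site for initLedger(...); participant keypairs are generated
// the same way AccountSetTest generates its user keypair above.
MemoryKVStorage storage = new MemoryKVStorage();
BlockchainKeypair parti0 = BlockchainKeyGenerator.getInstance().generate();
BlockchainKeypair parti1 = BlockchainKeyGenerator.getInstance().generate();
HashDigest ledgerHash = initLedger(storage, parti0, parti1);
// the genesis block hash doubles as the ledger hash for later blocks;
assertNotNull(ledgerHash);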
Use of com.jd.blockchain.crypto.HashDigest in project jdchain-core by blockchain-jd-com.
The class PathNode, method update.
@Override
protected void update(HashFunction hashFunc, NodeUpdatedListener updatedListener) {
    if (!isModified()) {
        return;
    }
    if (childNodes != null) {
        // update modified child nodes first, propagating their hashes and counters upward;
        for (int i = 0; i < childNodes.length; i++) {
            if (childNodes[i] != null && childNodes[i].isModified()) {
                childNodes[i].update(hashFunc, updatedListener);
                childHashs[i] = childNodes[i].nodeHash;
                childKeys[i] = childNodes[i].getTotalKeys();
                childRecords[i] = childNodes[i].getTotalRecords();
            }
        }
    }
    // serialize this node over the updated child state and recompute its own hash;
    byte[] nodeBytes = BinaryProtocol.encode(this, MerklePath.class);
    HashDigest nodeHash = hashFunc.hash(nodeBytes);
    this.nodeHash = nodeHash;
    updatedListener.onUpdated(nodeHash, this, nodeBytes);
    clearModified();
}
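update() is a standard bottom-up Merkle refresh: dirty children are recomputed first, their digests are copied into the parent's slots, and only then is the parent re-encoded and hashed. A self-contained toy version of the same pattern in plain JDK code (deliberately not the project's API):

import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

// Toy bottom-up Merkle refresh mirroring PathNode.update() above.
class ToyNode {
    byte[] hash;
    boolean modified = true;
    ToyNode[] children = new ToyNode[0];
    byte[] payload = new byte[0];

    void update() throws NoSuchAlgorithmException {
        if (!modified) {
            return; // unchanged subtree: keep the cached hash;
        }
        // recompute dirty subtrees first;
        for (ToyNode child : children) {
            if (child != null && child.modified) {
                child.update();
            }
        }
        // the parent hash covers all child hashes plus its own payload;
        MessageDigest md = MessageDigest.getInstance("SHA-256");
        for (ToyNode child : children) {
            if (child != null) {
                md.update(child.hash);
            }
        }
        md.update(payload);
        hash = md.digest();
        modified = false; // clear the dirty flag, like clearModified();
    }
}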
Use of com.jd.blockchain.crypto.HashDigest in project jdchain-core by blockchain-jd-com.
The class MerkleSequenceTree, method rehash.
/**
 * Recomputes the hashes of all child nodes and of this node itself, and returns the
 * number of newly added data nodes;
 *
 * @param pathNode     the path node whose hash needs to be recomputed;
 * @param updatedNodes a list that records the nodes updated during the pass;
 * @return always 0; the new-data counting logic is currently commented out;
 */
@SuppressWarnings("unused")
private int rehash(PathNode pathNode, List<AbstractMerkleNode> updatedNodes) {
    // int newDataCount = 0;
    boolean updated = false;
    // First check and update the hashes of the child nodes;
    AbstractMerkleNode[] children = pathNode.children;
    HashDigest[] childrenHashes = pathNode.childrenHashes;
    if (pathNode.level == 1) {
        // Note: a data node's own hash was already computed when it was added,
        // so it does not need to be recomputed here;
        for (int i = 0; i < children.length; i++) {
            if (children[i] == null) {
                continue;
            }
            HashDigest origChildHash = childrenHashes[i];
            HashDigest newChildHash = children[i].getNodeHash();
            if (origChildHash == null) {
                // a null cached hash marks a newly added data node;
                // newDataCount++;
                pathNode.increaseDataCount(1);
                childrenHashes[i] = newChildHash;
                updated = true;
                updatedNodes.add(children[i]);
            } else if (!origChildHash.equals(newChildHash)) {
                childrenHashes[i] = newChildHash;
                updated = true;
                updatedNodes.add(children[i]);
            }
        }
    } else {
        for (int i = 0; i < children.length; i++) {
            if (children[i] == null) {
                continue;
            }
            // Recursively recompute the hash of the child path and
            // update the data node count;
            // newDataCount += rehash((PathNode) children[i], updatedNodes);
            rehash((PathNode) children[i], updatedNodes);
            HashDigest origChildHash = childrenHashes[i];
            HashDigest newChildHash = children[i].getNodeHash();
            if (origChildHash == null || !origChildHash.equals(newChildHash)) {
                childrenHashes[i] = newChildHash;
                updated = true;
            }
        }
    }
    // Note: whenever new data nodes have been added (i.e. newDataCount > 0),
    // updated is necessarily true;
    if (updated) {
        // Update the data node counter;
        // pathNode.dataCount += newDataCount;
        // Compute the node hash:
        pathNode.rehash();
        updatedNodes.add(pathNode);
    }
    // return newDataCount;
    return 0;
}
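rehash() detects changed subtrees purely by digest comparison: a null cached hash marks a newly added child, and an unequal hash marks a modified one. A short illustration of that equality check; Crypto.getHashFunction is assumed from the jdchain crypto API, as in the sketch after AccountSetTest:

// HashDigest values are immutable, so digest equality is a safe proxy
// for "subtree unchanged".
HashFunction sha256 = Crypto.getHashFunction(ClassicAlgorithm.SHA256);
HashDigest h1 = sha256.hash("node-a".getBytes());
HashDigest h2 = sha256.hash("node-a".getBytes());
HashDigest h3 = sha256.hash("node-b".getBytes());
// identical content yields an equal digest; any change flips the comparison,
// which is exactly the origChildHash.equals(newChildHash) test above;
assertTrue(h1.equals(h2));
assertFalse(h1.equals(h3));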