Use of com.jd.blockchain.ledger.MerkleProof in project jdchain-core by blockchain-jd-com.
The class MerkleHashDataSetTest, method testInsertSameData.
@Test
public void testInsertSameData() {
String keyPrefix = "";
Random rand = new Random();
CryptoSetting cryptoSetting = createCryptoSetting();
MemoryKVStorage storage = new MemoryKVStorage();
MerkleHashDataset mds = new MerkleHashDataset(cryptoSetting, keyPrefix, storage, storage);
Dataset<String, byte[]> ds = DatasetHelper.map(mds);
// Initially there is no data, so queries always return null;
DataEntry verKVEntry = ds.getDataEntry("NULL_KEY");
byte[] vbytes = ds.getValue("NULL_KEY");
assertNull(verKVEntry);
assertNull(vbytes);
Map<String, Long> dataVersions = new HashMap<>();
// Map<String, byte[]> dataValues = new HashMap<>();
Map<HashDigest, Map<String, KeySnapshot>> history = new LinkedHashMap<>();
HashDigest rootHash;
// generate base data sample;
// + rand.nextInt(1024);
int count = 1024;
String key;
byte[] data = new byte[64];
rand.nextBytes(data);
long v;
MerkleProof proof;
for (int i = 0; i < count; i++) {
key = "data" + i;
v = ds.setValue(key, data, -1);
dataVersions.put(key, v);
// dataValues.put(key + "_" + v, data);
assertEquals(v, 0);
}
mds.commit();
// Take snapshot;
{
rootHash = mds.getRootHash();
Map<String, KeySnapshot> snapshot = new HashMap<>();
for (int i = 0; i < count; i++) {
key = "data" + i;
// TODO: Merkle-proof related assertions are temporarily commented out;
// proof = mds.getProof(key);
// assertNotNull(proof);
// assertEquals(rootHash, proof.getRootHash());
KeySnapshot ks = new KeySnapshot();
ks.rootHash = mds.getRootHash();
ks.maxVersion = ds.getVersion(key);
snapshot.put(key, ks);
}
history.put(rootHash, snapshot);
}
// verify;
{
MerkleHashDataset mdsReload = new MerkleHashDataset(rootHash, cryptoSetting, keyPrefix, storage, storage, true);
Dataset<String, byte[]> dsReload = DatasetHelper.map(mdsReload);
// verify every key;
Map<String, KeySnapshot> snapshot = history.get(rootHash);
MerkleProof expProof;
for (int i = 0; i < count; i++) {
key = "data" + i;
// TODO: Merkle-proof related assertions are temporarily commented out;
// proof = mdsReload.getProof(key);
// assertNotNull(proof);
// assertEquals(rootHash, proof.getRootHash());
// expProof = snapshot.get(key).rootHash;
// assertEquals(expProof.toString(), proof.toString());
byte[] value = dsReload.getValue(key);
assertTrue(BytesUtils.equals(data, value));
}
}
}
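The test above exercises the basic write / commit / reload cycle of MerkleHashDataset. The fragment below is a minimal sketch of that cycle, using only the constructors and methods that appear in the test (createCryptoSetting() is the same test helper); it illustrates the pattern under the assumption that the signatures shown in the test are accurate, and is not an independently verified API reference.
// Minimal sketch: write a value, commit, then reload the dataset by its root hash.
CryptoSetting cryptoSetting = createCryptoSetting();
MemoryKVStorage storage = new MemoryKVStorage();
MerkleHashDataset mds = new MerkleHashDataset(cryptoSetting, "", storage, storage);
Dataset<String, byte[]> ds = DatasetHelper.map(mds);
// Passing -1 as the expected version denotes a first write; the test asserts it returns version 0.
long ver = ds.setValue("data0", BytesUtils.toBytes("value-0"), -1);
// Changes only become part of the committed Merkle state after commit();
mds.commit();
HashDigest rootHash = mds.getRootHash();
// Reload the committed state from the same storage by root hash (the trailing boolean mirrors the test's reload constructor).
MerkleHashDataset mdsReload = new MerkleHashDataset(rootHash, cryptoSetting, "", storage, storage, true);
byte[] value = DatasetHelper.map(mdsReload).getValue("data0");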
Use of com.jd.blockchain.ledger.MerkleProof in project jdchain-core by blockchain-jd-com.
The class MerkleHashDataSetTest, method testDataReload.
@Test
public void testDataReload() {
String keyPrefix = "";
Random rand = new Random();
CryptoProvider[] supportedProviders = new CryptoProvider[SUPPORTED_PROVIDERS.length];
for (int i = 0; i < SUPPORTED_PROVIDERS.length; i++) {
supportedProviders[i] = Crypto.getProvider(SUPPORTED_PROVIDERS[i]);
}
CryptoConfig cryptoConfig = new CryptoConfig();
cryptoConfig.setSupportedProviders(supportedProviders);
cryptoConfig.setHashAlgorithm(ClassicAlgorithm.SHA256);
cryptoConfig.setAutoVerifyHash(true);
MemoryKVStorage storage = new MemoryKVStorage();
MerkleHashDataset mds = new MerkleHashDataset(cryptoConfig, keyPrefix, storage, storage);
Dataset<String, byte[]> ds = DatasetHelper.map(mds);
// Initially there is no data, so queries always return null;
DataEntry verKVEntry = ds.getDataEntry("NULL_KEY");
byte[] vbytes = ds.getValue("NULL_KEY");
assertNull(verKVEntry);
assertNull(vbytes);
Map<String, Long> dataVersions = new HashMap<>();
Map<String, byte[]> dataValues = new HashMap<>();
Map<HashDigest, Map<String, KeySnapshot>> history = new LinkedHashMap<>();
HashDigest rootHash;
// generate base data sample;
// + rand.nextInt(1024);
int count = 1024;
String key;
byte[] data = new byte[64];
long v;
MerkleProof proof;
for (int i = 0; i < count; i++) {
key = "data" + i;
rand.nextBytes(data);
v = ds.setValue(key, data, -1);
dataVersions.put(key, v);
dataValues.put(key + "_" + v, data);
assertEquals(v, 0);
}
mds.commit();
// Take snapshot;
{
rootHash = mds.getRootHash();
Map<String, KeySnapshot> snapshot = new HashMap<>();
for (int i = 0; i < count; i++) {
key = "data" + i;
// TODO: Merkle-proof related assertions are temporarily commented out;
// proof = mds.getProof(key);
// assertNotNull(proof);
// assertEquals(rootHash, proof.getRootHash());
KeySnapshot ks = new KeySnapshot();
// ks.proof = proof;
ks.rootHash = mds.getRootHash();
ks.maxVersion = ds.getVersion(key);
snapshot.put(key, ks);
}
history.put(rootHash, snapshot);
}
// verify;
{
// TODO: Merkle-proof related assertions are temporarily commented out;
// MerkleHashDataset mdsReload = new MerkleHashDataset(rootHash, cryptoConfig, keyPrefix, storage, storage, true);
// // verify every key;
// Map<String, KeySnapshot> snapshots = history.get(rootHash);
// MerkleProof expProof;
// for (int i = 0; i < count; i++) {
// key = "data" + i;
// proof = mdsReload.getProof(key);
// assertNotNull(proof);
// assertEquals(rootHash, proof.getRootHash());
// expProof = snapshots.get(key).rootHash;
// assertEquals(expProof.toString(), proof.toString());
// }
}
// generate multi-version data sample;
long expVer;
long maxVer = 0;
long minIdx = count;
long maxIdx = 0;
for (int t = 0; t < 100; t++) {
int bound = rand.nextInt(500) + 1;
for (int i = rand.nextInt(count); i < count; i = i + rand.nextInt(bound) + 1) {
key = "data" + i;
rand.nextBytes(data);
expVer = dataVersions.get(key);
v = ds.setValue(key, data, expVer);
assertEquals(v, expVer + 1);
dataVersions.put(key, v);
dataValues.put(key + "_" + v, data);
maxVer = v > maxVer ? v : maxVer;
minIdx = i < minIdx ? i : minIdx;
maxIdx = i > maxIdx ? i : maxIdx;
}
mds.commit();
assertNotEquals(rootHash, mds.getRootHash());
// Take snapshot;
{
// TODO: Merkle-proof related assertions are temporarily commented out;
rootHash = mds.getRootHash();
Map<String, KeySnapshot> snapshot = new HashMap<>();
for (int i = 0; i < count; i++) {
key = "data" + i;
// TODO: Merkle-proof related assertions are temporarily commented out;
// proof = mds.getProof(key);
// assertNotNull(proof);
// assertEquals(rootHash, proof.getRootHash());
KeySnapshot ks = new KeySnapshot();
ks.rootHash = rootHash;
ks.maxVersion = ds.getVersion(key);
snapshot.put(key, ks);
}
history.put(rootHash, snapshot);
}
}
System.out.println(String.format("total count=%s; from %s to %s, max version=%s;", count, minIdx, maxIdx, maxVer));
{
for (HashDigest hisRootHash : history.keySet()) {
Map<String, KeySnapshot> snapshot = history.get(hisRootHash);
MerkleHashDataset mdsReload = new MerkleHashDataset(hisRootHash, cryptoConfig, keyPrefix, storage, storage, true);
Dataset<String, byte[]> dsReload = DatasetHelper.map(mdsReload);
assertEquals(hisRootHash, mdsReload.getRootHash());
// verify every key;
for (int i = 0; i < count; i++) {
key = "data" + i;
// the latest versions should match;
long expLatestVersion = snapshot.get(key).maxVersion;
long actualLatestVersion = dsReload.getVersion(key);
assertEquals(expLatestVersion, actualLatestVersion);
// TODO: Merkle-proof related assertions are temporarily commented out;
// the data proofs should match;
// proof = mdsReload.getProof(key);
// assertNotNull(proof);
//
// MerkleProof expProof = snapshot.get(key).rootHash;
// assertEquals(expProof, proof);
maxVer = dataVersions.get(key);
assertTrue(actualLatestVersion > -1);
assertTrue(actualLatestVersion <= maxVer);
for (long j = 0; j < actualLatestVersion; j++) {
String keyver = key + "_" + j;
byte[] expValue = dataValues.get(keyver);
byte[] actualValue = dsReload.getValue(key, j);
assertTrue(BytesUtils.equals(expValue, actualValue));
}
}
}
}
}
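testDataReload additionally covers multi-version writes: every setValue call passes the key's current version as the expected version and returns the new version, and each historical version stays readable after reloading an earlier root hash. Below is a condensed sketch of that versioning pattern, again limited to the signatures that appear in the test and reusing mds, ds, cryptoConfig and storage as created above; treat it as an illustration, not a verified reference.
// First write uses expected version -1 and returns 0; later writes must pass the key's current version.
long v0 = ds.setValue("data0", BytesUtils.toBytes("v0"), -1);
long v1 = ds.setValue("data0", BytesUtils.toBytes("v1"), v0); // returns v0 + 1 when the expected version matches
mds.commit();
// Reload by root hash; both the latest version and any explicit historical version are accessible.
MerkleHashDataset mdsReload = new MerkleHashDataset(mds.getRootHash(), cryptoConfig, "", storage, storage, true);
Dataset<String, byte[]> dsReload = DatasetHelper.map(mdsReload);
long latest = dsReload.getVersion("data0");      // expected: 1
byte[] oldValue = dsReload.getValue("data0", 0); // read version 0 explicitly
byte[] curValue = dsReload.getValue("data0");    // read the latest version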
Use of com.jd.blockchain.ledger.MerkleProof in project jdchain-core by blockchain-jd-com.
The class MerkleSequenceTreeTest, method testMerkleReload.
/**
 * Tests the correctness of reloading a Merkle tree from storage.
 */
// TODO: Merkle-proof related assertions are temporarily commented out;
// @Test
public void testMerkleReload() {
CryptoSetting setting = Mockito.mock(CryptoSetting.class);
when(setting.getHashAlgorithm()).thenReturn(ClassicAlgorithm.SHA256.code());
when(setting.getAutoVerifyHash()).thenReturn(true);
// Keep an SN-to-hash map of every written data node;
TreeMap<Long, HashDigest> expectedDataNodes = new TreeMap<>();
MerkleNode nd;
// Test starting from an empty tree and adding data nodes sequentially;
ExistancePolicyKVStorageMap storage = new ExistancePolicyKVStorageMap();
// Create an empty tree;
MerkleSequenceTree mkt = new MerkleSequenceTree(setting, keyPrefix, storage);
long sn = 0;
// Insert 4097 data records, expected to form a 4-level 16-way tree;
int count = 4097;
byte[] dataBuf = new byte[16];
Random rand = new Random();
for (int i = 0; i < count; i++) {
rand.nextBytes(dataBuf);
nd = mkt.setData(sn, "KEY-" + sn, 0, dataBuf);
expectedDataNodes.put(sn, nd.getNodeHash());
sn++;
}
mkt.commit();
// Record the root hash and some node info of this commit for later reload verification;
HashDigest r1_rootHash = mkt.getRootHash();
long r1_dataCount = mkt.getDataCount();
long r1_maxSN = mkt.getMaxSn();
long r1_sn1 = r1_maxSN;
String r1_proof1 = mkt.getProof(r1_sn1).toString();
long r1_sn2 = 1024;
String r1_proof2 = mkt.getProof(r1_sn2).toString();
{
// Check the node count;
assertEquals(count, mkt.getDataCount());
// Check that the max sequence number is correct;
long maxSN = mkt.getMaxSn();
// count-1;
long expectedMaxSN = 4096;
assertEquals(expectedMaxSN, maxSN);
// Expected to expand to 4 levels;
assertEquals(4, mkt.getLevel());
// path nodes + data nodes;
// Expected to expand into a 4-level 16-way tree, formed by extending a full 3-level 16-way tree with 1 new branch (4 path nodes);
long expectedNodes = getMaxPathNodeCount(3) + 4 + 4097;
assertEquals(expectedNodes, storage.getCount());
// Reload and check that the data is correct;
MerkleSequenceTree r1_mkt = new MerkleSequenceTree(r1_rootHash, setting, keyPrefix, storage, true);
{
// Verify that every data node has produced an existence proof;
MerkleProof proof = null;
HashDigest expectedNodeHash = null;
MerkleDataNode reallyDataNode = null;
for (long n = 0; n < maxSN; n++) {
expectedNodeHash = expectedDataNodes.get(n);
reallyDataNode = r1_mkt.getData(n);
assertEquals(expectedNodeHash, reallyDataNode.getNodeHash());
proof = r1_mkt.getProof(n);
assertNotNull(proof);
assertEquals(expectedNodeHash, proof.getDataHash());
}
}
}
// Modify data nodes so that every branch is covered;
int storageDataCountBefore = storage.getCount();
// maxSn = 4096;
long maxSN = mkt.getMaxSn();
int i;
for (i = 0; i <= maxSN; i += 16) {
rand.nextBytes(dataBuf);
sn = i;
nd = mkt.setData(sn, "KEY-" + sn, 0, dataBuf);
expectedDataNodes.put(sn, nd.getNodeHash());
}
mkt.commit();
// Record the root hash and some node info of this commit for later reload verification;
HashDigest r2_rootHash = mkt.getRootHash();
long r2_dataCount = mkt.getDataCount();
long r2_maxSN = mkt.getMaxSn();
long r2_sn1 = r1_sn1;
String r2_proof1 = mkt.getProof(r2_sn1).toString();
long r2_sn2 = r1_sn2;
String r2_proof2 = mkt.getProof(r2_sn2).toString();
{
// Check the node count;
assertEquals(count, mkt.getDataCount());
assertEquals(r1_dataCount, r2_dataCount);
// Check that the max sequence number is correct;
maxSN = mkt.getMaxSn();
// count-1;
long expectedMaxSN = 4096;
assertEquals(expectedMaxSN, maxSN);
// Since every branch node is covered, all branch nodes are regenerated, therefore:
// number of newly added nodes = number of modified data nodes + total number of branch nodes;
long addCounts = i / 16 + getMaxPathNodeCount(3) + 4;
assertEquals(storageDataCountBefore + addCounts, storage.getCount());
}
// Insert new data;
final int NEW_INSERTED_COUNT = 18;
for (i = 0; i < NEW_INSERTED_COUNT; i++) {
rand.nextBytes(dataBuf);
sn = maxSN + 1 + i;
nd = mkt.setData(sn, "KEY-" + sn, 0, dataBuf);
expectedDataNodes.put(sn, nd.getNodeHash());
}
mkt.commit();
{
// Verify that every data node has produced an existence proof;
MerkleProof proof = null;
for (Long n : expectedDataNodes.keySet()) {
proof = mkt.getProof(n.longValue());
assertNotNull(proof);
assertEquals(expectedDataNodes.get(n), proof.getDataHash());
}
}
// Record the root hash and some node info of this commit for later reload verification;
HashDigest r3_rootHash = mkt.getRootHash();
long r3_maxSN = mkt.getMaxSn();
long r3_sn1 = r2_sn1;
String r3_proof1 = mkt.getProof(r3_sn1).toString();
long r3_sn2 = r2_sn2;
String r3_proof2 = mkt.getProof(r3_sn2).toString();
long r3_sn3 = 4096 + NEW_INSERTED_COUNT - 5;
String r3_proof3 = mkt.getProof(r3_sn3).toString();
{
// Check the node count;
assertEquals(count + NEW_INSERTED_COUNT, mkt.getDataCount());
// Check that the max sequence number is correct;
maxSN = mkt.getMaxSn();
// count-1;
long expectedMaxSN = 4096 + NEW_INSERTED_COUNT;
assertEquals(expectedMaxSN, maxSN);
}
// --------------------
// Reload new MerkleTree instances from storage and verify their consistency with the original instance;
// Load from the Merkle root hash of the 1st-round commit;
MerkleSequenceTree r1_mkt = new MerkleSequenceTree(r1_rootHash, setting, keyPrefix, storage, true);
assertEquals(r1_maxSN, r1_mkt.getMaxSn());
assertEquals(r1_rootHash, r1_mkt.getRootHash());
assertEquals(r1_dataCount, r1_mkt.getDataCount());
assertEquals(r1_proof1, r1_mkt.getProof(r1_sn1).toString());
assertEquals(r1_proof2, r1_mkt.getProof(r1_sn2).toString());
// Load from the Merkle root hash of the 2nd-round commit;
// The 2nd-round Merkle tree modifies every node of the 1st-round data, so the proof of the same SN is different;
MerkleSequenceTree r2_mkt = new MerkleSequenceTree(r2_rootHash, setting, keyPrefix, storage, true);
assertEquals(r1_maxSN, r2_mkt.getMaxSn());
assertEquals(r1_dataCount, r2_mkt.getDataCount());
assertNotEquals(r1_rootHash, r2_mkt.getRootHash());
assertNotEquals(r1_proof1, r2_mkt.getProof(r1_sn1).toString());
assertNotEquals(r1_proof2, r2_mkt.getProof(r1_sn2).toString());
assertEquals(r2_maxSN, r2_mkt.getMaxSn());
assertEquals(r2_rootHash, r2_mkt.getRootHash());
assertEquals(r2_dataCount, r2_mkt.getDataCount());
assertEquals(r2_proof1, r2_mkt.getProof(r2_sn1).toString());
assertEquals(r2_proof2, r2_mkt.getProof(r2_sn2).toString());
// Load from the Merkle root hash of the 3rd-round commit;
// The 3rd-round Merkle tree only appends to the 2nd-round data, so the Merkle proof of the same non-appended SN is identical;
MerkleSequenceTree r3_mkt = new MerkleSequenceTree(r3_rootHash, setting, keyPrefix, storage, true);
assertEquals(r2_maxSN + NEW_INSERTED_COUNT, r3_mkt.getMaxSn());
assertNotEquals(r2_rootHash, r3_mkt.getRootHash());
assertEquals(r2_dataCount + NEW_INSERTED_COUNT, r3_mkt.getDataCount());
assertEquals(r3_maxSN, r3_mkt.getMaxSn());
assertEquals(r3_rootHash, r3_mkt.getRootHash());
assertEquals(r3_proof1, r3_mkt.getProof(r3_sn1).toString());
assertEquals(r3_proof2, r3_mkt.getProof(r3_sn2).toString());
assertEquals(r3_proof3, r3_mkt.getProof(r3_sn3).toString());
// Verify that every data node has produced an existence proof;
{
MerkleProof proof = null;
for (Long n : expectedDataNodes.keySet()) {
proof = r3_mkt.getProof(n.longValue());
assertNotNull(proof);
assertEquals(expectedDataNodes.get(n), proof.getDataHash());
}
}
}
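The sequence-tree test above follows the same pattern at the level of sequence numbers (SN): setData binds an SN, a key and a value to a data node, commit() fixes the root hash, and getProof(sn) ties the data-node hash to that root. Below is a minimal sketch of that round trip, reusing the mocked setting, the keyPrefix field, and the storage from the test, and only the method signatures shown above; it is a sketch under those assumptions, not authoritative API documentation.
// Write one data node at SN 0, commit, and check its existence proof.
MerkleSequenceTree tree = new MerkleSequenceTree(setting, keyPrefix, storage);
byte[] dataBuf = new byte[16];
new Random().nextBytes(dataBuf);
MerkleNode node = tree.setData(0, "KEY-0", 0, dataBuf);
tree.commit();
MerkleProof proof = tree.getProof(0);
// The proof's data hash equals the data node's hash, as asserted in the test above.
assertNotNull(proof);
assertEquals(node.getNodeHash(), proof.getDataHash());
// Reloading from the recorded root hash reproduces the same proof.
MerkleSequenceTree reloaded = new MerkleSequenceTree(tree.getRootHash(), setting, keyPrefix, storage, true);
assertEquals(proof.toString(), reloaded.getProof(0).toString());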
Use of com.jd.blockchain.ledger.MerkleProof in project jdchain-core by blockchain-jd-com.
The class MerkleHashTrieTest, method assertMerkleProof.
private MerkleProof assertMerkleProof(VersioningKVData<String, byte[]> data, MerkleTree merkleTree) {
MerkleProof proof_nx = merkleTree.getProof(BytesUtils.toBytes("KEY_NOT_EXIST"));
assertNull(proof_nx);
MerkleProof proof = merkleTree.getProof(BytesUtils.toBytes(data.getKey()), data.getVersion());
assertNotNull(proof);
HashDigest dataHash = SHA256_HASH_FUNC.hash(data.getValue());
assertEquals(dataHash, proof.getDataHash());
HashDigest rootHash = merkleTree.getRootHash();
assertEquals(rootHash, proof.getRootHash());
assertTrue(MerkleProofVerifier.verify(proof));
return proof;
}
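assertMerkleProof bundles the standard proof checks: a missing key yields a null proof, while an existing (key, version) pair yields a proof whose data hash is the SHA256 hash of the value, whose root hash equals the tree's current root hash, and which passes MerkleProofVerifier.verify. A hypothetical call site is sketched below, built with helpers from the same test class (generateDatas, toArray, newMerkleTree_with_committed, used in testMerkleProofCorrectness below) and assuming a MerkleHashTrie can be passed where the helper expects a MerkleTree.
// Hypothetical usage of the helper above.
List<VersioningKVData<String, byte[]>> dataList = generateDatas(4);
VersioningKVData<String, byte[]>[] datas = toArray(dataList);
MerkleHashTrie merkleTree = newMerkleTree_with_committed(datas);
// Check the proof of one committed entry; the returned proof can be kept for later re-verification.
MerkleProof proof = assertMerkleProof(datas[0], merkleTree);
assertTrue(MerkleProofVerifier.verify(proof));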
Use of com.jd.blockchain.ledger.MerkleProof in project jdchain-core by blockchain-jd-com.
The class MerkleHashTrieTest, method testMerkleProofCorrectness.
/**
 * Tests the correctness of Merkle proofs.
 */
@Test
public void testMerkleProofCorrectness() {
// Case: length 0;
int count = 0;
// System.out.printf("\r\n\r\n================= %s nodes =================\r\n\r\n", count);
List<VersioningKVData<String, byte[]>> dataList = generateDatas(count);
VersioningKVData<String, byte[]>[] datas = toArray(dataList);
MerkleHashTrie merkleTree = newMerkleTree_with_committed(datas);
HashDigest rootHash0 = merkleTree.getRootHash();
assertNotNull(rootHash0);
// Querying a proof for any data in an empty Merkle tree is expected to return null;
MerkleProof proof = merkleTree.getProof("KEY-0");
assertNull(proof);
// Case: length 1;
count = 1;
// System.out.printf("\r\n\r\n================= %s nodes =================\r\n\r\n", count);
dataList = generateDatas(count);
datas = toArray(dataList);
merkleTree = newMerkleTree_with_committed(datas);
HashDigest rootHash1 = merkleTree.getRootHash();
assertNotNull(rootHash1);
// TODO: Merkle-proof tests are temporarily skipped;
// // With only 1 data entry, the Merkle proof of that entry is expected to be obtainable;
// MerkleProof proof1_0 = merkleTree.getProof("KEY-0");
// assertNotNull(proof1_0);
// // By design, any Merkle proof is expected to contain at least 4 path entries;
// assertMerkleProofPath(proof1_0, merkleTree.getRootHash(), merkleTree.getData("KEY-0"));
// Case: length 2;
count = 2;
// System.out.printf("\r\n\r\n================= %s nodes =================\r\n\r\n", count);
dataList = generateDatas(count);
datas = toArray(dataList);
merkleTree = newMerkleTree_with_committed(datas);
HashDigest rootHash2 = merkleTree.getRootHash();
assertNotNull(rootHash2);
// TODO: Merkle-proof tests are temporarily skipped;
// MerkleProof proof2_0 = merkleTree.getProof("KEY-0");
// assertNotNull(proof2_0);
// // By design, any Merkle proof is expected to contain at least 4 path entries;
// assertMerkleProofPath(proof2_0, merkleTree.getRootHash(), merkleTree.getData("KEY-0"));
// Case: length 16;
count = 16;
// System.out.printf("\r\n\r\n================= %s nodes =================\r\n\r\n", count);
dataList = generateDatas(count);
datas = toArray(dataList);
merkleTree = newMerkleTree_with_committed(datas);
HashDigest rootHash16 = merkleTree.getRootHash();
assertNotNull(rootHash16);
// TODO: Merkle-proof tests are temporarily skipped;
// MerkleProof proof16_0 = merkleTree.getProof("KEY-0");
// assertNotNull(proof16_0);
// // By design, any Merkle proof is expected to contain at least 4 path entries;
// assertMerkleProofPath(proof16_0, merkleTree.getRootHash(), merkleTree.getData("KEY-0"));
// Case: length 32;
count = 32;
// System.out.printf("\r\n\r\n================= %s nodes =================\r\n\r\n", count);
dataList = generateDatas(count);
datas = toArray(dataList);
merkleTree = newMerkleTree_with_committed(datas);
HashDigest rootHash32 = merkleTree.getRootHash();
assertNotNull(rootHash32);
// TODO: Merkle-proof tests are temporarily skipped;
// MerkleProof proof32_0 = merkleTree.getProof("KEY-0");
// assertNotNull(proof32_0);
// // By design, any Merkle proof is expected to contain at least 4 path entries;
// assertMerkleProofPath(proof32_0, merkleTree.getRootHash(), merkleTree.getData("KEY-0"));
// Case: length 1025;
count = 1025;
// System.out.printf("\r\n\r\n================= %s nodes =================\r\n\r\n", count);
dataList = generateDatas(count);
datas = toArray(dataList);
merkleTree = newMerkleTree_with_committed(datas);
HashDigest rootHash1025 = merkleTree.getRootHash();
assertNotNull(rootHash1025);
// TODO: Merkle-proof tests are temporarily skipped;
// MerkleProof proof1025 = merkleTree.getProof("KEY-0");
// assertNotNull(proof1025);
// // By design, any Merkle proof is expected to contain at least 4 path entries;
// assertMerkleProofPath(proof1025, merkleTree.getRootHash(), merkleTree.getData("KEY-0"));
}
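The method repeats the same build-and-check block for tree sizes 0, 1, 2, 16, 32 and 1025. The per-size steps could be expressed once as a loop over the sizes; the sketch below reuses the same test helpers and keeps the proof assertions for non-empty trees disabled, mirroring the TODOs above, so it is a refactoring suggestion rather than the project's own code.
int[] sizes = { 0, 1, 2, 16, 32, 1025 };
for (int size : sizes) {
    List<VersioningKVData<String, byte[]>> list = generateDatas(size);
    MerkleHashTrie tree = newMerkleTree_with_committed(toArray(list));
    assertNotNull(tree.getRootHash());
    if (size == 0) {
        // An empty tree is expected to return null for any proof query;
        assertNull(tree.getProof("KEY-0"));
    }
    // TODO: once Merkle-proof support is restored, re-enable the getProof("KEY-0") and
    // assertMerkleProofPath checks for the non-empty cases, as noted in the TODOs above.
}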