Usage of com.jd.blockchain.storage.service.utils.ExistancePolicyKVStorageMap in project jdchain-core (blockchain-jd-com): method testWriteAndRead of class MerkleSequenceTreeTest.
/**
 * Writes data nodes with strictly sequential serial numbers, commits, then
 * reloads the tree from the committed root hash and verifies that every
 * counter and every data node matches what was originally written.
 */
@Test
public void testWriteAndRead() {
    Random random = new Random();
    CryptoSetting cryptoSetting = Mockito.mock(CryptoSetting.class);
    when(cryptoSetting.getHashAlgorithm()).thenReturn(ClassicAlgorithm.SHA256.code());
    when(cryptoSetting.getAutoVerifyHash()).thenReturn(true);

    // Start from an empty tree backed by an in-memory KV storage.
    ExistancePolicyKVStorageMap storage = new ExistancePolicyKVStorageMap();
    MerkleSequenceTree tree = new MerkleSequenceTree(cryptoSetting, Bytes.fromString(keyPrefix), storage);

    // Append 18 records whose serial numbers run 0..17 in order.
    int total = 18;
    byte[] buffer = new byte[16];
    DataNode[] writtenNodes = new DataNode[total];
    for (int idx = 0; idx < total; idx++) {
        random.nextBytes(buffer);
        long sn = idx;
        writtenNodes[idx] = tree.setData(sn, "KEY-" + sn, 0, buffer);
        assertEquals(sn, writtenNodes[idx].getSN());
    }
    tree.commit();

    HashDigest rootHash = tree.getRootHash();
    assertNotNull(rootHash);

    long maxSN = tree.getMaxSn();
    long dataCount = tree.getDataCount();
    assertEquals(total, dataCount);
    // Holds only because the SNs were assigned sequentially from zero.
    assertEquals(dataCount - 1, maxSN);

    // Reload the tree from the committed root hash over the same storage.
    tree = new MerkleSequenceTree(rootHash, cryptoSetting, keyPrefix, storage, false);

    // The reloaded tree must report the same counters as before.
    maxSN = tree.getMaxSn();
    dataCount = tree.getDataCount();
    assertEquals(total, dataCount);
    // Holds only because the SNs were assigned sequentially from zero.
    assertEquals(dataCount - 1, maxSN);

    // Every data node read back must match the node originally written.
    for (int idx = 0; idx <= maxSN; idx++) {
        DataNode reloaded = tree.getData(idx);
        assertEquals(idx, reloaded.getSN());
        assertEquals(writtenNodes[idx].getNodeHash(), reloaded.getNodeHash());
        assertEquals(writtenNodes[idx].getKey(), reloaded.getKey());
        assertEquals(writtenNodes[idx].getLevel(), reloaded.getLevel());
        assertEquals(writtenNodes[idx].getVersion(), reloaded.getVersion());
    }
}
Usage of com.jd.blockchain.storage.service.utils.ExistancePolicyKVStorageMap in project jdchain-core (blockchain-jd-com): method testRandomInsert_MultiCommit of class MerkleSequenceTreeTest.
/**
 * Tests inserting data nodes in random SN order across multiple commits,
 * verifying the data count, max SN, tree level, and the exact number of
 * storage entries (path nodes + data nodes) after each commit.
 */
@Test
public void testRandomInsert_MultiCommit() {
CryptoSetting setting = Mockito.mock(CryptoSetting.class);
when(setting.getHashAlgorithm()).thenReturn(ClassicAlgorithm.SHA256.code());
when(setting.getAutoVerifyHash()).thenReturn(true);
// Records the SN -> node-hash mapping of every data node that gets written.
TreeMap<Long, HashDigest> dataNodes = new TreeMap<>();
MerkleNode nd;
// Start from an empty tree backed by an in-memory KV storage.
ExistancePolicyKVStorageMap kvs1 = new ExistancePolicyKVStorageMap();
MerkleSequenceTree mkt = new MerkleSequenceTree(setting, keyPrefix, kvs1);
// Insert 30 records in two batches of 15, one batch from each end of the
// SN range [0, 4096]; this is expected to grow a 4-level 16-ary tree.
int count = 4097;
byte[] dataBuf = new byte[16];
Random rand = new Random();
long sn = 0;
// Batch 1: SNs 0..14, ascending from the low end.
for (int i = 0; i < 15; i++) {
rand.nextBytes(dataBuf);
nd = mkt.setData(sn, "KEY-" + sn, 0, dataBuf);
dataNodes.put(sn, nd.getNodeHash());
sn++;
}
sn = count - 1;
// Batch 2: SNs 4096..4082, descending from the high end.
for (int i = 0; i < 15; i++) {
rand.nextBytes(dataBuf);
nd = mkt.setData(sn, "KEY-" + sn, 0, dataBuf);
dataNodes.put(sn, nd.getNodeHash());
sn--;
}
mkt.commit();
assertEquals(30, mkt.getDataCount());
// Verify the maximum serial number.
long maxSN = mkt.getMaxSn();
assertEquals(count - 1, maxSN);
// The tree is expected to have grown to 4 levels.
assertEquals(4, mkt.getLevel());
// Path nodes + data nodes: a 4-level 16-ary tree covering these 30 records
// is expected to require 9 path nodes, plus the 30 data nodes.
long expectedNodes = 9 + 30;
assertEquals(expectedNodes, kvs1.getCount());
// ---------------------------------
// Randomly fill every remaining SN in [15, 4081].
int randomInterval = 4082 - 15;
List<Long> snPool = new LinkedList<>();
for (int i = 0; i < randomInterval; i++) {
snPool.add(15L + i);
}
// Draw SNs from the pool in random order so the insertion order is random.
for (int i = 0; i < randomInterval; i++) {
int selected = rand.nextInt(snPool.size());
sn = snPool.remove(selected).longValue();
rand.nextBytes(dataBuf);
nd = mkt.setData(sn, "KEY-" + sn, 0, dataBuf);
dataNodes.put(sn, nd.getNodeHash());
}
mkt.commit();
assertEquals(4097, mkt.getDataCount());
// Verify the maximum serial number.
maxSN = mkt.getMaxSn();
assertEquals(count - 1, maxSN);
// The tree is still expected to be 4 levels deep.
assertEquals(4, mkt.getLevel());
// Path nodes + data nodes: all earlier path nodes are re-hashed, which is
// equivalent to adding one record on top of a full 3-level 16-ary tree —
// 273 path nodes — plus 1 more for SN 4096 (of its 4 path nodes only the
// root is rewritten this commit; the rest were stored in the previous one).
expectedNodes = expectedNodes + 273 + 1 + randomInterval;
assertEquals(expectedNodes, kvs1.getCount());
// TODO: Merkle-proof checks are temporarily commented out.
// Verify that every data node yields an existence proof:
// MerkleProof proof = null;
// for (Long n : dataNodes.keySet()) {
// proof = mkt.getProof(n.longValue());
// assertNotNull(proof);
// assertEquals(dataNodes.get(n), proof.getDataHash());
// }
}
Aggregations