Use of com.swirlds.common.crypto.Hash in project hedera-services by hashgraph.
The class MerkleDiskFs, method setHashFromContents.
private void setHashFromContents() {
    throwIfImmutable("Cannot change this file's content hash if it's immutable.");
    var baos = new ByteArrayOutputStream();
    try (SerializableDataOutputStream out = new SerializableDataOutputStream(baos)) {
        serializeFidInfo(out, fileHashes::get);
    } catch (IOException improbable) {
        throw new IllegalStateException(improbable);
    }
    try {
        baos.flush();
        baos.close();
    } catch (IOException improbable) {
        throw new IllegalStateException(improbable);
    }
    super.setHash(new Hash(noThrowSha384HashOf(baos.toByteArray())));
}
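The helper noThrowSha384HashOf is defined elsewhere in the project; from its use here, it computes a SHA-384 digest without exposing checked exceptions to the caller. A minimal sketch of such a helper, assuming only the JDK's MessageDigest (this body is an illustration, not the project's actual implementation):

// Sketch of a SHA-384 helper in the spirit of noThrowSha384HashOf (assumed
// behavior): wrap MessageDigest and rethrow the checked exception unchecked.
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public static byte[] noThrowSha384HashOf(byte[] data) {
    try {
        // SHA-384 ships with every standard JRE
        return MessageDigest.getInstance("SHA-384").digest(data);
    } catch (NoSuchAlgorithmException improbable) {
        throw new IllegalStateException(improbable);
    }
}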
Use of com.swirlds.common.crypto.Hash in project hedera-services by hashgraph.
The class MerkleSpecialFiles, method getHash.
@Override
public Hash getHash() {
    final var baos = baosSupplier.get();
    for (final var entry : fileContents.entrySet()) {
        try {
            baos.write(Longs.toByteArray(entry.getKey().getFileNum()));
            baos.write(entry.getValue().getHash().getValue());
        } catch (IOException e) {
            log.error("Hash concatenation failed", e);
            throw new UncheckedIOException(e);
        }
    }
    return new Hash(noThrowSha384HashOf(baos.toByteArray()), DigestType.SHA_384);
}
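Each iteration feeds the digest the 8-byte big-endian file number (Guava's Longs.toByteArray) followed by the 48 raw bytes of that file's SHA-384 hash. A standalone sketch of the same concatenation using only JDK types (the method name and the SortedMap parameter are assumptions for illustration):

// Builds the same digest input with plain JDK types, assuming a
// SortedMap<Long, byte[]> from file number to that file's 48-byte hash.
import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
import java.util.SortedMap;

static byte[] concatenatedHashInput(SortedMap<Long, byte[]> hashesByFileNum) {
    final var baos = new ByteArrayOutputStream();
    for (final var entry : hashesByFileNum.entrySet()) {
        // 8-byte big-endian file number, equivalent to Longs.toByteArray(long)
        baos.writeBytes(ByteBuffer.allocate(Long.BYTES).putLong(entry.getKey()).array());
        baos.writeBytes(entry.getValue());
    }
    return baos.toByteArray();
}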
Use of com.swirlds.common.crypto.Hash in project hedera-services by hashgraph.
The class VirtualDataSourceNewAPIBench, method w3_add10kLeafValues.
/**
 * Updates 10k leaves, starting at nextPath, with new values
 */
@Benchmark
public void w3_add10kLeafValues() throws Exception {
    dataSource.saveRecords(
            numEntities,
            numEntities * 2,
            Stream.empty(),
            LongStream.range(nextPath, nextPath + 10_000)
                    .mapToObj(i -> new VirtualLeafRecord<>(
                            i + numEntities, hash((int) i), new ContractKey(i, i), new ContractValue(i))),
            Stream.empty());
    nextPath += 10_000;
}
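The hash(int) helper is not part of this snippet; the benchmark only needs it to produce a deterministic 48-byte (SHA-384-sized) Hash per index. A plausible stand-in (the body is an assumption, not the benchmark's actual helper):

// Hypothetical stand-in for the benchmark's hash(int): any deterministic
// 48-byte digest per index suffices for generating load.
import com.swirlds.common.crypto.DigestType;
import com.swirlds.common.crypto.Hash;
import java.util.Arrays;

static Hash hash(int index) {
    final byte[] digest = new byte[48]; // SHA-384 digest length
    Arrays.fill(digest, (byte) index);
    return new Hash(digest, DigestType.SHA_384);
}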
Use of com.swirlds.common.crypto.Hash in project hedera-services by hashgraph.
The class VirtualDataSourceNewAPIBench, method setup.
@Setup(Level.Trial)
public void setup() {
    System.out.println("------- Setup -----------------------------");
    storePath = Path.of("store-" + impl);
    try {
        final boolean storeExists = Files.exists(storePath);
        // get slot index suppliers
        switch (impl) {
            case "lmdb":
                dataSource = new VFCDataSourceLmdb<>(
                        1 + 8 + 32, // max serialized size
                        ContractKey::new,
                        ContractValue.SERIALIZED_SIZE,
                        ContractValue::new,
                        storePath);
                break;
            case "jasperdb":
                VirtualLeafRecordSerializer<ContractKey, ContractValue> virtualLeafRecordSerializer =
                        new VirtualLeafRecordSerializer<>(
                                (short) 1, DigestType.SHA_384,
                                (short) 1, DataFileCommon.VARIABLE_DATA_SIZE, new ContractKeySupplier(),
                                (short) 1, ContractValue.SERIALIZED_SIZE, new ContractValueSupplier(),
                                true);
                JasperDbBuilder<ContractKey, ContractValue> dbBuilder = new JasperDbBuilder<>();
                dbBuilder.virtualLeafRecordSerializer(virtualLeafRecordSerializer)
                        .virtualInternalRecordSerializer(new VirtualInternalRecordSerializer())
                        .keySerializer(new ContractKeySerializer())
                        .storageDir(storePath)
                        .maxNumOfKeys(numEntities + 10_000_000)
                        .preferDiskBasedIndexes(false)
                        .internalHashesRamToDiskThreshold(Long.MAX_VALUE)
                        .mergingEnabled(true);
                dataSource = dbBuilder.build("jdb", "4ApiBench");
                break;
            default:
                throw new IllegalStateException("Unexpected value: " + impl);
        }
        // create data
        if (!storeExists) {
            System.out.println("================================================================================");
            System.out.println("Creating data ...");
            // create internal nodes and leaves in batches
            long iHaveWritten = 0;
            while (iHaveWritten < numEntities) {
                final long start = System.currentTimeMillis();
                final long batchSize = Math.min(WRITE_BATCH_SIZE, numEntities - iHaveWritten);
                dataSource.saveRecords(
                        numEntities,
                        numEntities * 2,
                        LongStream.range(iHaveWritten, iHaveWritten + batchSize)
                                .mapToObj(i -> new VirtualInternalRecord(i, hash((int) i))),
                        LongStream.range(iHaveWritten, iHaveWritten + batchSize)
                                .mapToObj(i -> new VirtualLeafRecord<>(
                                        i + numEntities, hash((int) i), new ContractKey(i, i), new ContractValue(i))),
                        Stream.empty());
                iHaveWritten += batchSize;
                printUpdate(start, batchSize, ContractValue.SERIALIZED_SIZE, "Created " + iHaveWritten + " Nodes");
            }
            System.out.println("================================================================================");
            // set nextPath to the first leaf path
            nextPath = numEntities;
            // let background merging catch up
            try {
                System.out.println("Waiting for merge");
                Thread.sleep(30_000);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        } else {
            System.out.println("Loaded existing data");
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
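Two helpers referenced above are defined elsewhere: hash(int) (sketched earlier) and printUpdate, which evidently reports batch throughput. A sketch of a compatible printUpdate (the signature matches the call site; the body is an assumption):

// Hypothetical printUpdate matching the call above: report rows/s and MB/s
// for the batch written since startMillis.
private static void printUpdate(long startMillis, long count, int valueSize, String msg) {
    final double seconds = Math.max((System.currentTimeMillis() - startMillis) / 1000.0, 0.001);
    final double rowsPerSecond = count / seconds;
    final double mbPerSecond = (rowsPerSecond * valueSize) / (1024.0 * 1024.0);
    System.out.printf("%s, %,.0f rows/s, %,.2f MB/s%n", msg, rowsPerSecond, mbPerSecond);
}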
Use of com.swirlds.common.crypto.Hash in project hedera-services by hashgraph.
The class VFCDataSourceLmdb, method loadLeafRecord.
// ==================================================================================================================
// Private methods
/**
 * Loads a leaf record by path, using the provided key or, if it is null,
 * deserializing the key from the stored bytes.
 */
private VirtualLeafRecord<K, V> loadLeafRecord(Txn<ByteBuffer> txn, long path, K key) throws IOException {
    ByteBuffer keyHashValueBuffer = leafPathToKeyHashValueMap.get(txn, getPathNativeOrderBytes(path));
    if (keyHashValueBuffer == null) {
        return null;
    }
    // deserialize
    keyHashValueBuffer.clear();
    // deserialize key
    if (key != null) {
        // jump past the key because we don't need to deserialize it
        keyHashValueBuffer.position(keySizeBytes);
    } else {
        final int keySerializationVersion = keyHashValueBuffer.getInt();
        key = keyConstructor.get();
        key.deserialize(keyHashValueBuffer, keySerializationVersion);
    }
    // deserialize hash
    final Hash hash = new Hash(DigestType.SHA_384);
    keyHashValueBuffer.get(hash.getValue());
    // deserialize value
    final int valueSerializationVersion = keyHashValueBuffer.getInt();
    final V value = valueConstructor.get();
    value.deserialize(keyHashValueBuffer, valueSerializationVersion);
    // return new VirtualLeafRecord
    return new VirtualLeafRecord<>(path, hash, key, value);
}
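The read path above implies a fixed per-leaf layout in the stored bytes: the serialized key (a 4-byte version int followed by the key's data, keySizeBytes in total), the raw 48-byte SHA-384 digest, then a 4-byte value version and the value's data. A sketch of a matching write side (the method name and a valueSizeBytes field are assumptions inferred from the read path, not the class's actual code):

// Hypothetical write-side counterpart of loadLeafRecord, assuming keySizeBytes
// includes the 4-byte key version and valueSizeBytes covers the value's data.
private ByteBuffer keyHashValueBytes(VirtualLeafRecord<K, V> leaf) throws IOException {
    final ByteBuffer buf = ByteBuffer.allocate(keySizeBytes + 48 + Integer.BYTES + valueSizeBytes);
    buf.putInt(leaf.getKey().getVersion()); // key serialization version
    leaf.getKey().serialize(buf);
    buf.put(leaf.getHash().getValue()); // raw 48-byte SHA-384 digest
    buf.putInt(leaf.getValue().getVersion()); // value serialization version
    leaf.getValue().serialize(buf);
    return buf.flip();
}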