Usage of com.swirlds.jasperdb.VirtualInternalRecordSerializer in the hedera-services project by Hashgraph: the setup method of the VirtualDataSourceBench class.
@Setup(Level.Trial)
public void setup() throws IOException {
    random = new Random(1234);
    System.out.println("dataSourcePath = " + dataSourcePath);
    // Start from a clean slate: remove any database or snapshot left by a previous run.
    if (Files.exists(dataSourcePath)) {
        System.err.println("!!!!!!!!!!!!! Deleting old db.");
        deleteDirectoryAndContents(dataSourcePath);
    }
    if (Files.exists(dataSourceSnapshotPath)) {
        System.err.println("!!!!!!!!!!!!! Deleting old db snapshot.");
        deleteDirectoryAndContents(dataSourceSnapshotPath);
    }
    // Build the JasperDB-backed data source for ContractKey/ContractValue pairs.
    final VirtualLeafRecordSerializer<ContractKey, ContractValue> leafSerializer =
            new VirtualLeafRecordSerializer<>(
                    (short) 1,
                    DigestType.SHA_384,
                    (short) 1,
                    DataFileCommon.VARIABLE_DATA_SIZE,
                    new ContractKeySupplier(),
                    (short) 1,
                    ContractValue.SERIALIZED_SIZE,
                    new ContractValueSupplier(),
                    true);
    final JasperDbBuilder<ContractKey, ContractValue> builder = new JasperDbBuilder<>();
    builder.virtualLeafRecordSerializer(leafSerializer)
            .virtualInternalRecordSerializer(new VirtualInternalRecordSerializer())
            .keySerializer(new ContractKeySerializer())
            .storageDir(dataSourcePath)
            .maxNumOfKeys(500_000_000)
            .preferDiskBasedIndexes(false)
            .internalHashesRamToDiskThreshold(0)
            .mergingEnabled(false);
    dataSource = builder.build("jdb", "4dsBench");
    // Seed the store: internal nodes on paths [0, firstLeafPath),
    // leaf records on paths [firstLeafPath, lastLeafPath].
    System.out.printf("Creating initial data set of %,d leaves\n", initialDataSize);
    progressPercentage = 0;
    final long firstLeafPath = initialDataSize;
    final long lastLeafPath = firstLeafPath + initialDataSize;
    // NOTE(review): range(firstLeafPath, lastLeafPath + 1) produces initialDataSize + 1
    // leaf records, one more than the printed message claims — confirm this is intended.
    final var internals = LongStream.range(0, firstLeafPath)
            .mapToObj(path -> new VirtualInternalRecord(path, hash((int) path)));
    final var leaves = LongStream.range(firstLeafPath, lastLeafPath + 1)
            .mapToObj(path -> new VirtualLeafRecord<>(
                    path, hash((int) path), new ContractKey(path, path), new ContractValue(path)))
            .peek(leaf -> printProgress(leaf.getPath(), lastLeafPath));
    dataSource.saveRecords(firstLeafPath, lastLeafPath, internals, leaves, Stream.empty());
    System.out.printf("Done creating initial data set of %,d leaves\n", initialDataSize);
}
Usage of com.swirlds.jasperdb.VirtualInternalRecordSerializer in the hedera-services project by Hashgraph: the main method of the ContractDataSourceValidator class.
/**
 * Entry point: opens the JasperDB contract data source found in the current
 * working directory and runs a full validation pass over it.
 *
 * @param args ignored
 * @throws IOException if the data source cannot be opened
 */
public static void main(String[] args) throws IOException {
    final Path dataSourcePath = Path.of("").toAbsolutePath();
    System.out.println("dataSourcePath = " + dataSourcePath + " exists " + Files.exists(dataSourcePath));
    // Serializer configuration must match the one the database was created with.
    final VirtualLeafRecordSerializer<ContractKey, ContractValue> virtualLeafRecordSerializer =
            new VirtualLeafRecordSerializer<>(
                    (short) 1,
                    DigestType.SHA_384,
                    (short) 1,
                    DataFileCommon.VARIABLE_DATA_SIZE,
                    new ContractKeySupplier(),
                    (short) 1,
                    ContractValue.SERIALIZED_SIZE,
                    new ContractValueSupplier(),
                    true);
    final JasperDbBuilder<ContractKey, ContractValue> dbBuilder = new JasperDbBuilder<>();
    dbBuilder.virtualLeafRecordSerializer(virtualLeafRecordSerializer)
            .virtualInternalRecordSerializer(new VirtualInternalRecordSerializer())
            .keySerializer(new ContractKeySerializer())
            .storageDir(dataSourcePath)
            .maxNumOfKeys(500_000_000)
            .preferDiskBasedIndexes(false)
            .internalHashesRamToDiskThreshold(0)
            .mergingEnabled(true);
    final DataSourceValidator<ContractKey, ContractValue> dataSourceValidator =
            new DataSourceValidator<>(dbBuilder.build("jdb", "4validator"));
    dataSourceValidator.validate();
}
Usage of com.swirlds.jasperdb.VirtualInternalRecordSerializer in the hedera-services project by Hashgraph: the setup method of the VirtualDataSourceNewAPIBench class.
/**
 * JMH trial-level setup: builds the data source selected by {@code impl}
 * ("lmdb" or "jasperdb") and, when the on-disk store does not already exist,
 * populates it with {@code numEntities} internal nodes and leaves in batches.
 */
@Setup(Level.Trial)
public void setup() {
    System.out.println("------- Setup -----------------------------");
    storePath = Path.of("store-" + impl);
    try {
        final boolean storeExists = Files.exists(storePath);
        // Choose the data-source implementation under benchmark.
        switch (impl) {
            case "lmdb":
                dataSource = new VFCDataSourceLmdb<>(
                        1 + 8 + 32, // max serialized key size
                        ContractKey::new,
                        ContractValue.SERIALIZED_SIZE,
                        ContractValue::new,
                        storePath);
                break;
            case "jasperdb":
                VirtualLeafRecordSerializer<ContractKey, ContractValue> virtualLeafRecordSerializer =
                        new VirtualLeafRecordSerializer<>(
                                (short) 1,
                                DigestType.SHA_384,
                                (short) 1,
                                DataFileCommon.VARIABLE_DATA_SIZE,
                                new ContractKeySupplier(),
                                (short) 1,
                                ContractValue.SERIALIZED_SIZE,
                                new ContractValueSupplier(),
                                true);
                JasperDbBuilder<ContractKey, ContractValue> dbBuilder = new JasperDbBuilder<>();
                dbBuilder.virtualLeafRecordSerializer(virtualLeafRecordSerializer)
                        .virtualInternalRecordSerializer(new VirtualInternalRecordSerializer())
                        .keySerializer(new ContractKeySerializer())
                        .storageDir(storePath)
                        .maxNumOfKeys(numEntities + 10_000_000)
                        .preferDiskBasedIndexes(false)
                        .internalHashesRamToDiskThreshold(Long.MAX_VALUE)
                        .mergingEnabled(true);
                dataSource = dbBuilder.build("jdb", "4ApiBench");
                break;
            default:
                throw new IllegalStateException("Unexpected value: " + impl);
        }
        // Populate only when the store did not already exist on disk.
        if (!storeExists) {
            System.out.println("================================================================================");
            System.out.println("Creating data ...");
            // Create internal nodes and leaves in batches of WRITE_BATCH_SIZE.
            long iHaveWritten = 0;
            while (iHaveWritten < numEntities) {
                final long start = System.currentTimeMillis();
                final long batchSize = Math.min(WRITE_BATCH_SIZE, numEntities - iHaveWritten);
                dataSource.saveRecords(
                        numEntities,
                        numEntities * 2,
                        LongStream.range(iHaveWritten, iHaveWritten + batchSize)
                                .mapToObj(i -> new VirtualInternalRecord(i, hash((int) i))),
                        LongStream.range(iHaveWritten, iHaveWritten + batchSize)
                                .mapToObj(i -> new VirtualLeafRecord<>(
                                        i + numEntities, hash((int) i),
                                        new ContractKey(i, i), new ContractValue(i))),
                        Stream.empty());
                iHaveWritten += batchSize;
                printUpdate(start, batchSize, ContractValue.SERIALIZED_SIZE,
                        "Created " + iHaveWritten + " Nodes");
            }
            System.out.println("================================================================================");
            // First unused leaf path for subsequent benchmark writes.
            nextPath = numEntities;
            // Give background merging time to catch up before measurements start.
            try {
                System.out.println("Waiting for merge");
                Thread.sleep(30000);
            } catch (InterruptedException e) {
                // Restore the interrupt flag so callers can still observe the interruption.
                Thread.currentThread().interrupt();
                e.printStackTrace();
            }
        } else {
            System.out.println("Loaded existing data");
        }
    } catch (Exception e) {
        // Benchmark harness setup: report and continue rather than abort the JVM.
        e.printStackTrace();
    }
}
Usage of com.swirlds.jasperdb.VirtualInternalRecordSerializer in the hedera-services project by Hashgraph: the setupDatabase method of the DatabaseState class.
@Setup(Level.Trial)
public void setupDatabase() throws IOException {
    System.out.println("dataSourcePath = " + dataSourcePath + " mergingEnabled=" + mergingEnabled);
    // Wipe any previous run's database and snapshot so each trial starts fresh.
    if (Files.exists(dataSourcePath)) {
        System.err.println("!!!!!!!!!!!!! Deleting old db.");
        deleteDirectoryAndContents(dataSourcePath);
    }
    if (Files.exists(dataSourceSnapshotPath)) {
        System.err.println("!!!!!!!!!!!!! Deleting old db snapshot.");
        deleteDirectoryAndContents(dataSourceSnapshotPath);
    }
    // Assemble the JasperDB data source; merging is controlled by the benchmark parameter.
    final VirtualLeafRecordSerializer<ContractKey, ContractValue> leafRecordSerializer =
            new VirtualLeafRecordSerializer<>(
                    (short) 1,
                    DigestType.SHA_384,
                    (short) 1,
                    DataFileCommon.VARIABLE_DATA_SIZE,
                    new ContractKeySupplier(),
                    (short) 1,
                    ContractValue.SERIALIZED_SIZE,
                    new ContractValueSupplier(),
                    true);
    final JasperDbBuilder<ContractKey, ContractValue> jdb = new JasperDbBuilder<>();
    jdb.virtualLeafRecordSerializer(leafRecordSerializer)
            .virtualInternalRecordSerializer(new VirtualInternalRecordSerializer())
            .keySerializer(new ContractKeySerializer())
            .storageDir(dataSourcePath)
            .maxNumOfKeys(500_000_000)
            .preferDiskBasedIndexes(false)
            .internalHashesRamToDiskThreshold(0)
            .mergingEnabled(mergingEnabled);
    dataSource = jdb.build("jdb", "4dbState");
    // Write the initial tree: internals on [0, firstLeaf), leaves on [firstLeaf, lastLeaf];
    // both streams report progress against lastLeaf as they are consumed.
    System.out.printf("Creating initial data set of %,d leaves\n", initialDataSize);
    progressPercentage = 0;
    final long firstLeaf = initialDataSize;
    final long lastLeaf = firstLeaf + initialDataSize;
    // NOTE(review): the leaf range is inclusive of lastLeaf, so initialDataSize + 1
    // leaves are written — one more than the message states; confirm intended.
    final var internalStream = LongStream.range(0, firstLeaf)
            .mapToObj(path -> new VirtualInternalRecord(path, hash((int) path)))
            .peek(rec -> printProgress(rec.getPath(), lastLeaf));
    final var leafStream = LongStream.range(firstLeaf, lastLeaf + 1)
            .mapToObj(path -> new VirtualLeafRecord<>(
                    path, hash((int) path), new ContractKey(path, path), new ContractValue(path)))
            .peek(rec -> printProgress(rec.getPath(), lastLeaf));
    dataSource.saveRecords(firstLeaf, lastLeaf, internalStream, leafStream, Stream.empty());
    System.out.printf("Done creating initial data set of %,d leaves\n", initialDataSize);
}
Usage of com.swirlds.jasperdb.VirtualInternalRecordSerializer in the hedera-services project by Hashgraph: the newVirtualizedBlobs method of the VirtualMapFactory class.
/**
 * Builds a new {@link VirtualMap} for blob storage, backed by a JasperDB data
 * source configured with fixed-size blob key/value serializers.
 *
 * @return an empty virtualized blob map named {@code BLOBS_VM_NAME}
 */
public VirtualMap<VirtualBlobKey, VirtualBlobValue> newVirtualizedBlobs() {
    final var keySerializer = new VirtualBlobKeySerializer();
    final VirtualLeafRecordSerializer<VirtualBlobKey, VirtualBlobValue> leafSerializer =
            new VirtualLeafRecordSerializer<>(
                    CURRENT_SERIALIZATION_VERSION,
                    DigestType.SHA_384,
                    CURRENT_SERIALIZATION_VERSION,
                    VirtualBlobKey.sizeInBytes(),
                    new VirtualBlobKeySupplier(),
                    CURRENT_SERIALIZATION_VERSION,
                    VirtualBlobValue.sizeInBytes(),
                    new VirtualBlobValueSupplier(),
                    false);
    final JasperDbBuilder<VirtualBlobKey, VirtualBlobValue> builder = jdbBuilderFactory.newJdbBuilder();
    builder.virtualLeafRecordSerializer(leafSerializer)
            .virtualInternalRecordSerializer(new VirtualInternalRecordSerializer())
            .keySerializer(keySerializer)
            .maxNumOfKeys(MAX_BLOBS)
            .preferDiskBasedIndexes(false)
            .internalHashesRamToDiskThreshold(MAX_IN_MEMORY_INTERNAL_HASHES);
    return new VirtualMap<>(BLOBS_VM_NAME, builder);
}
Aggregations