Use of io.datarouter.filesystem.snapshot.reader.SnapshotIdReader in project datarouter by hotpads.
The class DatarouterSnapshotEntryHandler, method buildTable:
private ContainerTag<?> buildTable(SnapshotKey snapshotKey, long id) {
    SnapshotGroup group = groups.getGroup(snapshotKey.groupId);
    var reader = new SnapshotIdReader(snapshotKey, groups);
    SnapshotRecord record = reader.getRecord(id);
    SnapshotRecordStringDecoder decoder = ReflectionTool.create(group.getSnapshotEntryDecoderClass());
    SnapshotRecordStrings decoded = decoder.decode(record);
    List<Twin<String>> rows = new ArrayList<>();
    rows.add(new Twin<>("id", Long.toString(record.id)));
    rows.add(new Twin<>(decoder.keyName(), decoded.key));
    rows.add(new Twin<>(decoder.valueName(), decoded.value));
    IntStream.range(0, decoded.columnValues.size())
            .mapToObj(column -> new Twin<>(decoder.columnValueName(column), decoded.columnValues.get(column)))
            .forEach(rows::add);
    var table = new J2HtmlTable<Twin<String>>()
            .withClasses("sortable table table-sm table-striped my-4 border")
            .withColumn("field", twin -> twin.getLeft())
            .withColumn("value", twin -> twin.getRight())
            .build(rows);
    return table;
}
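
For orientation, here is a minimal sketch of the same lookup-and-decode path without the HTML table plumbing. It is not project code; it assumes the same snapshotKey and groups fields that buildTable uses above, and readKeyValue is a hypothetical helper name.

private Twin<String> readKeyValue(SnapshotKey snapshotKey, long id) {
    // Sketch only (not project code): fetch one record by id and decode it to strings,
    // reusing the groups registry and decoder exactly as buildTable does above.
    SnapshotGroup group = groups.getGroup(snapshotKey.groupId);
    var reader = new SnapshotIdReader(snapshotKey, groups);
    SnapshotRecord record = reader.getRecord(id);
    SnapshotRecordStringDecoder decoder = ReflectionTool.create(group.getSnapshotEntryDecoderClass());
    SnapshotRecordStrings decoded = decoder.decode(record);
    return new Twin<>(decoded.key, decoded.value);
}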
Use of io.datarouter.filesystem.snapshot.reader.SnapshotIdReader in project datarouter by hotpads.
The class BaseSnapshotTests, method testSearches:
@Test
public void testSearches() {
    if (!ENABLED_TESTS.contains(TestId.SEARCHES)) {
        return;
    }
    BlockLoader blockLoader = makeBlockLoader(useMemoryCache(), shareMemoryCache());
    var reader = new ScanningSnapshotReader(snapshotKey, exec, getNumThreads(), blockLoader, SCAN_NUM_BLOCKS);
    int step = 1000;
    int limit = 1000;
    Scanner.iterate(0, fromId -> fromId += step)
            .advanceWhile(fromId -> fromId < sortedInputs.size() - limit)
            .parallel(new ParallelScannerContext(scanExec, getNumThreads(), true))
            .forEach(fromId -> {
                var idReader = new SnapshotIdReader(snapshotKey, blockLoader);
                // known first key, inclusive
                byte[] searchKey = idReader.getRecord(fromId).key;
                List<SnapshotLeafRecord> outputsInclusive = reader.scanLeafRecords(searchKey, true)
                        .limit(limit)
                        .list();
                for (int i = 0; i < limit; ++i) {
                    Input input = sortedInputs.get(fromId + i);
                    SnapshotLeafRecord output = outputsInclusive.get(i);
                    Assert.assertEquals(fromId + i, output.id);
                    Assert.assertEquals(new Bytes(input.entry.key()), new Bytes(output.key));
                }
                // known first key, exclusive
                List<SnapshotLeafRecord> outputsExclusive = reader.scanLeafRecords(searchKey, false)
                        .limit(limit)
                        .list();
                for (int i = 0; i < limit; ++i) {
                    // plus one because exclusive
                    Input input = sortedInputs.get(fromId + i + 1);
                    SnapshotLeafRecord output = outputsExclusive.get(i);
                    Assert.assertEquals(input.id, output.id);
                    Assert.assertEquals(new Bytes(input.entry.key()), new Bytes(output.key));
                }
                // fake first key (should act like exclusive)
                byte[] nonExistentKey = ByteTool.concat(searchKey, new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0 });
                List<SnapshotLeafRecord> outputsNonExistentKey = reader.scanLeafRecords(nonExistentKey, true)
                        .limit(limit)
                        .list();
                for (int i = 0; i < limit; ++i) {
                    // plus one because the first key didn't exist
                    Input input = sortedInputs.get(fromId + i + 1);
                    SnapshotLeafRecord output = outputsNonExistentKey.get(i);
                    Assert.assertEquals(input.id, output.id);
                    Assert.assertEquals(new Bytes(input.entry.key()), new Bytes(output.key));
                }
            });
}
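
A minimal sketch of the key-based scan in isolation, assuming the same snapshotKey, exec, getNumThreads(), and SCAN_NUM_BLOCKS as the test above; scanFromKey is a hypothetical helper name, and the boolean mirrors the inclusive/exclusive behavior exercised by the assertions above.

private List<SnapshotLeafRecord> scanFromKey(byte[] startKey, boolean inclusive, int limit, BlockLoader blockLoader) {
    // Sketch only (not project code): stream leaf records starting at startKey.
    // When inclusive is true and startKey exists, the scan begins at startKey itself;
    // otherwise it begins at the first key after it, matching the test above.
    var reader = new ScanningSnapshotReader(snapshotKey, exec, getNumThreads(), blockLoader, SCAN_NUM_BLOCKS);
    return reader.scanLeafRecords(startKey, inclusive)
            .limit(limit)
            .list();
}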
Use of io.datarouter.filesystem.snapshot.reader.SnapshotIdReader in project datarouter by hotpads.
The class BaseSnapshotTests, method testOperationInternal:
private void testOperationInternal(BlockLoader threadSafeBlockLoader, boolean random, boolean multiThreaded, Operation operation) {
    List<Input> searchKeys = random ? randomInputs : sortedInputs;
    int batchSize = 10_000;
    var parallelScannerContext = new ParallelScannerContext(exec, getNumThreads(), true, multiThreaded);
    var count = new AtomicLong();
    Scanner.of(searchKeys)
            .batch(batchSize)
            .parallel(parallelScannerContext)
            .forEach(batch -> {
                var idReader = new SnapshotIdReader(snapshotKey, threadSafeBlockLoader);
                var keyReader = new SnapshotKeyReader(snapshotKey, threadSafeBlockLoader);
                for (int i = 0; i < batch.size(); ++i) {
                    Input input = batch.get(i);
                    long id = input.id;
                    byte[] key = input.entry.key();
                    byte[] value = input.entry.value();
                    if (Operation.GET_LEAF_RECORD == operation) {
                        SnapshotLeafRecord leafRecord = idReader.leafRecord(id);
                        if (!Arrays.equals(key, leafRecord.key)) {
                            String message = String.format("%s, expected=%s, actual=%s", id, utf8(key), utf8(leafRecord.key));
                            throw new RuntimeException(message);
                        }
                        if (!Arrays.equals(value, leafRecord.value)) {
                            String message = String.format("%s, expected=%s, actual=%s", id, utf8(value), utf8(leafRecord.value));
                            throw new RuntimeException(message);
                        }
                    } else if (Operation.GET_RECORD == operation) {
                        SnapshotRecord result = idReader.getRecord(id);
                        if (id != result.id) {
                            String message = String.format("%s, expected=%s, actual=%s", id, id, result.id);
                            throw new RuntimeException(message);
                        }
                        if (!Arrays.equals(key, result.key)) {
                            String message = String.format("%s, expected=%s, actual=%s", id, utf8(key), utf8(result.key));
                            throw new RuntimeException(message);
                        }
                        if (!SnapshotEntry.equal(input.entry, result.entry())) {
                            String message = String.format("%s, expected=%s, actual=%s", i, // TODO print more than column 0
                                    utf8(input.entry.columnValues[0]), utf8(result.columnValues[0]));
                            throw new RuntimeException(message);
                        }
                    } else if (Operation.FIND_ID == operation) {
                        if (keyReader.findRecordId(key).isEmpty()) {
                            String message = String.format("%s, %s not found", i, utf8(key));
                            throw new RuntimeException(message);
                        }
                        if (id != keyReader.findRecordId(key).get().longValue()) {
                            String message = String.format("%s, %s not found", i, utf8(key));
                            throw new RuntimeException(message);
                        }
                    } else if (Operation.FIND_RECORD == operation) {
                        Optional<SnapshotRecord> output = keyReader.findRecord(key);
                        if (output.isEmpty()) {
                            String message = String.format("%s, %s not found", i, utf8(key));
                            throw new RuntimeException(message);
                        }
                        if (!SnapshotEntry.equal(input.entry, output.get().entry())) {
                            String message = String.format("%s, expected=%s, actual=%s", i, // TODO print more than column 0
                                    utf8(batch.get(i).entry.columnValues[0]), utf8(output.get().columnValues[0]));
                            throw new RuntimeException(message);
                        }
                    }
                }
                count.addAndGet(batch.size());
                logger.warn("{}, {}, {} for {}/{} {}",
                        random ? "random" : "sorted",
                        multiThreaded ? "multi" : "single",
                        operation.toString().toLowerCase(),
                        NumberFormatter.addCommas(count.get()),
                        NumberFormatter.addCommas(searchKeys.size()),
                        utf8(ListTool.getLast(batch).entry.key()));
            });
}
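
The test above exercises both reader types. As a side-by-side sketch, and assuming the same snapshotKey field and a thread-safe BlockLoader as in testOperationInternal, the two point-lookup paths reduce to the following (lookupById and lookupByKey are hypothetical helper names):

private SnapshotRecord lookupById(long id, BlockLoader blockLoader) {
    // Sketch only (not project code): id-based lookup returns the full record (id, key, and column values).
    var idReader = new SnapshotIdReader(snapshotKey, blockLoader);
    return idReader.getRecord(id);
}

private Optional<SnapshotRecord> lookupByKey(byte[] key, BlockLoader blockLoader) {
    // Sketch only (not project code): key-based lookup returns Optional.empty() when the key is absent.
    var keyReader = new SnapshotKeyReader(snapshotKey, blockLoader);
    return keyReader.findRecord(key);
}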
Use of io.datarouter.filesystem.snapshot.reader.SnapshotIdReader in project datarouter by hotpads.
The class FilesystemSnapshotLargeTests, method testGets:
@Test
public void testGets() {
    if (!PERSIST) {
        return;
    }
    var reader = new SnapshotIdReader(benchmark.snapshotKey, cache);
    for (long id = 0; id < benchmark.numEntries; ++id) {
        SnapshotLeafRecord leafRecord = reader.leafRecord(id);
        Assert.assertEquals(leafRecord.key, SnapshotBenchmark.makeKey(id));
        Assert.assertEquals(leafRecord.value, SnapshotBenchmark.makeValue(id));
    }
}
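
For quick manual inspection, a sketch that prints a few leaf records as UTF-8 text. It assumes the same benchmark.snapshotKey and cache fields as testGets, that keys and values are UTF-8 encoded, and printSample is a hypothetical helper name (java.nio.charset.StandardCharsets is the only extra import it needs).

private void printSample(long sampleSize) {
    // Sketch only (not project code): print the first sampleSize key/value pairs by id.
    var reader = new SnapshotIdReader(benchmark.snapshotKey, cache);
    for (long id = 0; id < Math.min(sampleSize, benchmark.numEntries); ++id) {
        SnapshotLeafRecord leafRecord = reader.leafRecord(id);
        System.out.println(new String(leafRecord.key, StandardCharsets.UTF_8)
                + " -> " + new String(leafRecord.value, StandardCharsets.UTF_8));
    }
}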