Use of io.datarouter.filesystem.snapshot.reader.ScanningSnapshotReader in the datarouter project by hotpads.
Class DatarouterSnapshotEntriesHandler, method buildTable:
private DomContent buildTable(SnapshotKey snapshotKey, long offset, long limit) {
    SnapshotGroup group = groups.getGroup(snapshotKey.groupId);
    var reader = new ScanningSnapshotReader(snapshotKey, exec, 2, groups, 1);
    SnapshotRecordStringDecoder decoder = ReflectionTool.create(group.getSnapshotEntryDecoderClass());
    List<SnapshotRecordStrings> rows = reader.scan(0)
            .skip(offset)
            .limit(limit)
            .map(decoder::decode)
            .list();
    var table = new J2HtmlTable<SnapshotRecordStrings>()
            .withClasses("sortable table table-sm table-striped my-4 border")
            .withColumn("id", row -> row.id)
            .withColumn(decoder.keyName(), row -> row.key)
            .withColumn(decoder.valueName(), row -> {
                if (row.value == null) {
                    return "";
                } else if (row.value.length() < 64) {
                    return row.value;
                } else {
                    return row.value.subSequence(0, 64) + "...";
                }
            })
            .withHtmlColumn("details", row -> {
                String href = new URIBuilder()
                        .setPath(request.getContextPath()
                                + snapshotPaths.datarouter.snapshot.individual.entry.toSlashedString())
                        .addParameter(DatarouterSnapshotEntryHandler.P_groupId, snapshotKey.groupId)
                        .addParameter(DatarouterSnapshotEntryHandler.P_snapshotId, snapshotKey.snapshotId)
                        .addParameter(DatarouterSnapshotEntryHandler.P_id, Long.toString(row.id))
                        .toString();
                return td(a("view").withHref(href));
            })
            .build(rows);
    return table;
}
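A minimal paging helper distilled from the handler above might look like the following sketch. The constructor arguments (snapshot key, executor, thread count, block loader, blocks per read) are taken from this usage; the method name pageRecords, the parameter types, and the omission of imports are illustrative assumptions rather than the project's actual API.

// Sketch only: page through a snapshot's records with skip/limit, mirroring buildTable above.
private List<SnapshotRecord> pageRecords(
        SnapshotKey snapshotKey,
        ExecutorService exec,        // executor type assumed; the handler's `exec` field may be a project-specific wrapper
        BlockLoader blockLoader,     // the handler passes its `groups` registry in this position
        long offset,
        long limit) {
    var reader = new ScanningSnapshotReader(snapshotKey, exec, 2, blockLoader, 1);
    return reader.scan(0)      // scan full records starting at record id 0
            .skip(offset)      // skip to the requested page
            .limit(limit)      // cap the page size
            .list();
}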
Class SnapshotMerger, method combineSnapshots:
private SnapshotWriteResult combineSnapshots(List<SnapshotKey> keys, SnapshotGroup outputGroup) {
    SnapshotWriteResult result = Scanner.of(keys)
            .map(key -> new ScanningSnapshotReader(key, readExec, 10, mergeGroup, scanNumBlocks))
            .collate(reader -> reader.scanLeafRecords(0), SnapshotLeafRecord.KEY_COMPARATOR)
            .deduplicateConsecutiveBy(leafRecord -> leafRecord.key, Arrays::equals)
            .map(SnapshotLeafRecord::entry)
            .batch(10_000)
            .apply(batches -> outputGroup.writeOps().write(writerConfig, batches, writeExec, shouldStop));
    keys.forEach(key -> mergeGroup.deleteOps().deleteSnapshot(key, writeExec, 10));
    logger.warn("combined {}, {}", keys.size(), keys);
    return result;
}
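The read side of this merge is a k-way collation of the already key-sorted snapshots followed by deduplication of consecutive equal keys. A sketch of just that portion, with an illustrative method signature and assumed parameter types, could read:

// Sketch only: merge several snapshots' leaf records into one sorted, key-deduplicated scanner,
// mirroring the read side of combineSnapshots above (the write and delete steps are omitted).
private Scanner<SnapshotLeafRecord> mergedLeafRecords(
        List<SnapshotKey> keys,
        ExecutorService readExec,    // executor type assumed
        BlockLoader blockLoader,     // mergeGroup serves this role above
        int scanNumBlocks) {
    return Scanner.of(keys)
            // one reader per input snapshot; each scanLeafRecords(0) is already sorted by key
            .map(key -> new ScanningSnapshotReader(key, readExec, 10, blockLoader, scanNumBlocks))
            .collate(reader -> reader.scanLeafRecords(0), SnapshotLeafRecord.KEY_COMPARATOR)
            // the same key may appear in multiple snapshots; keep only the first of each run
            .deduplicateConsecutiveBy(leafRecord -> leafRecord.key, Arrays::equals);
}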
Class BaseSnapshotTests, method testScan:
@Test
public void testScan() {
    if (!ENABLED_TESTS.contains(TestId.SCAN)) {
        return;
    }
    BlockLoader blockLoader = makeBlockLoader(useMemoryCache(), shareMemoryCache());
    var reader = new ScanningSnapshotReader(snapshotKey, exec, getNumThreads(), blockLoader, SCAN_NUM_BLOCKS);
    List<SnapshotRecord> outputs = reader.scan(0).list();
    Assert.assertEquals(outputs.size(), sortedInputs.size());
    for (int i = 0; i < sortedInputs.size(); ++i) {
        Input input = sortedInputs.get(i);
        SnapshotRecord output = outputs.get(i);
        Assert.assertEquals(i, output.id);
        for (int column = 0; column < input.entry.columnValues.length; ++column) {
            if (!SnapshotEntry.equalColumnValue(input.entry, output.entry(), column)) {
                String message = String.format(
                        "%s, actual=%s, expected=%s",
                        i,
                        utf8(output.columnValues[column]),
                        utf8(input.entry.columnValues[column]));
                throw new RuntimeException(message);
            }
        }
    }
}
Class BaseSnapshotTests, method testSearches:
@Test
public void testSearches() {
    if (!ENABLED_TESTS.contains(TestId.SEARCHES)) {
        return;
    }
    BlockLoader blockLoader = makeBlockLoader(useMemoryCache(), shareMemoryCache());
    var reader = new ScanningSnapshotReader(snapshotKey, exec, getNumThreads(), blockLoader, SCAN_NUM_BLOCKS);
    int step = 1000;
    int limit = 1000;
    Scanner.iterate(0, fromId -> fromId += step)
            .advanceWhile(fromId -> fromId < sortedInputs.size() - limit)
            .parallel(new ParallelScannerContext(scanExec, getNumThreads(), true))
            .forEach(fromId -> {
                var idReader = new SnapshotIdReader(snapshotKey, blockLoader);

                // known first key inclusive
                byte[] searchKey = idReader.getRecord(fromId).key;
                List<SnapshotLeafRecord> outputsInclusive = reader.scanLeafRecords(searchKey, true)
                        .limit(limit)
                        .list();
                for (int i = 0; i < limit; ++i) {
                    Input input = sortedInputs.get(fromId + i);
                    SnapshotLeafRecord output = outputsInclusive.get(i);
                    Assert.assertEquals(fromId + i, output.id);
                    Assert.assertEquals(new Bytes(input.entry.key()), new Bytes(output.key));
                }

                // known first key exclusive
                List<SnapshotLeafRecord> outputsExclusive = reader.scanLeafRecords(searchKey, false)
                        .limit(limit)
                        .list();
                for (int i = 0; i < limit; ++i) {
                    // plus one because exclusive
                    Input input = sortedInputs.get(fromId + i + 1);
                    SnapshotLeafRecord output = outputsExclusive.get(i);
                    Assert.assertEquals(input.id, output.id);
                    Assert.assertEquals(new Bytes(input.entry.key()), new Bytes(output.key));
                }

                // fake first key (should act like exclusive)
                byte[] nonExistentKey = ByteTool.concat(searchKey, new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0 });
                List<SnapshotLeafRecord> outputsNonExistentKey = reader.scanLeafRecords(nonExistentKey, true)
                        .limit(limit)
                        .list();
                for (int i = 0; i < limit; ++i) {
                    // plus one because the first key didn't exist
                    Input input = sortedInputs.get(fromId + i + 1);
                    SnapshotLeafRecord output = outputsNonExistentKey.get(i);
                    Assert.assertEquals(input.id, output.id);
                    Assert.assertEquals(new Bytes(input.entry.key()), new Bytes(output.key));
                }
            });
}
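The test exercises the reader's seek semantics: scanLeafRecords(key, true) starts at the first record whose key is greater than or equal to the search key, scanLeafRecords(key, false) starts strictly after it, and a key that is not present behaves like an exclusive seek. A minimal helper capturing that usage (name and signature illustrative) might be:

// Sketch only: seek to a key and read up to `limit` leaf records from that point.
// `inclusive` selects >= (true) versus > (false) behavior, as exercised by testSearches above.
private List<SnapshotLeafRecord> readFromKey(
        ScanningSnapshotReader reader,
        byte[] searchKey,
        boolean inclusive,
        int limit) {
    return reader.scanLeafRecords(searchKey, inclusive)
            .limit(limit)
            .list();
}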
Class BaseSnapshotTests, method testScanValues:
@Test
public void testScanValues() {
    if (!ENABLED_TESTS.contains(TestId.SCAN_VALUES)) {
        return;
    }
    BlockLoader blockLoader = makeBlockLoader(useMemoryCache(), shareMemoryCache());
    var reader = new ScanningSnapshotReader(snapshotKey, exec, getNumThreads(), blockLoader, SCAN_NUM_BLOCKS);
    List<byte[]> actuals = reader.scanValues().list();
    Assert.assertEquals(actuals.size(), sortedInputs.size());
    for (int i = 0; i < sortedInputs.size(); ++i) {
        Input input = sortedInputs.get(i);
        Assert.assertEquals(input.entry.value(), actuals.get(i));
    }
}
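Taken together, the examples use three scan entry points on ScanningSnapshotReader; the short sketch below lists them side by side, assuming a reader constructed as in the tests above.

// Sketch only: the three scan flavors seen in these examples.
// `reader` is assumed to be built as in the tests:
// new ScanningSnapshotReader(snapshotKey, exec, numThreads, blockLoader, numBlocks).
void scanFlavors(ScanningSnapshotReader reader) {
    List<SnapshotRecord> records = reader.scan(0).list();               // full records, starting at id 0
    List<SnapshotLeafRecord> leaves = reader.scanLeafRecords(0).list(); // keys and ids, starting at id 0
    List<byte[]> values = reader.scanValues().list();                   // values only
}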