Use of io.datarouter.bytes.Bytes in the datarouter project by hotpads:
class DecodingBlockLoader, method leafRange.
@Override
public Scanner<LeafBlock> leafRange(LeafBlockRange range){
	// One storage read fetches the compressed bytes spanning the entire range.
	BlockKey rangeBlockKey = range.rangeBlockKey();
	byte[] compressedRangeBytes = snapshotBlockStorageReader.getLeafBlock(paths, rangeBlockKey);
	// Split the fetched span back into per-block slices, then decompress and decode each.
	// TODO let decompressors accept Bytes to avoid the toArray() mem copy
	return range.parse(compressedRangeBytes)
			.map(blockBytes -> blockDecoder.leaf(blockDecompressor.leaf(blockBytes.toArray())));
}
Use of io.datarouter.bytes.Bytes in the datarouter project by hotpads:
class BranchBlock, method findChildBlockIndex.
/**
 * Binary-search this block's sorted keys for the child to descend into.
 *
 * @param searchKey  raw key bytes to locate
 * @return the index of an exact match, otherwise the insertion point, i.e. the index of the
 *         first key greater than searchKey (may equal numRecords() when all keys are smaller)
 */
default int findChildBlockIndex(byte[] searchKey){
	var target = new Bytes(searchKey);
	int lo = 0;
	int hi = numRecords() - 1;
	while(lo <= hi){
		// unsigned shift keeps the midpoint correct even if lo + hi overflows int
		int mid = (lo + hi) >>> 1;
		int comparison = key(mid).compareTo(target);
		if(comparison == 0){
			return mid;
		}
		if(comparison < 0){
			lo = mid + 1;
		}else{
			hi = mid - 1;
		}
	}
	// not found: lo is the insertion point
	return lo;
}
Use of io.datarouter.bytes.Bytes in the datarouter project by hotpads:
class BaseSnapshotTests, method testSearches.
@Test
public void testSearches(){
	// Opt-in test; skip silently when SEARCHES is not enabled for this subclass.
	if(!ENABLED_TESTS.contains(TestId.SEARCHES)){
		return;
	}
	BlockLoader blockLoader = makeBlockLoader(useMemoryCache(), shareMemoryCache());
	var reader = new ScanningSnapshotReader(snapshotKey, exec, getNumThreads(), blockLoader, SCAN_NUM_BLOCKS);
	int step = 1000;
	int limit = 1000;
	// Probe every 1000th id, stopping `limit` short of the end so each probe can read a full
	// window of `limit` records. Probes run in parallel on scanExec.
	Scanner.iterate(0, fromId -> fromId += step)
			.advanceWhile(fromId -> fromId < sortedInputs.size() - limit)
			.parallel(new ParallelScannerContext(scanExec, getNumThreads(), true))
			.forEach(fromId -> {
				var idReader = new SnapshotIdReader(snapshotKey, blockLoader);
				// known first key inclusive: scan should start exactly at fromId
				byte[] searchKey = idReader.getRecord(fromId).key;
				List<SnapshotLeafRecord> outputsInclusive = reader.scanLeafRecords(searchKey, true).limit(limit).list();
				for(int i = 0; i < limit; ++i){
					Input input = sortedInputs.get(fromId + i);
					SnapshotLeafRecord output = outputsInclusive.get(i);
					Assert.assertEquals(fromId + i, output.id);
					Assert.assertEquals(new Bytes(input.entry.key()), new Bytes(output.key));
				}
				// known first key exclusive: scan should start at fromId + 1
				List<SnapshotLeafRecord> outputsExclusive = reader.scanLeafRecords(searchKey, false).limit(limit).list();
				for(int i = 0; i < limit; ++i){
					// plus one because exclusive
					Input input = sortedInputs.get(fromId + i + 1);
					SnapshotLeafRecord output = outputsExclusive.get(i);
					Assert.assertEquals(input.id, output.id);
					Assert.assertEquals(new Bytes(input.entry.key()), new Bytes(output.key));
				}
				// fake first key (should act like exclusive): appending zero bytes makes a key
				// strictly greater than searchKey but smaller than the next real key
				byte[] nonExistentKey = ByteTool.concat(searchKey, new byte[]{0, 0, 0, 0, 0, 0, 0, 0, 0});
				List<SnapshotLeafRecord> outputsNonExistentKey = reader.scanLeafRecords(nonExistentKey, true).limit(limit).list();
				for(int i = 0; i < limit; ++i){
					// plus one because the first key didn't exist
					Input input = sortedInputs.get(fromId + i + 1);
					SnapshotLeafRecord output = outputsNonExistentKey.get(i);
					Assert.assertEquals(input.id, output.id);
					Assert.assertEquals(new Bytes(input.entry.key()), new Bytes(output.key));
				}
			});
}
Use of io.datarouter.bytes.Bytes in the datarouter project by hotpads:
class FieldSetToolTests, method testGetConcatenatedValueBytes.
@Test
public void testGetConcatenatedValueBytes(){
	List<Field<?>> fields = List.of(
			new IntegerField(new IntegerFieldKey("a"), 55),
			new StringField(new StringFieldKey("b"), "abc"),
			new StringField(new StringFieldKey("c"), "xyz"));
	// expected encoded sizes for the three fields above — TODO confirm per-field byte
	// counts against FieldTool's concatenation scheme
	int expectedLengthUnterminated = 4 + 3 + 1 + 3;
	// the terminated form carries one extra trailing byte
	int expectedLengthTerminated = expectedLengthUnterminated + 1;
	var unterminated = new Bytes(FieldTool.getConcatenatedValueBytesUnterminated(fields));
	var terminated = new Bytes(FieldTool.getConcatenatedValueBytes(fields));
	Assert.assertEquals(unterminated.getLength(), expectedLengthUnterminated);
	Assert.assertEquals(terminated.getLength(), expectedLengthTerminated);
}
Use of io.datarouter.bytes.Bytes in the datarouter project by hotpads:
class BaseSnapshotTests, method testParialScans.
// NOTE(review): method name misspells "Partial" — consider renaming to testPartialScans
// (kept as-is here since the name is the test's public identifier).
@Test
public void testParialScans(){
	// Opt-in test; skip silently when PARTIAL_SCANS is not enabled for this subclass.
	if(!ENABLED_TESTS.contains(TestId.PARTIAL_SCANS)){
		return;
	}
	BlockLoader blockLoader = makeBlockLoader(useMemoryCache(), shareMemoryCache());
	var reader = new ScanningSnapshotReader(snapshotKey, exec, getNumThreads(), blockLoader, SCAN_NUM_BLOCKS);
	int step = 1000;
	int limit = 1000;
	// Start a scan at every 1000th id (in parallel on scanExec), stopping `limit` short of
	// the end so each scan can read a full window of `limit` records.
	Scanner.iterate(0, fromId -> fromId += step)
			.advanceWhile(fromId -> fromId < sortedInputs.size() - limit)
			.parallel(new ParallelScannerContext(scanExec, getNumThreads(), true))
			.forEach(fromId -> {
				var timer = new PhaseTimer(fromId + "");
				List<SnapshotRecord> outputs = reader.scan(fromId).limit(limit).list();
				timer.add("got " + outputs.size());
				for(int i = 0; i < limit; ++i){
					// each output must match the corresponding sorted input by id, key,
					// and every column value
					Input input = sortedInputs.get(fromId + i);
					SnapshotRecord output = outputs.get(i);
					Assert.assertEquals(fromId + i, output.id);
					Assert.assertEquals(new Bytes(input.entry.key()), new Bytes(output.key));
					for(int column = 0; column < input.entry.columnValues.length; ++column){
						if(!SnapshotEntry.equalColumnValue(input.entry, output.entry(), column)){
							String message = String.format("%s, actual=%s, expected=%s", i, utf8(output.columnValues[column]), utf8(input.entry.columnValues[column]));
							throw new RuntimeException(message);
						}
					}
				}
				timer.add("assert");
				logger.info("{}", timer);
			});
}
Aggregations