Example use of org.apache.datasketches.memory.WritableMemory in the druid-io/druid project: class MemoryOpenHashTableTest, method testInsertRepeatedKeys.
@Test
public void testInsertRepeatedKeys() {
  final MemoryOpenHashTable table = createTable(8, .7, Integer.BYTES, Integer.BYTES);
  final WritableMemory keyMemory = WritableMemory.allocate(Integer.BYTES);

  // Insert each of these keys three times; each visit adds the key itself into the
  // bucket's value, so key k ends up with value 3 * k.
  final int[] keys = { 0, 1, 2 };
  for (int i = 0; i < 3; i++) {
    for (int key : keys) {
      // Find bucket for key.
      keyMemory.putInt(0, key);
      int bucket = table.findBucket(HashTableUtils.hashMemory(keyMemory, 0, Integer.BYTES), keyMemory, 0);
      final boolean newBucket = bucket < 0;
      if (newBucket) {
        Assert.assertTrue(table.canInsertNewBucket());
        bucket = -(bucket + 1);
        table.initBucket(bucket, keyMemory, 0);
      }
      // Value offset is the same whether the bucket is new or existing; compute it once.
      final int valuePosition = table.bucketMemoryPosition(bucket) + table.bucketValueOffset();
      if (newBucket) {
        // Initialize to zero.
        table.memory().putInt(valuePosition, 0);
      }
      // Accumulate the key into the value.
      table.memory().putInt(valuePosition, table.memory().getInt(valuePosition) + key);
    }
  }

  // key 0 -> 0+0+0, key 1 -> 1+1+1, key 2 -> 2+2+2.
  final Map<ByteBuffer, ByteBuffer> expectedMap = new HashMap<>();
  expectedMap.put(expectedKey(0), expectedValue(0));
  expectedMap.put(expectedKey(1), expectedValue(3));
  expectedMap.put(expectedKey(2), expectedValue(6));
  assertEqualsMap(expectedMap, table);
}
Example use of org.apache.datasketches.memory.WritableMemory in the druid-io/druid project: class MemoryOpenHashTableTest, helper method findAndInitBucket.
/**
 * Looks up the bucket for {@code key} using {@link MemoryOpenHashTable#findBucket}, and when the key is not yet
 * present, populates the empty bucket via {@link MemoryOpenHashTable#initBucket}. The return value is exactly
 * what {@link MemoryOpenHashTable#findBucket} returned.
 */
private static int findAndInitBucket(final MemoryOpenHashTable table, final int key) {
  // Write the key at a nonzero offset so offset handling gets exercised too.
  final int keyOffset = 1;
  final WritableMemory keyMemory = WritableMemory.allocate(Integer.BYTES + 1);
  keyMemory.putInt(keyOffset, key);
  final int bucket = table.findBucket(HashTableUtils.hashMemory(keyMemory, keyOffset, Integer.BYTES), keyMemory, keyOffset);
  if (bucket >= 0) {
    // Key already present; nothing to initialize.
    return bucket;
  }
  // Negative result encodes the insertion point as -(bucket + 1).
  table.initBucket(-(bucket + 1), keyMemory, keyOffset);
  return bucket;
}
Example use of org.apache.datasketches.memory.WritableMemory in the druid-io/druid project: class HashVectorGrouperTest, method testGrowFourTimes.
@Test
public void testGrowFourTimes() {
final int maxVectorSize = 512;
final int keySize = 4;
final int aggSize = 8;
final WritableMemory keySpace = WritableMemory.allocate(keySize * maxVectorSize);
final AggregatorAdapters aggregatorAdapters = Mockito.mock(AggregatorAdapters.class);
Mockito.when(aggregatorAdapters.spaceNeeded()).thenReturn(aggSize);
int startingNumBuckets = 4;
int maxBuckets = 128;
final int bufferSize = (keySize + aggSize) * maxBuckets;
final ByteBuffer buffer = ByteBuffer.wrap(new byte[bufferSize]);
final HashVectorGrouper grouper = new HashVectorGrouper(Suppliers.ofInstance(buffer), keySize, aggregatorAdapters, maxBuckets, 0.f, startingNumBuckets);
grouper.initVectorized(maxVectorSize);
int tableStart = grouper.getTableStart();
// two keys should NOT cause the buffer to grow (table start stays put, per the assert below)
fillKeyspace(keySpace, maxVectorSize, 2);
AggregateResult result = grouper.aggregateVector(keySpace, 0, maxVectorSize);
Assert.assertTrue(result.isOk());
Assert.assertEquals(tableStart, grouper.getTableStart());
// 3rd key should cause buffer to grow
// buffer should grow to next size, but is not full
fillKeyspace(keySpace, maxVectorSize, 3);
result = grouper.aggregateVector(keySpace, 0, maxVectorSize);
Assert.assertTrue(result.isOk());
Assert.assertTrue(grouper.getTableStart() > tableStart);
tableStart = grouper.getTableStart();
// grow it again
fillKeyspace(keySpace, maxVectorSize, 6);
result = grouper.aggregateVector(keySpace, 0, maxVectorSize);
Assert.assertTrue(result.isOk());
Assert.assertTrue(grouper.getTableStart() > tableStart);
tableStart = grouper.getTableStart();
// more
fillKeyspace(keySpace, maxVectorSize, 14);
result = grouper.aggregateVector(keySpace, 0, maxVectorSize);
Assert.assertTrue(result.isOk());
Assert.assertTrue(grouper.getTableStart() > tableStart);
// final growth: table start of 0 indicates the table occupies the whole buffer
fillKeyspace(keySpace, maxVectorSize, 25);
result = grouper.aggregateVector(keySpace, 0, maxVectorSize);
Assert.assertTrue(result.isOk());
Assert.assertEquals(0, grouper.getTableStart());
}
Example use of org.apache.datasketches.memory.WritableMemory in the druid-io/druid project: class HashVectorGrouperTest, method testGrowOnce.
@Test
public void testGrowOnce() {
  // Small table (4 buckets) with a modest cap (16 buckets) so a single growth step reaches the maximum.
  final int maxVectorSize = 512;
  final int keySize = 4;
  final int aggSize = 8;
  final int startingNumBuckets = 4;
  final int maxBuckets = 16;

  final WritableMemory keySpace = WritableMemory.allocate(keySize * maxVectorSize);
  final AggregatorAdapters aggregatorAdapters = Mockito.mock(AggregatorAdapters.class);
  Mockito.when(aggregatorAdapters.spaceNeeded()).thenReturn(aggSize);

  final ByteBuffer buffer = ByteBuffer.wrap(new byte[(keySize + aggSize) * maxBuckets]);
  final HashVectorGrouper grouper = new HashVectorGrouper(Suppliers.ofInstance(buffer), keySize, aggregatorAdapters, maxBuckets, 0.f, startingNumBuckets);
  grouper.initVectorized(maxVectorSize);

  final int initialTableStart = grouper.getTableStart();

  // Two distinct keys fit in the starting table, so no growth is expected.
  fillKeyspace(keySpace, maxVectorSize, 2);
  AggregateResult aggregateResult = grouper.aggregateVector(keySpace, 0, maxVectorSize);
  Assert.assertTrue(aggregateResult.isOk());
  Assert.assertEquals(initialTableStart, grouper.getTableStart());

  // A third distinct key forces growth; the table jumps straight to maximum size,
  // indicated by a table start of zero.
  fillKeyspace(keySpace, maxVectorSize, 3);
  aggregateResult = grouper.aggregateVector(keySpace, 0, maxVectorSize);
  Assert.assertTrue(aggregateResult.isOk());
  Assert.assertEquals(0, grouper.getTableStart());
}
Example use of org.apache.datasketches.memory.WritableMemory in the druid-io/druid project: class HashVectorGrouperTest, method testGrowThreeTimes.
@Test
public void testGrowThreeTimes() {
final int maxVectorSize = 512;
final int keySize = 4;
final int aggSize = 8;
final WritableMemory keySpace = WritableMemory.allocate(keySize * maxVectorSize);
final AggregatorAdapters aggregatorAdapters = Mockito.mock(AggregatorAdapters.class);
Mockito.when(aggregatorAdapters.spaceNeeded()).thenReturn(aggSize);
int startingNumBuckets = 4;
int maxBuckets = 64;
final int bufferSize = (keySize + aggSize) * maxBuckets;
final ByteBuffer buffer = ByteBuffer.wrap(new byte[bufferSize]);
final HashVectorGrouper grouper = new HashVectorGrouper(Suppliers.ofInstance(buffer), keySize, aggregatorAdapters, maxBuckets, 0.f, startingNumBuckets);
grouper.initVectorized(maxVectorSize);
int tableStart = grouper.getTableStart();
// two keys should NOT cause the buffer to grow (table start stays put, per the assert below)
fillKeyspace(keySpace, maxVectorSize, 2);
AggregateResult result = grouper.aggregateVector(keySpace, 0, maxVectorSize);
Assert.assertTrue(result.isOk());
Assert.assertEquals(tableStart, grouper.getTableStart());
// 3rd key should cause buffer to grow
// buffer should grow to next size, but is not full
fillKeyspace(keySpace, maxVectorSize, 3);
result = grouper.aggregateVector(keySpace, 0, maxVectorSize);
Assert.assertTrue(result.isOk());
Assert.assertTrue(grouper.getTableStart() > tableStart);
tableStart = grouper.getTableStart();
// grow it again
fillKeyspace(keySpace, maxVectorSize, 6);
result = grouper.aggregateVector(keySpace, 0, maxVectorSize);
Assert.assertTrue(result.isOk());
Assert.assertTrue(grouper.getTableStart() > tableStart);
// final growth: table start of 0 indicates the table occupies the whole buffer
fillKeyspace(keySpace, maxVectorSize, 14);
result = grouper.aggregateVector(keySpace, 0, maxVectorSize);
Assert.assertTrue(result.isOk());
Assert.assertEquals(0, grouper.getTableStart());
}
Aggregations