Use of org.apache.flink.runtime.memory.MemoryManager in project flink by apache.
From the class BinaryHashTableTest, the method testSparseProbeSpilling:
/*
 * Spills build records, so that probe records are also spilled. But only so
 * few probe records are used that some partitions remain empty.
 */
@Test
public void testSparseProbeSpilling() throws IOException, MemoryAllocationException {
    final int numBuildKeys = 1000000;
    final int numBuildVals = 1;
    final int numProbeKeys = 20;
    final int numProbeVals = 1;

    MutableObjectIterator<BinaryRowData> buildInput =
            new UniformBinaryRowGenerator(numBuildKeys, numBuildVals, false);
    MemoryManager memManager =
            MemoryManagerBuilder.newBuilder().setMemorySize(128 * PAGE_SIZE).build();
    final BinaryHashTable table =
            newBinaryHashTable(
                    this.buildSideSerializer,
                    this.probeSideSerializer,
                    new MyProjection(),
                    new MyProjection(),
                    memManager,
                    100 * PAGE_SIZE,
                    ioManager);

    int expectedNumResults =
            (Math.min(numProbeKeys, numBuildKeys) * numBuildVals) * numProbeVals;

    int numRecordsInJoinResult =
            join(table, buildInput, new UniformBinaryRowGenerator(numProbeKeys, numProbeVals, true));
    Assert.assertEquals(
            "Wrong number of records in join result.", expectedNumResults, numRecordsInJoinResult);

    table.close();
    table.free();
}
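All of these excerpts call a newBinaryHashTable helper that the aggregation does not include. A minimal sketch of its plausible shape, assuming it simply forwards to the full BinaryHashTable constructor that testRepeatBuildJoinWithSpill and testBucketsNotFulfillSegment below invoke directly; the 24, 200000, boolean, and null arguments are copied from those calls and are assumptions here:

// Hedged sketch: a convenience wrapper mirroring the explicit constructor calls
// that appear verbatim later in this file. Fixed argument values are assumptions.
private BinaryHashTable newBinaryHashTable(
        BinaryRowDataSerializer buildSideSerializer,
        BinaryRowDataSerializer probeSideSerializer,
        Projection<BinaryRowData, BinaryRowData> buildSideProjection,
        Projection<BinaryRowData, BinaryRowData> probeSideProjection,
        MemoryManager memManager,
        long memorySize,
        IOManager ioManager) {
    return new BinaryHashTable(
            conf,
            new Object(),
            buildSideSerializer,
            probeSideSerializer,
            buildSideProjection,
            probeSideProjection,
            memManager,
            memorySize,
            ioManager,
            24,                     // average record length, copied from the explicit calls
            200000,                 // expected build row count, copied from the explicit calls
            true,
            HashJoinType.INNER,
            null,
            false,
            new boolean[] {true},
            false);
}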
Use of org.apache.flink.runtime.memory.MemoryManager in project flink by apache.
From the class BinaryHashTableTest, the method testSpillingHashJoinOneRecursionValidity:
@Test
public void testSpillingHashJoinOneRecursionValidity() throws IOException {
    final int numKeys = 1000000;
    final int buildValsPerKey = 3;
    final int probeValsPerKey = 10;

    // create a build input that gives 3 million pairs with 3 values sharing the same key
    MutableObjectIterator<BinaryRowData> buildInput =
            new UniformBinaryRowGenerator(numKeys, buildValsPerKey, false);

    // create a probe input that gives 10 million pairs with 10 values sharing a key
    MutableObjectIterator<BinaryRowData> probeInput =
            new UniformBinaryRowGenerator(numKeys, probeValsPerKey, true);

    // create the map for validating the results
    HashMap<Integer, Long> map = new HashMap<>(numKeys);

    // ----------------------------------------------------------------------------------------

    MemoryManager memManager =
            MemoryManagerBuilder.newBuilder().setMemorySize(896 * PAGE_SIZE).build();
    final BinaryHashTable table =
            newBinaryHashTable(
                    this.buildSideSerializer,
                    this.probeSideSerializer,
                    new MyProjection(),
                    new MyProjection(),
                    memManager,
                    100 * PAGE_SIZE,
                    ioManager);

    final BinaryRowData recordReuse = new BinaryRowData(2);

    BinaryRowData buildRow = buildSideSerializer.createInstance();
    while ((buildRow = buildInput.next(buildRow)) != null) {
        table.putBuildRow(buildRow);
    }
    table.endBuild();

    BinaryRowData probeRow = probeSideSerializer.createInstance();
    while ((probeRow = probeInput.next(probeRow)) != null) {
        if (table.tryProbe(probeRow)) {
            testJoin(table, map);
        }
    }

    while (table.nextMatching()) {
        testJoin(table, map);
    }

    table.close();

    Assert.assertEquals("Wrong number of keys", numKeys, map.size());
    for (Map.Entry<Integer, Long> entry : map.entrySet()) {
        long val = entry.getValue();
        int key = entry.getKey();
        Assert.assertEquals(
                "Wrong number of values in per-key cross product for key " + key,
                probeValsPerKey * buildValsPerKey,
                val);
    }

    // ----------------------------------------------------------------------------------------

    table.free();
}
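The testJoin validation helper used above is also not part of this excerpt. A hedged sketch of what it plausibly does, assuming the table exposes the build-side rows matching the current probe row through a getBuildSideIterator() with an advanceNext()/getRow() RowIterator API (both names are assumptions here), and that the join key sits in field 0:

// Hedged sketch of testJoin: counts the build-side rows joining the current
// probe row and accumulates a per-key total for the final cross-product check.
private void testJoin(BinaryHashTable table, HashMap<Integer, Long> map) throws IOException {
    BinaryRowData row = null;
    long numBuildValues = 0;

    // count every build-side row that joins with the current probe row
    RowIterator<BinaryRowData> buildSide = table.getBuildSideIterator();
    while (buildSide.advanceNext()) {
        numBuildValues++;
        row = buildSide.getRow();
    }
    Assert.assertNotNull("No build side values found for a probe key.", row);

    // accumulate the per-key match count
    int key = row.getInt(0);
    Long previous = map.get(key);
    map.put(key, previous == null ? numBuildValues : previous + numBuildValues);
}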
Use of org.apache.flink.runtime.memory.MemoryManager in project flink by apache.
From the class BinaryHashTableTest, the method testRepeatBuildJoinWithSpill:
@Test
public void testRepeatBuildJoinWithSpill() throws Exception {
    final int numKeys = 30000;
    final int numRows = 300000;
    final int probeValsPerKey = 1;

    MutableObjectIterator<BinaryRowData> buildInput =
            new MutableObjectIterator<BinaryRowData>() {

                int cnt = 0;

                @Override
                public BinaryRowData next(BinaryRowData reuse) throws IOException {
                    return next();
                }

                @Override
                public BinaryRowData next() throws IOException {
                    cnt++;
                    if (cnt > numRows) {
                        return null;
                    }
                    int value = cnt % numKeys;
                    BinaryRowData row = new BinaryRowData(2);
                    BinaryRowWriter writer = new BinaryRowWriter(row);
                    writer.writeInt(0, value);
                    writer.writeInt(1, value);
                    writer.complete();
                    return row;
                }
            };

    MemoryManager memManager =
            MemoryManagerBuilder.newBuilder().setMemorySize(35 * PAGE_SIZE).build();
    MutableObjectIterator<BinaryRowData> probeInput =
            new UniformBinaryRowGenerator(numKeys, probeValsPerKey, true);
    final BinaryHashTable table =
            new BinaryHashTable(
                    conf,
                    new Object(),
                    buildSideSerializer,
                    probeSideSerializer,
                    new MyProjection(),
                    new MyProjection(),
                    memManager,
                    35 * PAGE_SIZE,
                    ioManager,
                    24,
                    200000,
                    true,
                    HashJoinType.INNER,
                    null,
                    false,
                    new boolean[] {true},
                    true);

    int numRecordsInJoinResult = join(table, buildInput, probeInput, true);
    Assert.assertTrue("Wrong number of records in join result.", numRecordsInJoinResult < numRows);

    table.close();
    table.free();
}
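The join helper called here (and, with three arguments, by the other tests) is likewise omitted from the excerpt. A minimal sketch, assuming it follows exactly the build-then-probe pattern written out inline in testSpillingHashJoinOneRecursionValidity above; the countMatches helper is hypothetical and reuses the iterator pattern from the testJoin sketch, and the outer-join accounting behind the buildOuterJoin flag is elided:

// Hedged sketch of the join helper: feed all build rows, then probe, counting
// every build-side match. Outer-join bookkeeping is elided for brevity.
private int join(
        BinaryHashTable table,
        MutableObjectIterator<BinaryRowData> buildInput,
        MutableObjectIterator<BinaryRowData> probeInput) throws IOException {
    return join(table, buildInput, probeInput, false);
}

private int join(
        BinaryHashTable table,
        MutableObjectIterator<BinaryRowData> buildInput,
        MutableObjectIterator<BinaryRowData> probeInput,
        boolean buildOuterJoin) throws IOException {
    int count = 0;

    // build phase: insert every build-side row
    BinaryRowData buildRow = buildSideSerializer.createInstance();
    while ((buildRow = buildInput.next(buildRow)) != null) {
        table.putBuildRow(buildRow);
    }
    table.endBuild();

    // probe phase: count the matches for each probe row
    BinaryRowData probeRow = probeSideSerializer.createInstance();
    while ((probeRow = probeInput.next(probeRow)) != null) {
        if (table.tryProbe(probeRow)) {
            count += countMatches(table);
        }
    }

    // drain the partitions that were spilled and re-probed
    while (table.nextMatching()) {
        count += countMatches(table);
    }
    return count;
}

// Hypothetical helper: counts the build-side rows joining the current probe row.
private int countMatches(BinaryHashTable table) {
    int n = 0;
    RowIterator<BinaryRowData> buildSide = table.getBuildSideIterator();
    while (buildSide.advanceNext()) {
        n++;
    }
    return n;
}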
Use of org.apache.flink.runtime.memory.MemoryManager in project flink by apache.
From the class BinaryHashTableTest, the method testBinaryHashBucketAreaNotEnoughMem:
@Test
public void testBinaryHashBucketAreaNotEnoughMem() throws IOException {
    MemoryManager memManager =
            MemoryManagerBuilder.newBuilder().setMemorySize(35 * PAGE_SIZE).build();
    BinaryHashTable table =
            newBinaryHashTable(
                    this.buildSideSerializer,
                    this.probeSideSerializer,
                    new MyProjection(),
                    new MyProjection(),
                    memManager,
                    35 * PAGE_SIZE,
                    ioManager);

    BinaryHashBucketArea area = new BinaryHashBucketArea(table, 100, 1, false);
    for (int i = 0; i < 100000; i++) {
        area.insertToBucket(i, i, true);
    }
    area.freeMemory();
    table.close();
    Assert.assertEquals(35, table.getInternalPool().freePages());
}
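Every table in these tests is constructed with two MyProjection instances that the aggregation omits. A plausible sketch, assuming the projection extracts field 0 (the key column) into a reusable single-field row; the null handling is an assumption:

// Hedged sketch of MyProjection: projects the first int column of a row into a
// reusable one-field BinaryRowData that serves as the join key.
private static final class MyProjection implements Projection<BinaryRowData, BinaryRowData> {

    private final BinaryRowData innerRow = new BinaryRowData(1);
    private final BinaryRowWriter writer = new BinaryRowWriter(innerRow);

    @Override
    public BinaryRowData apply(BinaryRowData row) {
        writer.reset();
        if (row.isNullAt(0)) {
            writer.setNullAt(0);
        } else {
            writer.writeInt(0, row.getInt(0));
        }
        writer.complete();
        return innerRow;
    }
}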
Use of org.apache.flink.runtime.memory.MemoryManager in project flink by apache.
From the class BinaryHashTableTest, the method testBucketsNotFulfillSegment:
@Test
public void testBucketsNotFulfillSegment() throws Exception {
    final int numKeys = 10000;
    final int buildValsPerKey = 3;
    final int probeValsPerKey = 10;

    // create a build input that gives 30000 pairs with 3 values sharing the same key
    MutableObjectIterator<BinaryRowData> buildInput =
            new UniformBinaryRowGenerator(numKeys, buildValsPerKey, false);

    // create a probe input that gives 100000 pairs with 10 values sharing a key
    MutableObjectIterator<BinaryRowData> probeInput =
            new UniformBinaryRowGenerator(numKeys, probeValsPerKey, true);

    // allocate the memory for the HashTable
    MemoryManager memManager =
            MemoryManagerBuilder.newBuilder().setMemorySize(35 * PAGE_SIZE).build();

    // ----------------------------------------------------------------------------------------

    final BinaryHashTable table =
            new BinaryHashTable(
                    conf,
                    new Object(),
                    this.buildSideSerializer,
                    this.probeSideSerializer,
                    new MyProjection(),
                    new MyProjection(),
                    memManager,
                    35 * PAGE_SIZE,
                    ioManager,
                    24,
                    200000,
                    true,
                    HashJoinType.INNER,
                    null,
                    false,
                    new boolean[] {true},
                    false);

    // For FLINK-2545, the bucket data may not fill its whole buffer: a buffer may hold
    // 256 buckets while the hash table assigns only 250 buckets to it. The unused bytes
    // then contain arbitrary data, which can corrupt the hash table if they are not
    // skipped. To mock this, put invalid bucket data (partition=1, inMemory=true,
    // count=-1) at the end of every buffer.
    int totalPages = table.getInternalPool().freePages();
    for (int i = 0; i < totalPages; i++) {
        MemorySegment segment = table.getInternalPool().nextSegment();
        int newBucketOffset = segment.size() - 128;
        // initialize the header fields
        segment.put(newBucketOffset, (byte) 0);
        segment.put(newBucketOffset + 1, (byte) 0);
        segment.putShort(newBucketOffset + 2, (short) -1);
        segment.putLong(newBucketOffset + 4, ~0x0L);
        table.returnPage(segment);
    }

    int numRecordsInJoinResult = join(table, buildInput, probeInput);
    Assert.assertEquals(
            "Wrong number of records in join result.",
            numKeys * buildValsPerKey * probeValsPerKey,
            numRecordsInJoinResult);

    table.close();
    table.free();
}
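For reference, the excerpts above assume roughly the following imports. The paths match the Flink table runtime at the time these tests were written, but some of them (notably the test utilities MemoryManagerBuilder and UniformBinaryRowGenerator, and the Projection/RowIterator types used in the sketches above) live in test or internal scopes and may move between Flink versions:

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.runtime.io.disk.iomanager.IOManager;
import org.apache.flink.runtime.memory.MemoryAllocationException;
import org.apache.flink.runtime.memory.MemoryManager;
import org.apache.flink.runtime.memory.MemoryManagerBuilder;
import org.apache.flink.table.data.binary.BinaryRowData;
import org.apache.flink.table.data.writer.BinaryRowWriter;
import org.apache.flink.table.runtime.generated.Projection;
import org.apache.flink.table.runtime.hashtable.BinaryHashBucketArea;
import org.apache.flink.table.runtime.hashtable.BinaryHashTable;
import org.apache.flink.table.runtime.operators.join.HashJoinType;
import org.apache.flink.table.runtime.typeutils.BinaryRowDataSerializer;
import org.apache.flink.table.runtime.util.RowIterator;
import org.apache.flink.table.runtime.util.UniformBinaryRowGenerator;
import org.apache.flink.util.MutableObjectIterator;

import org.junit.Assert;
import org.junit.Test;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;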