Use of org.apache.flink.runtime.operators.testutils.types.IntPair in project flink by apache.
The class HashTableITCase, method testSpillingHashJoinOneRecursionPerformanceIntPair.
@Test
public void testSpillingHashJoinOneRecursionPerformanceIntPair() throws IOException {
    final int NUM_KEYS = 1000000;
    final int BUILD_VALS_PER_KEY = 3;
    final int PROBE_VALS_PER_KEY = 10;
    // create a build input that gives 3 million pairs with 3 values sharing the same key
    MutableObjectIterator<IntPair> buildInput = new UniformIntPairGenerator(NUM_KEYS, BUILD_VALS_PER_KEY, false);
    // create a probe input that gives 10 million pairs with 10 values sharing a key
    MutableObjectIterator<IntPair> probeInput = new UniformIntPairGenerator(NUM_KEYS, PROBE_VALS_PER_KEY, true);
    // allocate the memory for the HashTable
    List<MemorySegment> memSegments;
    try {
        memSegments = this.memManager.allocatePages(MEM_OWNER, 896);
    } catch (MemoryAllocationException maex) {
        fail("Memory for the Join could not be provided.");
        return;
    }
    // ----------------------------------------------------------------------------------------
    final MutableHashTable<IntPair, IntPair> join = new MutableHashTable<IntPair, IntPair>(
            this.pairBuildSideAccesssor, this.pairProbeSideAccesssor,
            this.pairBuildSideComparator, this.pairProbeSideComparator,
            this.pairComparator, memSegments, ioManager);
    join.open(buildInput, probeInput);
    final IntPair recordReuse = new IntPair();
    int numRecordsInJoinResult = 0;
    while (join.nextRecord()) {
        MutableObjectIterator<IntPair> buildSide = join.getBuildSideIterator();
        while (buildSide.next(recordReuse) != null) {
            numRecordsInJoinResult++;
        }
    }
    Assert.assertEquals("Wrong number of records in join result.",
            NUM_KEYS * BUILD_VALS_PER_KEY * PROBE_VALS_PER_KEY, numRecordsInJoinResult);
    join.close();
    // ----------------------------------------------------------------------------------------
    this.memManager.release(join.getFreedMemory());
}
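The probe loop above is the canonical way to consume a MutableHashTable: nextRecord() advances to the next probe-side record that has matches, and getBuildSideIterator() yields the matching build-side records. A minimal sketch of that pattern, factored into a hypothetical helper (not part of the test class), assuming only the MutableObjectIterator contract that next(reuse) returns null once the iterator is exhausted:

// Hypothetical helper distilling the probe loop above: for each probe record
// with matches, drain the build-side iterator and count the emitted join pairs.
static int countJoinResults(MutableHashTable<IntPair, IntPair> join) throws IOException {
    final IntPair reuse = new IntPair();
    int count = 0;
    while (join.nextRecord()) {
        MutableObjectIterator<IntPair> buildSide = join.getBuildSideIterator();
        while (buildSide.next(reuse) != null) {
            count++;
        }
    }
    return count;
}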
Use of org.apache.flink.runtime.operators.testutils.types.IntPair in project flink by apache.
The class CompactingHashTableTest, method testDoubleResize.
@Test
public void testDoubleResize() {
    // Only CompactingHashTable
    try {
        final int NUM_MEM_PAGES = 30 * NUM_PAIRS / PAGE_SIZE;
        final Random rnd = new Random(RANDOM_SEED);
        final IntPair[] pairs = getRandomizedIntPairs(NUM_PAIRS, rnd);
        List<MemorySegment> memory = getMemory(NUM_MEM_PAGES);
        CompactingHashTable<IntPair> table = new CompactingHashTable<IntPair>(intPairSerializer, intPairComparator, memory);
        table.open();
        for (int i = 0; i < NUM_PAIRS; i++) {
            table.insert(pairs[i]);
        }
        AbstractHashTableProber<IntPair, IntPair> prober =
                table.getProber(intPairComparator, new SameTypePairComparator<>(intPairComparator));
        IntPair target = new IntPair();
        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }
        // make sure there is enough memory for the resize
        memory.addAll(getMemory(ADDITIONAL_MEM));
        Boolean b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
        assertTrue(b);
        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }
        // make sure there is enough memory for the second resize
        memory.addAll(getMemory(ADDITIONAL_MEM));
        b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
        assertTrue(b);
        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }
        table.close();
        assertEquals("Memory lost", NUM_MEM_PAGES + ADDITIONAL_MEM + ADDITIONAL_MEM, table.getFreeMemory().size());
    } catch (Exception e) {
        e.printStackTrace();
        fail("Error: " + e.getMessage());
    }
}
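The getMemory(numPages) helper is not shown in this snippet. A plausible reconstruction (hypothetical; the real helper lives elsewhere in CompactingHashTableTest) allocates unpooled heap segments of PAGE_SIZE bytes via MemorySegmentFactory:

// Hypothetical reconstruction of the getMemory helper used by these tests:
// allocate numPages unpooled heap segments of PAGE_SIZE bytes each.
private static List<MemorySegment> getMemory(int numPages) {
    List<MemorySegment> segments = new ArrayList<>(numPages);
    for (int i = 0; i < numPages; i++) {
        segments.add(MemorySegmentFactory.allocateUnpooledSegment(PAGE_SIZE));
    }
    return segments;
}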
Use of org.apache.flink.runtime.operators.testutils.types.IntPair in project flink by apache.
The class CompactingHashTableTest, method testResize.
@Test
public void testResize() {
    // Only CompactingHashTable
    try {
        final int NUM_MEM_PAGES = 30 * NUM_PAIRS / PAGE_SIZE;
        final Random rnd = new Random(RANDOM_SEED);
        final IntPair[] pairs = getRandomizedIntPairs(NUM_PAIRS, rnd);
        List<MemorySegment> memory = getMemory(NUM_MEM_PAGES);
        CompactingHashTable<IntPair> table = new CompactingHashTable<IntPair>(intPairSerializer, intPairComparator, memory);
        table.open();
        for (int i = 0; i < NUM_PAIRS; i++) {
            table.insert(pairs[i]);
        }
        AbstractHashTableProber<IntPair, IntPair> prober =
                table.getProber(intPairComparator, new SameTypePairComparator<>(intPairComparator));
        IntPair target = new IntPair();
        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }
        // make sure there is enough memory for the resize
        memory.addAll(getMemory(ADDITIONAL_MEM));
        Boolean b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
        assertTrue(b);
        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }
        table.close();
        assertEquals("Memory lost", NUM_MEM_PAGES + ADDITIONAL_MEM, table.getFreeMemory().size());
    } catch (Exception e) {
        e.printStackTrace();
        fail("Error: " + e.getMessage());
    }
}
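The prober used in both resize tests is the general point-lookup API of CompactingHashTable: SameTypePairComparator adapts the single intPairComparator into the pair comparator the prober needs when probe-side and build-side records share a type. A minimal lookup sketch against an open table (the key 42 and the IntPair(key, value) constructor from the Flink test utilities are illustrative assumptions):

// Minimal point-lookup sketch: getMatchFor fills and returns the reuse
// object on a hit, or returns null when the key is absent.
AbstractHashTableProber<IntPair, IntPair> prober =
        table.getProber(intPairComparator, new SameTypePairComparator<>(intPairComparator));
IntPair reuse = new IntPair();
IntPair match = prober.getMatchFor(new IntPair(42, 0), reuse);
if (match != null) {
    System.out.println("key 42 maps to value " + match.getValue());
}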
Use of org.apache.flink.runtime.operators.testutils.types.IntPair in project flink by apache.
The class HashTableITCase, method validateSpillingDuringInsertionIntPair.
/*
 * This test validates a fix for a bug that caused memory loss when a partition was spilled
 * while a record was being inserted into that same partition.
 */
@Test
public void validateSpillingDuringInsertionIntPair() throws IOException, MemoryAllocationException {
    final int NUM_BUILD_KEYS = 500000;
    final int NUM_BUILD_VALS = 1;
    final int NUM_PROBE_KEYS = 10;
    final int NUM_PROBE_VALS = 1;
    MutableObjectIterator<IntPair> buildInput = new UniformIntPairGenerator(NUM_BUILD_KEYS, NUM_BUILD_VALS, false);
    // allocate the memory for the HashTable
    List<MemorySegment> memSegments;
    try {
        memSegments = this.memManager.allocatePages(MEM_OWNER, 85);
    } catch (MemoryAllocationException maex) {
        fail("Memory for the Join could not be provided.");
        return;
    }
    final MutableHashTable<IntPair, IntPair> join = new MutableHashTable<IntPair, IntPair>(
            this.pairBuildSideAccesssor, this.pairProbeSideAccesssor,
            this.pairBuildSideComparator, this.pairProbeSideComparator,
            this.pairComparator, memSegments, ioManager);
    join.open(buildInput, new UniformIntPairGenerator(NUM_PROBE_KEYS, NUM_PROBE_VALS, true));
    final IntPair recordReuse = new IntPair();
    int numRecordsInJoinResult = 0;
    int expectedNumResults = (Math.min(NUM_PROBE_KEYS, NUM_BUILD_KEYS) * NUM_BUILD_VALS) * NUM_PROBE_VALS;
    while (join.nextRecord()) {
        MutableObjectIterator<IntPair> buildSide = join.getBuildSideIterator();
        while (buildSide.next(recordReuse) != null) {
            numRecordsInJoinResult++;
        }
    }
    Assert.assertEquals("Wrong number of records in join result.", expectedNumResults, numRecordsInJoinResult);
    join.close();
    this.memManager.release(join.getFreedMemory());
}
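With only 85 pages of memory for 500,000 build-side records, the build side is guaranteed to spill, which is exactly the situation the fixed bug leaked pages in. A hypothetical stricter post-condition (not part of the original test, which only releases the freed memory) would mirror the "Memory lost" checks from CompactingHashTableTest and assert that every allocated page comes back:

// Hypothetical stricter check: after close(), all 85 pages handed to the
// table should be returned through getFreedMemory().
List<MemorySegment> freed = join.getFreedMemory();
Assert.assertEquals("Memory lost", 85, freed.size());
this.memManager.release(freed);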
Use of org.apache.flink.runtime.operators.testutils.types.IntPair in project flink by apache.
The class FixedLengthRecordSorterTest, method testReset.
@Test
public void testReset() throws Exception {
    final int numSegments = MEMORY_SIZE / MEMORY_PAGE_SIZE;
    final List<MemorySegment> memory = this.memoryManager.allocatePages(new DummyInvokable(), numSegments);
    FixedLengthRecordSorter<IntPair> sorter = newSortBuffer(memory);
    RandomIntPairGenerator generator = new RandomIntPairGenerator(SEED);
    // fill the buffer with the first sequence of records
    IntPair record = new IntPair();
    int num = -1;
    do {
        generator.next(record);
        num++;
    } while (sorter.write(record) && num < 3354624);
    sorter.reset();
    // write a second sequence of records; since the records are of fixed length,
    // an equal number must fit into the reset buffer
    generator.reset();
    int num2 = -1;
    do {
        generator.next(record);
        num2++;
    } while (sorter.write(record) && num2 < 3354624);
    Assert.assertEquals("The number of records written after the reset was not the same as before.", num, num2);
    // re-read the records and compare them against the regenerated sequence
    generator.reset();
    IntPair readTarget = new IntPair();
    int i = 0;
    while (i < num) {
        generator.next(record);
        readTarget = sorter.getRecord(readTarget, i++);
        int rk = readTarget.getKey();
        int gk = record.getKey();
        int rv = readTarget.getValue();
        int gv = record.getValue();
        Assert.assertEquals("The re-read key is wrong", gk, rk);
        Assert.assertEquals("The re-read value is wrong", gv, rv);
    }
    // release the memory occupied by the buffers
    sorter.dispose();
    this.memoryManager.release(memory);
}
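The newSortBuffer(memory) factory is also not shown. A plausible reconstruction (hypothetical; the actual helper in FixedLengthRecordSorterTest wires the test's own serializer and comparator fields) constructs the sorter over the given memory pool:

// Hypothetical reconstruction of the newSortBuffer helper: a
// FixedLengthRecordSorter over the test's IntPair serializer and comparator.
private FixedLengthRecordSorter<IntPair> newSortBuffer(List<MemorySegment> memory) {
    return new FixedLengthRecordSorter<IntPair>(this.serializer, this.comparator, memory);
}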