use of org.apache.flink.runtime.operators.testutils.UniformIntPairGenerator in project flink by apache.
the class HashTableITCase method testInMemoryReOpenWithSmallMemory.
/*
 * This test is the same as `testInMemoryReOpen()`, but with a different number of keys and pages.
 * It validates a fix for a MutableHashTable memory leak that occurred with small memory segments.
 */
@Test
public void testInMemoryReOpenWithSmallMemory() throws Exception {
    final int NUM_KEYS = 10000;
    final int BUILD_VALS_PER_KEY = 3;
    final int PROBE_VALS_PER_KEY = 10;

    // create a build input that gives 30000 pairs with 3 values sharing the same key
    MutableObjectIterator<IntPair> buildInput = new UniformIntPairGenerator(NUM_KEYS, BUILD_VALS_PER_KEY, false);

    // create a probe input that gives 100000 pairs with 10 values sharing a key
    MutableObjectIterator<IntPair> probeInput = new UniformIntPairGenerator(NUM_KEYS, PROBE_VALS_PER_KEY, true);

    // allocate the memory for the HashTable
    List<MemorySegment> memSegments;
    try {
        // 33 is the minimum number of pages required to perform the hash join on these inputs
        memSegments = this.memManager.allocatePages(MEM_OWNER, 33);
    } catch (MemoryAllocationException maex) {
        fail("Memory for the Join could not be provided.");
        return;
    }

    // ----------------------------------------------------------------------------------------

    final MutableHashTable<IntPair, IntPair> join = new MutableHashTable<IntPair, IntPair>(
        this.pairBuildSideAccesssor, this.pairProbeSideAccesssor,
        this.pairBuildSideComparator, this.pairProbeSideComparator, this.pairComparator,
        memSegments, ioManager);
    join.open(buildInput, probeInput);

    final IntPair recordReuse = new IntPair();
    int numRecordsInJoinResult = 0;

    while (join.nextRecord()) {
        MutableObjectIterator<IntPair> buildSide = join.getBuildSideIterator();
        while (buildSide.next(recordReuse) != null) {
            numRecordsInJoinResult++;
        }
    }
    Assert.assertEquals("Wrong number of records in join result.", NUM_KEYS * BUILD_VALS_PER_KEY * PROBE_VALS_PER_KEY, numRecordsInJoinResult);

    join.close();

    // ----------------------------------------------------------------------------------------

    // recreate the inputs

    // create a build input that gives 30000 pairs with 3 values sharing the same key
    buildInput = new UniformIntPairGenerator(NUM_KEYS, BUILD_VALS_PER_KEY, false);

    // create a probe input that gives 100000 pairs with 10 values sharing a key
    probeInput = new UniformIntPairGenerator(NUM_KEYS, PROBE_VALS_PER_KEY, true);

    join.open(buildInput, probeInput);

    numRecordsInJoinResult = 0;

    while (join.nextRecord()) {
        MutableObjectIterator<IntPair> buildSide = join.getBuildSideIterator();
        while (buildSide.next(recordReuse) != null) {
            numRecordsInJoinResult++;
        }
    }
    Assert.assertEquals("Wrong number of records in join result.", NUM_KEYS * BUILD_VALS_PER_KEY * PROBE_VALS_PER_KEY, numRecordsInJoinResult);

    join.close();

    // ----------------------------------------------------------------------------------------

    this.memManager.release(join.getFreedMemory());
}
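The pair counts cited in the comments above (30000 build pairs, 100000 probe pairs) follow directly from the generator's constructor arguments: numKeys * valsPerKey. A minimal standalone sketch of the same reuse-based MutableObjectIterator consumption pattern the tests use; the class name is hypothetical and the import paths are assumed from Flink's test utilities:

import org.apache.flink.runtime.operators.testutils.UniformIntPairGenerator;
import org.apache.flink.runtime.operators.testutils.types.IntPair;
import org.apache.flink.util.MutableObjectIterator;

public class UniformIntPairGeneratorSketch {
    public static void main(String[] args) throws Exception {
        // 10000 keys with 3 values per key should yield 10000 * 3 = 30000 pairs in total
        MutableObjectIterator<IntPair> gen = new UniformIntPairGenerator(10000, 3, false);
        IntPair reuse = new IntPair();
        int count = 0;
        while (gen.next(reuse) != null) {
            count++;
        }
        System.out.println("pairs generated: " + count); // expected: 30000
    }
}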
use of org.apache.flink.runtime.operators.testutils.UniformIntPairGenerator in project flink by apache.
the class HashTableITCase method testSparseProbeSpillingIntPair.
/*
 * Spills build records, so that probe records are also spilled. But so few probe
 * records are used that some partitions remain empty.
 */
@Test
public void testSparseProbeSpillingIntPair() throws IOException, MemoryAllocationException {
    final int NUM_BUILD_KEYS = 1000000;
    final int NUM_BUILD_VALS = 1;
    final int NUM_PROBE_KEYS = 20;
    final int NUM_PROBE_VALS = 1;

    MutableObjectIterator<IntPair> buildInput = new UniformIntPairGenerator(NUM_BUILD_KEYS, NUM_BUILD_VALS, false);

    // allocate the memory for the HashTable
    List<MemorySegment> memSegments;
    try {
        memSegments = this.memManager.allocatePages(MEM_OWNER, 128);
    } catch (MemoryAllocationException maex) {
        fail("Memory for the Join could not be provided.");
        return;
    }

    final MutableHashTable<IntPair, IntPair> join = new MutableHashTable<IntPair, IntPair>(
        this.pairBuildSideAccesssor, this.pairProbeSideAccesssor,
        this.pairBuildSideComparator, this.pairProbeSideComparator, this.pairComparator,
        memSegments, ioManager);
    join.open(buildInput, new UniformIntPairGenerator(NUM_PROBE_KEYS, NUM_PROBE_VALS, true));
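    // only min(20, 1000000) = 20 probe keys exist, each with a single build and probe value,
    // so only 20 results are expected even though the large build side forces spilling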
    int expectedNumResults = (Math.min(NUM_PROBE_KEYS, NUM_BUILD_KEYS) * NUM_BUILD_VALS) * NUM_PROBE_VALS;

    final IntPair recordReuse = new IntPair();
    int numRecordsInJoinResult = 0;

    while (join.nextRecord()) {
        MutableObjectIterator<IntPair> buildSide = join.getBuildSideIterator();
        while (buildSide.next(recordReuse) != null) {
            numRecordsInJoinResult++;
        }
    }
    Assert.assertEquals("Wrong number of records in join result.", expectedNumResults, numRecordsInJoinResult);

    join.close();
    this.memManager.release(join.getFreedMemory());
}
use of org.apache.flink.runtime.operators.testutils.UniformIntPairGenerator in project flink by apache.
the class HashTableITCase method testHashWithBuildSideOuterJoin1.
@Test
public void testHashWithBuildSideOuterJoin1() throws Exception {
    final int NUM_KEYS = 20000;
    final int BUILD_VALS_PER_KEY = 1;
    final int PROBE_VALS_PER_KEY = 1;

    // create a build input that gives 40000 pairs with 1 value sharing the same key
    MutableObjectIterator<IntPair> buildInput = new UniformIntPairGenerator(2 * NUM_KEYS, BUILD_VALS_PER_KEY, false);

    // create a probe input that gives 20000 pairs with 1 value sharing a key
    MutableObjectIterator<IntPair> probeInput = new UniformIntPairGenerator(NUM_KEYS, PROBE_VALS_PER_KEY, true);

    // allocate the memory for the HashTable
    List<MemorySegment> memSegments;
    try {
        // 33 is the minimum number of pages required to perform the hash join on these inputs
        memSegments = this.memManager.allocatePages(MEM_OWNER, 33);
    } catch (MemoryAllocationException maex) {
        fail("Memory for the Join could not be provided.");
        return;
    }

    // ----------------------------------------------------------------------------------------

    final MutableHashTable<IntPair, IntPair> join = new MutableHashTable<IntPair, IntPair>(
        this.pairBuildSideAccesssor, this.pairProbeSideAccesssor,
        this.pairBuildSideComparator, this.pairProbeSideComparator, this.pairComparator,
        memSegments, ioManager);
    join.open(buildInput, probeInput, true);

    final IntPair recordReuse = new IntPair();
    int numRecordsInJoinResult = 0;

    while (join.nextRecord()) {
        MutableObjectIterator<IntPair> buildSide = join.getBuildSideIterator();
        while (buildSide.next(recordReuse) != null) {
            numRecordsInJoinResult++;
        }
    }
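    // with the build-side outer join enabled (third argument to open), the NUM_KEYS matched build
    // records each produce one result and the remaining NUM_KEYS unmatched build records are also
    // emitted, giving 2 * NUM_KEYS results in total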
    Assert.assertEquals("Wrong number of records in join result.", 2 * NUM_KEYS * BUILD_VALS_PER_KEY * PROBE_VALS_PER_KEY, numRecordsInJoinResult);

    join.close();
    this.memManager.release(join.getFreedMemory());
}
use of org.apache.flink.runtime.operators.testutils.UniformIntPairGenerator in project flink by apache.
the class HashTableITCase method testSpillingHashJoinOneRecursionValidityIntPair.
@Test
public void testSpillingHashJoinOneRecursionValidityIntPair() throws IOException {
    final int NUM_KEYS = 1000000;
    final int BUILD_VALS_PER_KEY = 3;
    final int PROBE_VALS_PER_KEY = 10;

    // create a build input that gives 3 million pairs with 3 values sharing the same key
    MutableObjectIterator<IntPair> buildInput = new UniformIntPairGenerator(NUM_KEYS, BUILD_VALS_PER_KEY, false);

    // create a probe input that gives 10 million pairs with 10 values sharing a key
    MutableObjectIterator<IntPair> probeInput = new UniformIntPairGenerator(NUM_KEYS, PROBE_VALS_PER_KEY, true);

    // allocate the memory for the HashTable
    List<MemorySegment> memSegments;
    try {
        memSegments = this.memManager.allocatePages(MEM_OWNER, 896);
    } catch (MemoryAllocationException maex) {
        fail("Memory for the Join could not be provided.");
        return;
    }

    // create the I/O access for spilling
    IOManager ioManager = new IOManagerAsync();

    // create the map for validating the results
    HashMap<Integer, Long> map = new HashMap<Integer, Long>(NUM_KEYS);

    // ----------------------------------------------------------------------------------------

    final MutableHashTable<IntPair, IntPair> join = new MutableHashTable<IntPair, IntPair>(
        this.pairBuildSideAccesssor, this.pairProbeSideAccesssor,
        this.pairBuildSideComparator, this.pairProbeSideComparator, this.pairComparator,
        memSegments, ioManager);
    join.open(buildInput, probeInput);

    IntPair record;
    final IntPair recordReuse = new IntPair();

    while (join.nextRecord()) {
        int numBuildValues = 0;
        int key = 0;

        MutableObjectIterator<IntPair> buildSide = join.getBuildSideIterator();
        if ((record = buildSide.next(recordReuse)) != null) {
            numBuildValues = 1;
            key = record.getKey();
        } else {
            fail("No build side values found for a probe key.");
        }
        while ((record = buildSide.next(recordReuse)) != null) {
            numBuildValues++;
        }

        if (numBuildValues != 3) {
            fail("Other than 3 build values!!!");
        }

        IntPair pr = join.getCurrentProbeRecord();
        Assert.assertEquals("Probe-side key was different than build-side key.", key, pr.getKey());

        Long contained = map.get(key);
        if (contained == null) {
            contained = Long.valueOf(numBuildValues);
        } else {
            contained = Long.valueOf(contained.longValue() + (numBuildValues));
        }
        map.put(key, contained);
    }
    join.close();

    Assert.assertEquals("Wrong number of keys", NUM_KEYS, map.size());
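    // each key appears PROBE_VALS_PER_KEY times on the probe side and every occurrence re-iterates
    // the BUILD_VALS_PER_KEY build records, so the per-key total must be 10 * 3 = 30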
    for (Map.Entry<Integer, Long> entry : map.entrySet()) {
        long val = entry.getValue();
        int key = entry.getKey();
        Assert.assertEquals("Wrong number of values in per-key cross product for key " + key, PROBE_VALS_PER_KEY * BUILD_VALS_PER_KEY, val);
    }

    // ----------------------------------------------------------------------------------------

    this.memManager.release(join.getFreedMemory());
}
use of org.apache.flink.runtime.operators.testutils.UniformIntPairGenerator in project flink by apache.
the class ReusingHashJoinIteratorITCase method testBuildSecondWithMixedDataTypes.
@Test
public void testBuildSecondWithMixedDataTypes() {
    try {
        MutableObjectIterator<IntPair> input1 = new UniformIntPairGenerator(500, 40, false);

        final TestData.TupleGenerator generator2 = new TestData.TupleGenerator(SEED2, 500, 2048, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
        final TestData.TupleGeneratorIterator input2 = new TestData.TupleGeneratorIterator(generator2, INPUT_2_SIZE);

        // collect expected data
        final Map<Integer, Collection<TupleIntPairMatch>> expectedMatchesMap = joinIntPairs(collectIntPairData(input1), collectTupleData(input2));

        final FlatJoinFunction<IntPair, Tuple2<Integer, String>, Tuple2<Integer, String>> matcher = new TupleIntPairMatchRemovingMatcher(expectedMatchesMap);
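        // the removing matcher deletes every match it produces from expectedMatchesMap, so any
        // non-empty collection left at the end marks an expected match that was never produced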
        final Collector<Tuple2<Integer, String>> collector = new DiscardingOutputCollector<>();

        // reset the generators
        input1 = new UniformIntPairGenerator(500, 40, false);
        generator2.reset();
        input2.reset();

        // compare with iterator values
        ReusingBuildFirstHashJoinIterator<IntPair, Tuple2<Integer, String>, Tuple2<Integer, String>> iterator = new ReusingBuildFirstHashJoinIterator<>(
            input1, input2,
            this.pairSerializer, this.pairComparator,
            this.recordSerializer, this.record2Comparator, this.recordPairPairComparator,
            this.memoryManager, this.ioManager, this.parentTask, 1.0, false, false, true);

        iterator.open();

        while (iterator.callWithNextKey(matcher, collector));

        iterator.close();

        // assert that each expected match was seen
        for (Entry<Integer, Collection<TupleIntPairMatch>> entry : expectedMatchesMap.entrySet()) {
            if (!entry.getValue().isEmpty()) {
                Assert.fail("Collection for key " + entry.getKey() + " is not empty");
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail("An exception occurred during the test: " + e.getMessage());
    }
}