Example 16 with UniformBinaryRowGenerator

Use of org.apache.flink.table.runtime.util.UniformBinaryRowGenerator in project flink by apache.

From class BinaryHashTableTest, method testFailingHashJoinTooManyRecursions.

/*
     * This test is basically identical to the "testSpillingHashJoinWithMassiveCollisions" test, except that the number
     * of repeated values (causing bucket collisions) is large enough to make sure that their target partition no longer
     * fits into memory by itself and needs to be repartitioned in the recursion again.
     */
@Test
public void testFailingHashJoinTooManyRecursions() throws IOException {
    // the following two values are known to have a hash-code collision on the first recursion
    // level.
    // we use them to make sure one partition grows over-proportionally large
    final int repeatedValue1 = 40559;
    final int repeatedValue2 = 92882;
    final int repeatedValueCount = 3000000;
    final int numKeys = 1000000;
    final int buildValsPerKey = 3;
    final int probeValsPerKey = 10;
    // create a build input that gives 3 million pairs with 3 values sharing the same key, plus
    // 6 million pairs from the two heavily repeated (colliding) keys
    MutableObjectIterator<BinaryRowData> build1 = new UniformBinaryRowGenerator(numKeys, buildValsPerKey, false);
    MutableObjectIterator<BinaryRowData> build2 = new ConstantsKeyValuePairsIterator(repeatedValue1, 17, repeatedValueCount);
    MutableObjectIterator<BinaryRowData> build3 = new ConstantsKeyValuePairsIterator(repeatedValue2, 23, repeatedValueCount);
    List<MutableObjectIterator<BinaryRowData>> builds = new ArrayList<>();
    builds.add(build1);
    builds.add(build2);
    builds.add(build3);
    MutableObjectIterator<BinaryRowData> buildInput = new UnionIterator<>(builds);
    // create a probe input that gives 10 million pairs with 10 values sharing a key, plus
    // 6 million pairs from the two repeated keys
    MutableObjectIterator<BinaryRowData> probe1 = new UniformBinaryRowGenerator(numKeys, probeValsPerKey, true);
    MutableObjectIterator<BinaryRowData> probe2 = new ConstantsKeyValuePairsIterator(repeatedValue1, 17, repeatedValueCount);
    MutableObjectIterator<BinaryRowData> probe3 = new ConstantsKeyValuePairsIterator(repeatedValue2, 23, repeatedValueCount);
    List<MutableObjectIterator<BinaryRowData>> probes = new ArrayList<>();
    probes.add(probe1);
    probes.add(probe2);
    probes.add(probe3);
    MutableObjectIterator<BinaryRowData> probeInput = new UnionIterator<>(probes);
    // ----------------------------------------------------------------------------------------
    MemoryManager memManager = MemoryManagerBuilder.newBuilder().setMemorySize(896 * PAGE_SIZE).build();
    final BinaryHashTable table = newBinaryHashTable(this.buildSideSerializer, this.probeSideSerializer, new MyProjection(), new MyProjection(), memManager, 896 * PAGE_SIZE, ioManager);
    try {
        join(table, buildInput, probeInput);
        fail("Hash Join must have failed due to too many recursions.");
    } catch (Exception ex) {
        // expected
    }
    table.close();
    // ----------------------------------------------------------------------------------------
    table.free();
}
Also used : MutableObjectIterator(org.apache.flink.util.MutableObjectIterator) UnionIterator(org.apache.flink.runtime.operators.testutils.UnionIterator) ArrayList(java.util.ArrayList) MemoryManager(org.apache.flink.runtime.memory.MemoryManager) MemoryAllocationException(org.apache.flink.runtime.memory.MemoryAllocationException) IOException(java.io.IOException) BinaryRowData(org.apache.flink.table.data.binary.BinaryRowData) UniformBinaryRowGenerator(org.apache.flink.table.runtime.util.UniformBinaryRowGenerator) Test(org.junit.Test)
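
ConstantsKeyValuePairsIterator is a helper class defined inside BinaryHashTableTest and not shown above. As a rough orientation, a minimal stand-in with the same shape, assuming it only needs to emit the same (key, value) pair a fixed number of times as two-int BinaryRowData rows, could look like the following sketch (the name ConstantRowIterator and its body are illustrative, not the actual Flink source):

// Illustrative stand-in for a constant key/value pair source used as skewed
// build or probe input: emits "count" copies of the same two-int row, then null.
private static final class ConstantRowIterator implements MutableObjectIterator<BinaryRowData> {

    private final int key;
    private final int value;
    private int remaining;

    ConstantRowIterator(int key, int value, int count) {
        this.key = key;
        this.value = value;
        this.remaining = count;
    }

    @Override
    public BinaryRowData next(BinaryRowData reuse) {
        // The reuse object is ignored; a fresh row is produced each time.
        return next();
    }

    @Override
    public BinaryRowData next() {
        if (remaining-- <= 0) {
            return null;
        }
        BinaryRowData row = new BinaryRowData(2);
        BinaryRowWriter writer = new BinaryRowWriter(row);
        writer.writeInt(0, key);
        writer.writeInt(1, value);
        writer.complete();
        return row;
    }
}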

Example 17 with UniformBinaryRowGenerator

Use of org.apache.flink.table.runtime.util.UniformBinaryRowGenerator in project flink by apache.

From class LongHashTableTest, method validateSpillingDuringInsertion.

@Test
public void validateSpillingDuringInsertion() throws IOException, MemoryAllocationException {
    final int numBuildKeys = 500000;
    final int numBuildVals = 1;
    final int numProbeKeys = 10;
    final int numProbeVals = 1;
    MutableObjectIterator<BinaryRowData> buildInput = new UniformBinaryRowGenerator(numBuildKeys, numBuildVals, false);
    final MyHashTable table = new MyHashTable(85 * PAGE_SIZE);
    int expectedNumResults = (Math.min(numProbeKeys, numBuildKeys) * numBuildVals) * numProbeVals;
    int numRecordsInJoinResult = join(table, buildInput, new UniformBinaryRowGenerator(numProbeKeys, numProbeVals, true));
    Assert.assertEquals("Wrong number of records in join result.", expectedNumResults, numRecordsInJoinResult);
    table.close();
    table.free();
}
Also used : BinaryRowData(org.apache.flink.table.data.binary.BinaryRowData) UniformBinaryRowGenerator(org.apache.flink.table.runtime.util.UniformBinaryRowGenerator) Test(org.junit.Test)
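
The expected count of 10 follows from min(numProbeKeys, numBuildKeys) * numBuildVals * numProbeVals = min(10, 500000) * 1 * 1, which assumes that UniformBinaryRowGenerator emits exactly numKeys * valsPerKey (key, value) rows. Under that assumption, the generator's cardinality can be sanity-checked in isolation with a small sketch like this (inside a test method that declares throws IOException):

// Sketch: drain a generator and count the rows it produces. With (10, 1, true)
// this should report 10 if the generator emits numKeys * valsPerKey records.
MutableObjectIterator<BinaryRowData> gen = new UniformBinaryRowGenerator(10, 1, true);
BinaryRowData reuse = new BinaryRowData(2);
int count = 0;
while ((reuse = gen.next(reuse)) != null) {
    count++;
}
System.out.println("rows emitted: " + count);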

Example 18 with UniformBinaryRowGenerator

Use of org.apache.flink.table.runtime.util.UniformBinaryRowGenerator in project flink by apache.

From class BinaryHashTableTest, method testSpillingHashJoinWithMassiveCollisions.

@Test
public void testSpillingHashJoinWithMassiveCollisions() throws IOException {
    // the following two values are known to have a hash-code collision on the initial level.
    // we use them to make sure one partition grows over-proportionally large
    final int repeatedValue1 = 40559;
    final int repeatedValue2 = 92882;
    final int repeatedValueCountBuild = 200000;
    final int repeatedValueCountProbe = 5;
    final int numKeys = 1000000;
    final int buildValsPerKey = 3;
    final int probeValsPerKey = 10;
    // create a build input that gives 3 million pairs with 3 values sharing the same key, plus
    // 400k pairs with two colliding keys
    MutableObjectIterator<BinaryRowData> build1 = new UniformBinaryRowGenerator(numKeys, buildValsPerKey, false);
    MutableObjectIterator<BinaryRowData> build2 = new ConstantsKeyValuePairsIterator(repeatedValue1, 17, repeatedValueCountBuild);
    MutableObjectIterator<BinaryRowData> build3 = new ConstantsKeyValuePairsIterator(repeatedValue2, 23, repeatedValueCountBuild);
    List<MutableObjectIterator<BinaryRowData>> builds = new ArrayList<>();
    builds.add(build1);
    builds.add(build2);
    builds.add(build3);
    MutableObjectIterator<BinaryRowData> buildInput = new UnionIterator<>(builds);
    // create a probe input that gives 10 million pairs with 10 values sharing a key
    MutableObjectIterator<BinaryRowData> probe1 = new UniformBinaryRowGenerator(numKeys, probeValsPerKey, true);
    MutableObjectIterator<BinaryRowData> probe2 = new ConstantsKeyValuePairsIterator(repeatedValue1, 17, 5);
    MutableObjectIterator<BinaryRowData> probe3 = new ConstantsKeyValuePairsIterator(repeatedValue2, 23, 5);
    List<MutableObjectIterator<BinaryRowData>> probes = new ArrayList<>();
    probes.add(probe1);
    probes.add(probe2);
    probes.add(probe3);
    MutableObjectIterator<BinaryRowData> probeInput = new UnionIterator<>(probes);
    // create the map for validating the results
    HashMap<Integer, Long> map = new HashMap<>(numKeys);
    MemoryManager memManager = MemoryManagerBuilder.newBuilder().setMemorySize(896 * PAGE_SIZE).build();
    // ----------------------------------------------------------------------------------------
    final BinaryHashTable table = newBinaryHashTable(this.buildSideSerializer, this.probeSideSerializer, new MyProjection(), new MyProjection(), memManager, 896 * PAGE_SIZE, ioManager);
    final BinaryRowData recordReuse = new BinaryRowData(2);
    BinaryRowData buildRow = buildSideSerializer.createInstance();
    while ((buildRow = buildInput.next(buildRow)) != null) {
        table.putBuildRow(buildRow);
    }
    table.endBuild();
    BinaryRowData probeRow = probeSideSerializer.createInstance();
    while ((probeRow = probeInput.next(probeRow)) != null) {
        if (table.tryProbe(probeRow)) {
            testJoin(table, map);
        }
    }
    while (table.nextMatching()) {
        testJoin(table, map);
    }
    table.close();
    Assert.assertEquals("Wrong number of keys", numKeys, map.size());
    for (Map.Entry<Integer, Long> entry : map.entrySet()) {
        long val = entry.getValue();
        int key = entry.getKey();
        Assert.assertEquals("Wrong number of values in per-key cross product for key " + key, (key == repeatedValue1 || key == repeatedValue2) ? (probeValsPerKey + repeatedValueCountProbe) * (buildValsPerKey + repeatedValueCountBuild) : probeValsPerKey * buildValsPerKey, val);
    }
    // ----------------------------------------------------------------------------------------
    table.free();
}
Also used : MutableObjectIterator(org.apache.flink.util.MutableObjectIterator) UnionIterator(org.apache.flink.runtime.operators.testutils.UnionIterator) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) MemoryManager(org.apache.flink.runtime.memory.MemoryManager) BinaryRowData(org.apache.flink.table.data.binary.BinaryRowData) Map(java.util.Map) UniformBinaryRowGenerator(org.apache.flink.table.runtime.util.UniformBinaryRowGenerator) Test(org.junit.Test)
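
The per-key expectation in the final assertion loop can be spelled out with the constants above: the two repeated keys each pair (buildValsPerKey + repeatedValueCountBuild) build rows with (probeValsPerKey + repeatedValueCountProbe) probe rows, while every other key pairs 3 build rows with 10 probe rows. A worked version of that arithmetic:

// Expected per-key cross-product sizes checked by the assertion loop above.
long collidingKeyMatches =
        (long) (buildValsPerKey + repeatedValueCountBuild)
                * (probeValsPerKey + repeatedValueCountProbe); // (3 + 200000) * (10 + 5) = 3000045
long regularKeyMatches = (long) buildValsPerKey * probeValsPerKey; // 3 * 10 = 30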

Example 19 with UniformBinaryRowGenerator

Use of org.apache.flink.table.runtime.util.UniformBinaryRowGenerator in project flink by apache.

From class BinaryHashTableTest, method testRepeatBuildJoin.

@Test
public void testRepeatBuildJoin() throws Exception {
    final int numKeys = 500;
    final int probeValsPerKey = 1;
    MemoryManager memManager = MemoryManagerBuilder.newBuilder().setMemorySize(40 * PAGE_SIZE).build();
    MutableObjectIterator<BinaryRowData> buildInput = new MutableObjectIterator<BinaryRowData>() {

        int cnt = 0;

        @Override
        public BinaryRowData next(BinaryRowData reuse) throws IOException {
            return next();
        }

        @Override
        public BinaryRowData next() throws IOException {
            cnt++;
            if (cnt > numKeys) {
                return null;
            }
            BinaryRowData row = new BinaryRowData(2);
            BinaryRowWriter writer = new BinaryRowWriter(row);
            writer.writeInt(0, 1);
            writer.writeInt(1, 1);
            writer.complete();
            return row;
        }
    };
    MutableObjectIterator<BinaryRowData> probeInput = new UniformBinaryRowGenerator(numKeys, probeValsPerKey, true);
    final BinaryHashTable table =
            new BinaryHashTable(
                    conf,
                    new Object(),
                    buildSideSerializer,
                    probeSideSerializer,
                    new MyProjection(),
                    new MyProjection(),
                    memManager,
                    40 * PAGE_SIZE,
                    ioManager,
                    24,
                    200000,
                    true,
                    HashJoinType.INNER,
                    null,
                    false,
                    new boolean[] { true },
                    true);
    int numRecordsInJoinResult = join(table, buildInput, probeInput, true);
    Assert.assertEquals("Wrong number of records in join result.", 1, numRecordsInJoinResult);
    table.close();
    table.free();
}
Also used : MutableObjectIterator(org.apache.flink.util.MutableObjectIterator) BinaryRowData(org.apache.flink.table.data.binary.BinaryRowData) BinaryRowWriter(org.apache.flink.table.data.writer.BinaryRowWriter) MemoryManager(org.apache.flink.runtime.memory.MemoryManager) UniformBinaryRowGenerator(org.apache.flink.table.runtime.util.UniformBinaryRowGenerator) Test(org.junit.Test)
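
The anonymous build-side iterator above simply emits 500 identical (1, 1) rows. Using the hypothetical ConstantRowIterator sketched under Example 16, the same build input could be written as:

// Equivalent build input: numKeys (= 500) copies of the row (1, 1).
MutableObjectIterator<BinaryRowData> buildInput = new ConstantRowIterator(1, 1, numKeys);

Why only a single joined record is expected is not visible from the snippet alone; presumably the final boolean constructor argument lets the table treat identical build rows as one distinct row, but that reading is an inference from the test name rather than something shown here.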

Example 20 with UniformBinaryRowGenerator

Use of org.apache.flink.table.runtime.util.UniformBinaryRowGenerator in project flink by apache.

From class BinaryHashTableTest, method validateSpillingDuringInsertion.

/*
     * This test validates a bug fix for a memory leak that occurred when a partition was
     * spilled while records were still being inserted into that same partition.
     */
@Test
public void validateSpillingDuringInsertion() throws IOException, MemoryAllocationException {
    final int numBuildKeys = 500000;
    final int numBuildVals = 1;
    final int numProbeKeys = 10;
    final int numProbeVals = 1;
    MutableObjectIterator<BinaryRowData> buildInput = new UniformBinaryRowGenerator(numBuildKeys, numBuildVals, false);
    MemoryManager memManager = MemoryManagerBuilder.newBuilder().setMemorySize(85 * PAGE_SIZE).build();
    final BinaryHashTable table = newBinaryHashTable(this.buildSideSerializer, this.probeSideSerializer, new MyProjection(), new MyProjection(), memManager, 85 * PAGE_SIZE, ioManager);
    int expectedNumResults = (Math.min(numProbeKeys, numBuildKeys) * numBuildVals) * numProbeVals;
    int numRecordsInJoinResult = join(table, buildInput, new UniformBinaryRowGenerator(numProbeKeys, numProbeVals, true));
    Assert.assertEquals("Wrong number of records in join result.", expectedNumResults, numRecordsInJoinResult);
    table.close();
    table.free();
}
Also used : BinaryRowData(org.apache.flink.table.data.binary.BinaryRowData) MemoryManager(org.apache.flink.runtime.memory.MemoryManager) UniformBinaryRowGenerator(org.apache.flink.table.runtime.util.UniformBinaryRowGenerator) Test(org.junit.Test)
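
A rough back-of-the-envelope check of why insertion spills in this configuration, assuming the test's PAGE_SIZE is 32 KB and that a two-int BinaryRowData occupies about 24 bytes of fixed-length space (both values are assumptions, not shown in the snippet):

// The build side needs far more space than the table is given, so at least one
// partition must be spilled to disk while build rows are still being inserted.
long availableBytes = 85L * 32 * 1024;    // ~2.7 MiB handed to the hash table
long approxBuildBytes = 500_000L * 24;    // ~12 MB of two-int build rows
System.out.println(approxBuildBytes > availableBytes); // true -> spilling expected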

Aggregations

BinaryRowData (org.apache.flink.table.data.binary.BinaryRowData): 21
UniformBinaryRowGenerator (org.apache.flink.table.runtime.util.UniformBinaryRowGenerator): 21
Test (org.junit.Test): 21
MemoryManager (org.apache.flink.runtime.memory.MemoryManager): 14
MutableObjectIterator (org.apache.flink.util.MutableObjectIterator): 7
HashMap (java.util.HashMap): 6
Map (java.util.Map): 6
ArrayList (java.util.ArrayList): 5
UnionIterator (org.apache.flink.runtime.operators.testutils.UnionIterator): 5
StreamOperator (org.apache.flink.streaming.api.operators.StreamOperator): 2
BinaryRowWriter (org.apache.flink.table.data.writer.BinaryRowWriter): 2
IOException (java.io.IOException): 1
MemorySegment (org.apache.flink.core.memory.MemorySegment): 1
MemoryAllocationException (org.apache.flink.runtime.memory.MemoryAllocationException): 1