
Example 16 with DiscardingOutputCollector

Use of org.apache.flink.runtime.operators.testutils.DiscardingOutputCollector in project flink by apache.

From the class AbstractSortMergeOuterJoinIteratorITCase, method testOuterJoinWithHighNumberOfCommonKeys:

@SuppressWarnings({"unchecked", "rawtypes"})
protected void testOuterJoinWithHighNumberOfCommonKeys(OuterJoinType outerJoinType, int input1Size, int input1Duplicates, int input1ValueLength, float input1KeyDensity, int input2Size, int input2Duplicates, int input2ValueLength, float input2KeyDensity) {
    TypeSerializer<Tuple2<Integer, String>> serializer1 = new TupleSerializer<>((Class<Tuple2<Integer, String>>) (Class<?>) Tuple2.class, new TypeSerializer<?>[] { IntSerializer.INSTANCE, StringSerializer.INSTANCE });
    TypeSerializer<Tuple2<Integer, String>> serializer2 = new TupleSerializer<>((Class<Tuple2<Integer, String>>) (Class<?>) Tuple2.class, new TypeSerializer<?>[] { IntSerializer.INSTANCE, StringSerializer.INSTANCE });
    TypeComparator<Tuple2<Integer, String>> comparator1 = new TupleComparator<>(new int[] { 0 }, new TypeComparator<?>[] { new IntComparator(true) }, new TypeSerializer<?>[] { IntSerializer.INSTANCE });
    TypeComparator<Tuple2<Integer, String>> comparator2 = new TupleComparator<>(new int[] { 0 }, new TypeComparator<?>[] { new IntComparator(true) }, new TypeSerializer<?>[] { IntSerializer.INSTANCE });
    TypePairComparator<Tuple2<Integer, String>, Tuple2<Integer, String>> pairComparator = new GenericPairComparator<>(comparator1, comparator2);
    this.memoryManager = new MemoryManager(MEMORY_SIZE, 1);
    this.ioManager = new IOManagerAsync();
    final int DUPLICATE_KEY = 13;
    try {
        final TupleGenerator generator1 = new TupleGenerator(SEED1, 500, input1KeyDensity, input1ValueLength, KeyMode.SORTED_SPARSE, ValueMode.RANDOM_LENGTH, null);
        final TupleGenerator generator2 = new TupleGenerator(SEED2, 500, input2KeyDensity, input2ValueLength, KeyMode.SORTED_SPARSE, ValueMode.RANDOM_LENGTH, null);
        final TupleGeneratorIterator gen1Iter = new TupleGeneratorIterator(generator1, input1Size);
        final TupleGeneratorIterator gen2Iter = new TupleGeneratorIterator(generator2, input2Size);
        final TupleConstantValueIterator const1Iter = new TupleConstantValueIterator(DUPLICATE_KEY, "LEFT String for Duplicate Keys", input1Duplicates);
        final TupleConstantValueIterator const2Iter = new TupleConstantValueIterator(DUPLICATE_KEY, "RIGHT String for Duplicate Keys", input2Duplicates);
        final List<MutableObjectIterator<Tuple2<Integer, String>>> inList1 = new ArrayList<>();
        inList1.add(gen1Iter);
        inList1.add(const1Iter);
        final List<MutableObjectIterator<Tuple2<Integer, String>>> inList2 = new ArrayList<>();
        inList2.add(gen2Iter);
        inList2.add(const2Iter);
        MutableObjectIterator<Tuple2<Integer, String>> input1 = new MergeIterator<>(inList1, comparator1.duplicate());
        MutableObjectIterator<Tuple2<Integer, String>> input2 = new MergeIterator<>(inList2, comparator2.duplicate());
        // collect expected data
        final Map<Integer, Collection<Match>> expectedMatchesMap = joinValues(collectData(input1), collectData(input2), outerJoinType);
        // re-create the whole thing for actual processing
        // reset the generators and iterators
        generator1.reset();
        generator2.reset();
        const1Iter.reset();
        const2Iter.reset();
        gen1Iter.reset();
        gen2Iter.reset();
        inList1.clear();
        inList1.add(gen1Iter);
        inList1.add(const1Iter);
        inList2.clear();
        inList2.add(gen2Iter);
        inList2.add(const2Iter);
        input1 = new MergeIterator<>(inList1, comparator1.duplicate());
        input2 = new MergeIterator<>(inList2, comparator2.duplicate());
        final FlatJoinFunction<Tuple2<Integer, String>, Tuple2<Integer, String>, Tuple2<Integer, String>> joinFunction = new MatchRemovingJoiner(expectedMatchesMap);
        final Collector<Tuple2<Integer, String>> collector = new DiscardingOutputCollector<>();
        // we create this sort-merge iterator with little memory for the block-nested-loops fall-back to make sure it
        // needs to spill for the duplicate keys
        AbstractMergeOuterJoinIterator<Tuple2<Integer, String>, Tuple2<Integer, String>, Tuple2<Integer, String>> iterator = createOuterJoinIterator(outerJoinType, input1, input2, serializer1, comparator1, serializer2, comparator2, pairComparator, this.memoryManager, this.ioManager, PAGES_FOR_BNLJN, this.parentTask);
        iterator.open();
        while (iterator.callWithNextKey(joinFunction, collector)) ;
        iterator.close();
        // assert that each expected match was seen
        for (Entry<Integer, Collection<Match>> entry : expectedMatchesMap.entrySet()) {
            if (!entry.getValue().isEmpty()) {
                Assert.fail("Collection for key " + entry.getKey() + " is not empty");
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail("An exception occurred during the test: " + e.getMessage());
    }
}
Also used : MutableObjectIterator(org.apache.flink.util.MutableObjectIterator) ResettableMutableObjectIterator(org.apache.flink.runtime.util.ResettableMutableObjectIterator) ArrayList(java.util.ArrayList) IntComparator(org.apache.flink.api.common.typeutils.base.IntComparator) TupleComparator(org.apache.flink.api.java.typeutils.runtime.TupleComparator) MatchRemovingJoiner(org.apache.flink.runtime.operators.testutils.MatchRemovingJoiner) TupleSerializer(org.apache.flink.api.java.typeutils.runtime.TupleSerializer) IOManagerAsync(org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync) GenericPairComparator(org.apache.flink.api.common.typeutils.GenericPairComparator) TupleConstantValueIterator(org.apache.flink.runtime.operators.testutils.TestData.TupleConstantValueIterator) TupleGenerator(org.apache.flink.runtime.operators.testutils.TestData.TupleGenerator) MemoryManager(org.apache.flink.runtime.memory.MemoryManager) TupleGeneratorIterator(org.apache.flink.runtime.operators.testutils.TestData.TupleGeneratorIterator) DiscardingOutputCollector(org.apache.flink.runtime.operators.testutils.DiscardingOutputCollector) Tuple2(org.apache.flink.api.java.tuple.Tuple2) Collection(java.util.Collection)
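
Every example hands the iterator a DiscardingOutputCollector because only the join mechanics are under test, not the emitted records. Below is a minimal hedged sketch of that idea (an illustrative stand-in, not the Flink test utility itself), assuming only the org.apache.flink.util.Collector interface:

import org.apache.flink.util.Collector;

// Sketch: a Collector that silently drops every record, mirroring what
// DiscardingOutputCollector provides for tests that only exercise the join mechanics.
public class DropAllCollector<T> implements Collector<T> {

    @Override
    public void collect(T record) {
        // intentionally discard the record
    }

    @Override
    public void close() {
        // nothing to release
    }
}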

Example 17 with DiscardingOutputCollector

Use of org.apache.flink.runtime.operators.testutils.DiscardingOutputCollector in project flink by apache.

From the class HashVsSortMiniBenchmark, method testBuildSecond:

@Test
public void testBuildSecond() {
    try {
        TestData.TupleGenerator generator1 = new TestData.TupleGenerator(SEED1, INPUT_1_SIZE / 10, 100, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
        TestData.TupleGenerator generator2 = new TestData.TupleGenerator(SEED2, INPUT_2_SIZE, 100, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
        final TestData.TupleGeneratorIterator input1 = new TestData.TupleGeneratorIterator(generator1, INPUT_1_SIZE);
        final TestData.TupleGeneratorIterator input2 = new TestData.TupleGeneratorIterator(generator2, INPUT_2_SIZE);
        final FlatJoinFunction matcher = new NoOpMatcher();
        final Collector<Tuple2<Integer, String>> collector = new DiscardingOutputCollector<>();
        long start = System.nanoTime();
        // compare with iterator values
        ReusingBuildSecondHashJoinIterator<Tuple2<Integer, String>, Tuple2<Integer, String>, Tuple2<Integer, String>> iterator = new ReusingBuildSecondHashJoinIterator<>(input1, input2, this.serializer1.getSerializer(), this.comparator1, this.serializer2.getSerializer(), this.comparator2, this.pairComparator11, this.memoryManager, this.ioManager, this.parentTask, 1, false, false, true);
        iterator.open();
        while (iterator.callWithNextKey(matcher, collector)) ;
        iterator.close();
        long elapsed = System.nanoTime() - start;
        double msecs = elapsed / (1000.0 * 1000.0);
        System.out.println("Hash Build Second took " + msecs + " msecs.");
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail("An exception occurred during the test: " + e.getMessage());
    }
}
Also used : ReusingBuildSecondHashJoinIterator(org.apache.flink.runtime.operators.hash.ReusingBuildSecondHashJoinIterator) TestData(org.apache.flink.runtime.operators.testutils.TestData) FlatJoinFunction(org.apache.flink.api.common.functions.FlatJoinFunction) DiscardingOutputCollector(org.apache.flink.runtime.operators.testutils.DiscardingOutputCollector) Tuple2(org.apache.flink.api.java.tuple.Tuple2) Test(org.junit.Test)
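
The NoOpMatcher above is a test utility that joins without emitting anything, so the measurement covers only the build and probe phases of the hash join. A hedged sketch of such a do-nothing FlatJoinFunction (the class name below is illustrative, not the Flink test utility):

import org.apache.flink.api.common.functions.FlatJoinFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;

// Sketch: a join function that emits nothing, so the benchmark measures only the
// cost of building and probing, not of producing output records.
public class DoNothingJoin implements FlatJoinFunction<Tuple2<Integer, String>, Tuple2<Integer, String>, Tuple2<Integer, String>> {

    @Override
    public void join(Tuple2<Integer, String> first, Tuple2<Integer, String> second, Collector<Tuple2<Integer, String>> out) {
        // deliberately a no-op
    }
}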

Example 18 with DiscardingOutputCollector

Use of org.apache.flink.runtime.operators.testutils.DiscardingOutputCollector in project flink by apache.

From the class HashVsSortMiniBenchmark, method testSortBothMerge:

@Test
public void testSortBothMerge() {
    try {
        TestData.TupleGenerator generator1 = new TestData.TupleGenerator(SEED1, INPUT_1_SIZE / 10, 100, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
        TestData.TupleGenerator generator2 = new TestData.TupleGenerator(SEED2, INPUT_2_SIZE, 100, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
        final TestData.TupleGeneratorIterator input1 = new TestData.TupleGeneratorIterator(generator1, INPUT_1_SIZE);
        final TestData.TupleGeneratorIterator input2 = new TestData.TupleGeneratorIterator(generator2, INPUT_2_SIZE);
        final FlatJoinFunction matcher = new NoOpMatcher();
        final Collector<Tuple2<Integer, String>> collector = new DiscardingOutputCollector<>();
        long start = System.nanoTime();
        final UnilateralSortMerger<Tuple2<Integer, String>> sorter1 = new UnilateralSortMerger<>(this.memoryManager, this.ioManager, input1, this.parentTask, this.serializer1, this.comparator1.duplicate(), (double) MEMORY_FOR_SORTER / MEMORY_SIZE, 128, 0.8f, true, /* use large record handler */ true);
        final UnilateralSortMerger<Tuple2<Integer, String>> sorter2 = new UnilateralSortMerger<>(this.memoryManager, this.ioManager, input2, this.parentTask, this.serializer2, this.comparator2.duplicate(), (double) MEMORY_FOR_SORTER / MEMORY_SIZE, 128, 0.8f, true, /* use large record handler */ true);
        final MutableObjectIterator<Tuple2<Integer, String>> sortedInput1 = sorter1.getIterator();
        final MutableObjectIterator<Tuple2<Integer, String>> sortedInput2 = sorter2.getIterator();
        // compare with iterator values
        ReusingMergeInnerJoinIterator<Tuple2<Integer, String>, Tuple2<Integer, String>, Tuple2<Integer, String>> iterator = new ReusingMergeInnerJoinIterator<>(sortedInput1, sortedInput2, this.serializer1.getSerializer(), this.comparator1, this.serializer2.getSerializer(), this.comparator2, this.pairComparator11, this.memoryManager, this.ioManager, MEMORY_PAGES_FOR_MERGE, this.parentTask);
        iterator.open();
        while (iterator.callWithNextKey(matcher, collector)) ;
        iterator.close();
        sorter1.close();
        sorter2.close();
        long elapsed = System.nanoTime() - start;
        double msecs = elapsed / (1000.0 * 1000.0);
        System.out.println("Sort-Merge Took " + msecs + " msecs.");
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail("An exception occurred during the test: " + e.getMessage());
    }
}
Also used : TestData(org.apache.flink.runtime.operators.testutils.TestData) FlatJoinFunction(org.apache.flink.api.common.functions.FlatJoinFunction) ReusingMergeInnerJoinIterator(org.apache.flink.runtime.operators.sort.ReusingMergeInnerJoinIterator) DiscardingOutputCollector(org.apache.flink.runtime.operators.testutils.DiscardingOutputCollector) Tuple2(org.apache.flink.api.java.tuple.Tuple2) UnilateralSortMerger(org.apache.flink.runtime.operators.sort.UnilateralSortMerger) Test(org.junit.Test)
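
Both the hash-based and the sort-merge benchmarks drive the join through the same open / callWithNextKey / close sequence. The fragment below (not a complete program; iterator stands for any of the hash or merge join iterators used above) spells out what each call does:

// Sketch of the shared driving pattern (fragment, not a complete program):
iterator.open();                                        // acquire memory and prepare the inputs (build the hash table or open the sorted streams)
while (iterator.callWithNextKey(matcher, collector)) {  // join all record pairs for the next key and push results to the collector
    // the loop ends once callWithNextKey returns false, i.e. both inputs are exhausted
}
iterator.close();                                       // release memory segments and any temporary spill files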

Example 19 with DiscardingOutputCollector

Use of org.apache.flink.runtime.operators.testutils.DiscardingOutputCollector in project flink by apache.

From the class HashVsSortMiniBenchmark, method testBuildFirst:

@Test
public void testBuildFirst() {
    try {
        TestData.TupleGenerator generator1 = new TestData.TupleGenerator(SEED1, INPUT_1_SIZE / 10, 100, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
        TestData.TupleGenerator generator2 = new TestData.TupleGenerator(SEED2, INPUT_2_SIZE, 100, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
        final TestData.TupleGeneratorIterator input1 = new TestData.TupleGeneratorIterator(generator1, INPUT_1_SIZE);
        final TestData.TupleGeneratorIterator input2 = new TestData.TupleGeneratorIterator(generator2, INPUT_2_SIZE);
        final FlatJoinFunction matcher = new NoOpMatcher();
        final Collector<Tuple2<Integer, String>> collector = new DiscardingOutputCollector<>();
        long start = System.nanoTime();
        // compare with iterator values
        final ReusingBuildFirstHashJoinIterator<Tuple2<Integer, String>, Tuple2<Integer, String>, Tuple2<Integer, String>> iterator = new ReusingBuildFirstHashJoinIterator<>(input1, input2, this.serializer1.getSerializer(), this.comparator1, this.serializer2.getSerializer(), this.comparator2, this.pairComparator11, this.memoryManager, this.ioManager, this.parentTask, 1, false, false, true);
        iterator.open();
        while (iterator.callWithNextKey(matcher, collector)) ;
        iterator.close();
        long elapsed = System.nanoTime() - start;
        double msecs = elapsed / (1000.0 * 1000.0);
        System.out.println("Hash Build First Took " + msecs + " msecs.");
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail("An exception occurred during the test: " + e.getMessage());
    }
}
Also used : TestData(org.apache.flink.runtime.operators.testutils.TestData) FlatJoinFunction(org.apache.flink.api.common.functions.FlatJoinFunction) DiscardingOutputCollector(org.apache.flink.runtime.operators.testutils.DiscardingOutputCollector) Tuple2(org.apache.flink.api.java.tuple.Tuple2) ReusingBuildFirstHashJoinIterator(org.apache.flink.runtime.operators.hash.ReusingBuildFirstHashJoinIterator) Test(org.junit.Test)
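
All three benchmark methods time the join with System.nanoTime() and print the elapsed milliseconds. A small hedged sketch of that conversion (the class and method names are illustrative, not part of the Flink benchmark), using a floating-point divisor so fractional milliseconds are not truncated:

// Sketch: convert a nanosecond duration to milliseconds without integer truncation.
public final class BenchmarkTiming {

    static double nanosToMillis(long elapsedNanos) {
        // dividing by a double keeps the fractional milliseconds
        return elapsedNanos / 1_000_000.0;
    }

    public static void main(String[] args) throws InterruptedException {
        long start = System.nanoTime();
        Thread.sleep(5); // stands in for the join being measured
        long elapsed = System.nanoTime() - start;
        System.out.println("Took " + nanosToMillis(elapsed) + " msecs.");
    }
}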

Example 20 with DiscardingOutputCollector

Use of org.apache.flink.runtime.operators.testutils.DiscardingOutputCollector in project flink by apache.

From the class NonReusingSortMergeInnerJoinIteratorITCase, method testMergeWithHighNumberOfCommonKeys:

@Test
public void testMergeWithHighNumberOfCommonKeys() {
    // the size of the left and right inputs
    final int INPUT_1_SIZE = 200;
    final int INPUT_2_SIZE = 100;
    final int INPUT_1_DUPLICATES = 10;
    final int INPUT_2_DUPLICATES = 4000;
    final int DUPLICATE_KEY = 13;
    try {
        final TupleGenerator generator1 = new TupleGenerator(SEED1, 500, 4096, KeyMode.SORTED, ValueMode.RANDOM_LENGTH);
        final TupleGenerator generator2 = new TupleGenerator(SEED2, 500, 2048, KeyMode.SORTED, ValueMode.RANDOM_LENGTH);
        final TestData.TupleGeneratorIterator gen1Iter = new TestData.TupleGeneratorIterator(generator1, INPUT_1_SIZE);
        final TestData.TupleGeneratorIterator gen2Iter = new TestData.TupleGeneratorIterator(generator2, INPUT_2_SIZE);
        final TestData.TupleConstantValueIterator const1Iter = new TestData.TupleConstantValueIterator(DUPLICATE_KEY, "LEFT String for Duplicate Keys", INPUT_1_DUPLICATES);
        final TestData.TupleConstantValueIterator const2Iter = new TestData.TupleConstantValueIterator(DUPLICATE_KEY, "RIGHT String for Duplicate Keys", INPUT_2_DUPLICATES);
        final List<MutableObjectIterator<Tuple2<Integer, String>>> inList1 = new ArrayList<MutableObjectIterator<Tuple2<Integer, String>>>();
        inList1.add(gen1Iter);
        inList1.add(const1Iter);
        final List<MutableObjectIterator<Tuple2<Integer, String>>> inList2 = new ArrayList<MutableObjectIterator<Tuple2<Integer, String>>>();
        inList2.add(gen2Iter);
        inList2.add(const2Iter);
        MutableObjectIterator<Tuple2<Integer, String>> input1 = new MergeIterator<Tuple2<Integer, String>>(inList1, comparator1.duplicate());
        MutableObjectIterator<Tuple2<Integer, String>> input2 = new MergeIterator<Tuple2<Integer, String>>(inList2, comparator2.duplicate());
        // collect expected data
        final Map<Integer, Collection<Match>> expectedMatchesMap = matchValues(collectData(input1), collectData(input2));
        // re-create the whole thing for actual processing
        // reset the generators and iterators
        generator1.reset();
        generator2.reset();
        const1Iter.reset();
        const2Iter.reset();
        gen1Iter.reset();
        gen2Iter.reset();
        inList1.clear();
        inList1.add(gen1Iter);
        inList1.add(const1Iter);
        inList2.clear();
        inList2.add(gen2Iter);
        inList2.add(const2Iter);
        input1 = new MergeIterator<Tuple2<Integer, String>>(inList1, comparator1.duplicate());
        input2 = new MergeIterator<Tuple2<Integer, String>>(inList2, comparator2.duplicate());
        final FlatJoinFunction<Tuple2<Integer, String>, Tuple2<Integer, String>, Tuple2<Integer, String>> joinFunction = new MatchRemovingJoiner(expectedMatchesMap);
        final Collector<Tuple2<Integer, String>> collector = new DiscardingOutputCollector<Tuple2<Integer, String>>();
        // we create this sort-merge iterator with little memory for the block-nested-loops fall-back to make sure it
        // needs to spill for the duplicate keys
        NonReusingMergeInnerJoinIterator<Tuple2<Integer, String>, Tuple2<Integer, String>, Tuple2<Integer, String>> iterator = new NonReusingMergeInnerJoinIterator<Tuple2<Integer, String>, Tuple2<Integer, String>, Tuple2<Integer, String>>(input1, input2, this.serializer1, this.comparator1, this.serializer2, this.comparator2, this.pairComparator, this.memoryManager, this.ioManager, PAGES_FOR_BNLJN, this.parentTask);
        iterator.open();
        while (iterator.callWithNextKey(joinFunction, collector)) ;
        iterator.close();
        // assert that each expected match was seen
        for (Entry<Integer, Collection<Match>> entry : expectedMatchesMap.entrySet()) {
            if (!entry.getValue().isEmpty()) {
                Assert.fail("Collection for key " + entry.getKey() + " is not empty");
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail("An exception occurred during the test: " + e.getMessage());
    }
}
Also used : TestData(org.apache.flink.runtime.operators.testutils.TestData) MutableObjectIterator(org.apache.flink.util.MutableObjectIterator) ArrayList(java.util.ArrayList) MatchRemovingJoiner(org.apache.flink.runtime.operators.testutils.MatchRemovingJoiner) TupleGenerator(org.apache.flink.runtime.operators.testutils.TestData.TupleGenerator) DiscardingOutputCollector(org.apache.flink.runtime.operators.testutils.DiscardingOutputCollector) Tuple2(org.apache.flink.api.java.tuple.Tuple2) Collection(java.util.Collection) Test(org.junit.Test)
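
The correctness tests (Examples 16 and 20) verify the join by handing the iterator a MatchRemovingJoiner and then asserting that the expected-matches map has been drained. A hedged sketch of that verification idea with simplified value handling (the class name and the string encoding of a match are assumptions, not the Flink test utility):

import java.util.Collection;
import java.util.Map;
import org.apache.flink.api.common.functions.FlatJoinFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;
import org.junit.Assert;

// Sketch: every emitted (left, right) pair is removed from the expected set for its key;
// a pair that is not found is an unexpected match, and anything left over at the end
// (checked by the loop over expectedMatchesMap in the tests above) was never produced.
public class ExpectedMatchRemover implements FlatJoinFunction<Tuple2<Integer, String>, Tuple2<Integer, String>, Tuple2<Integer, String>> {

    private final Map<Integer, Collection<String>> expected; // key -> remaining "left|right" value pairs

    public ExpectedMatchRemover(Map<Integer, Collection<String>> expected) {
        this.expected = expected;
    }

    @Override
    public void join(Tuple2<Integer, String> left, Tuple2<Integer, String> right, Collector<Tuple2<Integer, String>> out) {
        // outer joins may pass null on the non-matching side
        Integer key = left != null ? left.f0 : right.f0;
        String pair = (left == null ? "null" : left.f1) + "|" + (right == null ? "null" : right.f1);
        Collection<String> remaining = expected.get(key);
        Assert.assertNotNull("Unexpected key " + key, remaining);
        Assert.assertTrue("Unexpected match " + pair + " for key " + key, remaining.remove(pair));
    }
}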

Aggregations

Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 34
DiscardingOutputCollector (org.apache.flink.runtime.operators.testutils.DiscardingOutputCollector): 34
Collection (java.util.Collection): 31
TestData (org.apache.flink.runtime.operators.testutils.TestData): 31
Test (org.junit.Test): 31
NullKeyFieldException (org.apache.flink.types.NullKeyFieldException): 24
TupleGenerator (org.apache.flink.runtime.operators.testutils.TestData.TupleGenerator): 17
FlatJoinFunction (org.apache.flink.api.common.functions.FlatJoinFunction): 15
ArrayList (java.util.ArrayList): 9
MutableObjectIterator (org.apache.flink.util.MutableObjectIterator): 7
MatchRemovingJoiner (org.apache.flink.runtime.operators.testutils.MatchRemovingJoiner): 5
UniformIntPairGenerator (org.apache.flink.runtime.operators.testutils.UniformIntPairGenerator): 4
UnionIterator (org.apache.flink.runtime.operators.testutils.UnionIterator): 4
IntPair (org.apache.flink.runtime.operators.testutils.types.IntPair): 4
Map (java.util.Map): 2
TupleMatch (org.apache.flink.runtime.operators.hash.NonReusingHashJoinIteratorITCase.TupleMatch): 2
TupleMatchRemovingJoin (org.apache.flink.runtime.operators.hash.NonReusingHashJoinIteratorITCase.TupleMatchRemovingJoin): 2
GenericPairComparator (org.apache.flink.api.common.typeutils.GenericPairComparator): 1
IntComparator (org.apache.flink.api.common.typeutils.base.IntComparator): 1
TupleComparator (org.apache.flink.api.java.typeutils.runtime.TupleComparator): 1