Example 16 with MutableObjectIterator

Use of org.apache.flink.util.MutableObjectIterator in project flink by apache.

From the class IterationHeadTask, method run():

@Override
public void run() throws Exception {
    final String brokerKey = brokerKey();
    final int workerIndex = getEnvironment().getTaskInfo().getIndexOfThisSubtask();
    final boolean objectSolutionSet = config.isSolutionSetUnmanaged();
    // the solution set hash table; used only in the workset-iteration case
    CompactingHashTable<X> solutionSet = null;
    // object-map variant of the solution set, used when it is unmanaged
    JoinHashMap<X> solutionSetObjectMap = null;
    boolean waitForSolutionSetUpdate = config.getWaitForSolutionSetUpdate();
    boolean isWorksetIteration = config.getIsWorksetIteration();
    try {
        /* used for receiving the current iteration result from iteration tail */
        SuperstepKickoffLatch nextStepKickoff = new SuperstepKickoffLatch();
        SuperstepKickoffLatchBroker.instance().handIn(brokerKey, nextStepKickoff);
        BlockingBackChannel backChannel = initBackChannel();
        SuperstepBarrier barrier = initSuperstepBarrier();
        SolutionSetUpdateBarrier solutionSetUpdateBarrier = null;
        feedbackDataInput = config.getIterationHeadPartialSolutionOrWorksetInputIndex();
        feedbackTypeSerializer = this.getInputSerializer(feedbackDataInput);
        excludeFromReset(feedbackDataInput);
        int initialSolutionSetInput;
        if (isWorksetIteration) {
            initialSolutionSetInput = config.getIterationHeadSolutionSetInputIndex();
            solutionTypeSerializer = config.getSolutionSetSerializer(getUserCodeClassLoader());
            // setup the index for the solution set
            @SuppressWarnings("unchecked") MutableObjectIterator<X> solutionSetInput = (MutableObjectIterator<X>) createInputIterator(inputReaders[initialSolutionSetInput], solutionTypeSerializer);
            // read the initial solution set
            if (objectSolutionSet) {
                solutionSetObjectMap = initJoinHashMap();
                readInitialSolutionSet(solutionSetObjectMap, solutionSetInput);
                SolutionSetBroker.instance().handIn(brokerKey, solutionSetObjectMap);
            } else {
                solutionSet = initCompactingHashTable();
                readInitialSolutionSet(solutionSet, solutionSetInput);
                SolutionSetBroker.instance().handIn(brokerKey, solutionSet);
            }
            if (waitForSolutionSetUpdate) {
                solutionSetUpdateBarrier = new SolutionSetUpdateBarrier();
                SolutionSetUpdateBarrierBroker.instance().handIn(brokerKey, solutionSetUpdateBarrier);
            }
        } else {
            // bulk iteration case
            @SuppressWarnings("unchecked") TypeSerializerFactory<X> solSer = (TypeSerializerFactory<X>) feedbackTypeSerializer;
            solutionTypeSerializer = solSer;
            // if set in the bulk case, the iteration tail is a termination-criterion tail that must be awaited
            if (waitForSolutionSetUpdate) {
                solutionSetUpdateBarrier = new SolutionSetUpdateBarrier();
                SolutionSetUpdateBarrierBroker.instance().handIn(brokerKey, solutionSetUpdateBarrier);
            }
        }
        // instantiate all aggregators and register them at the iteration global registry
        RuntimeAggregatorRegistry aggregatorRegistry = new RuntimeAggregatorRegistry(config.getIterationAggregators(getUserCodeClassLoader()));
        IterationAggregatorBroker.instance().handIn(brokerKey, aggregatorRegistry);
        DataInputView superstepResult = null;
        while (this.running && !terminationRequested()) {
            if (log.isInfoEnabled()) {
                log.info(formatLogString("starting iteration [" + currentIteration() + "]"));
            }
            barrier.setup();
            if (waitForSolutionSetUpdate) {
                solutionSetUpdateBarrier.setup();
            }
            if (!inFirstIteration()) {
                feedBackSuperstepResult(superstepResult);
            }
            super.run();
            // signal to connected tasks that we are done with the superstep
            sendEndOfSuperstepToAllIterationOutputs();
            if (waitForSolutionSetUpdate) {
                solutionSetUpdateBarrier.waitForSolutionSetUpdate();
            }
            // blocking call to wait for the result
            superstepResult = backChannel.getReadEndAfterSuperstepEnded();
            if (log.isInfoEnabled()) {
                log.info(formatLogString("finishing iteration [" + currentIteration() + "]"));
            }
            sendEventToSync(new WorkerDoneEvent(workerIndex, aggregatorRegistry.getAllAggregators()));
            if (log.isInfoEnabled()) {
                log.info(formatLogString("waiting for other workers in iteration [" + currentIteration() + "]"));
            }
            barrier.waitForOtherWorkers();
            if (barrier.terminationSignaled()) {
                if (log.isInfoEnabled()) {
                    log.info(formatLogString("head received termination request in iteration [" + currentIteration() + "]"));
                }
                requestTermination();
                nextStepKickoff.signalTermination();
            } else {
                incrementIterationCounter();
                String[] globalAggregateNames = barrier.getAggregatorNames();
                Value[] globalAggregates = barrier.getAggregates();
                aggregatorRegistry.updateGlobalAggregatesAndReset(globalAggregateNames, globalAggregates);
                nextStepKickoff.triggerNextSuperstep();
            }
        }
        if (log.isInfoEnabled()) {
            log.info(formatLogString("streaming out final result after [" + currentIteration() + "] iterations"));
        }
        if (isWorksetIteration) {
            if (objectSolutionSet) {
                streamSolutionSetToFinalOutput(solutionSetObjectMap);
            } else {
                streamSolutionSetToFinalOutput(solutionSet);
            }
        } else {
            streamOutFinalOutputBulk(new InputViewIterator<X>(superstepResult, this.solutionTypeSerializer.getSerializer()));
        }
        this.finalOutputCollector.close();
    } finally {
        // make sure we unregister everything from the broker:
        // - backchannel
        // - aggregator registry
        // - solution set index
        IterationAggregatorBroker.instance().remove(brokerKey);
        BlockingBackChannelBroker.instance().remove(brokerKey);
        SuperstepKickoffLatchBroker.instance().remove(brokerKey);
        SolutionSetBroker.instance().remove(brokerKey);
        SolutionSetUpdateBarrierBroker.instance().remove(brokerKey);
        if (solutionSet != null) {
            solutionSet.close();
        }
    }
}
Also used : MutableObjectIterator(org.apache.flink.util.MutableObjectIterator) SolutionSetUpdateBarrier(org.apache.flink.runtime.iterative.concurrent.SolutionSetUpdateBarrier) DataInputView(org.apache.flink.core.memory.DataInputView) WorkerDoneEvent(org.apache.flink.runtime.iterative.event.WorkerDoneEvent) SuperstepKickoffLatch(org.apache.flink.runtime.iterative.concurrent.SuperstepKickoffLatch) Value(org.apache.flink.types.Value) BlockingBackChannel(org.apache.flink.runtime.iterative.concurrent.BlockingBackChannel) SuperstepBarrier(org.apache.flink.runtime.iterative.concurrent.SuperstepBarrier) TypeSerializerFactory(org.apache.flink.api.common.typeutils.TypeSerializerFactory)
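
All of the examples in this section consume records through MutableObjectIterator's two-method contract: next(reuse) may fill and return the passed object (or hand out a different one), next() always allocates, and both return null once the input is exhausted. The following minimal sketch adapts a plain List to that contract; the class name ListBackedIterator is hypothetical and not part of Flink.

import java.util.Iterator;
import java.util.List;
import org.apache.flink.util.MutableObjectIterator;

/** Minimal sketch: adapts a java.util.List to the MutableObjectIterator contract. */
public class ListBackedIterator<T> implements MutableObjectIterator<T> {

    private final Iterator<T> inner;

    public ListBackedIterator(List<T> data) {
        this.inner = data.iterator();
    }

    @Override
    public T next(T reuse) {
        // the contract permits ignoring 'reuse' and returning a different object
        return next();
    }

    @Override
    public T next() {
        return inner.hasNext() ? inner.next() : null;
    }
}

The canonical drain loop is then "while ((record = iter.next(record)) != null) { ... }", the same pattern readInitialSolutionSet relies on above.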

Example 17 with MutableObjectIterator

Use of org.apache.flink.util.MutableObjectIterator in project flink by apache.

From the class CompactingHashTableTest, method testHashTableGrowthWithInsertOrReplace():

/**
	 * Validates that records are not lost via "insertOrReplace()", as in bug [FLINK-2361].
	 *
	 * <p>This test has to be duplicated in InPlaceMutableHashTableTest and CompactingHashTableTest
	 * because the two tables have different constructor calls.
	 */
@Test
public void testHashTableGrowthWithInsertOrReplace() {
    try {
        final int numElements = 1000000;
        List<MemorySegment> memory = getMemory(10000, 32 * 1024);
        // We create a hash table that thinks the records are very large. That makes it initially
        // allocate a lot of memory for the partition buffers and start with a smaller hash table,
        // so that we trigger hash table growth early.
        CompactingHashTable<Tuple2<Long, String>> table = new CompactingHashTable<>(tuple2LongStringSerializer, tuple2LongStringComparator, memory, 10000);
        table.open();
        for (long i = 0; i < numElements; i++) {
            table.insertOrReplaceRecord(new Tuple2<Long, String>(i, String.valueOf(i)));
        }
        // make sure that all elements are contained via the entry iterator
        {
            BitSet bitSet = new BitSet(numElements);
            MutableObjectIterator<Tuple2<Long, String>> iter = table.getEntryIterator();
            Tuple2<Long, String> next;
            while ((next = iter.next()) != null) {
                assertNotNull(next.f0);
                assertNotNull(next.f1);
                assertEquals(next.f0.longValue(), Long.parseLong(next.f1));
                bitSet.set(next.f0.intValue());
            }
            assertEquals(numElements, bitSet.cardinality());
        }
        // make sure all entries are contained, via the prober
        {
            CompactingHashTable<Tuple2<Long, String>>.HashTableProber<Long> prober = table.getProber(probeComparator, pairComparator);
            for (long i = 0; i < numElements; i++) {
                assertNotNull(prober.getMatchFor(i));
                assertNull(prober.getMatchFor(i + numElements));
            }
        }
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used : MutableObjectIterator(org.apache.flink.util.MutableObjectIterator) BitSet(java.util.BitSet) MemorySegment(org.apache.flink.core.memory.MemorySegment) Tuple2(org.apache.flink.api.java.tuple.Tuple2) Test(org.junit.Test)
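
The getMemory(numSegments, segmentSize) helper called above is not shown in this excerpt. A plausible sketch, assuming Flink's MemorySegmentFactory (the actual helper in the test class may differ):

import java.util.ArrayList;
import java.util.List;
import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;

// plausible sketch of the helper: allocates plain unpooled heap segments for the table to manage
private static List<MemorySegment> getMemory(int numSegments, int segmentSize) {
    List<MemorySegment> memory = new ArrayList<>(numSegments);
    for (int i = 0; i < numSegments; i++) {
        memory.add(MemorySegmentFactory.allocateUnpooledSegment(segmentSize));
    }
    return memory;
}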

Example 18 with MutableObjectIterator

Use of org.apache.flink.util.MutableObjectIterator in project flink by apache.

From the class HashTableITCase, method testFailingHashJoinTooManyRecursions():

    /*
	 * This test is basically identical to the "testSpillingHashJoinWithMassiveCollisions" test, except that the number
	 * of repeated values (causing bucket collisions) is large enough that their target partition no longer
	 * fits into memory by itself and must be repartitioned again in the recursion.
	 */
@Test
public void testFailingHashJoinTooManyRecursions() throws IOException {
    // the following two values are known to have a hash-code collision on the first recursion level.
    // we use them to make sure one partition grows over-proportionally large
    final int REPEATED_VALUE_1 = 40559;
    final int REPEATED_VALUE_2 = 92882;
    final int REPEATED_VALUE_COUNT = 3000000;
    final int NUM_KEYS = 1000000;
    final int BUILD_VALS_PER_KEY = 3;
    final int PROBE_VALS_PER_KEY = 10;
    // create a build input of 3 million pairs (3 values per key), plus 6 million pairs spread over two heavily colliding keys
    MutableObjectIterator<Record> build1 = new UniformRecordGenerator(NUM_KEYS, BUILD_VALS_PER_KEY, false);
    MutableObjectIterator<Record> build2 = new ConstantsKeyValuePairsIterator(REPEATED_VALUE_1, 17, REPEATED_VALUE_COUNT);
    MutableObjectIterator<Record> build3 = new ConstantsKeyValuePairsIterator(REPEATED_VALUE_2, 23, REPEATED_VALUE_COUNT);
    List<MutableObjectIterator<Record>> builds = new ArrayList<MutableObjectIterator<Record>>();
    builds.add(build1);
    builds.add(build2);
    builds.add(build3);
    MutableObjectIterator<Record> buildInput = new UnionIterator<Record>(builds);
    // create a probe input of 10 million pairs (10 values per key), plus the same 6 million repeated pairs
    MutableObjectIterator<Record> probe1 = new UniformRecordGenerator(NUM_KEYS, PROBE_VALS_PER_KEY, true);
    MutableObjectIterator<Record> probe2 = new ConstantsKeyValuePairsIterator(REPEATED_VALUE_1, 17, REPEATED_VALUE_COUNT);
    MutableObjectIterator<Record> probe3 = new ConstantsKeyValuePairsIterator(REPEATED_VALUE_2, 23, REPEATED_VALUE_COUNT);
    List<MutableObjectIterator<Record>> probes = new ArrayList<MutableObjectIterator<Record>>();
    probes.add(probe1);
    probes.add(probe2);
    probes.add(probe3);
    MutableObjectIterator<Record> probeInput = new UnionIterator<Record>(probes);
    // allocate the memory for the HashTable
    List<MemorySegment> memSegments;
    try {
        memSegments = this.memManager.allocatePages(MEM_OWNER, 896);
    } catch (MemoryAllocationException maex) {
        fail("Memory for the Join could not be provided.");
        return;
    }
    // ----------------------------------------------------------------------------------------
    final MutableHashTable<Record, Record> join = new MutableHashTable<Record, Record>(this.recordBuildSideAccesssor, this.recordProbeSideAccesssor, this.recordBuildSideComparator, this.recordProbeSideComparator, this.pactRecordComparator, memSegments, ioManager);
    join.open(buildInput, probeInput);
    final Record recordReuse = new Record();
    try {
        while (join.nextRecord()) {
            MutableObjectIterator<Record> buildSide = join.getBuildSideIterator();
            if (buildSide.next(recordReuse) == null) {
                fail("No build side values found for a probe key.");
            }
            // drain the remaining build-side values for this key
            while (buildSide.next(recordReuse) != null) ;
        }
        fail("Hash Join must have failed due to too many recursions.");
    } catch (Exception ex) {
        // expected: the join must give up after too many recursion levels
    }
    join.close();
    // ----------------------------------------------------------------------------------------
    this.memManager.release(join.getFreedMemory());
}
Also used : MutableObjectIterator(org.apache.flink.util.MutableObjectIterator) UnionIterator(org.apache.flink.runtime.operators.testutils.UnionIterator) MemoryAllocationException(org.apache.flink.runtime.memory.MemoryAllocationException) ArrayList(java.util.ArrayList) MemorySegment(org.apache.flink.core.memory.MemorySegment) MemoryAllocationException(org.apache.flink.runtime.memory.MemoryAllocationException) NullKeyFieldException(org.apache.flink.types.NullKeyFieldException) IOException(java.io.IOException) Record(org.apache.flink.types.Record) UniformRecordGenerator(org.apache.flink.runtime.operators.testutils.UniformRecordGenerator) Test(org.junit.Test)
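
ConstantsKeyValuePairsIterator is a Flink test utility whose source is not shown here; conceptually it emits the same (key, value) Record a fixed number of times. A hedged sketch of an equivalent iterator follows (the name ConstantPairsIterator and the two-field record layout are assumptions):

import org.apache.flink.types.IntValue;
import org.apache.flink.types.Record;
import org.apache.flink.util.MutableObjectIterator;

/** Sketch: emits the same (key, value) pair 'count' times, then null. */
public class ConstantPairsIterator implements MutableObjectIterator<Record> {

    private final IntValue key;
    private final IntValue value;
    private long remaining;

    public ConstantPairsIterator(int key, int value, long count) {
        this.key = new IntValue(key);
        this.value = new IntValue(value);
        this.remaining = count;
    }

    @Override
    public Record next(Record reuse) {
        if (remaining-- <= 0) {
            return null;
        }
        // overwrite the reused record's two fields in place
        reuse.setField(0, key);
        reuse.setField(1, value);
        return reuse;
    }

    @Override
    public Record next() {
        return next(new Record(2));
    }
}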

Example 19 with MutableObjectIterator

Use of org.apache.flink.util.MutableObjectIterator in project flink by apache.

From the class HashTableRecordWidthCombinations, method main():

public static void main(String[] args) throws Exception {
    @SuppressWarnings("unchecked") final TypeSerializer<Tuple2<Long, byte[]>> buildSerializer = new TupleSerializer<Tuple2<Long, byte[]>>((Class<Tuple2<Long, byte[]>>) (Class<?>) Tuple2.class, new TypeSerializer<?>[] { LongSerializer.INSTANCE, BytePrimitiveArraySerializer.INSTANCE });
    final TypeSerializer<Long> probeSerializer = LongSerializer.INSTANCE;
    final TypeComparator<Tuple2<Long, byte[]>> buildComparator = new TupleComparator<Tuple2<Long, byte[]>>(new int[] { 0 }, new TypeComparator<?>[] { new LongComparator(true) }, new TypeSerializer<?>[] { LongSerializer.INSTANCE });
    final TypeComparator<Long> probeComparator = new LongComparator(true);
    final TypePairComparator<Long, Tuple2<Long, byte[]>> pairComparator = new TypePairComparator<Long, Tuple2<Long, byte[]>>() {

        private long ref;

        @Override
        public void setReference(Long reference) {
            ref = reference;
        }

        @Override
        public boolean equalToReference(Tuple2<Long, byte[]> candidate) {
            //noinspection UnnecessaryUnboxing
            return candidate.f0.longValue() == ref;
        }

        @Override
        public int compareToReference(Tuple2<Long, byte[]> candidate) {
            long x = ref;
            long y = candidate.f0;
            return (x < y) ? -1 : ((x == y) ? 0 : 1);
        }
    };
    final IOManager ioMan = new IOManagerAsync();
    try {
        final int pageSize = 32 * 1024;
        final int numSegments = 34;
        for (int num = 3400; num < 3550; num++) {
            final int numRecords = num;
            for (int recordLen = 270; recordLen < 320; recordLen++) {
                final byte[] payload = new byte[recordLen - 8 - 4];
                System.out.println("testing " + numRecords + " / " + recordLen);
                List<MemorySegment> memory = getMemory(numSegments, pageSize);
                // We create a hash table that thinks the records are very large. That makes it initially
                // allocate a lot of memory for the partition buffers and start with a smaller hash table,
                // so that we trigger hash table growth early.
                MutableHashTable<Tuple2<Long, byte[]>, Long> table = new MutableHashTable<>(buildSerializer, probeSerializer, buildComparator, probeComparator, pairComparator, memory, ioMan, 16, false);
                final MutableObjectIterator<Tuple2<Long, byte[]>> buildInput = new MutableObjectIterator<Tuple2<Long, byte[]>>() {

                    private int count = 0;

                    @Override
                    public Tuple2<Long, byte[]> next(Tuple2<Long, byte[]> reuse) {
                        return next();
                    }

                    @Override
                    public Tuple2<Long, byte[]> next() {
                        if (count++ < numRecords) {
                            return new Tuple2<>(42L, payload);
                        } else {
                            return null;
                        }
                    }
                };
                // probe side
                final MutableObjectIterator<Long> probeInput = new MutableObjectIterator<Long>() {

                    private final long numRecords = 10000;

                    private long value = 0;

                    @Override
                    public Long next(Long aLong) {
                        return next();
                    }

                    @Override
                    public Long next() {
                        if (value < numRecords) {
                            return value++;
                        } else {
                            return null;
                        }
                    }
                };
                table.open(buildInput, probeInput);
                try {
                    while (table.nextRecord()) {
                        MutableObjectIterator<Tuple2<Long, byte[]>> matches = table.getBuildSideIterator();
                        // drain all matches for this probe record
                        while (matches.next() != null) ;
                    }
                } catch (RuntimeException e) {
                    if (!e.getMessage().contains("exceeded maximum number of recursions")) {
                        throw e;
                    }
                } finally {
                    table.close();
                }
                // make sure no temp files are left
                checkNoTempFilesRemain(ioMan);
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    } finally {
        ioMan.shutdown();
    }
}
Also used : MutableObjectIterator(org.apache.flink.util.MutableObjectIterator) TupleComparator(org.apache.flink.api.java.typeutils.runtime.TupleComparator) TupleSerializer(org.apache.flink.api.java.typeutils.runtime.TupleSerializer) IOManagerAsync(org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync) IOManager(org.apache.flink.runtime.io.disk.iomanager.IOManager) TypePairComparator(org.apache.flink.api.common.typeutils.TypePairComparator) LongComparator(org.apache.flink.api.common.typeutils.base.LongComparator) MemorySegment(org.apache.flink.core.memory.MemorySegment) Tuple2(org.apache.flink.api.java.tuple.Tuple2) MutableHashTable(org.apache.flink.runtime.operators.hash.MutableHashTable)
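
The checkNoTempFilesRemain(ioMan) call at the end of each inner loop is a helper whose body is not shown. A plausible sketch, assuming the IOManager exposes its spill directories via getSpillingDirectories():

import java.io.File;
import org.apache.flink.runtime.io.disk.iomanager.IOManager;

// plausible sketch: after table.close(), the spill directories should hold no files
private static void checkNoTempFilesRemain(IOManager ioManager) {
    for (File dir : ioManager.getSpillingDirectories()) {
        File[] files = dir.listFiles();
        if (files != null && files.length > 0) {
            throw new AssertionError("hash table left a temp file behind: " + files[0]);
        }
    }
}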

Example 20 with MutableObjectIterator

Use of org.apache.flink.util.MutableObjectIterator in project flink by apache.

From the class ReusingSortMergeInnerJoinIteratorITCase, method testMergeWithHighNumberOfCommonKeys():

@Test
public void testMergeWithHighNumberOfCommonKeys() {
    // sizes of the two inputs, and the number of duplicate-key records appended to each
    final int INPUT_1_SIZE = 200;
    final int INPUT_2_SIZE = 100;
    final int INPUT_1_DUPLICATES = 10;
    final int INPUT_2_DUPLICATES = 4000;
    final int DUPLICATE_KEY = 13;
    try {
        final TupleGenerator generator1 = new TupleGenerator(SEED1, 500, 4096, KeyMode.SORTED, ValueMode.RANDOM_LENGTH);
        final TupleGenerator generator2 = new TupleGenerator(SEED2, 500, 2048, KeyMode.SORTED, ValueMode.RANDOM_LENGTH);
        final TestData.TupleGeneratorIterator gen1Iter = new TestData.TupleGeneratorIterator(generator1, INPUT_1_SIZE);
        final TestData.TupleGeneratorIterator gen2Iter = new TestData.TupleGeneratorIterator(generator2, INPUT_2_SIZE);
        final TestData.TupleConstantValueIterator const1Iter = new TestData.TupleConstantValueIterator(DUPLICATE_KEY, "LEFT String for Duplicate Keys", INPUT_1_DUPLICATES);
        final TestData.TupleConstantValueIterator const2Iter = new TestData.TupleConstantValueIterator(DUPLICATE_KEY, "RIGHT String for Duplicate Keys", INPUT_2_DUPLICATES);
        final List<MutableObjectIterator<Tuple2<Integer, String>>> inList1 = new ArrayList<MutableObjectIterator<Tuple2<Integer, String>>>();
        inList1.add(gen1Iter);
        inList1.add(const1Iter);
        final List<MutableObjectIterator<Tuple2<Integer, String>>> inList2 = new ArrayList<MutableObjectIterator<Tuple2<Integer, String>>>();
        inList2.add(gen2Iter);
        inList2.add(const2Iter);
        MutableObjectIterator<Tuple2<Integer, String>> input1 = new MergeIterator<Tuple2<Integer, String>>(inList1, comparator1.duplicate());
        MutableObjectIterator<Tuple2<Integer, String>> input2 = new MergeIterator<Tuple2<Integer, String>>(inList2, comparator2.duplicate());
        // collect expected data
        final Map<Integer, Collection<Match>> expectedMatchesMap = matchValues(collectData(input1), collectData(input2));
        // re-create the whole thing for actual processing
        // reset the generators and iterators
        generator1.reset();
        generator2.reset();
        const1Iter.reset();
        const2Iter.reset();
        gen1Iter.reset();
        gen2Iter.reset();
        inList1.clear();
        inList1.add(gen1Iter);
        inList1.add(const1Iter);
        inList2.clear();
        inList2.add(gen2Iter);
        inList2.add(const2Iter);
        input1 = new MergeIterator<Tuple2<Integer, String>>(inList1, comparator1.duplicate());
        input2 = new MergeIterator<Tuple2<Integer, String>>(inList2, comparator2.duplicate());
        final FlatJoinFunction<Tuple2<Integer, String>, Tuple2<Integer, String>, Tuple2<Integer, String>> matcher = new MatchRemovingJoiner(expectedMatchesMap);
        final Collector<Tuple2<Integer, String>> collector = new DiscardingOutputCollector<Tuple2<Integer, String>>();
        // Create this sort-merge iterator with little memory for the block-nested-loops fallback,
        // to make sure it needs to spill for the duplicate keys.
        ReusingMergeInnerJoinIterator<Tuple2<Integer, String>, Tuple2<Integer, String>, Tuple2<Integer, String>> iterator = new ReusingMergeInnerJoinIterator<Tuple2<Integer, String>, Tuple2<Integer, String>, Tuple2<Integer, String>>(input1, input2, this.serializer1, this.comparator1, this.serializer2, this.comparator2, this.pairComparator, this.memoryManager, this.ioManager, PAGES_FOR_BNLJN, this.parentTask);
        iterator.open();
        while (iterator.callWithNextKey(matcher, collector)) ;
        iterator.close();
        // assert that each expected match was seen
        for (Entry<Integer, Collection<Match>> entry : expectedMatchesMap.entrySet()) {
            if (!entry.getValue().isEmpty()) {
                Assert.fail("Collection for key " + entry.getKey() + " is not empty");
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail("An exception occurred during the test: " + e.getMessage());
    }
}
Also used : TestData(org.apache.flink.runtime.operators.testutils.TestData) MutableObjectIterator(org.apache.flink.util.MutableObjectIterator) ArrayList(java.util.ArrayList) MatchRemovingJoiner(org.apache.flink.runtime.operators.testutils.MatchRemovingJoiner) TupleGenerator(org.apache.flink.runtime.operators.testutils.TestData.TupleGenerator) DiscardingOutputCollector(org.apache.flink.runtime.operators.testutils.DiscardingOutputCollector) Tuple2(org.apache.flink.api.java.tuple.Tuple2) Collection(java.util.Collection) Test(org.junit.Test)
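
The test builds each input by wrapping several already-sorted iterators in a MergeIterator, which merges them into one sorted stream. A compact sketch of that pattern for Tuple2<Integer, String> records keyed on field 0 (the merge(...) helper itself is illustrative, not Flink API):

import java.io.IOException;
import java.util.Arrays;
import org.apache.flink.api.common.typeutils.TypeComparator;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.api.common.typeutils.base.IntComparator;
import org.apache.flink.api.common.typeutils.base.IntSerializer;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.typeutils.runtime.TupleComparator;
import org.apache.flink.runtime.operators.sort.MergeIterator;
import org.apache.flink.util.MutableObjectIterator;

// sketch: merge two iterators that are each already sorted on field 0
static MutableObjectIterator<Tuple2<Integer, String>> merge(
        MutableObjectIterator<Tuple2<Integer, String>> sortedA,
        MutableObjectIterator<Tuple2<Integer, String>> sortedB) throws IOException {
    TypeComparator<Tuple2<Integer, String>> comparator = new TupleComparator<>(
            new int[] { 0 },
            new TypeComparator<?>[] { new IntComparator(true) },
            new TypeSerializer<?>[] { IntSerializer.INSTANCE });
    return new MergeIterator<>(Arrays.asList(sortedA, sortedB), comparator);
}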

Aggregations

MutableObjectIterator (org.apache.flink.util.MutableObjectIterator): 21
Test (org.junit.Test): 18
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 14
ArrayList (java.util.ArrayList): 13
UnionIterator (org.apache.flink.runtime.operators.testutils.UnionIterator): 10
MemorySegment (org.apache.flink.core.memory.MemorySegment): 9
Collection (java.util.Collection): 7
DiscardingOutputCollector (org.apache.flink.runtime.operators.testutils.DiscardingOutputCollector): 7
IOException (java.io.IOException): 6
MemoryAllocationException (org.apache.flink.runtime.memory.MemoryAllocationException): 6
TestData (org.apache.flink.runtime.operators.testutils.TestData): 6
NullKeyFieldException (org.apache.flink.types.NullKeyFieldException): 6
TupleGenerator (org.apache.flink.runtime.operators.testutils.TestData.TupleGenerator): 5
HashMap (java.util.HashMap): 4
Map (java.util.Map): 4
Random (java.util.Random): 4
ExecutionConfig (org.apache.flink.api.common.ExecutionConfig): 4
TypeInformation (org.apache.flink.api.common.typeinfo.TypeInformation): 4
TupleTypeInfo (org.apache.flink.api.java.typeutils.TupleTypeInfo): 4
ValueTypeInfo (org.apache.flink.api.java.typeutils.ValueTypeInfo): 4