Example 1 with ParallelSourceFunction

Use of org.apache.flink.streaming.api.functions.source.ParallelSourceFunction in project flink by apache.

The class StreamExecutionEnvironment, method addSource.

/**
	 * Adds a data source with custom type information, thus opening a
	 * {@link DataStream}. Only in very special cases does the user need to
	 * supply type information explicitly. Otherwise use
	 * {@link #addSource(org.apache.flink.streaming.api.functions.source.SourceFunction)}.
	 *
	 * @param function
	 * 		the user defined function
	 * @param sourceName
	 * 		Name of the data source
	 * @param typeInfo
	 * 		the user defined type information for the stream
	 * @param <OUT>
	 * 		type of the returned stream
	 * @return the data stream constructed
	 */
@SuppressWarnings("unchecked")
public <OUT> DataStreamSource<OUT> addSource(SourceFunction<OUT> function, String sourceName, TypeInformation<OUT> typeInfo) {
    if (typeInfo == null) {
        // No explicit type information: ask the function itself, otherwise
        // fall back to reflective extraction.
        if (function instanceof ResultTypeQueryable) {
            typeInfo = ((ResultTypeQueryable<OUT>) function).getProducedType();
        } else {
            try {
                typeInfo = TypeExtractor.createTypeInfo(SourceFunction.class, function.getClass(), 0, null, null);
            } catch (final InvalidTypesException e) {
                // Defer the failure: MissingTypeInfo only raises an error if
                // the type is actually needed downstream.
                typeInfo = (TypeInformation<OUT>) new MissingTypeInfo(sourceName, e);
            }
        }
    }
    // The marker interface decides whether the source may run with a
    // parallelism greater than one.
    boolean isParallel = function instanceof ParallelSourceFunction;
    clean(function);
    StreamSource<OUT, ?> sourceOperator;
    if (function instanceof StoppableFunction) {
        sourceOperator = new StoppableStreamSource<>(cast2StoppableSourceFunction(function));
    } else {
        sourceOperator = new StreamSource<>(function);
    }
    return new DataStreamSource<>(this, typeInfo, sourceOperator, isParallel, sourceName);
}
Also used: ParallelSourceFunction(org.apache.flink.streaming.api.functions.source.ParallelSourceFunction) SourceFunction(org.apache.flink.streaming.api.functions.source.SourceFunction) InputFormatSourceFunction(org.apache.flink.streaming.api.functions.source.InputFormatSourceFunction) MissingTypeInfo(org.apache.flink.api.java.typeutils.MissingTypeInfo) ResultTypeQueryable(org.apache.flink.api.java.typeutils.ResultTypeQueryable) DataStreamSource(org.apache.flink.streaming.api.datastream.DataStreamSource) StoppableFunction(org.apache.flink.api.common.functions.StoppableFunction) TypeInformation(org.apache.flink.api.common.typeinfo.TypeInformation) InvalidTypesException(org.apache.flink.api.common.functions.InvalidTypesException)
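
For orientation, a minimal caller-side sketch of this overload (the NumberSource class is hypothetical, not part of the excerpt). Because NumberSource implements ParallelSourceFunction, isParallel is true and the returned DataStreamSource may run with a parallelism greater than one; passing Types.INT explicitly means the ResultTypeQueryable/TypeExtractor branches above are never taken.

import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.ParallelSourceFunction;

public class AddSourceSketch {

    // Hypothetical source: every parallel subtask emits the numbers 0..99.
    static class NumberSource implements ParallelSourceFunction<Integer> {

        private volatile boolean running = true;

        @Override
        public void run(SourceContext<Integer> ctx) {
            for (int i = 0; i < 100 && running; i++) {
                ctx.collect(i);
            }
        }

        @Override
        public void cancel() {
            running = false;
        }
    }

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Explicit type information, so no reflective type extraction is needed.
        DataStreamSource<Integer> source = env.addSource(new NumberSource(), "numbers", Types.INT);
        source.print();
        env.execute("addSource sketch");
    }
}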

Example 2 with ParallelSourceFunction

Use of org.apache.flink.streaming.api.functions.source.ParallelSourceFunction in project flink by apache.

The class CommonExecTableSourceScan, method createSourceFunctionTransformation.

/**
 * Adapted from {@link StreamExecutionEnvironment#addSource(SourceFunction, String,
 * TypeInformation)}, but with a custom {@link Boundedness}.
 */
protected Transformation<RowData> createSourceFunctionTransformation(StreamExecutionEnvironment env, SourceFunction<RowData> function, boolean isBounded, String operatorName, TypeInformation<RowData> outputTypeInfo) {
    env.clean(function);
    final int parallelism;
    if (function instanceof ParallelSourceFunction) {
        parallelism = env.getParallelism();
    } else {
        parallelism = 1;
    }
    final Boundedness boundedness;
    if (isBounded) {
        boundedness = Boundedness.BOUNDED;
    } else {
        boundedness = Boundedness.CONTINUOUS_UNBOUNDED;
    }
    final StreamSource<RowData, ?> sourceOperator = new StreamSource<>(function, !isBounded);
    return new LegacySourceTransformation<>(operatorName, sourceOperator, outputTypeInfo, parallelism, boundedness);
}
Also used: RowData(org.apache.flink.table.data.RowData) Boundedness(org.apache.flink.api.connector.source.Boundedness) StreamSource(org.apache.flink.streaming.api.operators.StreamSource) ParallelSourceFunction(org.apache.flink.streaming.api.functions.source.ParallelSourceFunction) LegacySourceTransformation(org.apache.flink.streaming.api.transformations.LegacySourceTransformation)
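
The parallelism decision above hinges only on the marker interface. As a sketch (using String elements rather than RowData for brevity, and with hypothetical class names), two sources with identical logic would be planned differently by createSourceFunctionTransformation: SingleRows gets parallelism 1, ParallelRows gets env.getParallelism().

import org.apache.flink.streaming.api.functions.source.ParallelSourceFunction;
import org.apache.flink.streaming.api.functions.source.SourceFunction;

// Identical logic; only the implemented interface differs.
class SingleRows implements SourceFunction<String> {

    @Override
    public void run(SourceContext<String> ctx) {
        ctx.collect("row");
    }

    @Override
    public void cancel() {
    }
}

class ParallelRows implements ParallelSourceFunction<String> {

    @Override
    public void run(SourceContext<String> ctx) {
        ctx.collect("row");
    }

    @Override
    public void cancel() {
    }
}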

Example 3 with ParallelSourceFunction

Use of org.apache.flink.streaming.api.functions.source.ParallelSourceFunction in project flink by apache.

The class StreamingJobGraphGeneratorTest, method createJobGraphForManagedMemoryFractionTest.

private JobGraph createJobGraphForManagedMemoryFractionTest(final List<ResourceSpec> resourceSpecs, final List<Map<ManagedMemoryUseCase, Integer>> operatorScopeUseCaseWeights, final List<Set<ManagedMemoryUseCase>> slotScopeUseCases) throws Exception {
    final Method opMethod = getSetResourcesMethodAndSetAccessible(SingleOutputStreamOperator.class);
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    final DataStream<Integer> source = env.addSource(new ParallelSourceFunction<Integer>() {

        @Override
        public void run(SourceContext<Integer> ctx) {
        }

        @Override
        public void cancel() {
        }
    });
    opMethod.invoke(source, resourceSpecs.get(0));
    // CHAIN(source -> map1) in default slot sharing group
    final DataStream<Integer> map1 = source.map((MapFunction<Integer, Integer>) value -> value);
    opMethod.invoke(map1, resourceSpecs.get(1));
    // CHAIN(map2) in default slot sharing group
    final DataStream<Integer> map2 = map1.rebalance().map((MapFunction<Integer, Integer>) value -> value);
    opMethod.invoke(map2, resourceSpecs.get(2));
    // CHAIN(map3) in test slot sharing group
    final DataStream<Integer> map3 = map2.rebalance().map(value -> value).slotSharingGroup("test");
    opMethod.invoke(map3, resourceSpecs.get(3));
    declareManagedMemoryUseCaseForTranformation(source.getTransformation(), operatorScopeUseCaseWeights.get(0), slotScopeUseCases.get(0));
    declareManagedMemoryUseCaseForTranformation(map1.getTransformation(), operatorScopeUseCaseWeights.get(1), slotScopeUseCases.get(1));
    declareManagedMemoryUseCaseForTranformation(map2.getTransformation(), operatorScopeUseCaseWeights.get(2), slotScopeUseCases.get(2));
    declareManagedMemoryUseCaseForTranformation(map3.getTransformation(), operatorScopeUseCaseWeights.get(3), slotScopeUseCases.get(3));
    return StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());
}
Also used: Arrays(java.util.Arrays) Tuple2(org.apache.flink.api.java.tuple.Tuple2) TypeSerializerInputFormat(org.apache.flink.api.java.io.TypeSerializerInputFormat) YieldingOperatorFactory(org.apache.flink.streaming.api.operators.YieldingOperatorFactory) AbstractStreamOperatorFactory(org.apache.flink.streaming.api.operators.AbstractStreamOperatorFactory) UserCodeWrapper(org.apache.flink.api.common.operators.util.UserCodeWrapper) ResourceSpec(org.apache.flink.api.common.operators.ResourceSpec) ManagedMemoryUseCase(org.apache.flink.core.memory.ManagedMemoryUseCase) Map(java.util.Map) CoLocationGroup(org.apache.flink.runtime.jobmanager.scheduler.CoLocationGroup) ForwardPartitioner(org.apache.flink.streaming.runtime.partitioner.ForwardPartitioner) SinkFunction(org.apache.flink.streaming.api.functions.sink.SinkFunction) TaskConfig(org.apache.flink.runtime.operators.util.TaskConfig) Set(java.util.Set) FlatMapFunction(org.apache.flink.api.common.functions.FlatMapFunction) FilterFunction(org.apache.flink.api.common.functions.FilterFunction) Assert.assertFalse(org.junit.Assert.assertFalse) StreamingJobGraphGenerator.areOperatorsChainable(org.apache.flink.streaming.api.graph.StreamingJobGraphGenerator.areOperatorsChainable) Boundedness(org.apache.flink.api.connector.source.Boundedness) OneInputStreamOperatorFactory(org.apache.flink.streaming.api.operators.OneInputStreamOperatorFactory) MultipleInputTransformation(org.apache.flink.streaming.api.transformations.MultipleInputTransformation) NumberSequenceSource(org.apache.flink.api.connector.source.lib.NumberSequenceSource) CoreMatchers.equalTo(org.hamcrest.CoreMatchers.equalTo) ArrayList(java.util.ArrayList) TaskManagerOptions(org.apache.flink.configuration.TaskManagerOptions) Collector(org.apache.flink.util.Collector) Iterables(org.apache.flink.shaded.guava30.com.google.common.collect.Iterables) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) Types(org.apache.flink.api.common.typeinfo.Types) DataStreamSink(org.apache.flink.streaming.api.datastream.DataStreamSink) DiscardingOutputFormat(org.apache.flink.api.java.io.DiscardingOutputFormat) MailboxExecutor(org.apache.flink.api.common.operators.MailboxExecutor) SingleOutputStreamOperator(org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) OneInputTransformation(org.apache.flink.streaming.api.transformations.OneInputTransformation) Assert.assertNotEquals(org.junit.Assert.assertNotEquals) StreamOperator(org.apache.flink.streaming.api.operators.StreamOperator) Assert.assertNull(org.junit.Assert.assertNull) Matcher(org.hamcrest.Matcher) Transformation(org.apache.flink.api.dag.Transformation) Assert(org.junit.Assert) SavepointRestoreSettings(org.apache.flink.runtime.jobgraph.SavepointRestoreSettings) Assert.assertEquals(org.junit.Assert.assertEquals) CoreMatchers.is(org.hamcrest.CoreMatchers.is) PipelineOptions(org.apache.flink.configuration.PipelineOptions) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) CheckpointingMode(org.apache.flink.streaming.api.CheckpointingMode) MapFunction(org.apache.flink.api.common.functions.MapFunction) BasicTypeInfo(org.apache.flink.api.common.typeinfo.BasicTypeInfo) ChainingStrategy(org.apache.flink.streaming.api.operators.ChainingStrategy) TestLogger(org.apache.flink.util.TestLogger) InputFormat(org.apache.flink.api.common.io.InputFormat) Assert.fail(org.junit.Assert.fail) TypeInformation(org.apache.flink.api.common.typeinfo.TypeInformation) Method(java.lang.reflect.Method) OutputFormat(org.apache.flink.api.common.io.OutputFormat) JobCheckpointingSettings(org.apache.flink.runtime.jobgraph.tasks.JobCheckpointingSettings) PartitionTransformation(org.apache.flink.streaming.api.transformations.PartitionTransformation) WatermarkStrategy(org.apache.flink.api.common.eventtime.WatermarkStrategy) Collectors(java.util.stream.Collectors) ResourceProfile(org.apache.flink.runtime.clusterframework.types.ResourceProfile) SimpleOperatorFactory(org.apache.flink.streaming.api.operators.SimpleOperatorFactory) List(java.util.List) MultipleInputStreamTask(org.apache.flink.streaming.runtime.tasks.MultipleInputStreamTask) SerializedValue(org.apache.flink.util.SerializedValue) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) OperatorID(org.apache.flink.runtime.jobgraph.OperatorID) CheckpointConfig(org.apache.flink.streaming.api.environment.CheckpointConfig) ParallelSourceFunction(org.apache.flink.streaming.api.functions.source.ParallelSourceFunction) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) SlotSharingGroup(org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup) IterativeStream(org.apache.flink.streaming.api.datastream.IterativeStream) StreamOperatorFactory(org.apache.flink.streaming.api.operators.StreamOperatorFactory) InputOutputFormatVertex(org.apache.flink.runtime.jobgraph.InputOutputFormatVertex) ResultPartitionType(org.apache.flink.runtime.io.network.partition.ResultPartitionType) HashMap(java.util.HashMap) DataStreamSource(org.apache.flink.streaming.api.datastream.DataStreamSource) JobType(org.apache.flink.runtime.jobgraph.JobType) SourceOperatorFactory(org.apache.flink.streaming.api.operators.SourceOperatorFactory) MockSource(org.apache.flink.api.connector.source.mocks.MockSource) SourceOperatorStreamTask(org.apache.flink.streaming.runtime.tasks.SourceOperatorStreamTask) StreamMap(org.apache.flink.streaming.api.operators.StreamMap) ReduceFunction(org.apache.flink.api.common.functions.ReduceFunction) RebalancePartitioner(org.apache.flink.streaming.runtime.partitioner.RebalancePartitioner) DiscardingSink(org.apache.flink.streaming.api.functions.sink.DiscardingSink) Assert.assertNotNull(org.junit.Assert.assertNotNull) Configuration(org.apache.flink.configuration.Configuration) CoordinatedOperatorFactory(org.apache.flink.streaming.api.operators.CoordinatedOperatorFactory) StreamOperatorParameters(org.apache.flink.streaming.api.operators.StreamOperatorParameters) InputFormatSourceFunction(org.apache.flink.streaming.api.functions.source.InputFormatSourceFunction) DataStream(org.apache.flink.streaming.api.datastream.DataStream) RescalePartitioner(org.apache.flink.streaming.runtime.partitioner.RescalePartitioner) FeatureMatcher(org.hamcrest.FeatureMatcher) StreamExchangeMode(org.apache.flink.streaming.api.transformations.StreamExchangeMode) TestAnyModeReadingStreamOperator(org.apache.flink.streaming.util.TestAnyModeReadingStreamOperator) OperatorCoordinator(org.apache.flink.runtime.operators.coordination.OperatorCoordinator) InputOutputFormatContainer(org.apache.flink.runtime.jobgraph.InputOutputFormatContainer) Comparator(java.util.Comparator) RuntimeExecutionMode(org.apache.flink.api.common.RuntimeExecutionMode) Collections(java.util.Collections)
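
The declareManagedMemoryUseCaseForTranformation helper is not shown in the excerpt. A plausible reconstruction, assuming it simply forwards to the declaration methods that Transformation exposes for operator-scope weights and slot-scope use cases:

import java.util.Map;
import java.util.Set;
import org.apache.flink.api.dag.Transformation;
import org.apache.flink.core.memory.ManagedMemoryUseCase;

class ManagedMemoryDeclarationSketch {

    // Hypothetical shape of the test helper: declare each operator-scope use
    // case with its weight, then each slot-scope use case.
    static void declareManagedMemoryUseCaseForTranformation(
            Transformation<?> transformation,
            Map<ManagedMemoryUseCase, Integer> operatorScopeUseCaseWeights,
            Set<ManagedMemoryUseCase> slotScopeUseCases) {
        operatorScopeUseCaseWeights.forEach(
                transformation::declareManagedMemoryUseCaseAtOperatorScope);
        slotScopeUseCases.forEach(transformation::declareManagedMemoryUseCaseAtSlotScope);
    }
}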

Example 4 with ParallelSourceFunction

Use of org.apache.flink.streaming.api.functions.source.ParallelSourceFunction in project flink by apache.

The class StreamingJobGraphGeneratorTest, method testResourcesForIteration.

/**
 * Verifies that the resources of chained operators are merged correctly (covering the
 * mid-chain and iteration cases) when generating the job graph.
 */
@Test
public void testResourcesForIteration() throws Exception {
    ResourceSpec resource1 = ResourceSpec.newBuilder(0.1, 100).build();
    ResourceSpec resource2 = ResourceSpec.newBuilder(0.2, 200).build();
    ResourceSpec resource3 = ResourceSpec.newBuilder(0.3, 300).build();
    ResourceSpec resource4 = ResourceSpec.newBuilder(0.4, 400).build();
    ResourceSpec resource5 = ResourceSpec.newBuilder(0.5, 500).build();
    Method opMethod = getSetResourcesMethodAndSetAccessible(SingleOutputStreamOperator.class);
    Method sinkMethod = getSetResourcesMethodAndSetAccessible(DataStreamSink.class);
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<Integer> source = env.addSource(new ParallelSourceFunction<Integer>() {

        @Override
        public void run(SourceContext<Integer> ctx) throws Exception {
        }

        @Override
        public void cancel() {
        }
    }).name("test_source");
    opMethod.invoke(source, resource1);
    IterativeStream<Integer> iteration = source.iterate(3000);
    opMethod.invoke(iteration, resource2);
    DataStream<Integer> flatMap = iteration.flatMap(new FlatMapFunction<Integer, Integer>() {

        @Override
        public void flatMap(Integer value, Collector<Integer> out) throws Exception {
            out.collect(value);
        }
    }).name("test_flatMap");
    opMethod.invoke(flatMap, resource3);
    // CHAIN(flatMap -> Filter)
    DataStream<Integer> increment = flatMap.filter(new FilterFunction<Integer>() {

        @Override
        public boolean filter(Integer value) throws Exception {
            return false;
        }
    }).name("test_filter");
    opMethod.invoke(increment, resource4);
    DataStreamSink<Integer> sink = iteration.closeWith(increment).addSink(new SinkFunction<Integer>() {

        @Override
        public void invoke(Integer value) throws Exception {
        }
    }).disableChaining().name("test_sink");
    sinkMethod.invoke(sink, resource5);
    JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());
    for (JobVertex jobVertex : jobGraph.getVertices()) {
        if (jobVertex.getName().contains("test_source")) {
            assertTrue(jobVertex.getMinResources().equals(resource1));
        } else if (jobVertex.getName().contains("Iteration_Source")) {
            assertTrue(jobVertex.getPreferredResources().equals(resource2));
        } else if (jobVertex.getName().contains("test_flatMap")) {
            assertTrue(jobVertex.getMinResources().equals(resource3.merge(resource4)));
        } else if (jobVertex.getName().contains("Iteration_Tail")) {
            assertTrue(jobVertex.getPreferredResources().equals(ResourceSpec.DEFAULT));
        } else if (jobVertex.getName().contains("test_sink")) {
            assertTrue(jobVertex.getMinResources().equals(resource5));
        }
    }
}
Also used: FilterFunction(org.apache.flink.api.common.functions.FilterFunction) ResourceSpec(org.apache.flink.api.common.operators.ResourceSpec) Method(java.lang.reflect.Method) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) FlatMapFunction(org.apache.flink.api.common.functions.FlatMapFunction) Collector(org.apache.flink.util.Collector) ParallelSourceFunction(org.apache.flink.streaming.api.functions.source.ParallelSourceFunction) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) Test(org.junit.Test)
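
Why the test_flatMap vertex expects resource3.merge(resource4): flatMap and filter are chained into a single vertex, and chaining merges the operators' ResourceSpecs. A minimal sketch of that arithmetic (merge sums the components, so the chained vertex needs 0.3 + 0.4 cores and 300 + 400 MB of task heap):

import org.apache.flink.api.common.operators.ResourceSpec;

class ResourceMergeSketch {

    public static void main(String[] args) {
        ResourceSpec flatMapSpec = ResourceSpec.newBuilder(0.3, 300).build();
        ResourceSpec filterSpec = ResourceSpec.newBuilder(0.4, 400).build();
        // Component-wise sum: 0.7 cores, 700 MB task heap.
        ResourceSpec chained = flatMapSpec.merge(filterSpec);
        System.out.println(chained);
    }
}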

Example 5 with ParallelSourceFunction

Use of org.apache.flink.streaming.api.functions.source.ParallelSourceFunction in project flink by apache.

The class RescalePartitionerTest, method testExecutionGraphGeneration.

@Test
public void testExecutionGraphGeneration() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(4);
    // get input data
    DataStream<String> text = env.addSource(new ParallelSourceFunction<String>() {

        private static final long serialVersionUID = 7772338606389180774L;

        @Override
        public void run(SourceContext<String> ctx) throws Exception {
        }

        @Override
        public void cancel() {
        }
    }).setParallelism(2);
    DataStream<Tuple2<String, Integer>> counts = text.rescale().flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {

        private static final long serialVersionUID = -5255930322161596829L;

        @Override
        public void flatMap(String value, Collector<Tuple2<String, Integer>> out) throws Exception {
        }
    });
    counts.rescale().print().setParallelism(2);
    JobGraph jobGraph = env.getStreamGraph().getJobGraph();
    List<JobVertex> jobVertices = jobGraph.getVerticesSortedTopologicallyFromSources();
    JobVertex sourceVertex = jobVertices.get(0);
    JobVertex mapVertex = jobVertices.get(1);
    JobVertex sinkVertex = jobVertices.get(2);
    assertEquals(2, sourceVertex.getParallelism());
    assertEquals(4, mapVertex.getParallelism());
    assertEquals(2, sinkVertex.getParallelism());
    ExecutionGraph eg = TestingDefaultExecutionGraphBuilder.newBuilder().setVertexParallelismStore(SchedulerBase.computeVertexParallelismStore(jobGraph)).build();
    try {
        eg.attachJobGraph(jobVertices);
    } catch (JobException e) {
        e.printStackTrace();
        fail("Building ExecutionGraph failed: " + e.getMessage());
    }
    ExecutionJobVertex execSourceVertex = eg.getJobVertex(sourceVertex.getID());
    ExecutionJobVertex execMapVertex = eg.getJobVertex(mapVertex.getID());
    ExecutionJobVertex execSinkVertex = eg.getJobVertex(sinkVertex.getID());
    assertEquals(0, execSourceVertex.getInputs().size());
    assertEquals(1, execMapVertex.getInputs().size());
    assertEquals(4, execMapVertex.getParallelism());
    ExecutionVertex[] mapTaskVertices = execMapVertex.getTaskVertices();
    // verify that each parallel input partition occurs exactly twice, i.e. each
    // source subtask sends to two distinct mappers
    Map<Integer, Integer> mapInputPartitionCounts = new HashMap<>();
    for (ExecutionVertex mapTaskVertex : mapTaskVertices) {
        assertEquals(1, mapTaskVertex.getNumberOfInputs());
        assertEquals(1, mapTaskVertex.getConsumedPartitionGroup(0).size());
        IntermediateResultPartitionID consumedPartitionId = mapTaskVertex.getConsumedPartitionGroup(0).getFirst();
        assertEquals(sourceVertex.getID(), mapTaskVertex.getExecutionGraphAccessor().getResultPartitionOrThrow(consumedPartitionId).getProducer().getJobvertexId());
        int inputPartition = consumedPartitionId.getPartitionNumber();
        mapInputPartitionCounts.merge(inputPartition, 1, Integer::sum);
    }
    assertEquals(2, mapInputPartitionCounts.size());
    for (int count : mapInputPartitionCounts.values()) {
        assertEquals(2, count);
    }
    assertEquals(1, execSinkVertex.getInputs().size());
    assertEquals(2, execSinkVertex.getParallelism());
    ExecutionVertex[] sinkTaskVertices = execSinkVertex.getTaskVertices();
    InternalExecutionGraphAccessor executionGraphAccessor = execSinkVertex.getGraph();
    // verify that each sink instance has two inputs from the map and that each map
    // result partition occurs in exactly one input edge
    Set<Integer> mapSubpartitions = new HashSet<>();
    for (ExecutionVertex sinkTaskVertex : sinkTaskVertices) {
        assertEquals(1, sinkTaskVertex.getNumberOfInputs());
        assertEquals(2, sinkTaskVertex.getConsumedPartitionGroup(0).size());
        for (IntermediateResultPartitionID consumedPartitionId : sinkTaskVertex.getConsumedPartitionGroup(0)) {
            IntermediateResultPartition consumedPartition = executionGraphAccessor.getResultPartitionOrThrow(consumedPartitionId);
            assertEquals(mapVertex.getID(), consumedPartition.getProducer().getJobvertexId());
            int partitionNumber = consumedPartition.getPartitionNumber();
            assertFalse(mapSubpartitions.contains(partitionNumber));
            mapSubpartitions.add(partitionNumber);
        }
    }
    assertEquals(4, mapSubpartitions.size());
}
Also used: HashMap(java.util.HashMap) ExecutionVertex(org.apache.flink.runtime.executiongraph.ExecutionVertex) JobException(org.apache.flink.runtime.JobException) ExecutionJobVertex(org.apache.flink.runtime.executiongraph.ExecutionJobVertex) ParallelSourceFunction(org.apache.flink.streaming.api.functions.source.ParallelSourceFunction) InternalExecutionGraphAccessor(org.apache.flink.runtime.executiongraph.InternalExecutionGraphAccessor) HashSet(java.util.HashSet) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) IntermediateResultPartition(org.apache.flink.runtime.executiongraph.IntermediateResultPartition) Tuple2(org.apache.flink.api.java.tuple.Tuple2) ExecutionGraph(org.apache.flink.runtime.executiongraph.ExecutionGraph) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) Test(org.junit.Test)
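
The distribution pattern the test asserts (each source subtask feeding exactly two mappers, and each sink subtask reading from exactly two mappers) follows from the parallelism ratios 2 -> 4 -> 2 under RESCALE. A minimal, hypothetical pipeline with the same shape (fromSequence is assumed available in this Flink version):

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class RescaleSketch {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);
        // 2 source subtasks -> rescale -> 4 mappers: each source feeds 4 / 2 = 2 mappers.
        // 4 mappers -> rescale -> 2 sink subtasks: each sink reads from 4 / 2 = 2 mappers.
        env.fromSequence(0, 999).setParallelism(2)
                .rescale()
                .map((MapFunction<Long, Long>) value -> value)
                .rescale()
                .print()
                .setParallelism(2);
        env.execute("rescale sketch");
    }
}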

Aggregations

ParallelSourceFunction (org.apache.flink.streaming.api.functions.source.ParallelSourceFunction): 5
Method (java.lang.reflect.Method): 2
HashMap (java.util.HashMap): 2
FilterFunction (org.apache.flink.api.common.functions.FilterFunction): 2
FlatMapFunction (org.apache.flink.api.common.functions.FlatMapFunction): 2
ResourceSpec (org.apache.flink.api.common.operators.ResourceSpec): 2
TypeInformation (org.apache.flink.api.common.typeinfo.TypeInformation): 2
Boundedness (org.apache.flink.api.connector.source.Boundedness): 2
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 2
JobGraph (org.apache.flink.runtime.jobgraph.JobGraph): 2
JobVertex (org.apache.flink.runtime.jobgraph.JobVertex): 2
DataStreamSource (org.apache.flink.streaming.api.datastream.DataStreamSource): 2
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment): 2
InputFormatSourceFunction (org.apache.flink.streaming.api.functions.source.InputFormatSourceFunction): 2
Test (org.junit.Test): 2
ArrayList (java.util.ArrayList): 1
Arrays (java.util.Arrays): 1
Collections (java.util.Collections): 1
Comparator (java.util.Comparator): 1
HashSet (java.util.HashSet): 1