Use of org.apache.flink.streaming.api.datastream.DataStream in project flink by apache.
Example from the class TypeFillTest, method test.
@Test
public void test() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    try {
        env.addSource(new TestSource<Integer>()).print();
        fail();
    } catch (Exception ignored) {
    }

    DataStream<Long> source = env.generateSequence(1, 10);

    try {
        source.map(new TestMap<Long, Long>()).print();
        fail();
    } catch (Exception ignored) {
    }

    try {
        source.flatMap(new TestFlatMap<Long, Long>()).print();
        fail();
    } catch (Exception ignored) {
    }

    try {
        source.connect(source).map(new TestCoMap<Long, Long, Integer>()).print();
        fail();
    } catch (Exception ignored) {
    }

    try {
        source.connect(source).flatMap(new TestCoFlatMap<Long, Long, Integer>()).print();
        fail();
    } catch (Exception ignored) {
    }

    try {
        source.keyBy(new TestKeySelector<Long, String>()).print();
        fail();
    } catch (Exception ignored) {
    }

    try {
        source.connect(source).keyBy(new TestKeySelector<Long, String>(), new TestKeySelector<>());
        fail();
    } catch (Exception ignored) {
    }

    try {
        source.coGroup(source).where(new TestKeySelector<>()).equalTo(new TestKeySelector<>());
        fail();
    } catch (Exception ignored) {
    }

    try {
        source.join(source).where(new TestKeySelector<>()).equalTo(new TestKeySelector<>());
        fail();
    } catch (Exception ignored) {
    }

    try {
        source.keyBy((in) -> in)
                .intervalJoin(source.keyBy((in) -> in))
                .between(Time.milliseconds(10L), Time.milliseconds(10L))
                .process(new TestProcessJoinFunction<>())
                .print();
        fail();
    } catch (Exception ignored) {
    }

    env.addSource(new TestSource<Integer>()).returns(Integer.class);
    source.map(new TestMap<Long, Long>()).returns(Long.class).print();
    source.flatMap(new TestFlatMap<Long, Long>()).returns(new TypeHint<Long>() {}).print();
    source.connect(source).map(new TestCoMap<Long, Long, Integer>()).returns(BasicTypeInfo.INT_TYPE_INFO).print();
    source.connect(source).flatMap(new TestCoFlatMap<Long, Long, Integer>()).returns(BasicTypeInfo.INT_TYPE_INFO).print();
    source.connect(source).keyBy(new TestKeySelector<>(), new TestKeySelector<>(), Types.STRING);
    source.coGroup(source).where(new TestKeySelector<>(), Types.STRING).equalTo(new TestKeySelector<>(), Types.STRING);
    source.join(source).where(new TestKeySelector<>(), Types.STRING).equalTo(new TestKeySelector<>(), Types.STRING);
    source.keyBy((in) -> in)
            .intervalJoin(source.keyBy((in) -> in))
            .between(Time.milliseconds(10L), Time.milliseconds(10L))
            .process(new TestProcessJoinFunction<Long, Long, String>())
            .returns(Types.STRING);
    source.keyBy((in) -> in)
            .intervalJoin(source.keyBy((in) -> in))
            .between(Time.milliseconds(10L), Time.milliseconds(10L))
            .process(new TestProcessJoinFunction<>(), Types.STRING);

    assertEquals(
            BasicTypeInfo.LONG_TYPE_INFO,
            source.map(new TestMap<Long, Long>()).returns(Long.class).getType());

    SingleOutputStreamOperator<String> map =
            source.map(
                    new MapFunction<Long, String>() {
                        @Override
                        public String map(Long value) throws Exception {
                            return null;
                        }
                    });
    map.print();

    try {
        map.returns(String.class);
        fail();
    } catch (Exception ignored) {
    }
}
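The helpers referenced above (TestSource, TestMap, TestFlatMap, TestCoMap, TestCoFlatMap, TestKeySelector, TestProcessJoinFunction) are generic function classes whose type parameters are erased at runtime, so Flink's TypeExtractor cannot determine their output types; that is why each call fails until returns(...) or an explicit TypeInformation argument supplies the type. A minimal sketch of what such a helper could look like (the actual classes in the Flink test sources may differ):

import org.apache.flink.api.common.functions.MapFunction;

// Hypothetical reconstruction: a MapFunction with unresolved type variables. Because the
// instance is created as new TestMap<Long, Long>(), the type arguments are erased and the
// output type cannot be inferred, forcing the caller to use returns(...).
public class TestMap<T, O> implements MapFunction<T, O> {
    @Override
    public O map(T value) throws Exception {
        // Never executed in the test; only type extraction matters here.
        return null;
    }
}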
Use of org.apache.flink.streaming.api.datastream.DataStream in project flink by apache.
Example from the class StreamGraphGeneratorTest, method testMaxParallelismForwarding.
/**
* Tests that the global and operator-wide max parallelism setting is respected.
*/
@Test
public void testMaxParallelismForwarding() {
    int globalMaxParallelism = 42;
    int keyedResult2MaxParallelism = 17;

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().setMaxParallelism(globalMaxParallelism);

    DataStream<Integer> source = env.fromElements(1, 2, 3);
    DataStream<Integer> keyedResult1 = source.keyBy(value -> value).map(new NoOpIntMap());
    DataStream<Integer> keyedResult2 =
            keyedResult1
                    .keyBy(value -> value)
                    .map(new NoOpIntMap())
                    .setMaxParallelism(keyedResult2MaxParallelism);
    keyedResult2.addSink(new DiscardingSink<>());

    StreamGraph graph = env.getStreamGraph();
    StreamNode keyedResult1Node = graph.getStreamNode(keyedResult1.getId());
    StreamNode keyedResult2Node = graph.getStreamNode(keyedResult2.getId());

    assertEquals(globalMaxParallelism, keyedResult1Node.getMaxParallelism());
    assertEquals(keyedResult2MaxParallelism, keyedResult2Node.getMaxParallelism());
}
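NoOpIntMap exists only to give each keyBy(...) its own downstream operator node in the StreamGraph, so that per-operator max parallelism can be inspected. A minimal sketch, assuming a plain identity MapFunction (the actual test utility in the Flink sources may differ):

import org.apache.flink.api.common.functions.MapFunction;

// Identity mapper: each keyBy(value -> value).map(new NoOpIntMap()) chain produces a distinct
// StreamNode whose max parallelism can then be read via graph.getStreamNode(...).
public class NoOpIntMap implements MapFunction<Integer, Integer> {
    @Override
    public Integer map(Integer value) throws Exception {
        return value;
    }
}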
Use of org.apache.flink.streaming.api.datastream.DataStream in project flink by apache.
Example from the class StreamGraphGeneratorTest, method testConfigureSlotSharingGroupResource.
@Test
public void testConfigureSlotSharingGroupResource() {
    final SlotSharingGroup ssg1 =
            SlotSharingGroup.newBuilder("ssg1").setCpuCores(1).setTaskHeapMemoryMB(100).build();
    final SlotSharingGroup ssg2 =
            SlotSharingGroup.newBuilder("ssg2").setCpuCores(2).setTaskHeapMemoryMB(200).build();
    final SlotSharingGroup ssg3 =
            SlotSharingGroup.newBuilder(StreamGraphGenerator.DEFAULT_SLOT_SHARING_GROUP)
                    .setCpuCores(3)
                    .setTaskHeapMemoryMB(300)
                    .build();

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    final DataStream<Integer> source = env.fromElements(1).slotSharingGroup("ssg1");
    source.map(value -> value).slotSharingGroup(ssg2)
            .map(value -> value * 2)
            .map(value -> value * 3).slotSharingGroup(SlotSharingGroup.newBuilder("ssg4").build())
            .map(value -> value * 4).slotSharingGroup(ssg3)
            .addSink(new DiscardingSink<>()).slotSharingGroup(ssg1);

    final StreamGraph streamGraph = env.getStreamGraph();
    assertThat(streamGraph.getSlotSharingGroupResource("ssg1").get(), is(ResourceProfile.fromResources(1, 100)));
    assertThat(streamGraph.getSlotSharingGroupResource("ssg2").get(), is(ResourceProfile.fromResources(2, 200)));
    assertThat(
            streamGraph.getSlotSharingGroupResource(StreamGraphGenerator.DEFAULT_SLOT_SHARING_GROUP).get(),
            is(ResourceProfile.fromResources(3, 300)));
}
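Beyond CPU and task heap memory, the same builder can declare further resource dimensions and be attached directly to an operator. A brief sketch; the group name "heavy" is made up for illustration, and setManagedMemoryMB is an assumption about the fine-grained resource builder that may not exist in every Flink version:

// Hypothetical "heavy" slot sharing group: the builder methods used in the test plus managed memory.
SlotSharingGroup heavyGroup =
        SlotSharingGroup.newBuilder("heavy")
                .setCpuCores(4)
                .setTaskHeapMemoryMB(1024)
                .setManagedMemoryMB(512)  // assumption: managed-memory setter on the builder
                .build();

env.fromElements(1, 2, 3)
        .map(value -> value * 2)
        .slotSharingGroup(heavyGroup)
        .addSink(new DiscardingSink<>());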
Use of org.apache.flink.streaming.api.datastream.DataStream in project flink by apache.
Example from the class StreamingJobGraphGeneratorWithGlobalStreamExchangeModeTest, method testGlobalExchangeModeDoesNotOverrideSpecifiedExchangeMode.
@Test
public void testGlobalExchangeModeDoesNotOverrideSpecifiedExchangeMode() {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    final DataStream<Integer> source = env.fromElements(1, 2, 3).setParallelism(1);
    final DataStream<Integer> forward =
            new DataStream<>(env, new PartitionTransformation<>(
                    source.getTransformation(), new ForwardPartitioner<>(), StreamExchangeMode.PIPELINED));
    forward.map(i -> i).startNewChain().setParallelism(1);

    final StreamGraph streamGraph = env.getStreamGraph();
    streamGraph.setGlobalStreamExchangeMode(GlobalStreamExchangeMode.ALL_EDGES_BLOCKING);
    final JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph);

    final List<JobVertex> verticesSorted = jobGraph.getVerticesSortedTopologicallyFromSources();
    final JobVertex sourceVertex = verticesSorted.get(0);

    assertEquals(
            ResultPartitionType.PIPELINED_BOUNDED,
            sourceVertex.getProducedDataSets().get(0).getResultType());
}
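For contrast, a sketch of the opposite case: when the edge's exchange mode is left UNDEFINED, the global ALL_EDGES_BLOCKING mode is expected to decide the result partition type. Names ending in 2 are hypothetical, and the buffer timeout is disabled because blocking partitions do not support one:

final StreamExecutionEnvironment env2 = StreamExecutionEnvironment.getExecutionEnvironment();
env2.setBufferTimeout(-1);  // blocking exchanges do not support a buffer timeout
final DataStream<Integer> src2 = env2.fromElements(1, 2, 3).setParallelism(1);
final DataStream<Integer> undefinedEdge =
        new DataStream<>(env2, new PartitionTransformation<>(
                src2.getTransformation(), new ForwardPartitioner<>(), StreamExchangeMode.UNDEFINED));
undefinedEdge.map(i -> i).startNewChain().setParallelism(1);

final StreamGraph graph2 = env2.getStreamGraph();
graph2.setGlobalStreamExchangeMode(GlobalStreamExchangeMode.ALL_EDGES_BLOCKING);
final JobVertex sourceVertex2 =
        StreamingJobGraphGenerator.createJobGraph(graph2)
                .getVerticesSortedTopologicallyFromSources()
                .get(0);
// With an UNDEFINED edge the global blocking mode is expected to apply.
assertEquals(ResultPartitionType.BLOCKING, sourceVertex2.getProducedDataSets().get(0).getResultType());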
Use of org.apache.flink.streaming.api.datastream.DataStream in project flink by apache.
Example from the class StreamingJobGraphGeneratorWithGlobalStreamExchangeModeTest, method createStreamGraph.
/**
* Topology: source(parallelism=1) --(forward)--> map1(parallelism=1) --(rescale)-->
* map2(parallelism=2) --(rebalance)--> sink(parallelism=2).
*/
private static StreamGraph createStreamGraph(GlobalStreamExchangeMode globalStreamExchangeMode) {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    if (globalStreamExchangeMode != GlobalStreamExchangeMode.ALL_EDGES_PIPELINED) {
        env.setBufferTimeout(-1);
    }

    final DataStream<Integer> source = env.fromElements(1, 2, 3).setParallelism(1);
    final DataStream<Integer> forward =
            new DataStream<>(env, new PartitionTransformation<>(
                    source.getTransformation(), new ForwardPartitioner<>(), StreamExchangeMode.UNDEFINED));
    final DataStream<Integer> map1 = forward.map(i -> i).startNewChain().setParallelism(1);
    final DataStream<Integer> rescale =
            new DataStream<>(env, new PartitionTransformation<>(
                    map1.getTransformation(), new RescalePartitioner<>(), StreamExchangeMode.UNDEFINED));
    final DataStream<Integer> map2 = rescale.map(i -> i).setParallelism(2);
    map2.rebalance().print().setParallelism(2);

    final StreamGraph streamGraph = env.getStreamGraph();
    streamGraph.setGlobalStreamExchangeMode(globalStreamExchangeMode);
    return streamGraph;
}
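A sketch of how this helper might be exercised: with ALL_EDGES_BLOCKING, every exchange left UNDEFINED is expected to become a BLOCKING result partition. The test name and assertions below are illustrative and may differ from the actual test in the Flink sources:

@Test
public void testAllEdgesBlockingMode() {
    final StreamGraph streamGraph = createStreamGraph(GlobalStreamExchangeMode.ALL_EDGES_BLOCKING);
    final JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph);
    for (JobVertex vertex : jobGraph.getVerticesSortedTopologicallyFromSources()) {
        for (IntermediateDataSet dataSet : vertex.getProducedDataSets()) {
            assertEquals(ResultPartitionType.BLOCKING, dataSet.getResultType());
        }
    }
}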