Use of org.apache.flink.util.Collector in project flink by apache.
The class StreamingJobGraphGeneratorTest, method testResourcesForIteration.
/**
 * Verifies that resources are merged correctly for chained operators
 * (covering the middle-chaining and iteration cases) when generating the job graph.
 */
@Test
public void testResourcesForIteration() throws Exception {
    ResourceSpec resource1 = new ResourceSpec(0.1, 100);
    ResourceSpec resource2 = new ResourceSpec(0.2, 200);
    ResourceSpec resource3 = new ResourceSpec(0.3, 300);
    ResourceSpec resource4 = new ResourceSpec(0.4, 400);
    ResourceSpec resource5 = new ResourceSpec(0.5, 500);

    Method opMethod = SingleOutputStreamOperator.class.getDeclaredMethod("setResources", ResourceSpec.class);
    opMethod.setAccessible(true);

    Method sinkMethod = DataStreamSink.class.getDeclaredMethod("setResources", ResourceSpec.class);
    sinkMethod.setAccessible(true);

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStream<Integer> source = env.addSource(new ParallelSourceFunction<Integer>() {
        @Override
        public void run(SourceContext<Integer> ctx) throws Exception {
        }

        @Override
        public void cancel() {
        }
    }).name("test_source");
    opMethod.invoke(source, resource1);

    IterativeStream<Integer> iteration = source.iterate(3000);
    opMethod.invoke(iteration, resource2);

    DataStream<Integer> flatMap = iteration.flatMap(new FlatMapFunction<Integer, Integer>() {
        @Override
        public void flatMap(Integer value, Collector<Integer> out) throws Exception {
            out.collect(value);
        }
    }).name("test_flatMap");
    opMethod.invoke(flatMap, resource3);

    // CHAIN(flatMap -> Filter)
    DataStream<Integer> increment = flatMap.filter(new FilterFunction<Integer>() {
        @Override
        public boolean filter(Integer value) throws Exception {
            return false;
        }
    }).name("test_filter");
    opMethod.invoke(increment, resource4);

    DataStreamSink<Integer> sink = iteration.closeWith(increment).addSink(new SinkFunction<Integer>() {
        @Override
        public void invoke(Integer value) throws Exception {
        }
    }).disableChaining().name("test_sink");
    sinkMethod.invoke(sink, resource5);

    JobGraph jobGraph = new StreamingJobGraphGenerator(env.getStreamGraph(), 1).createJobGraph();

    for (JobVertex jobVertex : jobGraph.getVertices()) {
        if (jobVertex.getName().contains("test_source")) {
            assertTrue(jobVertex.getMinResources().equals(resource1));
        } else if (jobVertex.getName().contains("Iteration_Source")) {
            assertTrue(jobVertex.getPreferredResources().equals(resource2));
        } else if (jobVertex.getName().contains("test_flatMap")) {
            assertTrue(jobVertex.getMinResources().equals(resource3.merge(resource4)));
        } else if (jobVertex.getName().contains("Iteration_Tail")) {
            assertTrue(jobVertex.getPreferredResources().equals(ResourceSpec.DEFAULT));
        } else if (jobVertex.getName().contains("test_sink")) {
            assertTrue(jobVertex.getMinResources().equals(resource5));
        }
    }
}
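A quick check of the arithmetic behind the test_flatMap assertion, under the assumption that ResourceSpec.merge sums the per-operator requirements (which is what the chained-vertex check implies):

    // resource3 = 0.3 CPU cores, 300 MB heap; resource4 = 0.4 CPU cores, 400 MB heap
    ResourceSpec merged = resource3.merge(resource4);
    // the chained flatMap/filter vertex then requires the sum of both,
    // i.e. merged.equals(new ResourceSpec(0.7, 700)): 0.7 CPU cores and 700 MB heap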
Use of org.apache.flink.util.Collector in project flink by apache.
The class InternalWindowFunctionTest, method testInternalIterableWindowFunction.
@SuppressWarnings("unchecked")
@Test
public void testInternalIterableWindowFunction() throws Exception {
    WindowFunctionMock mock = mock(WindowFunctionMock.class);
    InternalIterableWindowFunction<Long, String, Long, TimeWindow> windowFunction =
        new InternalIterableWindowFunction<>(mock);

    // check setOutputType
    TypeInformation<String> stringType = BasicTypeInfo.STRING_TYPE_INFO;
    ExecutionConfig execConf = new ExecutionConfig();
    execConf.setParallelism(42);
    StreamingFunctionUtils.setOutputType(windowFunction, stringType, execConf);
    verify(mock).setOutputType(stringType, execConf);

    // check open
    Configuration config = new Configuration();
    windowFunction.open(config);
    verify(mock).open(config);

    // check setRuntimeContext
    RuntimeContext rCtx = mock(RuntimeContext.class);
    windowFunction.setRuntimeContext(rCtx);
    verify(mock).setRuntimeContext(rCtx);

    // check apply
    TimeWindow w = mock(TimeWindow.class);
    Iterable<Long> i = (Iterable<Long>) mock(Iterable.class);
    Collector<String> c = (Collector<String>) mock(Collector.class);
    windowFunction.apply(42L, w, i, c);
    verify(mock).apply(eq(42L), eq(w), eq(i), eq(c));

    // check close
    windowFunction.close();
    verify(mock).close();
}
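The test checks pure delegation: every lifecycle call on the internal wrapper must be forwarded unchanged to the wrapped user function. A minimal sketch of that forwarding, with DelegatingWindowFunction as a hypothetical stand-in for the internal wrapper class:

    import org.apache.flink.api.common.functions.RuntimeContext;
    import org.apache.flink.configuration.Configuration;
    import org.apache.flink.streaming.api.functions.windowing.RichWindowFunction;
    import org.apache.flink.streaming.api.windowing.windows.Window;
    import org.apache.flink.util.Collector;

    // Hypothetical sketch of the pass-through behavior the mock verifications assert.
    public class DelegatingWindowFunction<IN, OUT, KEY, W extends Window> {

        private final RichWindowFunction<IN, OUT, KEY, W> wrapped;

        public DelegatingWindowFunction(RichWindowFunction<IN, OUT, KEY, W> wrapped) {
            this.wrapped = wrapped;
        }

        public void open(Configuration parameters) throws Exception {
            wrapped.open(parameters);               // verify(mock).open(config)
        }

        public void setRuntimeContext(RuntimeContext ctx) {
            wrapped.setRuntimeContext(ctx);         // verify(mock).setRuntimeContext(rCtx)
        }

        public void apply(KEY key, W window, Iterable<IN> input, Collector<OUT> out) throws Exception {
            wrapped.apply(key, window, input, out); // verify(mock).apply(eq(42L), ...)
        }

        public void close() throws Exception {
            wrapped.close();                        // verify(mock).close()
        }
    }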
Use of org.apache.flink.util.Collector in project flink by apache.
The class InternalWindowFunctionTest, method testInternalAggregateProcessAllWindowFunction.
@SuppressWarnings("unchecked")
@Test
public void testInternalAggregateProcessAllWindowFunction() throws Exception {
    AggregateProcessAllWindowFunctionMock mock = mock(AggregateProcessAllWindowFunctionMock.class);
    InternalAggregateProcessAllWindowFunction<Long, Set<Long>, Map<Long, Long>, String, TimeWindow> windowFunction =
        new InternalAggregateProcessAllWindowFunction<>(new AggregateFunction<Long, Set<Long>, Map<Long, Long>>() {

            private static final long serialVersionUID = 1L;

            @Override
            public Set<Long> createAccumulator() {
                return new HashSet<>();
            }

            @Override
            public void add(Long value, Set<Long> accumulator) {
                accumulator.add(value);
            }

            @Override
            public Map<Long, Long> getResult(Set<Long> accumulator) {
                Map<Long, Long> result = new HashMap<>();
                for (Long in : accumulator) {
                    result.put(in, in);
                }
                return result;
            }

            @Override
            public Set<Long> merge(Set<Long> a, Set<Long> b) {
                a.addAll(b);
                return a;
            }
        }, mock);

    // check setOutputType
    TypeInformation<String> stringType = BasicTypeInfo.STRING_TYPE_INFO;
    ExecutionConfig execConf = new ExecutionConfig();
    execConf.setParallelism(42);
    StreamingFunctionUtils.setOutputType(windowFunction, stringType, execConf);
    verify(mock).setOutputType(stringType, execConf);

    // check open
    Configuration config = new Configuration();
    windowFunction.open(config);
    verify(mock).open(config);

    // check setRuntimeContext
    RuntimeContext rCtx = mock(RuntimeContext.class);
    windowFunction.setRuntimeContext(rCtx);
    verify(mock).setRuntimeContext(rCtx);

    // check apply
    TimeWindow w = mock(TimeWindow.class);
    Collector<String> c = (Collector<String>) mock(Collector.class);
    List<Long> args = new LinkedList<>();
    args.add(23L);
    args.add(24L);
    windowFunction.apply(((byte) 0), w, args, c);
    verify(mock).process(
        (AggregateProcessAllWindowFunctionMock.Context) anyObject(),
        (Iterable) argThat(containsInAnyOrder(allOf(
            hasEntry(is(23L), is(23L)),
            hasEntry(is(24L), is(24L))))),
        eq(c));

    // check close
    windowFunction.close();
    verify(mock).close();
}
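Unlike the iterable variant above, the raw elements (23L, 24L) never reach the wrapped function directly: apply first folds them through the AggregateFunction, which is why the mock is verified against the aggregated Map rather than the input Longs. A minimal sketch of that fold, matching the void add(...) contract used above (preAggregate is a hypothetical helper, not Flink API):

    import org.apache.flink.api.common.functions.AggregateFunction;

    // Folds each element into a fresh accumulator, then emits the final result,
    // mirroring what the wrapper does before calling process() on the user function.
    static <IN, ACC, OUT> OUT preAggregate(AggregateFunction<IN, ACC, OUT> f, Iterable<IN> input) {
        ACC acc = f.createAccumulator();   // e.g. an empty HashSet<Long>
        for (IN value : input) {
            f.add(value, acc);             // 23L and 24L are folded into the set
        }
        return f.getResult(acc);           // yields the Map {23=23, 24=24} the mock sees
    }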
Use of org.apache.flink.util.Collector in project flink by apache.
The class MiscellaneousIssuesITCase, method testAccumulatorsAfterNoOp.
@Test
public void testAccumulatorsAfterNoOp() {
    final String ACC_NAME = "test_accumulator";
    try {
        ExecutionEnvironment env = ExecutionEnvironment.createRemoteEnvironment("localhost", cluster.getLeaderRPCPort());
        env.setParallelism(6);
        env.getConfig().disableSysoutLogging();

        env.generateSequence(1, 1000000)
            .rebalance()
            .flatMap(new RichFlatMapFunction<Long, Long>() {

                private LongCounter counter;

                @Override
                public void open(Configuration parameters) {
                    counter = getRuntimeContext().getLongCounter(ACC_NAME);
                }

                @Override
                public void flatMap(Long value, Collector<Long> out) {
                    counter.add(1L);
                }
            })
            .output(new DiscardingOutputFormat<Long>());

        JobExecutionResult result = env.execute();
        assertEquals(1000000L, result.getAllAccumulatorResults().get(ACC_NAME));
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
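Note that the flatMap emits nothing to its Collector; the accumulator is the only channel carrying information back to the client. JobExecutionResult also offers a typed single-accumulator lookup, so the assertion could equally be written as follows (same accumulator name assumed):

    // Typed lookup of one accumulator instead of the whole result map;
    // the value is the global count summed across all 6 parallel subtasks.
    long total = result.<Long>getAccumulatorResult(ACC_NAME);
    assertEquals(1000000L, total);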
Use of org.apache.flink.util.Collector in project flink by apache.
The class GroupReduceITCase, method testGroupReduceSelectorKeysWithSemProps.
@Test
public void testGroupReduceSelectorKeysWithSemProps() throws Exception {
    /*
     * Test that semantic properties are correctly adapted when using Selector Keys
     */
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(4);

    DataSet<Tuple5<Integer, Long, Integer, String, Long>> ds = CollectionDataSets.get5TupleDataSet(env);

    DataSet<Tuple2<Integer, Long>> reduceDs = ds
        .groupBy(new KeySelector<Tuple5<Integer, Long, Integer, String, Long>, Long>() {
            @Override
            public Long getKey(Tuple5<Integer, Long, Integer, String, Long> v) throws Exception {
                return (v.f0 * v.f1) - (v.f2 * v.f4);
            }
        })
        .reduceGroup(new GroupReduceFunction<Tuple5<Integer, Long, Integer, String, Long>, Tuple5<Integer, Long, Integer, String, Long>>() {
            @Override
            public void reduce(Iterable<Tuple5<Integer, Long, Integer, String, Long>> values, Collector<Tuple5<Integer, Long, Integer, String, Long>> out) throws Exception {
                for (Tuple5<Integer, Long, Integer, String, Long> v : values) {
                    out.collect(v);
                }
            }
        })
        .withForwardedFields("0")
        .groupBy(0)
        .reduceGroup(new GroupReduceFunction<Tuple5<Integer, Long, Integer, String, Long>, Tuple2<Integer, Long>>() {
            @Override
            public void reduce(Iterable<Tuple5<Integer, Long, Integer, String, Long>> values, Collector<Tuple2<Integer, Long>> out) throws Exception {
                int k = 0;
                long s = 0;
                for (Tuple5<Integer, Long, Integer, String, Long> v : values) {
                    k = v.f0;
                    s += v.f1;
                }
                out.collect(new Tuple2<>(k, s));
            }
        });

    List<Tuple2<Integer, Long>> result = reduceDs.collect();

    String expected = "1,1\n" + "2,5\n" + "3,15\n" + "4,34\n" + "5,65\n";

    compareResultAsTuples(result, expected);
}
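withForwardedFields("0") declares that field 0 reaches the output of the first (identity) reducer unmodified, giving the optimizer a semantic property it can exploit when planning the subsequent groupBy(0). The same hint can be attached declaratively with an annotation; a minimal sketch on a standalone identity reducer (IdentityReducer is illustrative, not part of the test):

    import org.apache.flink.api.common.functions.GroupReduceFunction;
    import org.apache.flink.api.java.functions.FunctionAnnotation.ForwardedFields;
    import org.apache.flink.api.java.tuple.Tuple2;
    import org.apache.flink.util.Collector;

    // Declarative equivalent of .withForwardedFields("0"):
    // field 0 of every emitted record is copied unchanged from the input.
    @ForwardedFields("0")
    public class IdentityReducer implements GroupReduceFunction<Tuple2<Integer, Long>, Tuple2<Integer, Long>> {
        @Override
        public void reduce(Iterable<Tuple2<Integer, Long>> values, Collector<Tuple2<Integer, Long>> out) {
            for (Tuple2<Integer, Long> v : values) {
                out.collect(v);
            }
        }
    }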