Usage example of org.apache.flink.api.common.state.AggregatingStateDescriptor from the Apache Flink project: class AllWindowTranslationTest, method testAggregateProcessingTime.
@Test
public void testAggregateProcessingTime() throws Exception {
    // Two elements with the same key, aggregated over a sliding processing-time window.
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    final DataStream<Tuple2<String, Integer>> source =
            env.fromElements(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

    final DataStream<Tuple2<String, Integer>> windowed = source
            .windowAll(SlidingProcessingTimeWindows.of(
                    Time.of(1, TimeUnit.SECONDS), Time.of(100, TimeUnit.MILLISECONDS)))
            .aggregate(new DummyAggregationFunction());

    final OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>> transform =
            (OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>>) windowed.getTransformation();
    final OneInputStreamOperator<Tuple2<String, Integer>, Tuple2<String, Integer>> operator =
            transform.getOperator();

    // The translation must yield a WindowOperator wired with the processing-time trigger,
    // the sliding processing-time assigner, and aggregating window state.
    Assert.assertTrue(operator instanceof WindowOperator);
    final WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator =
            (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
    Assert.assertTrue(winOperator.getTrigger() instanceof ProcessingTimeTrigger);
    Assert.assertTrue(winOperator.getWindowAssigner() instanceof SlidingProcessingTimeWindows);
    Assert.assertTrue(winOperator.getStateDescriptor() instanceof AggregatingStateDescriptor);

    // Smoke-test: pushing one element through the operator must produce output.
    processElementAndEnsureOutput(
            winOperator, winOperator.getKeySelector(), BasicTypeInfo.STRING_TYPE_INFO, new Tuple2<>("hello", 1));
}
Usage example of org.apache.flink.api.common.state.AggregatingStateDescriptor from the Apache Flink project: class AllWindowTranslationTest, method testAggregateWithWindowFunctionProcessingTime.
@Test
public void testAggregateWithWindowFunctionProcessingTime() throws Exception {
    // Aggregate plus a window function: output type widens to Tuple3.
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    final DataStream<Tuple2<String, Integer>> source =
            env.fromElements(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

    final DataStream<Tuple3<String, String, Integer>> windowed = source
            .windowAll(TumblingProcessingTimeWindows.of(Time.of(1, TimeUnit.SECONDS)))
            .aggregate(new DummyAggregationFunction(), new TestAllWindowFunction());

    final OneInputTransformation<Tuple2<String, Integer>, Tuple3<String, String, Integer>> transform =
            (OneInputTransformation<Tuple2<String, Integer>, Tuple3<String, String, Integer>>) windowed.getTransformation();
    final OneInputStreamOperator<Tuple2<String, Integer>, Tuple3<String, String, Integer>> operator =
            transform.getOperator();

    // Even with a window function attached, the operator must still use an
    // AggregatingStateDescriptor (incremental aggregation, not buffered elements).
    Assert.assertTrue(operator instanceof WindowOperator);
    final WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator =
            (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
    Assert.assertTrue(winOperator.getTrigger() instanceof ProcessingTimeTrigger);
    Assert.assertTrue(winOperator.getWindowAssigner() instanceof TumblingProcessingTimeWindows);
    Assert.assertTrue(winOperator.getStateDescriptor() instanceof AggregatingStateDescriptor);

    // Smoke-test: one element in, at least one record out.
    processElementAndEnsureOutput(
            operator, winOperator.getKeySelector(), BasicTypeInfo.STRING_TYPE_INFO, new Tuple2<>("hello", 1));
}
Usage example of org.apache.flink.api.common.state.AggregatingStateDescriptor from the Apache Flink project: class RocksDBAggregatingStateTest, method testAddAndGet.
// ------------------------------------------------------------------------
//  Verifies basic add/get/clear semantics of RocksDB-backed aggregating state,
//  including isolation of state between different current keys.
@Test
public void testAddAndGet() throws Exception {
// Summing aggregator over MutableLong accumulators, exposed as Long in/out.
final AggregatingStateDescriptor<Long, MutableLong, Long> stateDescr = new AggregatingStateDescriptor<>("my-state", new AddingFunction(), MutableLong.class);
stateDescr.initializeSerializerUnlessSet(new ExecutionConfig());
// Fresh RocksDB instance on a temp folder so the test leaves no residue.
final RocksDBStateBackend backend = new RocksDBStateBackend(tmp.newFolder().toURI());
backend.setDbStoragePath(tmp.newFolder().getAbsolutePath());
final RocksDBKeyedStateBackend<String> keyedBackend = createKeyedBackend(backend);
try {
InternalAggregatingState<VoidNamespace, Long, Long> state = keyedBackend.createAggregatingState(VoidNamespaceSerializer.INSTANCE, stateDescr);
state.setCurrentNamespace(VoidNamespace.INSTANCE);
// Unwritten keys must read as null.
keyedBackend.setCurrentKey("abc");
assertNull(state.get());
keyedBackend.setCurrentKey("def");
assertNull(state.get());
// Aggregate under "def": 17 + 11 = 28.
state.add(17L);
state.add(11L);
assertEquals(28L, state.get().longValue());
// Switching keys must not leak "def"'s value into "abc" or "g".
keyedBackend.setCurrentKey("abc");
assertNull(state.get());
keyedBackend.setCurrentKey("g");
assertNull(state.get());
state.add(1L);
state.add(2L);
// "def" still holds its own aggregate after other keys were written.
keyedBackend.setCurrentKey("def");
assertEquals(28L, state.get().longValue());
// clear() removes only the current key's state.
state.clear();
assertNull(state.get());
// Continue aggregating under "g": 3 + 2 + 1 on top of the earlier 1 + 2 = 9.
keyedBackend.setCurrentKey("g");
state.add(3L);
state.add(2L);
state.add(1L);
// The cleared key stays empty; "g" is unaffected by the clear.
keyedBackend.setCurrentKey("def");
assertNull(state.get());
keyedBackend.setCurrentKey("g");
assertEquals(9L, state.get().longValue());
} finally {
// Always release the RocksDB resources, even on assertion failure.
keyedBackend.close();
keyedBackend.dispose();
}
}
Usage example of org.apache.flink.api.common.state.AggregatingStateDescriptor from the Apache Flink project: class HeapAggregatingStateTest, method testMerging.
// Verifies that mergeNamespaces() combines per-namespace aggregates correctly on the
// heap backend, for every source/target layout, and that clear() empties the state table.
@Test
public void testMerging() throws Exception {
final AggregatingStateDescriptor<Long, MutableLong, Long> stateDescr = new AggregatingStateDescriptor<>("my-state", new AddingFunction(), MutableLong.class);
stateDescr.initializeSerializerUnlessSet(new ExecutionConfig());
final Integer namespace1 = 1;
final Integer namespace2 = 2;
final Integer namespace3 = 3;
// 11 + 22 + 33 + 44 + 55 — every key below receives these five values in some split.
final Long expectedResult = 165L;
final HeapKeyedStateBackend<String> keyedBackend = createKeyedBackend();
try {
InternalAggregatingState<Integer, Long, Long> state = keyedBackend.createAggregatingState(IntSerializer.INSTANCE, stateDescr);
// populate the different namespaces
// - abc spreads the values over three namespaces
// - def spreads the values over two namespaces (one empty)
// - ghi is empty
// - jkl has all elements already in the target namespace
// - mno has all elements already in one source namespace
keyedBackend.setCurrentKey("abc");
state.setCurrentNamespace(namespace1);
state.add(33L);
state.add(55L);
state.setCurrentNamespace(namespace2);
state.add(22L);
state.add(11L);
state.setCurrentNamespace(namespace3);
state.add(44L);
keyedBackend.setCurrentKey("def");
state.setCurrentNamespace(namespace1);
state.add(11L);
state.add(44L);
state.setCurrentNamespace(namespace3);
state.add(22L);
state.add(55L);
state.add(33L);
keyedBackend.setCurrentKey("jkl");
state.setCurrentNamespace(namespace1);
state.add(11L);
state.add(22L);
state.add(33L);
state.add(44L);
state.add(55L);
keyedBackend.setCurrentKey("mno");
state.setCurrentNamespace(namespace3);
state.add(11L);
state.add(22L);
state.add(33L);
state.add(44L);
state.add(55L);
// Merge namespaces 2 and 3 into namespace 1 for each key and check the total.
keyedBackend.setCurrentKey("abc");
state.mergeNamespaces(namespace1, asList(namespace2, namespace3));
state.setCurrentNamespace(namespace1);
assertEquals(expectedResult, state.get());
keyedBackend.setCurrentKey("def");
state.mergeNamespaces(namespace1, asList(namespace2, namespace3));
state.setCurrentNamespace(namespace1);
assertEquals(expectedResult, state.get());
// "ghi" never received values: merging empty namespaces must yield null, not zero.
keyedBackend.setCurrentKey("ghi");
state.mergeNamespaces(namespace1, asList(namespace2, namespace3));
state.setCurrentNamespace(namespace1);
assertNull(state.get());
keyedBackend.setCurrentKey("jkl");
state.mergeNamespaces(namespace1, asList(namespace2, namespace3));
state.setCurrentNamespace(namespace1);
assertEquals(expectedResult, state.get());
keyedBackend.setCurrentKey("mno");
state.mergeNamespaces(namespace1, asList(namespace2, namespace3));
state.setCurrentNamespace(namespace1);
assertEquals(expectedResult, state.get());
// make sure all lists / maps are cleared
keyedBackend.setCurrentKey("abc");
state.setCurrentNamespace(namespace1);
state.clear();
keyedBackend.setCurrentKey("def");
state.setCurrentNamespace(namespace1);
state.clear();
keyedBackend.setCurrentKey("ghi");
state.setCurrentNamespace(namespace1);
state.clear();
keyedBackend.setCurrentKey("jkl");
state.setCurrentNamespace(namespace1);
state.clear();
keyedBackend.setCurrentKey("mno");
state.setCurrentNamespace(namespace1);
state.clear();
// After clearing every key under namespace1 (the merge target), the backing
// state table must be completely empty — merged source namespaces were consumed.
StateTable<String, Integer, MutableLong> stateTable = ((HeapAggregatingState<String, Integer, Long, MutableLong, Long>) state).stateTable;
assertTrue(stateTable.isEmpty());
} finally {
// Release backend resources regardless of test outcome.
keyedBackend.close();
keyedBackend.dispose();
}
}
Usage example of org.apache.flink.api.common.state.AggregatingStateDescriptor from the Apache Flink project: class WindowedStream, method aggregate.
/**
 * Applies the given window function to each window. The window function is called for each
 * evaluation of the window for each key individually. The output of the window function is
 * interpreted as a regular non-windowed stream.
 *
 * <p>Arriving data is incrementally aggregated using the given aggregate function. This means
 * that the window function typically has only a single value to process when called.
 *
 * @param aggregateFunction The aggregation function that is used for incremental aggregation.
 * @param windowFunction The window function.
 * @param accumulatorType Type information for the internal accumulator type of the aggregation function
 * @param aggregateResultType Type information for the result type of the aggregation function
 *        (the input type of the window function)
 * @param resultType Type information for the result type of the window function
 *
 * @return The data stream that is the result of applying the window function to the window.
 *
 * @param <ACC> The type of the AggregateFunction's accumulator
 * @param <V> The type of AggregateFunction's result, and the WindowFunction's input
 * @param <R> The type of the elements in the resulting stream, equal to the
 * WindowFunction's result type
 */
@PublicEvolving
public <ACC, V, R> SingleOutputStreamOperator<R> aggregate(AggregateFunction<T, ACC, V> aggregateFunction, WindowFunction<V, R, K, W> windowFunction, TypeInformation<ACC> accumulatorType, TypeInformation<V> aggregateResultType, TypeInformation<R> resultType) {
checkNotNull(aggregateFunction, "aggregateFunction");
checkNotNull(windowFunction, "windowFunction");
checkNotNull(accumulatorType, "accumulatorType");
checkNotNull(aggregateResultType, "aggregateResultType");
checkNotNull(resultType, "resultType");
// Rich aggregate functions are rejected: the function is serialized into a state
// descriptor below and cannot participate in the rich-function lifecycle there.
if (aggregateFunction instanceof RichFunction) {
throw new UnsupportedOperationException("This aggregate function cannot be a RichFunction.");
}
//clean the closures
windowFunction = input.getExecutionEnvironment().clean(windowFunction);
aggregateFunction = input.getExecutionEnvironment().clean(aggregateFunction);
String callLocation = Utils.getCallLocationName();
String udfName = "WindowedStream." + callLocation;
String opName;
KeySelector<T, K> keySel = input.getKeySelector();
OneInputStreamOperator<T, R> operator;
if (evictor != null) {
// With an evictor, elements cannot be pre-aggregated: the evictor may remove
// arbitrary elements, so raw StreamRecords are buffered in ListState and the
// aggregation is applied lazily per window evaluation.
@SuppressWarnings({ "unchecked", "rawtypes" }) TypeSerializer<StreamRecord<T>> streamRecordSerializer = (TypeSerializer<StreamRecord<T>>) new StreamElementSerializer(input.getType().createSerializer(getExecutionEnvironment().getConfig()));
ListStateDescriptor<StreamRecord<T>> stateDesc = new ListStateDescriptor<>("window-contents", streamRecordSerializer);
opName = "TriggerWindow(" + windowAssigner + ", " + stateDesc + ", " + trigger + ", " + evictor + ", " + udfName + ")";
operator = new EvictingWindowOperator<>(windowAssigner, windowAssigner.getWindowSerializer(getExecutionEnvironment().getConfig()), keySel, input.getKeyType().createSerializer(getExecutionEnvironment().getConfig()), stateDesc, new InternalIterableWindowFunction<>(new AggregateApplyWindowFunction<>(aggregateFunction, windowFunction)), trigger, evictor, allowedLateness, lateDataOutputTag);
} else {
// Without an evictor, incremental aggregation is possible: per-window state is a
// single accumulator held in AggregatingState, updated as each element arrives.
AggregatingStateDescriptor<T, ACC, V> stateDesc = new AggregatingStateDescriptor<>("window-contents", aggregateFunction, accumulatorType.createSerializer(getExecutionEnvironment().getConfig()));
opName = "TriggerWindow(" + windowAssigner + ", " + stateDesc + ", " + trigger + ", " + udfName + ")";
operator = new WindowOperator<>(windowAssigner, windowAssigner.getWindowSerializer(getExecutionEnvironment().getConfig()), keySel, input.getKeyType().createSerializer(getExecutionEnvironment().getConfig()), stateDesc, new InternalSingleValueWindowFunction<>(windowFunction), trigger, allowedLateness, lateDataOutputTag);
}
return input.transform(opName, resultType, operator);
}
Aggregations