Use of org.apache.flink.api.common.state.AggregatingStateDescriptor in project flink by apache.
The class RocksDBAggregatingStateTest, method testMerging:
@Test
public void testMerging() throws Exception {
    final AggregatingStateDescriptor<Long, MutableLong, Long> stateDescr =
            new AggregatingStateDescriptor<>("my-state", new AddingFunction(), MutableLong.class);
    stateDescr.initializeSerializerUnlessSet(new ExecutionConfig());

    final TimeWindow win1 = new TimeWindow(1000, 2000);
    final TimeWindow win2 = new TimeWindow(2000, 3000);
    final TimeWindow win3 = new TimeWindow(3000, 4000);

    final Long expectedResult = 165L;

    final RocksDBStateBackend backend = new RocksDBStateBackend(tmp.newFolder().toURI());
    backend.setDbStoragePath(tmp.newFolder().getAbsolutePath());

    final RocksDBKeyedStateBackend<String> keyedBackend = createKeyedBackend(backend);

    try {
        InternalAggregatingState<TimeWindow, Long, Long> state =
                keyedBackend.createAggregatingState(new TimeWindow.Serializer(), stateDescr);

        // populate the different namespaces
        //  - abc spreads the values over three namespaces
        //  - def spreads the values over two namespaces (one empty)
        //  - ghi is empty
        //  - jkl has all elements already in the target namespace
        //  - mno has all elements already in one source namespace
        keyedBackend.setCurrentKey("abc");
        state.setCurrentNamespace(win1);
        state.add(33L);
        state.add(55L);

        state.setCurrentNamespace(win2);
        state.add(22L);
        state.add(11L);

        state.setCurrentNamespace(win3);
        state.add(44L);

        keyedBackend.setCurrentKey("def");
        state.setCurrentNamespace(win1);
        state.add(11L);
        state.add(44L);

        state.setCurrentNamespace(win3);
        state.add(22L);
        state.add(55L);
        state.add(33L);

        keyedBackend.setCurrentKey("jkl");
        state.setCurrentNamespace(win1);
        state.add(11L);
        state.add(22L);
        state.add(33L);
        state.add(44L);
        state.add(55L);

        keyedBackend.setCurrentKey("mno");
        state.setCurrentNamespace(win3);
        state.add(11L);
        state.add(22L);
        state.add(33L);
        state.add(44L);
        state.add(55L);

        keyedBackend.setCurrentKey("abc");
        state.mergeNamespaces(win1, asList(win2, win3));
        state.setCurrentNamespace(win1);
        assertEquals(expectedResult, state.get());

        keyedBackend.setCurrentKey("def");
        state.mergeNamespaces(win1, asList(win2, win3));
        state.setCurrentNamespace(win1);
        assertEquals(expectedResult, state.get());

        keyedBackend.setCurrentKey("ghi");
        state.mergeNamespaces(win1, asList(win2, win3));
        state.setCurrentNamespace(win1);
        assertNull(state.get());

        keyedBackend.setCurrentKey("jkl");
        state.mergeNamespaces(win1, asList(win2, win3));
        state.setCurrentNamespace(win1);
        assertEquals(expectedResult, state.get());

        keyedBackend.setCurrentKey("mno");
        state.mergeNamespaces(win1, asList(win2, win3));
        state.setCurrentNamespace(win1);
        assertEquals(expectedResult, state.get());
    } finally {
        keyedBackend.close();
        keyedBackend.dispose();
    }
}
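The test sums the values 11 through 55 across namespaces, so the expected merged result is 11 + 22 + 33 + 44 + 55 = 165. The helpers AddingFunction and MutableLong are defined elsewhere in the test class; below is a minimal sketch of what they plausibly look like, assuming an AggregateFunction whose add and merge return the mutated accumulator (the actual definitions live in the Flink test sources and may differ in detail):

import org.apache.flink.api.common.functions.AggregateFunction;

// Hypothetical sketch of the test helpers, not the verbatim Flink code.
public static class MutableLong {
    public long value;
}

public static class AddingFunction implements AggregateFunction<Long, MutableLong, Long> {

    @Override
    public MutableLong createAccumulator() {
        return new MutableLong();
    }

    @Override
    public MutableLong add(Long value, MutableLong accumulator) {
        accumulator.value += value; // fold the new element into the running sum
        return accumulator;
    }

    @Override
    public Long getResult(MutableLong accumulator) {
        return accumulator.value;
    }

    @Override
    public MutableLong merge(MutableLong a, MutableLong b) {
        a.value += b.value; // mergeNamespaces funnels through this method
        return a;
    }
}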
Use of org.apache.flink.api.common.state.AggregatingStateDescriptor in project flink by apache.
The class HeapAggregatingStateTest, method testAddAndGet:
@Test
public void testAddAndGet() throws Exception {
    final AggregatingStateDescriptor<Long, MutableLong, Long> stateDescr =
            new AggregatingStateDescriptor<>("my-state", new AddingFunction(), MutableLong.class);
    stateDescr.initializeSerializerUnlessSet(new ExecutionConfig());

    final HeapKeyedStateBackend<String> keyedBackend = createKeyedBackend();

    try {
        InternalAggregatingState<VoidNamespace, Long, Long> state =
                keyedBackend.createAggregatingState(VoidNamespaceSerializer.INSTANCE, stateDescr);
        state.setCurrentNamespace(VoidNamespace.INSTANCE);

        keyedBackend.setCurrentKey("abc");
        assertNull(state.get());

        keyedBackend.setCurrentKey("def");
        assertNull(state.get());
        state.add(17L);
        state.add(11L);
        assertEquals(28L, state.get().longValue());

        keyedBackend.setCurrentKey("abc");
        assertNull(state.get());

        keyedBackend.setCurrentKey("g");
        assertNull(state.get());
        state.add(1L);
        state.add(2L);

        keyedBackend.setCurrentKey("def");
        assertEquals(28L, state.get().longValue());
        state.clear();
        assertNull(state.get());

        keyedBackend.setCurrentKey("g");
        state.add(3L);
        state.add(2L);
        state.add(1L);

        keyedBackend.setCurrentKey("def");
        assertNull(state.get());

        keyedBackend.setCurrentKey("g");
        assertEquals(9L, state.get().longValue());
        state.clear();

        // make sure all lists / maps are cleared
        StateTable<String, VoidNamespace, MutableLong> stateTable =
                ((HeapAggregatingState<String, VoidNamespace, Long, MutableLong, Long>) state).stateTable;
        assertTrue(stateTable.isEmpty());
    } finally {
        keyedBackend.close();
        keyedBackend.dispose();
    }
}
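In user code, the same descriptor is normally obtained through the RuntimeContext rather than by creating state on a keyed backend directly. A hedged sketch of that pattern, reusing the AddingFunction and MutableLong helpers sketched above (RunningSum itself is illustrative, not from the test):

import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.api.common.state.AggregatingState;
import org.apache.flink.api.common.state.AggregatingStateDescriptor;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.util.Collector;

// Illustrative keyed function: emits the running per-key sum after each element.
public class RunningSum extends RichFlatMapFunction<Long, Long> {

    private transient AggregatingState<Long, Long> sum;

    @Override
    public void open(Configuration parameters) {
        AggregatingStateDescriptor<Long, MutableLong, Long> descriptor =
                new AggregatingStateDescriptor<>("running-sum", new AddingFunction(), MutableLong.class);
        sum = getRuntimeContext().getAggregatingState(descriptor);
    }

    @Override
    public void flatMap(Long value, Collector<Long> out) throws Exception {
        sum.add(value);         // IN side of the aggregation
        out.collect(sum.get()); // OUT side, computed via getResult
    }
}

It would be applied on a keyed stream, e.g. stream.keyBy(...).flatMap(new RunningSum()); the key scoping mirrors what setCurrentKey does explicitly in the test above.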
Use of org.apache.flink.api.common.state.AggregatingStateDescriptor in project flink by apache.
The class AllWindowedStream, method aggregate:
/**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window. The output of the window function is interpreted as a regular
* non-windowed stream.
*
* <p>Arriving data is incrementally aggregated using the given aggregate function. This means
* that the window function typically has only a single value to process when called.
*
* @param aggregateFunction The aggregation function that is used for incremental aggregation.
* @param windowFunction The window function.
* @param accumulatorType Type information for the internal accumulator type of the aggregation function
* @param aggregateResultType Type information for the result type of the aggregate function
* @param resultType Type information for the result type of the window function
*
* @return The data stream that is the result of applying the window function to the window.
*
* @param <ACC> The type of the AggregateFunction's accumulator
* @param <V> The type of AggregateFunction's result, and the WindowFunction's input
* @param <R> The type of the elements in the resulting stream, equal to the
* WindowFunction's result type
*/
@PublicEvolving
public <ACC, V, R> SingleOutputStreamOperator<R> aggregate(
        AggregateFunction<T, ACC, V> aggregateFunction,
        AllWindowFunction<V, R, W> windowFunction,
        TypeInformation<ACC> accumulatorType,
        TypeInformation<V> aggregateResultType,
        TypeInformation<R> resultType) {
    checkNotNull(aggregateFunction, "aggregateFunction");
    checkNotNull(windowFunction, "windowFunction");
    checkNotNull(accumulatorType, "accumulatorType");
    checkNotNull(aggregateResultType, "aggregateResultType");
    checkNotNull(resultType, "resultType");

    if (aggregateFunction instanceof RichFunction) {
        throw new UnsupportedOperationException("This aggregate function cannot be a RichFunction.");
    }

    // clean the closures
    windowFunction = input.getExecutionEnvironment().clean(windowFunction);
    aggregateFunction = input.getExecutionEnvironment().clean(aggregateFunction);

    final String callLocation = Utils.getCallLocationName();
    final String udfName = "AllWindowedStream." + callLocation;

    final String opName;
    final KeySelector<T, Byte> keySel = input.getKeySelector();
    OneInputStreamOperator<T, R> operator;

    if (evictor != null) {
        @SuppressWarnings({"unchecked", "rawtypes"})
        TypeSerializer<StreamRecord<T>> streamRecordSerializer = (TypeSerializer<StreamRecord<T>>)
                new StreamElementSerializer(input.getType().createSerializer(getExecutionEnvironment().getConfig()));
        ListStateDescriptor<StreamRecord<T>> stateDesc =
                new ListStateDescriptor<>("window-contents", streamRecordSerializer);
        opName = "TriggerWindow(" + windowAssigner + ", " + stateDesc + ", " + trigger + ", " + evictor + ", " + udfName + ")";
        operator = new EvictingWindowOperator<>(
                windowAssigner, windowAssigner.getWindowSerializer(getExecutionEnvironment().getConfig()),
                keySel, input.getKeyType().createSerializer(getExecutionEnvironment().getConfig()), stateDesc,
                new InternalIterableAllWindowFunction<>(
                        new AggregateApplyAllWindowFunction<>(aggregateFunction, windowFunction)),
                trigger, evictor, allowedLateness, lateDataOutputTag);
    } else {
        AggregatingStateDescriptor<T, ACC, V> stateDesc = new AggregatingStateDescriptor<>(
                "window-contents", aggregateFunction,
                accumulatorType.createSerializer(getExecutionEnvironment().getConfig()));
        opName = "TriggerWindow(" + windowAssigner + ", " + stateDesc + ", " + trigger + ", " + udfName + ")";
        operator = new WindowOperator<>(
                windowAssigner, windowAssigner.getWindowSerializer(getExecutionEnvironment().getConfig()),
                keySel, input.getKeyType().createSerializer(getExecutionEnvironment().getConfig()), stateDesc,
                new InternalSingleValueAllWindowFunction<>(windowFunction),
                trigger, allowedLateness, lateDataOutputTag);
    }

    return input.transform(opName, resultType, operator).forceNonParallel();
}
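For context, here is a hedged sketch of how a caller might reach this code path. AverageAggregate and the pipeline below are illustrative; the simpler aggregate(AggregateFunction) overloads derive the TypeInformation arguments automatically before building the same "window-contents" aggregating state:

import org.apache.flink.api.common.functions.AggregateFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;

// Illustrative aggregate: running average with a (sum, count) accumulator.
public static class AverageAggregate implements AggregateFunction<Long, Tuple2<Long, Long>, Double> {

    @Override
    public Tuple2<Long, Long> createAccumulator() {
        return Tuple2.of(0L, 0L);
    }

    @Override
    public Tuple2<Long, Long> add(Long value, Tuple2<Long, Long> acc) {
        return Tuple2.of(acc.f0 + value, acc.f1 + 1);
    }

    @Override
    public Double getResult(Tuple2<Long, Long> acc) {
        return acc.f1 == 0 ? 0.0 : ((double) acc.f0) / acc.f1;
    }

    @Override
    public Tuple2<Long, Long> merge(Tuple2<Long, Long> a, Tuple2<Long, Long> b) {
        return Tuple2.of(a.f0 + b.f0, a.f1 + b.f1);
    }
}

// Non-keyed windowed aggregation; with no evictor configured, the branch above
// backs this with the AggregatingStateDescriptor named "window-contents".
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
DataStream<Double> averages = env
        .fromElements(1L, 2L, 3L, 4L)
        .windowAll(TumblingProcessingTimeWindows.of(Time.seconds(5)))
        .aggregate(new AverageAggregate());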
Use of org.apache.flink.api.common.state.AggregatingStateDescriptor in project flink by apache.
The class WindowedStream, method aggregate:
/**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* <p>Arriving data is incrementally aggregated using the given aggregate function. This means
* that the window function typically has only a single value to process when called.
*
* @param aggregateFunction The aggregation function that is used for incremental aggregation.
* @param windowFunction The window function.
* @param accumulatorType Type information for the internal accumulator type of the aggregation function
* @param aggregateResultType Type information for the result type of the aggregate function
* @param resultType Type information for the result type of the window function
*
* @return The data stream that is the result of applying the window function to the window.
*
* @param <ACC> The type of the AggregateFunction's accumulator
* @param <V> The type of AggregateFunction's result, and the WindowFunction's input
* @param <R> The type of the elements in the resulting stream, equal to the
* WindowFunction's result type
*/
@PublicEvolving
public <ACC, V, R> SingleOutputStreamOperator<R> aggregate(
        AggregateFunction<T, ACC, V> aggregateFunction,
        ProcessWindowFunction<V, R, K, W> windowFunction,
        TypeInformation<ACC> accumulatorType,
        TypeInformation<V> aggregateResultType,
        TypeInformation<R> resultType) {
    checkNotNull(aggregateFunction, "aggregateFunction");
    checkNotNull(windowFunction, "windowFunction");
    checkNotNull(accumulatorType, "accumulatorType");
    checkNotNull(aggregateResultType, "aggregateResultType");
    checkNotNull(resultType, "resultType");

    if (aggregateFunction instanceof RichFunction) {
        throw new UnsupportedOperationException("This aggregate function cannot be a RichFunction.");
    }

    // clean the closures
    windowFunction = input.getExecutionEnvironment().clean(windowFunction);
    aggregateFunction = input.getExecutionEnvironment().clean(aggregateFunction);

    String callLocation = Utils.getCallLocationName();
    String udfName = "WindowedStream." + callLocation;

    String opName;
    KeySelector<T, K> keySel = input.getKeySelector();
    OneInputStreamOperator<T, R> operator;

    if (evictor != null) {
        @SuppressWarnings({"unchecked", "rawtypes"})
        TypeSerializer<StreamRecord<T>> streamRecordSerializer = (TypeSerializer<StreamRecord<T>>)
                new StreamElementSerializer(input.getType().createSerializer(getExecutionEnvironment().getConfig()));
        ListStateDescriptor<StreamRecord<T>> stateDesc =
                new ListStateDescriptor<>("window-contents", streamRecordSerializer);
        opName = "TriggerWindow(" + windowAssigner + ", " + stateDesc + ", " + trigger + ", " + evictor + ", " + udfName + ")";
        operator = new EvictingWindowOperator<>(
                windowAssigner, windowAssigner.getWindowSerializer(getExecutionEnvironment().getConfig()),
                keySel, input.getKeyType().createSerializer(getExecutionEnvironment().getConfig()), stateDesc,
                new InternalAggregateProcessWindowFunction<>(aggregateFunction, windowFunction),
                trigger, evictor, allowedLateness, lateDataOutputTag);
    } else {
        AggregatingStateDescriptor<T, ACC, V> stateDesc = new AggregatingStateDescriptor<>(
                "window-contents", aggregateFunction,
                accumulatorType.createSerializer(getExecutionEnvironment().getConfig()));
        opName = "TriggerWindow(" + windowAssigner + ", " + stateDesc + ", " + trigger + ", " + udfName + ")";
        operator = new WindowOperator<>(
                windowAssigner, windowAssigner.getWindowSerializer(getExecutionEnvironment().getConfig()),
                keySel, input.getKeyType().createSerializer(getExecutionEnvironment().getConfig()), stateDesc,
                new InternalSingleValueProcessWindowFunction<>(windowFunction),
                trigger, allowedLateness, lateDataOutputTag);
    }

    return input.transform(opName, resultType, operator);
}
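A hedged usage sketch for this keyed variant; it reuses the illustrative AverageAggregate from the previous example and adds a ProcessWindowFunction so window metadata can be attached to the single incrementally aggregated value (all names below are illustrative, not from the Flink sources):

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.util.Collector;

// Illustrative: wraps the pre-aggregated average with the key and window bounds.
public static class WrapWithWindow extends ProcessWindowFunction<Double, String, Long, TimeWindow> {

    @Override
    public void process(Long key, Context context, Iterable<Double> averages, Collector<String> out) {
        // Because of incremental aggregation, the iterable holds exactly one
        // element: the AggregateFunction's result for this key and window.
        Double average = averages.iterator().next();
        out.collect("key " + key + ", window " + context.window() + ": " + average);
    }
}

DataStream<String> result = env
        .fromElements(1L, 2L, 3L, 4L)
        .keyBy(v -> v % 2)
        .window(TumblingProcessingTimeWindows.of(Time.seconds(5)))
        .aggregate(new AverageAggregate(), new WrapWithWindow());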
Use of org.apache.flink.api.common.state.AggregatingStateDescriptor in project flink by apache.
The class AbstractQueryableStateTestBase, method testAggregatingState:
@Test
public void testAggregatingState() throws Exception {
    final Deadline deadline = Deadline.now().plus(TEST_TIMEOUT);
    final long numElements = 1024L;

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStateBackend(stateBackend);
    env.setParallelism(maxParallelism);
    // Very important, because the cluster is shared between tests and we
    // don't explicitly check that all slots are available before submitting.
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000L));

    DataStream<Tuple2<Integer, Long>> source = env.addSource(new TestAscendingValueSource(numElements));

    final AggregatingStateDescriptor<Tuple2<Integer, Long>, String, String> aggrStateDescriptor =
            new AggregatingStateDescriptor<>("aggregates", new SumAggr(), String.class);
    aggrStateDescriptor.setQueryable("aggr-queryable");

    source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
        private static final long serialVersionUID = 8470749712274833552L;

        @Override
        public Integer getKey(Tuple2<Integer, Long> value) {
            return value.f0;
        }
    }).transform("TestAggregatingOperator", BasicTypeInfo.STRING_TYPE_INFO, new AggregatingTestOperator(aggrStateDescriptor));

    try (AutoCancellableJob autoCancellableJob = new AutoCancellableJob(deadline, clusterClient, env)) {
        final JobID jobId = autoCancellableJob.getJobId();
        final JobGraph jobGraph = autoCancellableJob.getJobGraph();

        clusterClient.submitJob(jobGraph).get();

        for (int key = 0; key < maxParallelism; key++) {
            boolean success = false;
            while (deadline.hasTimeLeft() && !success) {
                CompletableFuture<AggregatingState<Tuple2<Integer, Long>, String>> future = getKvState(
                        deadline, client, jobId, "aggr-queryable", key,
                        BasicTypeInfo.INT_TYPE_INFO, aggrStateDescriptor, false, executor);

                String value = future.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS).get();
                if (Long.parseLong(value) == numElements * (numElements + 1L) / 2L) {
                    success = true;
                } else {
                    // retry
                    Thread.sleep(RETRY_TIMEOUT);
                }
            }
            assertTrue("Did not succeed query", success);
        }
    }
}
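The test expects each key's queried value to equal numElements * (numElements + 1) / 2, i.e. the sum 1 + 2 + ... + numElements of the ascending source values, encoded as a String. SumAggr is defined elsewhere in the test base; the following is a plausible sketch consistent with the String accumulator and result types declared in the descriptor above (the actual implementation may differ):

import org.apache.flink.api.common.functions.AggregateFunction;
import org.apache.flink.api.java.tuple.Tuple2;

// Hypothetical sketch: sums the second tuple field into a string-encoded accumulator.
public static class SumAggr implements AggregateFunction<Tuple2<Integer, Long>, String, String> {

    @Override
    public String createAccumulator() {
        return "0";
    }

    @Override
    public String add(Tuple2<Integer, Long> value, String accumulator) {
        return Long.toString(Long.parseLong(accumulator) + value.f1);
    }

    @Override
    public String getResult(String accumulator) {
        return accumulator;
    }

    @Override
    public String merge(String a, String b) {
        return Long.toString(Long.parseLong(a) + Long.parseLong(b));
    }
}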