Usage example of org.apache.flink.api.common.state.ReducingStateDescriptor in the Apache Flink project: class RocksDBReducingStateTest, method testMerging.
@Test
public void testMerging() throws Exception {
    final ReducingStateDescriptor<Long> descriptor =
            new ReducingStateDescriptor<>("my-state", new AddingFunction(), Long.class);
    descriptor.initializeSerializerUnlessSet(new ExecutionConfig());

    // One merge target window plus two source windows to merge into it.
    final TimeWindow mergeTarget = new TimeWindow(1000, 2000);
    final TimeWindow sourceWindowA = new TimeWindow(2000, 3000);
    final TimeWindow sourceWindowB = new TimeWindow(3000, 4000);

    // 11 + 22 + 33 + 44 + 55 = 165, regardless of how the values were
    // distributed across the windows before merging.
    final Long expectedSum = 165L;

    final RocksDBStateBackend rocksBackend = new RocksDBStateBackend(tmp.newFolder().toURI());
    rocksBackend.setDbStoragePath(tmp.newFolder().getAbsolutePath());

    final RocksDBKeyedStateBackend<String> keyedBackend = createKeyedBackend(rocksBackend);
    try {
        final InternalReducingState<TimeWindow, Long> state =
                keyedBackend.createReducingState(new TimeWindow.Serializer(), descriptor);

        // Populate the namespaces per key:
        //  - "abc": values spread over all three windows
        //  - "def": values spread over two windows (one source window empty)
        //  - "ghi": no values at all
        //  - "jkl": every value already in the merge target
        //  - "mno": every value in a single source window
        keyedBackend.setCurrentKey("abc");
        state.setCurrentNamespace(mergeTarget);
        state.add(33L);
        state.add(55L);
        state.setCurrentNamespace(sourceWindowA);
        state.add(22L);
        state.add(11L);
        state.setCurrentNamespace(sourceWindowB);
        state.add(44L);

        keyedBackend.setCurrentKey("def");
        state.setCurrentNamespace(mergeTarget);
        state.add(11L);
        state.add(44L);
        state.setCurrentNamespace(sourceWindowB);
        state.add(22L);
        state.add(55L);
        state.add(33L);

        keyedBackend.setCurrentKey("jkl");
        state.setCurrentNamespace(mergeTarget);
        state.add(11L);
        state.add(22L);
        state.add(33L);
        state.add(44L);
        state.add(55L);

        keyedBackend.setCurrentKey("mno");
        state.setCurrentNamespace(sourceWindowB);
        state.add(11L);
        state.add(22L);
        state.add(33L);
        state.add(44L);
        state.add(55L);

        // Merge the source windows into the target and verify each key's aggregate.
        keyedBackend.setCurrentKey("abc");
        state.mergeNamespaces(mergeTarget, asList(sourceWindowA, sourceWindowB));
        state.setCurrentNamespace(mergeTarget);
        assertEquals(expectedSum, state.get());

        keyedBackend.setCurrentKey("def");
        state.mergeNamespaces(mergeTarget, asList(sourceWindowA, sourceWindowB));
        state.setCurrentNamespace(mergeTarget);
        assertEquals(expectedSum, state.get());

        // A key with no values anywhere must still have no value after merging.
        keyedBackend.setCurrentKey("ghi");
        state.mergeNamespaces(mergeTarget, asList(sourceWindowA, sourceWindowB));
        state.setCurrentNamespace(mergeTarget);
        assertNull(state.get());

        keyedBackend.setCurrentKey("jkl");
        state.mergeNamespaces(mergeTarget, asList(sourceWindowA, sourceWindowB));
        state.setCurrentNamespace(mergeTarget);
        assertEquals(expectedSum, state.get());

        keyedBackend.setCurrentKey("mno");
        state.mergeNamespaces(mergeTarget, asList(sourceWindowA, sourceWindowB));
        state.setCurrentNamespace(mergeTarget);
        assertEquals(expectedSum, state.get());
    } finally {
        keyedBackend.close();
        keyedBackend.dispose();
    }
}
Usage example of org.apache.flink.api.common.state.ReducingStateDescriptor in the Apache Flink project: class AbstractQueryableStateITCase, method testReducingState.
/**
 * Tests a simple queryable reducing state instance. Each source emits
 * (subtaskIndex, 0)..(subtaskIndex, numElements) tuples, which are then
 * queried. The reducing state instance sums these up. The test succeeds
 * after each subtask index is queried with result n*(n+1)/2.
 */
@Test
public void testReducingState() throws Exception {
    // One deadline shared by job submission, querying, and cancellation.
    final Deadline deadline = TEST_TIMEOUT.fromNow();
    final int numElements = 1024;

    final QueryableStateClient client = new QueryableStateClient(cluster.configuration());

    JobID jobId = null;
    try {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setStateBackend(stateBackend);
        env.setParallelism(NUM_SLOTS);
        // Very important, because the cluster is shared between tests and we
        // don't explicitly check that all slots are available before
        // submitting.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));

        DataStream<Tuple2<Integer, Long>> source = env.addSource(new TestAscendingValueSource(numElements));

        // Reducing state descriptor; SumReduce folds all emitted tuples into a per-key sum.
        ReducingStateDescriptor<Tuple2<Integer, Long>> reducingState = new ReducingStateDescriptor<>("any", new SumReduce(), source.getType());

        QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState = source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {

            @Override
            public Integer getKey(Tuple2<Integer, Long> value) throws Exception {
                return value.f0;
            }
        }).asQueryableState("jungle", reducingState);

        // Submit the job graph detached; no explicit wait for the job to reach
        // RUNNING here — presumably executeValueQuery retries until the
        // deadline expires (TODO confirm against the helper's implementation).
        JobGraph jobGraph = env.getStreamGraph().getJobGraph();
        jobId = jobGraph.getJobID();
        cluster.submitJobDetached(jobGraph);

        // Gauss sum 0..numElements, computed in long arithmetic so the
        // closed-form product cannot overflow int for large numElements.
        long expected = numElements * (numElements + 1L) / 2;
        executeValueQuery(deadline, client, jobId, queryableState, expected);
    } finally {
        // Free cluster resources: cancel the job (if submitted) and shut down the client.
        if (jobId != null) {
            Future<CancellationSuccess> cancellation = cluster.getLeaderGateway(deadline.timeLeft()).ask(new JobManagerMessages.CancelJob(jobId), deadline.timeLeft()).mapTo(ClassTag$.MODULE$.<CancellationSuccess>apply(CancellationSuccess.class));
            Await.ready(cancellation, deadline.timeLeft());
        }
        client.shutDown();
    }
}
Usage example of org.apache.flink.api.common.state.ReducingStateDescriptor in the Apache Flink project: class HeapReducingStateTest, method testMerging.
@Test
public void testMerging() throws Exception {
    final ReducingStateDescriptor<Long> descriptor =
            new ReducingStateDescriptor<>("my-state", new AddingFunction(), Long.class);
    descriptor.initializeSerializerUnlessSet(new ExecutionConfig());

    // One merge target namespace plus two source namespaces to merge into it.
    final Integer targetNs = 1;
    final Integer sourceNsA = 2;
    final Integer sourceNsB = 3;

    // 11 + 22 + 33 + 44 + 55 = 165, regardless of how the values were
    // distributed across the namespaces before merging.
    final Long expectedSum = 165L;

    final HeapKeyedStateBackend<String> keyedBackend = createKeyedBackend();
    try {
        final InternalReducingState<Integer, Long> state =
                keyedBackend.createReducingState(IntSerializer.INSTANCE, descriptor);

        // Populate the namespaces per key:
        //  - "abc": values spread over all three namespaces
        //  - "def": values spread over two namespaces (one source empty)
        //  - "ghi": no values at all
        //  - "jkl": every value already in the merge target
        //  - "mno": every value in a single source namespace
        keyedBackend.setCurrentKey("abc");
        state.setCurrentNamespace(targetNs);
        state.add(33L);
        state.add(55L);
        state.setCurrentNamespace(sourceNsA);
        state.add(22L);
        state.add(11L);
        state.setCurrentNamespace(sourceNsB);
        state.add(44L);

        keyedBackend.setCurrentKey("def");
        state.setCurrentNamespace(targetNs);
        state.add(11L);
        state.add(44L);
        state.setCurrentNamespace(sourceNsB);
        state.add(22L);
        state.add(55L);
        state.add(33L);

        keyedBackend.setCurrentKey("jkl");
        state.setCurrentNamespace(targetNs);
        state.add(11L);
        state.add(22L);
        state.add(33L);
        state.add(44L);
        state.add(55L);

        keyedBackend.setCurrentKey("mno");
        state.setCurrentNamespace(sourceNsB);
        state.add(11L);
        state.add(22L);
        state.add(33L);
        state.add(44L);
        state.add(55L);

        // Merge the source namespaces into the target and verify each key's aggregate.
        keyedBackend.setCurrentKey("abc");
        state.mergeNamespaces(targetNs, asList(sourceNsA, sourceNsB));
        state.setCurrentNamespace(targetNs);
        assertEquals(expectedSum, state.get());

        keyedBackend.setCurrentKey("def");
        state.mergeNamespaces(targetNs, asList(sourceNsA, sourceNsB));
        state.setCurrentNamespace(targetNs);
        assertEquals(expectedSum, state.get());

        // A key with no values anywhere must still have no value after merging.
        keyedBackend.setCurrentKey("ghi");
        state.mergeNamespaces(targetNs, asList(sourceNsA, sourceNsB));
        state.setCurrentNamespace(targetNs);
        assertNull(state.get());

        keyedBackend.setCurrentKey("jkl");
        state.mergeNamespaces(targetNs, asList(sourceNsA, sourceNsB));
        state.setCurrentNamespace(targetNs);
        assertEquals(expectedSum, state.get());

        keyedBackend.setCurrentKey("mno");
        state.mergeNamespaces(targetNs, asList(sourceNsA, sourceNsB));
        state.setCurrentNamespace(targetNs);
        assertEquals(expectedSum, state.get());

        // Clear every key's state and verify the backing state table
        // releases all entries (no leaked lists / maps).
        keyedBackend.setCurrentKey("abc");
        state.setCurrentNamespace(targetNs);
        state.clear();

        keyedBackend.setCurrentKey("def");
        state.setCurrentNamespace(targetNs);
        state.clear();

        keyedBackend.setCurrentKey("ghi");
        state.setCurrentNamespace(targetNs);
        state.clear();

        keyedBackend.setCurrentKey("jkl");
        state.setCurrentNamespace(targetNs);
        state.clear();

        keyedBackend.setCurrentKey("mno");
        state.setCurrentNamespace(targetNs);
        state.clear();

        StateTable<String, Integer, Long> stateTable =
                ((HeapReducingState<String, Integer, Long>) state).stateTable;
        assertTrue(stateTable.isEmpty());
    } finally {
        keyedBackend.close();
        keyedBackend.dispose();
    }
}
Usage example of org.apache.flink.api.common.state.ReducingStateDescriptor in the Apache Flink project: class WindowedStream, method reduce.
/**
 * Applies the given window function to each window. The window function is called for each
 * evaluation of the window for each key individually. The output of the window function is
 * interpreted as a regular non-windowed stream.
 *
 * <p>
 * Arriving data is incrementally aggregated using the given reducer.
 *
 * @param reduceFunction The reduce function that is used for incremental aggregation.
 * @param function The window function.
 * @param resultType Type information for the result type of the window function.
 * @param legacyWindowOpType When migrating from an older Flink version, this flag indicates
 * the type of the previous operator whose state we inherit.
 * @return The data stream that is the result of applying the window function to the window.
 * @throws UnsupportedOperationException if {@code reduceFunction} is a {@link RichFunction}.
 */
private <R> SingleOutputStreamOperator<R> reduce(ReduceFunction<T> reduceFunction, WindowFunction<T, R, K, W> function, TypeInformation<R> resultType, LegacyWindowOperatorType legacyWindowOpType) {
// NOTE(review): RichFunction reducers are rejected up front — presumably
// because the reducer runs inside the state backend where no RuntimeContext
// is available; confirm against the state descriptor contract.
if (reduceFunction instanceof RichFunction) {
throw new UnsupportedOperationException("ReduceFunction of reduce can not be a RichFunction.");
}
// Clean the closures so the user functions are serializable.
function = input.getExecutionEnvironment().clean(function);
reduceFunction = input.getExecutionEnvironment().clean(reduceFunction);
String callLocation = Utils.getCallLocationName();
String udfName = "WindowedStream." + callLocation;
String opName;
KeySelector<T, K> keySel = input.getKeySelector();
OneInputStreamOperator<T, R> operator;
if (evictor != null) {
// With an evictor, elements cannot be pre-aggregated: every StreamRecord is
// kept individually in list state, and the reduce function is only applied
// when the window fires (wrapped into a ReduceApplyWindowFunction).
@SuppressWarnings({ "unchecked", "rawtypes" }) TypeSerializer<StreamRecord<T>> streamRecordSerializer = (TypeSerializer<StreamRecord<T>>) new StreamElementSerializer(input.getType().createSerializer(getExecutionEnvironment().getConfig()));
ListStateDescriptor<StreamRecord<T>> stateDesc = new ListStateDescriptor<>("window-contents", streamRecordSerializer);
opName = "TriggerWindow(" + windowAssigner + ", " + stateDesc + ", " + trigger + ", " + evictor + ", " + udfName + ")";
operator = new EvictingWindowOperator<>(windowAssigner, windowAssigner.getWindowSerializer(getExecutionEnvironment().getConfig()), keySel, input.getKeyType().createSerializer(getExecutionEnvironment().getConfig()), stateDesc, new InternalIterableWindowFunction<>(new ReduceApplyWindowFunction<>(reduceFunction, function)), trigger, evictor, allowedLateness, lateDataOutputTag);
} else {
// Without an evictor, arriving elements are incrementally aggregated into a
// single reducing-state value per key and window.
ReducingStateDescriptor<T> stateDesc = new ReducingStateDescriptor<>("window-contents", reduceFunction, input.getType().createSerializer(getExecutionEnvironment().getConfig()));
opName = "TriggerWindow(" + windowAssigner + ", " + stateDesc + ", " + trigger + ", " + udfName + ")";
operator = new WindowOperator<>(windowAssigner, windowAssigner.getWindowSerializer(getExecutionEnvironment().getConfig()), keySel, input.getKeyType().createSerializer(getExecutionEnvironment().getConfig()), stateDesc, new InternalSingleValueWindowFunction<>(function), trigger, allowedLateness, lateDataOutputTag, legacyWindowOpType);
}
return input.transform(opName, resultType, operator);
}
Usage example of org.apache.flink.api.common.state.ReducingStateDescriptor in the Apache Flink project: class AllWindowTranslationTest, method testReduceProcessingTime.
@Test
@SuppressWarnings("rawtypes")
public void testReduceProcessingTime() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStream<Tuple2<String, Integer>> input =
            env.fromElements(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

    // Build a sliding processing-time all-window reduce.
    DataStream<Tuple2<String, Integer>> reduced = input
            .windowAll(SlidingProcessingTimeWindows.of(Time.of(1, TimeUnit.SECONDS), Time.of(100, TimeUnit.MILLISECONDS)))
            .reduce(new DummyReducer());

    // Dig the window operator out of the resulting transformation.
    OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>> transformation =
            (OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>>) reduced.getTransformation();
    OneInputStreamOperator<Tuple2<String, Integer>, Tuple2<String, Integer>> op = transformation.getOperator();
    Assert.assertTrue(op instanceof WindowOperator);
    WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> windowOp =
            (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) op;

    // The translation must wire up a processing-time trigger, the sliding
    // processing-time assigner, and reducing state for pre-aggregation.
    Assert.assertTrue(windowOp.getTrigger() instanceof ProcessingTimeTrigger);
    Assert.assertTrue(windowOp.getWindowAssigner() instanceof SlidingProcessingTimeWindows);
    Assert.assertTrue(windowOp.getStateDescriptor() instanceof ReducingStateDescriptor);

    processElementAndEnsureOutput(windowOp, windowOp.getKeySelector(), BasicTypeInfo.STRING_TYPE_INFO, new Tuple2<>("hello", 1));
}
Aggregations