Search in sources:

Example 1 with ReducingStateDescriptor

Use of org.apache.flink.api.common.state.ReducingStateDescriptor in project flink by apache.

From class RocksDBReducingStateTest, method testMerging.

@Test
public void testMerging() throws Exception {
    final ReducingStateDescriptor<Long> stateDescr = new ReducingStateDescriptor<>("my-state", new AddingFunction(), Long.class);
    stateDescr.initializeSerializerUnlessSet(new ExecutionConfig());
    final TimeWindow win1 = new TimeWindow(1000, 2000);
    final TimeWindow win2 = new TimeWindow(2000, 3000);
    final TimeWindow win3 = new TimeWindow(3000, 4000);
    final Long expectedResult = 165L;
    final RocksDBStateBackend backend = new RocksDBStateBackend(tmp.newFolder().toURI());
    backend.setDbStoragePath(tmp.newFolder().getAbsolutePath());
    final RocksDBKeyedStateBackend<String> keyedBackend = createKeyedBackend(backend);
    try {
        final InternalReducingState<TimeWindow, Long> state = keyedBackend.createReducingState(new TimeWindow.Serializer(), stateDescr);
        // populate the different namespaces
        //  - abc spreads the values over three namespaces
        //  - def spreads the values over two namespaces (one empty)
        //  - ghi is empty
        //  - jkl has all elements already in the target namespace
        //  - mno has all elements already in one source namespace
        keyedBackend.setCurrentKey("abc");
        state.setCurrentNamespace(win1);
        state.add(33L);
        state.add(55L);
        state.setCurrentNamespace(win2);
        state.add(22L);
        state.add(11L);
        state.setCurrentNamespace(win3);
        state.add(44L);
        keyedBackend.setCurrentKey("def");
        state.setCurrentNamespace(win1);
        state.add(11L);
        state.add(44L);
        state.setCurrentNamespace(win3);
        state.add(22L);
        state.add(55L);
        state.add(33L);
        keyedBackend.setCurrentKey("jkl");
        state.setCurrentNamespace(win1);
        state.add(11L);
        state.add(22L);
        state.add(33L);
        state.add(44L);
        state.add(55L);
        keyedBackend.setCurrentKey("mno");
        state.setCurrentNamespace(win3);
        state.add(11L);
        state.add(22L);
        state.add(33L);
        state.add(44L);
        state.add(55L);
        keyedBackend.setCurrentKey("abc");
        state.mergeNamespaces(win1, asList(win2, win3));
        state.setCurrentNamespace(win1);
        assertEquals(expectedResult, state.get());
        keyedBackend.setCurrentKey("def");
        state.mergeNamespaces(win1, asList(win2, win3));
        state.setCurrentNamespace(win1);
        assertEquals(expectedResult, state.get());
        keyedBackend.setCurrentKey("ghi");
        state.mergeNamespaces(win1, asList(win2, win3));
        state.setCurrentNamespace(win1);
        assertNull(state.get());
        keyedBackend.setCurrentKey("jkl");
        state.mergeNamespaces(win1, asList(win2, win3));
        state.setCurrentNamespace(win1);
        assertEquals(expectedResult, state.get());
        keyedBackend.setCurrentKey("mno");
        state.mergeNamespaces(win1, asList(win2, win3));
        state.setCurrentNamespace(win1);
        assertEquals(expectedResult, state.get());
    } finally {
        keyedBackend.close();
        keyedBackend.dispose();
    }
}
Also used: ReducingStateDescriptor (org.apache.flink.api.common.state.ReducingStateDescriptor), ExecutionConfig (org.apache.flink.api.common.ExecutionConfig), TimeWindow (org.apache.flink.streaming.api.windowing.windows.TimeWindow), Test (org.junit.Test)
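The AddingFunction passed to the descriptor is not part of this excerpt. Since merging the values 11, 22, 33, 44 and 55 across namespaces is expected to produce 165L, it is presumably a ReduceFunction<Long> that sums its two inputs; a minimal sketch under that assumption:

import org.apache.flink.api.common.functions.ReduceFunction;

// Assumed shape of AddingFunction (not shown in the excerpt): a plain summing reducer.
public class AddingFunction implements ReduceFunction<Long> {

    @Override
    public Long reduce(Long value1, Long value2) throws Exception {
        // combine the running aggregate with the newly added element
        return value1 + value2;
    }
}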

Example 2 with ReducingStateDescriptor

Use of org.apache.flink.api.common.state.ReducingStateDescriptor in project flink by apache.

From class AbstractQueryableStateITCase, method testReducingState.

/**
	 * Tests a simple queryable state instance backed by reducing state. Each source emits
	 * (subtaskIndex, 0)..(subtaskIndex, numElements) tuples, which are then
	 * queried. The reducing state instance sums these up. The test succeeds
	 * after each subtask index is queried with result n*(n+1)/2.
	 */
@Test
public void testReducingState() throws Exception {
    // Config
    final Deadline deadline = TEST_TIMEOUT.fromNow();
    final int numElements = 1024;
    final QueryableStateClient client = new QueryableStateClient(cluster.configuration());
    JobID jobId = null;
    try {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setStateBackend(stateBackend);
        env.setParallelism(NUM_SLOTS);
        // Very important, because cluster is shared between tests and we
        // don't explicitly check that all slots are available before
        // submitting.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));
        DataStream<Tuple2<Integer, Long>> source = env.addSource(new TestAscendingValueSource(numElements));
        // Reducing state
        ReducingStateDescriptor<Tuple2<Integer, Long>> reducingState = new ReducingStateDescriptor<>("any", new SumReduce(), source.getType());
        QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState = source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {

            @Override
            public Integer getKey(Tuple2<Integer, Long> value) throws Exception {
                return value.f0;
            }
        }).asQueryableState("jungle", reducingState);
        // Submit the job graph
        JobGraph jobGraph = env.getStreamGraph().getJobGraph();
        jobId = jobGraph.getJobID();
        cluster.submitJobDetached(jobGraph);
        // Wait until job is running
        // Now query
        long expected = numElements * (numElements + 1) / 2;
        executeValueQuery(deadline, client, jobId, queryableState, expected);
    } finally {
        // Free cluster resources
        if (jobId != null) {
            Future<CancellationSuccess> cancellation = cluster.getLeaderGateway(deadline.timeLeft()).ask(new JobManagerMessages.CancelJob(jobId), deadline.timeLeft()).mapTo(ClassTag$.MODULE$.<CancellationSuccess>apply(CancellationSuccess.class));
            Await.ready(cancellation, deadline.timeLeft());
        }
        client.shutDown();
    }
}
Also used: ReducingStateDescriptor (org.apache.flink.api.common.state.ReducingStateDescriptor), Deadline (scala.concurrent.duration.Deadline), QueryableStateClient (org.apache.flink.runtime.query.QueryableStateClient), KeySelector (org.apache.flink.api.java.functions.KeySelector), JobGraph (org.apache.flink.runtime.jobgraph.JobGraph), Tuple2 (org.apache.flink.api.java.tuple.Tuple2), AtomicLong (java.util.concurrent.atomic.AtomicLong), CancellationSuccess (org.apache.flink.runtime.messages.JobManagerMessages.CancellationSuccess), StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment), JobID (org.apache.flink.api.common.JobID), Test (org.junit.Test)
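SumReduce is referenced but not included here. Because the test expects each subtask index to end up with numElements * (numElements + 1) / 2, it is presumably a ReduceFunction over the tuples that keeps the key in f0 and adds the values in f1; a sketch under that assumption:

import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.java.tuple.Tuple2;

// Assumed shape of SumReduce (not shown in the excerpt): keep the subtask index, sum the values.
public class SumReduce implements ReduceFunction<Tuple2<Integer, Long>> {

    @Override
    public Tuple2<Integer, Long> reduce(Tuple2<Integer, Long> value1, Tuple2<Integer, Long> value2) throws Exception {
        return Tuple2.of(value1.f0, value1.f1 + value2.f1);
    }
}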

Example 3 with ReducingStateDescriptor

Use of org.apache.flink.api.common.state.ReducingStateDescriptor in project flink by apache.

From class HeapReducingStateTest, method testMerging.

@Test
public void testMerging() throws Exception {
    final ReducingStateDescriptor<Long> stateDescr = new ReducingStateDescriptor<>("my-state", new AddingFunction(), Long.class);
    stateDescr.initializeSerializerUnlessSet(new ExecutionConfig());
    final Integer namespace1 = 1;
    final Integer namespace2 = 2;
    final Integer namespace3 = 3;
    final Long expectedResult = 165L;
    final HeapKeyedStateBackend<String> keyedBackend = createKeyedBackend();
    try {
        final InternalReducingState<Integer, Long> state = keyedBackend.createReducingState(IntSerializer.INSTANCE, stateDescr);
        // populate the different namespaces
        //  - abc spreads the values over three namespaces
        //  - def spreads the values over two namespaces (one empty)
        //  - ghi is empty
        //  - jkl has all elements already in the target namespace
        //  - mno has all elements already in one source namespace
        keyedBackend.setCurrentKey("abc");
        state.setCurrentNamespace(namespace1);
        state.add(33L);
        state.add(55L);
        state.setCurrentNamespace(namespace2);
        state.add(22L);
        state.add(11L);
        state.setCurrentNamespace(namespace3);
        state.add(44L);
        keyedBackend.setCurrentKey("def");
        state.setCurrentNamespace(namespace1);
        state.add(11L);
        state.add(44L);
        state.setCurrentNamespace(namespace3);
        state.add(22L);
        state.add(55L);
        state.add(33L);
        keyedBackend.setCurrentKey("jkl");
        state.setCurrentNamespace(namespace1);
        state.add(11L);
        state.add(22L);
        state.add(33L);
        state.add(44L);
        state.add(55L);
        keyedBackend.setCurrentKey("mno");
        state.setCurrentNamespace(namespace3);
        state.add(11L);
        state.add(22L);
        state.add(33L);
        state.add(44L);
        state.add(55L);
        keyedBackend.setCurrentKey("abc");
        state.mergeNamespaces(namespace1, asList(namespace2, namespace3));
        state.setCurrentNamespace(namespace1);
        assertEquals(expectedResult, state.get());
        keyedBackend.setCurrentKey("def");
        state.mergeNamespaces(namespace1, asList(namespace2, namespace3));
        state.setCurrentNamespace(namespace1);
        assertEquals(expectedResult, state.get());
        keyedBackend.setCurrentKey("ghi");
        state.mergeNamespaces(namespace1, asList(namespace2, namespace3));
        state.setCurrentNamespace(namespace1);
        assertNull(state.get());
        keyedBackend.setCurrentKey("jkl");
        state.mergeNamespaces(namespace1, asList(namespace2, namespace3));
        state.setCurrentNamespace(namespace1);
        assertEquals(expectedResult, state.get());
        keyedBackend.setCurrentKey("mno");
        state.mergeNamespaces(namespace1, asList(namespace2, namespace3));
        state.setCurrentNamespace(namespace1);
        assertEquals(expectedResult, state.get());
        // make sure all lists / maps are cleared
        keyedBackend.setCurrentKey("abc");
        state.setCurrentNamespace(namespace1);
        state.clear();
        keyedBackend.setCurrentKey("def");
        state.setCurrentNamespace(namespace1);
        state.clear();
        keyedBackend.setCurrentKey("ghi");
        state.setCurrentNamespace(namespace1);
        state.clear();
        keyedBackend.setCurrentKey("jkl");
        state.setCurrentNamespace(namespace1);
        state.clear();
        keyedBackend.setCurrentKey("mno");
        state.setCurrentNamespace(namespace1);
        state.clear();
        StateTable<String, Integer, Long> stateTable = ((HeapReducingState<String, Integer, Long>) state).stateTable;
        assertTrue(stateTable.isEmpty());
    } finally {
        keyedBackend.close();
        keyedBackend.dispose();
    }
}
Also used: ReducingStateDescriptor (org.apache.flink.api.common.state.ReducingStateDescriptor), ExecutionConfig (org.apache.flink.api.common.ExecutionConfig), Test (org.junit.Test)
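Both state-backend tests drive ReducingStateDescriptor through internal backend APIs. For context, the usual user-facing way to reach the same kind of state is through the RuntimeContext inside a rich function on a keyed stream. The class name RunningSum and the lambda reducer below are illustrative, not taken from the excerpt:

import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.api.common.state.ReducingState;
import org.apache.flink.api.common.state.ReducingStateDescriptor;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.util.Collector;

public class RunningSum extends RichFlatMapFunction<Long, Long> {

    private transient ReducingState<Long> sum;

    @Override
    public void open(Configuration parameters) {
        // same descriptor style as in the tests above: name, reduce function, value class
        ReducingStateDescriptor<Long> descriptor =
                new ReducingStateDescriptor<>("running-sum", (a, b) -> a + b, Long.class);
        sum = getRuntimeContext().getReducingState(descriptor);
    }

    @Override
    public void flatMap(Long value, Collector<Long> out) throws Exception {
        sum.add(value);          // aggregated incrementally by the reduce function
        out.collect(sum.get());  // current aggregate for the active key
    }
}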

Example 4 with ReducingStateDescriptor

Use of org.apache.flink.api.common.state.ReducingStateDescriptor in project flink by apache.

From class WindowedStream, method reduce.

/**
	 * Applies the given window function to each window. The window function is called for each
	 * evaluation of the window for each key individually. The output of the window function is
	 * interpreted as a regular non-windowed stream.
	 *
	 * <p>
	 * Arriving data is incrementally aggregated using the given reducer.
	 *
	 * @param reduceFunction The reduce function that is used for incremental aggregation.
	 * @param function The window function.
	 * @param resultType Type information for the result type of the window function.
	 * @param legacyWindowOpType When migrating from an older Flink version, this flag indicates
	 *                           the type of the previous operator whose state we inherit.
	 * @return The data stream that is the result of applying the window function to the window.
	 */
private <R> SingleOutputStreamOperator<R> reduce(ReduceFunction<T> reduceFunction, WindowFunction<T, R, K, W> function, TypeInformation<R> resultType, LegacyWindowOperatorType legacyWindowOpType) {
    if (reduceFunction instanceof RichFunction) {
        throw new UnsupportedOperationException("ReduceFunction of reduce can not be a RichFunction.");
    }
    //clean the closures
    function = input.getExecutionEnvironment().clean(function);
    reduceFunction = input.getExecutionEnvironment().clean(reduceFunction);
    String callLocation = Utils.getCallLocationName();
    String udfName = "WindowedStream." + callLocation;
    String opName;
    KeySelector<T, K> keySel = input.getKeySelector();
    OneInputStreamOperator<T, R> operator;
    if (evictor != null) {
        @SuppressWarnings({ "unchecked", "rawtypes" }) TypeSerializer<StreamRecord<T>> streamRecordSerializer = (TypeSerializer<StreamRecord<T>>) new StreamElementSerializer(input.getType().createSerializer(getExecutionEnvironment().getConfig()));
        ListStateDescriptor<StreamRecord<T>> stateDesc = new ListStateDescriptor<>("window-contents", streamRecordSerializer);
        opName = "TriggerWindow(" + windowAssigner + ", " + stateDesc + ", " + trigger + ", " + evictor + ", " + udfName + ")";
        operator = new EvictingWindowOperator<>(windowAssigner, windowAssigner.getWindowSerializer(getExecutionEnvironment().getConfig()), keySel, input.getKeyType().createSerializer(getExecutionEnvironment().getConfig()), stateDesc, new InternalIterableWindowFunction<>(new ReduceApplyWindowFunction<>(reduceFunction, function)), trigger, evictor, allowedLateness, lateDataOutputTag);
    } else {
        ReducingStateDescriptor<T> stateDesc = new ReducingStateDescriptor<>("window-contents", reduceFunction, input.getType().createSerializer(getExecutionEnvironment().getConfig()));
        opName = "TriggerWindow(" + windowAssigner + ", " + stateDesc + ", " + trigger + ", " + udfName + ")";
        operator = new WindowOperator<>(windowAssigner, windowAssigner.getWindowSerializer(getExecutionEnvironment().getConfig()), keySel, input.getKeyType().createSerializer(getExecutionEnvironment().getConfig()), stateDesc, new InternalSingleValueWindowFunction<>(function), trigger, allowedLateness, lateDataOutputTag, legacyWindowOpType);
    }
    return input.transform(opName, resultType, operator);
}
Also used: ReducingStateDescriptor (org.apache.flink.api.common.state.ReducingStateDescriptor), StreamRecord (org.apache.flink.streaming.runtime.streamrecord.StreamRecord), RichFunction (org.apache.flink.api.common.functions.RichFunction), InternalSingleValueWindowFunction (org.apache.flink.streaming.runtime.operators.windowing.functions.InternalSingleValueWindowFunction), ListStateDescriptor (org.apache.flink.api.common.state.ListStateDescriptor), InternalIterableWindowFunction (org.apache.flink.streaming.runtime.operators.windowing.functions.InternalIterableWindowFunction), TypeSerializer (org.apache.flink.api.common.typeutils.TypeSerializer), StreamElementSerializer (org.apache.flink.streaming.runtime.streamrecord.StreamElementSerializer)
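When no evictor is configured, the branch above backs the window contents with a ReducingStateDescriptor named "window-contents", so the state keeps a single pre-aggregated value per key and window instead of a list of records. A hypothetical call site that ends up on this code path (the input stream, key position and window size are illustrative):

import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;

// "input" stands for some DataStream<Tuple2<String, Integer>> defined elsewhere.
DataStream<Tuple2<String, Integer>> counts = input
        .keyBy(0)
        .window(TumblingEventTimeWindows.of(Time.seconds(5)))
        .reduce(new ReduceFunction<Tuple2<String, Integer>>() {
            @Override
            public Tuple2<String, Integer> reduce(Tuple2<String, Integer> a, Tuple2<String, Integer> b) {
                // incremental aggregation; the backing state keeps only this reduced value
                return Tuple2.of(a.f0, a.f1 + b.f1);
            }
        });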

Example 5 with ReducingStateDescriptor

Use of org.apache.flink.api.common.state.ReducingStateDescriptor in project flink by apache.

From class AllWindowTranslationTest, method testReduceProcessingTime.

@Test
@SuppressWarnings("rawtypes")
public void testReduceProcessingTime() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<Tuple2<String, Integer>> source = env.fromElements(Tuple2.of("hello", 1), Tuple2.of("hello", 2));
    DataStream<Tuple2<String, Integer>> window1 = source.windowAll(SlidingProcessingTimeWindows.of(Time.of(1, TimeUnit.SECONDS), Time.of(100, TimeUnit.MILLISECONDS))).reduce(new DummyReducer());
    OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>> transform = (OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>>) window1.getTransformation();
    OneInputStreamOperator<Tuple2<String, Integer>, Tuple2<String, Integer>> operator = transform.getOperator();
    Assert.assertTrue(operator instanceof WindowOperator);
    WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator = (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
    Assert.assertTrue(winOperator.getTrigger() instanceof ProcessingTimeTrigger);
    Assert.assertTrue(winOperator.getWindowAssigner() instanceof SlidingProcessingTimeWindows);
    Assert.assertTrue(winOperator.getStateDescriptor() instanceof ReducingStateDescriptor);
    processElementAndEnsureOutput(winOperator, winOperator.getKeySelector(), BasicTypeInfo.STRING_TYPE_INFO, new Tuple2<>("hello", 1));
}
Also used: ReducingStateDescriptor (org.apache.flink.api.common.state.ReducingStateDescriptor), ProcessingTimeTrigger (org.apache.flink.streaming.api.windowing.triggers.ProcessingTimeTrigger), Tuple2 (org.apache.flink.api.java.tuple.Tuple2), StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment), OneInputTransformation (org.apache.flink.streaming.api.transformations.OneInputTransformation), SlidingProcessingTimeWindows (org.apache.flink.streaming.api.windowing.assigners.SlidingProcessingTimeWindows), Test (org.junit.Test)
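DummyReducer is referenced but not shown. The translation test only inspects the resulting operator, its trigger, window assigner and state descriptor, so any non-rich ReduceFunction over the tuples would do; one plausible, purely illustrative shape:

import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.java.tuple.Tuple2;

// Assumed shape of DummyReducer: its behavior is irrelevant to the translation assertions,
// it merely has to be a ReduceFunction that is not a RichFunction.
public class DummyReducer implements ReduceFunction<Tuple2<String, Integer>> {

    @Override
    public Tuple2<String, Integer> reduce(Tuple2<String, Integer> value1, Tuple2<String, Integer> value2) throws Exception {
        return value1;
    }
}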

Aggregations

ReducingStateDescriptor (org.apache.flink.api.common.state.ReducingStateDescriptor): 67
Test (org.junit.Test): 60
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 51
TimeWindow (org.apache.flink.streaming.api.windowing.windows.TimeWindow): 38
ExecutionConfig (org.apache.flink.api.common.ExecutionConfig): 35
TypeHint (org.apache.flink.api.common.typeinfo.TypeHint): 27
ConcurrentLinkedQueue (java.util.concurrent.ConcurrentLinkedQueue): 26
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment): 23
Watermark (org.apache.flink.streaming.api.watermark.Watermark): 21
Tuple3 (org.apache.flink.api.java.tuple.Tuple3): 19
PassThroughWindowFunction (org.apache.flink.streaming.api.functions.windowing.PassThroughWindowFunction): 19
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 17
OneInputTransformation (org.apache.flink.streaming.api.transformations.OneInputTransformation): 17
StreamRecord (org.apache.flink.streaming.runtime.streamrecord.StreamRecord): 14
ListStateDescriptor (org.apache.flink.api.common.state.ListStateDescriptor): 10
EventTimeTrigger (org.apache.flink.streaming.api.windowing.triggers.EventTimeTrigger): 9
KeyedOneInputStreamOperatorTestHarness (org.apache.flink.streaming.util.KeyedOneInputStreamOperatorTestHarness): 8
TypeSerializer (org.apache.flink.api.common.typeutils.TypeSerializer): 7
OperatorSubtaskState (org.apache.flink.runtime.checkpoint.OperatorSubtaskState): 7
AtomicLong (java.util.concurrent.atomic.AtomicLong): 6