
Example 96 with Tuple2

Use of org.apache.flink.api.java.tuple.Tuple2 in project flink by apache.

From class WindowCheckpointingITCase, method testAggregatingSlidingProcessingTimeWindow.

@Test
public void testAggregatingSlidingProcessingTimeWindow() {
    final int NUM_ELEMENTS = 3000;
    FailingSource.reset();
    try {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", cluster.getLeaderRPCPort());
        env.setParallelism(PARALLELISM);
        env.setStreamTimeCharacteristic(timeCharacteristic);
        env.getConfig().setAutoWatermarkInterval(10);
        env.enableCheckpointing(100);
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 0));
        env.getConfig().disableSysoutLogging();
        env.addSource(new FailingSource(NUM_ELEMENTS, NUM_ELEMENTS / 3)).map(new MapFunction<Tuple2<Long, IntType>, Tuple2<Long, IntType>>() {

            @Override
            public Tuple2<Long, IntType> map(Tuple2<Long, IntType> value) {
                value.f1.value = 1;
                return value;
            }
        }).rebalance().keyBy(0).timeWindow(Time.of(150, MILLISECONDS), Time.of(50, MILLISECONDS)).reduce(new ReduceFunction<Tuple2<Long, IntType>>() {

            @Override
            public Tuple2<Long, IntType> reduce(Tuple2<Long, IntType> a, Tuple2<Long, IntType> b) {
                return new Tuple2<>(a.f0, new IntType(1));
            }
        }).addSink(new ValidatingSink(NUM_ELEMENTS, 3)).setParallelism(1);
        tryExecute(env, "Sliding Window Test");
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used: SuccessException (org.apache.flink.test.util.SuccessException), Tuple2 (org.apache.flink.api.java.tuple.Tuple2), StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment), Test (org.junit.Test)
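
FailingSource, ValidatingSink, and IntType are helper classes defined elsewhere in WindowCheckpointingITCase and are not reproduced on this page. As a rough idea of the sink side of the pattern, the sketch below shows a hypothetical validating sink that counts the records it receives and ends the job by throwing SuccessException, which is what the tryExecute(...) helper interprets as success. The class and field names here are illustrative, not the actual Flink test code, and the real ValidatingSink is more involved (it also checkpoints its counts so validation survives the injected failures).

import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.test.util.SuccessException;

// Hypothetical sketch, not the actual ValidatingSink from WindowCheckpointingITCase.
public class CountingValidatingSink<T> extends RichSinkFunction<T> {

    private final int expectedCount; // total number of records the test expects
    private int seen;                // records seen so far by this (single) subtask

    public CountingValidatingSink(int expectedCount) {
        this.expectedCount = expectedCount;
    }

    @Override
    public void invoke(T value) throws Exception {
        if (++seen >= expectedCount) {
            // tryExecute(...) treats a job that fails with SuccessException as a successful run
            throw new SuccessException();
        }
    }
}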

Example 97 with Tuple2

Use of org.apache.flink.api.java.tuple.Tuple2 in project flink by apache.

From class WindowCheckpointingITCase, method testAggregatingTumblingProcessingTimeWindow.

@Test
public void testAggregatingTumblingProcessingTimeWindow() {
    final int NUM_ELEMENTS = 3000;
    FailingSource.reset();
    try {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", cluster.getLeaderRPCPort());
        env.setParallelism(PARALLELISM);
        env.setStreamTimeCharacteristic(timeCharacteristic);
        env.getConfig().setAutoWatermarkInterval(10);
        env.enableCheckpointing(100);
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 0));
        env.getConfig().disableSysoutLogging();
        env.addSource(new FailingSource(NUM_ELEMENTS, NUM_ELEMENTS / 3)).map(new MapFunction<Tuple2<Long, IntType>, Tuple2<Long, IntType>>() {

            @Override
            public Tuple2<Long, IntType> map(Tuple2<Long, IntType> value) {
                value.f1.value = 1;
                return value;
            }
        }).rebalance().keyBy(0).timeWindow(Time.of(100, MILLISECONDS)).reduce(new ReduceFunction<Tuple2<Long, IntType>>() {

            @Override
            public Tuple2<Long, IntType> reduce(Tuple2<Long, IntType> a, Tuple2<Long, IntType> b) {
                return new Tuple2<>(a.f0, new IntType(1));
            }
        }).addSink(new ValidatingSink(NUM_ELEMENTS, 1)).setParallelism(1);
        tryExecute(env, "Tumbling Window Test");
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used: SuccessException (org.apache.flink.test.util.SuccessException), Tuple2 (org.apache.flink.api.java.tuple.Tuple2), StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment), Test (org.junit.Test)
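
FailingSource (also not shown here) is what actually exercises checkpointing in these tests: it emits records and deliberately fails partway through so the job restarts from the last checkpoint. Below is a minimal sketch of that idea with hypothetical names and Tuple2<Long, Long> elements instead of the test's IntType helper. Unlike the real FailingSource, this sketch does not checkpoint its emit position (so it would restart from zero) and it coordinates "fail only once" with a simple static flag, which is also why the real helper exposes a static reset() as seen above.

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.functions.source.RichSourceFunction;

// Hypothetical "failing source": emits (key, 1) pairs and throws once after
// failAfter elements to force a restart from the last checkpoint.
public class OnceFailingSource extends RichSourceFunction<Tuple2<Long, Long>> {

    private static volatile boolean hasFailed = false; // ensures the failure happens only once per test run

    private final int numElements;
    private final int failAfter;
    private volatile boolean running = true;

    public OnceFailingSource(int numElements, int failAfter) {
        this.numElements = numElements;
        this.failAfter = failAfter;
    }

    @Override
    public void run(SourceContext<Tuple2<Long, Long>> ctx) throws Exception {
        for (long i = 0; i < numElements && running; i++) {
            if (!hasFailed && i == failAfter) {
                hasFailed = true;
                throw new RuntimeException("artificial failure to trigger checkpoint recovery");
            }
            // emit under the checkpoint lock so records and checkpoints do not interleave
            synchronized (ctx.getCheckpointLock()) {
                ctx.collect(Tuple2.of(i, 1L));
            }
        }
    }

    @Override
    public void cancel() {
        running = false;
    }
}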

Example 98 with Tuple2

Use of org.apache.flink.api.java.tuple.Tuple2 in project flink by apache.

From class WindowCheckpointingITCase, method testTumblingProcessingTimeWindow.

// ------------------------------------------------------------------------
@Test
public void testTumblingProcessingTimeWindow() {
    final int NUM_ELEMENTS = 3000;
    FailingSource.reset();
    try {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", cluster.getLeaderRPCPort());
        env.setParallelism(PARALLELISM);
        env.setStreamTimeCharacteristic(timeCharacteristic);
        env.getConfig().setAutoWatermarkInterval(10);
        env.enableCheckpointing(100);
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 0));
        env.getConfig().disableSysoutLogging();
        env.addSource(new FailingSource(NUM_ELEMENTS, NUM_ELEMENTS / 3)).rebalance().keyBy(0).timeWindow(Time.of(100, MILLISECONDS)).apply(new RichWindowFunction<Tuple2<Long, IntType>, Tuple2<Long, IntType>, Tuple, TimeWindow>() {

            private boolean open = false;

            @Override
            public void open(Configuration parameters) {
                assertEquals(PARALLELISM, getRuntimeContext().getNumberOfParallelSubtasks());
                open = true;
            }

            @Override
            public void apply(Tuple tuple, TimeWindow window, Iterable<Tuple2<Long, IntType>> values, Collector<Tuple2<Long, IntType>> out) {
                // validate that the function has been opened properly
                assertTrue(open);
                for (Tuple2<Long, IntType> value : values) {
                    assertEquals(value.f0.intValue(), value.f1.value);
                    out.collect(new Tuple2<Long, IntType>(value.f0, new IntType(1)));
                }
            }
        }).addSink(new ValidatingSink(NUM_ELEMENTS, 1)).setParallelism(1);
        tryExecute(env, "Tumbling Window Test");
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used: Configuration (org.apache.flink.configuration.Configuration), TimeWindow (org.apache.flink.streaming.api.windowing.windows.TimeWindow), SuccessException (org.apache.flink.test.util.SuccessException), Tuple2 (org.apache.flink.api.java.tuple.Tuple2), StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment), Tuple (org.apache.flink.api.java.tuple.Tuple), Test (org.junit.Test)
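
Because keyBy(0) selects the tuple field by position, the key handed to the window function is typed as the generic Tuple, which is why apply(...) above takes a Tuple as its first argument. Keying with a KeySelector instead gives the window function the concrete key type. The following stand-alone sketch shows that variant under our own assumptions (Tuple2<Long, Long> elements rather than the test's IntType, illustrative class and job names); with such a tiny bounded input the processing-time windows may not fire before the job ends, so it is meant to show the types, not produce meaningful output.

import static java.util.concurrent.TimeUnit.MILLISECONDS;

import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.windowing.WindowFunction;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.util.Collector;

public class TypedKeyWindowSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple2<Long, Long>> stream =
                env.fromElements(Tuple2.of(1L, 1L), Tuple2.of(1L, 1L), Tuple2.of(2L, 1L));

        stream
            // KeySelector makes the key type Long instead of the generic Tuple from keyBy(0)
            .keyBy(new KeySelector<Tuple2<Long, Long>, Long>() {
                @Override
                public Long getKey(Tuple2<Long, Long> value) {
                    return value.f0;
                }
            })
            .timeWindow(Time.of(100, MILLISECONDS))
            .apply(new WindowFunction<Tuple2<Long, Long>, Tuple2<Long, Long>, Long, TimeWindow>() {
                @Override
                public void apply(Long key, TimeWindow window,
                                  Iterable<Tuple2<Long, Long>> values,
                                  Collector<Tuple2<Long, Long>> out) {
                    long count = 0;
                    for (Tuple2<Long, Long> ignored : values) {
                        count++;
                    }
                    // emit (key, number of elements in this window)
                    out.collect(Tuple2.of(key, count));
                }
            })
            .print();

        env.execute("typed-key window sketch");
    }
}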

Example 99 with Tuple2

Use of org.apache.flink.api.java.tuple.Tuple2 in project flink by apache.

From class StatefulUDFSavepointMigrationITCase, method testSavepointRestoreFromFlink11.

@Test
public void testSavepointRestoreFromFlink11() throws Exception {
    final int EXPECTED_SUCCESSFUL_CHECKS = 21;
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
    // for now we only test the memory state backend
    env.setStateBackend(new MemoryStateBackend());
    env.enableCheckpointing(500);
    env.setParallelism(4);
    env.setMaxParallelism(4);
    // create source
    env.addSource(new RestoringCheckingSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
            .flatMap(new RestoringCheckingFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
            .keyBy(0).flatMap(new RestoringCheckingFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
            .keyBy(0).flatMap(new KeyedStateCheckingFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
            .keyBy(0).transform("custom_operator", new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
                    new RestoringCheckingUdfOperator(new RestoringCheckingFlatMapWithKeyedState()))
            .uid("LegacyCheckpointedOperator")
            .addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>(EXPECTED_ELEMENTS_ACCUMULATOR));
    restoreAndExecute(env, getResourceFilename("stateful-udf-migration-itcase-flink1.1-savepoint"), new Tuple2<>(SUCCESSFUL_CHECK_ACCUMULATOR, EXPECTED_SUCCESSFUL_CHECKS));
}
Also used: TypeHint (org.apache.flink.api.common.typeinfo.TypeHint), MemoryStateBackend (org.apache.flink.runtime.state.memory.MemoryStateBackend), Tuple2 (org.apache.flink.api.java.tuple.Tuple2), StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment), Test (org.junit.Test)
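
The anonymous TypeHint subclass passed to transform(...) is how the test pins down the generic output type Tuple2<Long, Long>, which plain reflection cannot recover because of type erasure. A small stand-alone illustration of that mechanism (class and variable names here are ours, not from the test):

import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;

public class TypeHintSketch {
    public static void main(String[] args) {
        // The anonymous subclass captures the full generic type at compile time,
        // so Flink can build TypeInformation for Tuple2<Long, Long> despite erasure.
        TypeInformation<Tuple2<Long, Long>> info =
                new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo();

        // Equivalent factory form:
        TypeInformation<Tuple2<Long, Long>> same =
                TypeInformation.of(new TypeHint<Tuple2<Long, Long>>() {});

        System.out.println(info + " / " + same);
    }
}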

Example 100 with Tuple2

Use of org.apache.flink.api.java.tuple.Tuple2 in project flink by apache.

From class StatefulUDFSavepointMigrationITCase, method testSavepointRestoreFromFlink11FromRocksDB.

@Test
public void testSavepointRestoreFromFlink11FromRocksDB() throws Exception {
    final int EXPECTED_SUCCESSFUL_CHECKS = 21;
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
    // this test restores into a RocksDB state backend (backed by the memory state backend)
    env.setStateBackend(new RocksDBStateBackend(new MemoryStateBackend()));
    env.enableCheckpointing(500);
    env.setParallelism(4);
    env.setMaxParallelism(4);
    // create source
    env.addSource(new RestoringCheckingSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
            .flatMap(new RestoringCheckingFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
            .keyBy(0).flatMap(new RestoringCheckingFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
            .keyBy(0).flatMap(new KeyedStateCheckingFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
            .keyBy(0).transform("custom_operator", new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
                    new RestoringCheckingUdfOperator(new RestoringCheckingFlatMapWithKeyedState()))
            .uid("LegacyCheckpointedOperator")
            .addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>(EXPECTED_ELEMENTS_ACCUMULATOR));
    restoreAndExecute(env, getResourceFilename("stateful-udf-migration-itcase-flink1.1-savepoint-rocksdb"), new Tuple2<>(SUCCESSFUL_CHECK_ACCUMULATOR, EXPECTED_SUCCESSFUL_CHECKS));
}
Also used: RocksDBStateBackend (org.apache.flink.contrib.streaming.state.RocksDBStateBackend), TypeHint (org.apache.flink.api.common.typeinfo.TypeHint), MemoryStateBackend (org.apache.flink.runtime.state.memory.MemoryStateBackend), Tuple2 (org.apache.flink.api.java.tuple.Tuple2), StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment), Test (org.junit.Test)

Aggregations

Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 1159 usages
Test (org.junit.Test): 871 usages
ExecutionEnvironment (org.apache.flink.api.java.ExecutionEnvironment): 486 usages
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment): 266 usages
Tuple3 (org.apache.flink.api.java.tuple.Tuple3): 195 usages
TimeWindow (org.apache.flink.streaming.api.windowing.windows.TimeWindow): 137 usages
ArrayList (java.util.ArrayList): 136 usages
ExecutionConfig (org.apache.flink.api.common.ExecutionConfig): 103 usages
Plan (org.apache.flink.api.common.Plan): 103 usages
TypeHint (org.apache.flink.api.common.typeinfo.TypeHint): 103 usages
OptimizedPlan (org.apache.flink.optimizer.plan.OptimizedPlan): 99 usages
Configuration (org.apache.flink.configuration.Configuration): 87 usages
List (java.util.List): 82 usages
IOException (java.io.IOException): 79 usages
OneInputTransformation (org.apache.flink.streaming.api.transformations.OneInputTransformation): 77 usages
ListStateDescriptor (org.apache.flink.api.common.state.ListStateDescriptor): 74 usages
HashMap (java.util.HashMap): 72 usages
SinkPlanNode (org.apache.flink.optimizer.plan.SinkPlanNode): 66 usages
Collection (java.util.Collection): 61 usages
ConcurrentLinkedQueue (java.util.concurrent.ConcurrentLinkedQueue): 60 usages
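
For readers landing here without Flink context: Tuple2 is a plain two-field value type with public, mutable fields f0 and f1, which is why the examples above mutate value.f1 directly and key by field position 0. A minimal stand-alone usage sketch (class name and values are illustrative):

import org.apache.flink.api.java.tuple.Tuple2;

public class Tuple2Sketch {
    public static void main(String[] args) {
        // Construct via the constructor or the static factory
        Tuple2<Long, String> a = new Tuple2<>(42L, "answer");
        Tuple2<Long, String> b = Tuple2.of(7L, "lucky");

        // Fields are public and positional: f0 is the first field, f1 the second
        System.out.println(a.f0 + " -> " + a.f1);

        // Fields are mutable, which the map functions above rely on
        b.f1 = "seven";
        System.out.println(b);
    }
}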