Example 16 with KeySelector

Use of org.apache.flink.api.java.functions.KeySelector in project flink by apache.

From the class StickyAllocationAndLocalRecoveryTestJob, method main:

public static void main(String[] args) throws Exception {
    final ParameterTool pt = ParameterTool.fromArgs(args);
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(pt.getInt("parallelism", 1));
    env.setMaxParallelism(pt.getInt("maxParallelism", pt.getInt("parallelism", 1)));
    env.enableCheckpointing(pt.getInt("checkpointInterval", 1000));
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, pt.getInt("restartDelay", 0)));
    if (pt.getBoolean("externalizedCheckpoints", false)) {
        env.getCheckpointConfig().setExternalizedCheckpointCleanup(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
    }
    String checkpointDir = pt.getRequired("checkpointDir");
    env.getCheckpointConfig().setCheckpointStorage(checkpointDir);
    boolean killJvmOnFail = pt.getBoolean("killJvmOnFail", false);
    String stateBackend = pt.get("stateBackend", "hashmap");
    if ("hashmap".equals(stateBackend)) {
        env.setStateBackend(new HashMapStateBackend());
    } else if ("rocks".equals(stateBackend)) {
        boolean incrementalCheckpoints = pt.getBoolean("incrementalCheckpoints", false);
        env.setStateBackend(new EmbeddedRocksDBStateBackend(incrementalCheckpoints));
    } else {
        throw new IllegalArgumentException("Unknown backend: " + stateBackend);
    }
    // make parameters available in the web interface
    env.getConfig().setGlobalJobParameters(pt);
    // delay to throttle down the production of the source
    long delay = pt.getLong("delay", 0L);
    // the maximum number of attempts, before the job finishes with success
    int maxAttempts = pt.getInt("maxAttempts", 3);
    // size of one artificial value
    int valueSize = pt.getInt("valueSize", 10);
    env.addSource(new RandomLongSource(maxAttempts, delay))
            .keyBy((KeySelector<Long, Long>) aLong -> aLong)
            .flatMap(new StateCreatingFlatMap(valueSize, killJvmOnFail))
            .addSink(new PrintSinkFunction<>());
    env.execute("Sticky Allocation And Local Recovery Test");
}
Also used: ParameterTool(org.apache.flink.api.java.utils.ParameterTool) RichFlatMapFunction(org.apache.flink.api.common.functions.RichFlatMapFunction) RuntimeContext(org.apache.flink.api.common.functions.RuntimeContext) PrintSinkFunction(org.apache.flink.streaming.api.functions.sink.PrintSinkFunction) RestartStrategies(org.apache.flink.api.common.restartstrategy.RestartStrategies) FunctionSnapshotContext(org.apache.flink.runtime.state.FunctionSnapshotContext) EmbeddedRocksDBStateBackend(org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) ListState(org.apache.flink.api.common.state.ListState) CheckpointListener(org.apache.flink.api.common.state.CheckpointListener) Collector(org.apache.flink.util.Collector) RichParallelSourceFunction(org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction) ListStateDescriptor(org.apache.flink.api.common.state.ListStateDescriptor) KeySelector(org.apache.flink.api.java.functions.KeySelector) Iterator(java.util.Iterator) CheckpointedFunction(org.apache.flink.streaming.api.checkpoint.CheckpointedFunction) ValueStateDescriptor(org.apache.flink.api.common.state.ValueStateDescriptor) FunctionInitializationContext(org.apache.flink.runtime.state.FunctionInitializationContext) Set(java.util.Set) IOException(java.io.IOException) Preconditions(org.apache.flink.util.Preconditions) Serializable(java.io.Serializable) List(java.util.List) ValueState(org.apache.flink.api.common.state.ValueState) HashMapStateBackend(org.apache.flink.runtime.state.hashmap.HashMapStateBackend) CheckpointConfig(org.apache.flink.streaming.api.environment.CheckpointConfig) RandomStringUtils(org.apache.commons.lang3.RandomStringUtils) StreamingRuntimeContext(org.apache.flink.streaming.api.operators.StreamingRuntimeContext) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment)
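
The cast to (KeySelector<Long, Long>) in the keyBy call above pins the lambda to the KeySelector functional interface, so the key type is known to Flink's type extractor when the job graph is built. Below is a minimal standalone sketch of the same identity-key pattern, assuming a Flink 1.13+ streaming API; the sequence bounds and job name are illustrative placeholders:

import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class KeyByIdentitySketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // keyBy with an identity KeySelector: every record is its own key
        env.fromSequence(0, 99)
                .keyBy((KeySelector<Long, Long>) value -> value)
                .map(value -> value * 2)
                .print();
        env.execute("keyBy identity sketch");
    }
}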

Example 17 with KeySelector

Use of org.apache.flink.api.java.functions.KeySelector in project flink by apache.

From the class DataSetAllroundTestProgram, method main:

@SuppressWarnings("Convert2Lambda")
public static void main(String[] args) throws Exception {
    // get parameters
    ParameterTool params = ParameterTool.fromArgs(args);
    int loadFactor = Integer.parseInt(params.getRequired("loadFactor"));
    String outputPath = params.getRequired("outputPath");
    boolean infinite = params.getBoolean("infinite", false);
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    int numKeys = loadFactor * 128 * 1024;
    DataSet<Tuple2<String, Integer>> x1Keys;
    DataSet<Tuple2<String, Integer>> x2Keys = env.createInput(Generator.generate(numKeys * 32, 2)).setParallelism(4);
    DataSet<Tuple2<String, Integer>> x8Keys = env.createInput(Generator.generate(numKeys, 8)).setParallelism(4);
    if (infinite) {
        x1Keys = env.createInput(Generator.generateInfinitely(numKeys)).setParallelism(4);
    } else {
        x1Keys = env.createInput(Generator.generate(numKeys, 1)).setParallelism(4);
    }
    DataSet<Tuple2<String, Integer>> joined = x2Keys
            .map(x -> Tuple4.of("0-0", 0L, 1, x.f0))
            .returns(Types.TUPLE(Types.STRING, Types.LONG, Types.INT, Types.STRING))
            .join(x8Keys).where(3).equalTo(0)
            .with((l, r) -> Tuple2.of(l.f3, 1))
            .returns(Types.TUPLE(Types.STRING, Types.INT))
            .groupBy(new KeySelector<Tuple2<String, Integer>, String>() {
                @Override
                public String getKey(Tuple2<String, Integer> value) {
                    return value.f0;
                }
            })
            .reduce((value1, value2) -> Tuple2.of(value1.f0, value1.f1 + value2.f1));
    // co-group two datasets on their primary keys.
    // we filter both inputs such that only 6.25% of the keys overlap.
    // result: (key, cnt), #keys records with unique keys, cnt = (6.25%: 2, 93.75%: 1)
    DataSet<Tuple2<String, Integer>> coGrouped = x1Keys.filter(x -> x.f1 > 59)
            .coGroup(x1Keys.filter(x -> x.f1 < 68))
            .where("f0").equalTo("f0")
            .with((CoGroupFunction<Tuple2<String, Integer>, Tuple2<String, Integer>, Tuple2<String, Integer>>) (l, r, out) -> {
                int cnt = 0;
                String key = "";
                for (Tuple2<String, Integer> t : l) {
                    cnt++;
                    key = t.f0;
                }
                for (Tuple2<String, Integer> t : r) {
                    cnt++;
                    key = t.f0;
                }
                out.collect(Tuple2.of(key, cnt));
            })
            .returns(Types.TUPLE(Types.STRING, Types.INT));
    // join datasets on keys (1-1 join) and replicate by 16 (previously computed count)
    // result: (key, cnt), 16 * #keys records, all keys preserved, cnt = (6.25%: 2, 93.75%: 1)
    DataSet<Tuple2<String, Integer>> joined2 = joined
            .join(coGrouped, JoinOperatorBase.JoinHint.REPARTITION_SORT_MERGE)
            .where(0).equalTo("f0")
            .flatMap((FlatMapFunction<Tuple2<Tuple2<String, Integer>, Tuple2<String, Integer>>, Tuple2<String, Integer>>) (p, out) -> {
                for (int i = 0; i < p.f0.f1; i++) {
                    out.collect(Tuple2.of(p.f0.f0, p.f1.f1));
                }
            })
            .returns(Types.TUPLE(Types.STRING, Types.INT));
    // iteration. double the count field until all counts are at 32 or more
    // result: (key, cnt), 16 * #keys records, all keys preserved, cnt = (6.25%: 64, 93.75%: 32)
    IterativeDataSet<Tuple2<String, Integer>> initial = joined2.iterate(16);
    DataSet<Tuple2<String, Integer>> iteration = initial.map(x -> Tuple2.of(x.f0, x.f1 * 2)).returns(Types.TUPLE(Types.STRING, Types.INT));
    DataSet<Boolean> termination = iteration.flatMap((FlatMapFunction<Tuple2<String, Integer>, Boolean>) (x, out) -> {
        if (x.f1 < 32) {
            out.collect(false);
        }
    }).returns(Types.BOOLEAN);
    DataSet<Tuple2<Integer, Integer>> result = initial.closeWith(iteration, termination)
            .groupBy(1)
            .reduceGroup((GroupReduceFunction<Tuple2<String, Integer>, Tuple2<Integer, Integer>>) (g, out) -> {
                int key = 0;
                int cnt = 0;
                for (Tuple2<String, Integer> r : g) {
                    key = r.f1;
                    cnt++;
                }
                out.collect(Tuple2.of(key, cnt));
            })
            .returns(Types.TUPLE(Types.INT, Types.INT))
            .map(x -> Tuple2.of(x.f0, x.f1 / (loadFactor * 128)))
            .returns(Types.TUPLE(Types.INT, Types.INT));
    // sort and emit result
    result.sortPartition(0, Order.ASCENDING).setParallelism(1)
            .writeAsText(outputPath, FileSystem.WriteMode.OVERWRITE)
            .setParallelism(1);
    env.execute();
}
Also used: ParameterTool(org.apache.flink.api.java.utils.ParameterTool) Types(org.apache.flink.api.common.typeinfo.Types) KeySelector(org.apache.flink.api.java.functions.KeySelector) JoinOperatorBase(org.apache.flink.api.common.operators.base.JoinOperatorBase) Tuple2(org.apache.flink.api.java.tuple.Tuple2) Tuple4(org.apache.flink.api.java.tuple.Tuple4) GroupReduceFunction(org.apache.flink.api.common.functions.GroupReduceFunction) IterativeDataSet(org.apache.flink.api.java.operators.IterativeDataSet) FlatMapFunction(org.apache.flink.api.common.functions.FlatMapFunction) CoGroupFunction(org.apache.flink.api.common.functions.CoGroupFunction) DataSet(org.apache.flink.api.java.DataSet) ExecutionEnvironment(org.apache.flink.api.java.ExecutionEnvironment) FileSystem(org.apache.flink.core.fs.FileSystem) Order(org.apache.flink.api.common.operators.Order)
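
The @SuppressWarnings("Convert2Lambda") above is deliberate: the anonymous KeySelector class carries its full generic signature at runtime, whereas converting it to a lambda would erase the key type and force an explicit type hint. A minimal sketch of the same groupBy-then-reduce pattern in isolation, with illustrative data:

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;

public class GroupByKeySelectorSketch {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<String, Integer>> data =
                env.fromElements(Tuple2.of("a", 1), Tuple2.of("b", 2), Tuple2.of("a", 3));
        // group on the String field via a KeySelector, then sum the counts per key
        data.groupBy(new KeySelector<Tuple2<String, Integer>, String>() {
                @Override
                public String getKey(Tuple2<String, Integer> value) {
                    return value.f0;
                }
            })
            .reduce((a, b) -> Tuple2.of(a.f0, a.f1 + b.f1))
            .print();
    }
}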

Example 18 with KeySelector

Use of org.apache.flink.api.java.functions.KeySelector in project flink by apache.

From the class TypeExtractorTest, method testValue:

@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
public void testValue() {
    // use getKeyExtractorType()
    KeySelector<?, ?> function = new KeySelector<StringValue, StringValue>() {

        private static final long serialVersionUID = 1L;

        @Override
        public StringValue getKey(StringValue value) {
            return null;
        }
    };
    TypeInformation<?> ti = TypeExtractor.getKeySelectorTypes(function, (TypeInformation) TypeInformation.of(new TypeHint<StringValue>() {
    }));
    Assert.assertFalse(ti.isBasicType());
    Assert.assertFalse(ti.isTupleType());
    Assert.assertTrue(ti instanceof ValueTypeInfo);
    Assert.assertEquals(ti.getTypeClass(), StringValue.class);
    // use getForClass()
    Assert.assertTrue(TypeExtractor.getForClass(StringValue.class) instanceof ValueTypeInfo);
    Assert.assertEquals(TypeExtractor.getForClass(StringValue.class).getTypeClass(), ti.getTypeClass());
    // use getForObject()
    StringValue v = new StringValue("Hello");
    Assert.assertTrue(TypeExtractor.getForObject(v) instanceof ValueTypeInfo);
    Assert.assertEquals(TypeExtractor.getForObject(v).getTypeClass(), ti.getTypeClass());
}
Also used: KeySelector(org.apache.flink.api.java.functions.KeySelector) StringValue(org.apache.flink.types.StringValue) Test(org.junit.Test)
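
The same extraction can be exercised outside a test. A minimal sketch, assuming only TypeExtractor's public API as used above; the selector and input type are illustrative:

import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.typeutils.TypeExtractor;
import org.apache.flink.types.StringValue;

public class KeySelectorTypeSketch {
    public static void main(String[] args) {
        KeySelector<String, StringValue> selector = new KeySelector<String, StringValue>() {
            @Override
            public StringValue getKey(String value) {
                return new StringValue(value);
            }
        };
        // inspects the selector's generic signature and returns the key's TypeInformation
        TypeInformation<StringValue> keyType =
                TypeExtractor.getKeySelectorTypes(selector, TypeInformation.of(String.class));
        // expected to print a ValueTypeInfo for StringValue
        System.out.println(keyType);
    }
}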

Example 19 with KeySelector

Use of org.apache.flink.api.java.functions.KeySelector in project flink by apache.

From the class GroupingTest, method testGroupSortKeyFields3:

@Test(expected = InvalidProgramException.class)
public void testGroupSortKeyFields3() {
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Long> longDs = env.fromCollection(emptyLongData, BasicTypeInfo.LONG_TYPE_INFO);
    // should not work: sorted groups on groupings by key selectors
    longDs.groupBy(new KeySelector<Long, Long>() {

        private static final long serialVersionUID = 1L;

        @Override
        public Long getKey(Long value) {
            return value;
        }
    }).sortGroup(0, Order.ASCENDING);
}
Also used: ExecutionEnvironment(org.apache.flink.api.java.ExecutionEnvironment) KeySelector(org.apache.flink.api.java.functions.KeySelector) Test(org.junit.Test)
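
As the test shows, positional sort keys cannot be combined with a grouping by KeySelector. The supported combination sorts the groups with a KeySelector as well. A minimal sketch with illustrative data (a hedged example, not taken from the Flink test suite):

import org.apache.flink.api.common.operators.Order;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;

public class SortGroupSketch {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<String, Integer>> data =
                env.fromElements(Tuple2.of("a", 3), Tuple2.of("a", 1), Tuple2.of("b", 2));
        data.groupBy(new KeySelector<Tuple2<String, Integer>, String>() {
                @Override
                public String getKey(Tuple2<String, Integer> value) {
                    return value.f0;
                }
            })
            // KeySelector group-sorting keys may only be combined with KeySelector grouping keys;
            // sortGroup(0, ...) here would throw InvalidProgramException, as the test asserts
            .sortGroup(new KeySelector<Tuple2<String, Integer>, Integer>() {
                @Override
                public Integer getKey(Tuple2<String, Integer> value) {
                    return value.f1;
                }
            }, Order.ASCENDING)
            .first(1)
            .print();
    }
}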

Example 20 with KeySelector

Use of org.apache.flink.api.java.functions.KeySelector in project flink by apache.

From the class CEPMigrationTest, method testRestoreStartingNewPatternAfterMigration:

@Test
public void testRestoreStartingNewPatternAfterMigration() throws Exception {
    KeySelector<Event, Integer> keySelector = new KeySelector<Event, Integer>() {

        private static final long serialVersionUID = -4873366487571254798L;

        @Override
        public Integer getKey(Event value) throws Exception {
            return value.getId();
        }
    };
    final Event startEvent1 = new Event(42, "start", 1.0);
    final SubEvent middleEvent1 = new SubEvent(42, "foo1", 1.0, 10.0);
    final Event startEvent2 = new Event(42, "start", 5.0);
    final SubEvent middleEvent2 = new SubEvent(42, "foo2", 2.0, 10.0);
    final Event endEvent = new Event(42, "end", 1.0);
    OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
            new KeyedOneInputStreamOperatorTestHarness<>(
                    CepOperatorTestUtilities.getKeyedCepOperator(false, new NFAFactory()),
                    keySelector,
                    BasicTypeInfo.INT_TYPE_INFO);
    try {
        harness.setup();
        harness.initializeState(OperatorSnapshotUtil.getResourceFilename("cep-migration-starting-new-pattern-flink" + migrateVersion + "-snapshot"));
        harness.open();
        harness.processElement(new StreamRecord<>(startEvent2, 5));
        harness.processElement(new StreamRecord<Event>(middleEvent2, 6));
        harness.processElement(new StreamRecord<>(endEvent, 7));
        harness.processWatermark(new Watermark(20));
        ConcurrentLinkedQueue<Object> result = harness.getOutput();
        // watermark and 3 results
        assertEquals(4, result.size());
        Object resultObject1 = result.poll();
        assertTrue(resultObject1 instanceof StreamRecord);
        StreamRecord<?> resultRecord1 = (StreamRecord<?>) resultObject1;
        assertTrue(resultRecord1.getValue() instanceof Map);
        Object resultObject2 = result.poll();
        assertTrue(resultObject2 instanceof StreamRecord);
        StreamRecord<?> resultRecord2 = (StreamRecord<?>) resultObject2;
        assertTrue(resultRecord2.getValue() instanceof Map);
        Object resultObject3 = result.poll();
        assertTrue(resultObject3 instanceof StreamRecord);
        StreamRecord<?> resultRecord3 = (StreamRecord<?>) resultObject3;
        assertTrue(resultRecord3.getValue() instanceof Map);
        @SuppressWarnings("unchecked") Map<String, List<Event>> patternMap1 = (Map<String, List<Event>>) resultRecord1.getValue();
        assertEquals(startEvent1, patternMap1.get("start").get(0));
        assertEquals(middleEvent1, patternMap1.get("middle").get(0));
        assertEquals(endEvent, patternMap1.get("end").get(0));
        @SuppressWarnings("unchecked") Map<String, List<Event>> patternMap2 = (Map<String, List<Event>>) resultRecord2.getValue();
        assertEquals(startEvent1, patternMap2.get("start").get(0));
        assertEquals(middleEvent2, patternMap2.get("middle").get(0));
        assertEquals(endEvent, patternMap2.get("end").get(0));
        @SuppressWarnings("unchecked") Map<String, List<Event>> patternMap3 = (Map<String, List<Event>>) resultRecord3.getValue();
        assertEquals(startEvent2, patternMap3.get("start").get(0));
        assertEquals(middleEvent2, patternMap3.get("middle").get(0));
        assertEquals(endEvent, patternMap3.get("end").get(0));
        // and now go for a checkpoint with the new serializers
        final Event startEvent3 = new Event(42, "start", 2.0);
        final SubEvent middleEvent3 = new SubEvent(42, "foo", 1.0, 11.0);
        final Event endEvent1 = new Event(42, "end", 2.0);
        harness.processElement(new StreamRecord<Event>(startEvent3, 21));
        harness.processElement(new StreamRecord<Event>(middleEvent3, 23));
        // simulate snapshot/restore with some elements in internal sorting queue
        OperatorSubtaskState snapshot = harness.snapshot(1L, 1L);
        harness.close();
        harness = new KeyedOneInputStreamOperatorTestHarness<>(
                CepOperatorTestUtilities.getKeyedCepOperator(false, new NFAFactory()),
                keySelector,
                BasicTypeInfo.INT_TYPE_INFO);
        harness.setup();
        harness.initializeState(snapshot);
        harness.open();
        harness.processElement(new StreamRecord<>(endEvent1, 25));
        harness.processWatermark(new Watermark(50));
        result = harness.getOutput();
        // watermark and the result
        assertEquals(2, result.size());
        Object resultObject4 = result.poll();
        assertTrue(resultObject4 instanceof StreamRecord);
        StreamRecord<?> resultRecord4 = (StreamRecord<?>) resultObject4;
        assertTrue(resultRecord4.getValue() instanceof Map);
        @SuppressWarnings("unchecked") Map<String, List<Event>> patternMap4 = (Map<String, List<Event>>) resultRecord4.getValue();
        assertEquals(startEvent3, patternMap4.get("start").get(0));
        assertEquals(middleEvent3, patternMap4.get("middle").get(0));
        assertEquals(endEvent1, patternMap4.get("end").get(0));
    } finally {
        harness.close();
    }
}
Also used: SubEvent(org.apache.flink.cep.SubEvent) StreamRecord(org.apache.flink.streaming.runtime.streamrecord.StreamRecord) KeySelector(org.apache.flink.api.java.functions.KeySelector) KeyedOneInputStreamOperatorTestHarness(org.apache.flink.streaming.util.KeyedOneInputStreamOperatorTestHarness) OperatorSubtaskState(org.apache.flink.runtime.checkpoint.OperatorSubtaskState) Event(org.apache.flink.cep.Event) List(java.util.List) Map(java.util.Map) Watermark(org.apache.flink.streaming.api.watermark.Watermark) Test(org.junit.Test)
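
The keyed harness pattern above generalizes beyond CEP: any keyed one-input operator can be driven the same way, with the KeySelector deciding how the harness partitions state. A minimal sketch around a trivial StreamMap operator, assuming the flink-streaming-java test utilities are on the classpath; the key function and values are illustrative:

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.streaming.api.operators.StreamMap;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
import org.apache.flink.streaming.util.KeyedOneInputStreamOperatorTestHarness;
import org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness;

public class KeyedHarnessSketch {
    public static void main(String[] args) throws Exception {
        // partition records into two keys by parity
        KeySelector<Integer, Integer> keySelector = value -> value % 2;
        OneInputStreamOperatorTestHarness<Integer, Integer> harness =
                new KeyedOneInputStreamOperatorTestHarness<>(
                        new StreamMap<>((MapFunction<Integer, Integer>) x -> x * 10),
                        keySelector,
                        BasicTypeInfo.INT_TYPE_INFO);
        harness.setup();
        harness.open();
        harness.processElement(new StreamRecord<>(7, 1L));
        // the output queue holds the emitted StreamRecord(s), here a single record with value 70
        System.out.println(harness.getOutput());
        harness.close();
    }
}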

Aggregations

KeySelector (org.apache.flink.api.java.functions.KeySelector): 120 uses
Test (org.junit.Test): 113 uses
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 45 uses
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment): 44 uses
ExecutionEnvironment (org.apache.flink.api.java.ExecutionEnvironment): 39 uses
Watermark (org.apache.flink.streaming.api.watermark.Watermark): 30 uses
List (java.util.List): 29 uses
StreamRecord (org.apache.flink.streaming.runtime.streamrecord.StreamRecord): 28 uses
InvalidProgramException (org.apache.flink.api.common.InvalidProgramException): 22 uses
JobID (org.apache.flink.api.common.JobID): 22 uses
JobGraph (org.apache.flink.runtime.jobgraph.JobGraph): 22 uses
IOException (java.io.IOException): 21 uses
Arrays (java.util.Arrays): 21 uses
AtomicLong (java.util.concurrent.atomic.AtomicLong): 21 uses
Configuration (org.apache.flink.configuration.Configuration): 21 uses
KeyedOneInputStreamOperatorTestHarness (org.apache.flink.streaming.util.KeyedOneInputStreamOperatorTestHarness): 21 uses
ArrayList (java.util.ArrayList): 18 uses
Map (java.util.Map): 18 uses
ExecutionConfig (org.apache.flink.api.common.ExecutionConfig): 18 uses
ValueStateDescriptor (org.apache.flink.api.common.state.ValueStateDescriptor): 16 uses