Use of org.apache.flink.api.java.functions.KeySelector in project flink by apache.
The class StickyAllocationAndLocalRecoveryTestJob, method main.
public static void main(String[] args) throws Exception {
final ParameterTool pt = ParameterTool.fromArgs(args);
final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(pt.getInt("parallelism", 1));
env.setMaxParallelism(pt.getInt("maxParallelism", pt.getInt("parallelism", 1)));
env.enableCheckpointing(pt.getInt("checkpointInterval", 1000));
env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, pt.getInt("restartDelay", 0)));
if (pt.getBoolean("externalizedCheckpoints", false)) {
env.getCheckpointConfig().setExternalizedCheckpointCleanup(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
}
String checkpointDir = pt.getRequired("checkpointDir");
env.getCheckpointConfig().setCheckpointStorage(checkpointDir);
boolean killJvmOnFail = pt.getBoolean("killJvmOnFail", false);
String stateBackend = pt.get("stateBackend", "hashmap");
if ("hashmap".equals(stateBackend)) {
env.setStateBackend(new HashMapStateBackend());
} else if ("rocks".equals(stateBackend)) {
boolean incrementalCheckpoints = pt.getBoolean("incrementalCheckpoints", false);
env.setStateBackend(new EmbeddedRocksDBStateBackend(incrementalCheckpoints));
} else {
throw new IllegalArgumentException("Unknown backend: " + stateBackend);
}
// make parameters available in the web interface
env.getConfig().setGlobalJobParameters(pt);
// delay to throttle down the production of the source
long delay = pt.getLong("delay", 0L);
// the maximum number of attempts before the job finishes with success
int maxAttempts = pt.getInt("maxAttempts", 3);
// size of one artificial value
int valueSize = pt.getInt("valueSize", 10);
env.addSource(new RandomLongSource(maxAttempts, delay))
    .keyBy((KeySelector<Long, Long>) aLong -> aLong)
    .flatMap(new StateCreatingFlatMap(valueSize, killJvmOnFail))
    .addSink(new PrintSinkFunction<>());
env.execute("Sticky Allocation And Local Recovery Test");
}
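The cast to KeySelector<Long, Long> in the keyBy call above is needed because a bare lambda erases its generic parameters. Below is a minimal standalone sketch, not taken from the Flink sources, of the equivalent anonymous-class form, which carries its own type information and needs no cast; the class name and the modulo key are illustrative, and it assumes a Flink version (1.12 or later) that provides fromSequence.
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class KeyByExample {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<Long> numbers = env.fromSequence(0, 99);
        numbers
            // An anonymous KeySelector keeps its generic types, so no cast is required.
            .keyBy(new KeySelector<Long, Long>() {
                @Override
                public Long getKey(Long value) {
                    return value % 10; // illustrative key: partition into ten groups
                }
            })
            .print();
        env.execute("keyBy example");
    }
}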
Use of org.apache.flink.api.java.functions.KeySelector in project flink by apache.
The class DataSetAllroundTestProgram, method main.
@SuppressWarnings("Convert2Lambda")
public static void main(String[] args) throws Exception {
// get parameters
ParameterTool params = ParameterTool.fromArgs(args);
int loadFactor = Integer.parseInt(params.getRequired("loadFactor"));
String outputPath = params.getRequired("outputPath");
boolean infinite = params.getBoolean("infinite", false);
ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
int numKeys = loadFactor * 128 * 1024;
DataSet<Tuple2<String, Integer>> x1Keys;
DataSet<Tuple2<String, Integer>> x2Keys = env.createInput(Generator.generate(numKeys * 32, 2)).setParallelism(4);
DataSet<Tuple2<String, Integer>> x8Keys = env.createInput(Generator.generate(numKeys, 8)).setParallelism(4);
if (infinite) {
x1Keys = env.createInput(Generator.generateInfinitely(numKeys)).setParallelism(4);
} else {
x1Keys = env.createInput(Generator.generate(numKeys, 1)).setParallelism(4);
}
DataSet<Tuple2<String, Integer>> joined = x2Keys
    .map(x -> Tuple4.of("0-0", 0L, 1, x.f0))
    .returns(Types.TUPLE(Types.STRING, Types.LONG, Types.INT, Types.STRING))
    .join(x8Keys)
    .where(3)
    .equalTo(0)
    .with((l, r) -> Tuple2.of(l.f3, 1))
    .returns(Types.TUPLE(Types.STRING, Types.INT))
    .groupBy(new KeySelector<Tuple2<String, Integer>, String>() {
        @Override
        public String getKey(Tuple2<String, Integer> value) {
            return value.f0;
        }
    })
    .reduce((value1, value2) -> Tuple2.of(value1.f0, value1.f1 + value2.f1));
// co-group two datasets on their primary keys.
// we filter both inputs such that only 6.25% of the keys overlap.
// result: (key, cnt), #keys records with unique keys, cnt = (6.25%: 2, 93.75%: 1)
DataSet<Tuple2<String, Integer>> coGrouped = x1Keys
    .filter(x -> x.f1 > 59)
    .coGroup(x1Keys.filter(x -> x.f1 < 68))
    .where("f0")
    .equalTo("f0")
    .with((CoGroupFunction<Tuple2<String, Integer>, Tuple2<String, Integer>, Tuple2<String, Integer>>) (l, r, out) -> {
        int cnt = 0;
        String key = "";
        for (Tuple2<String, Integer> t : l) {
            cnt++;
            key = t.f0;
        }
        for (Tuple2<String, Integer> t : r) {
            cnt++;
            key = t.f0;
        }
        out.collect(Tuple2.of(key, cnt));
    })
    .returns(Types.TUPLE(Types.STRING, Types.INT));
// join datasets on keys (1-1 join) and replicate by 16 (previously computed count)
// result: (key, cnt), 16 * #keys records, all keys preserved, cnt = (6.25%: 2, 93.75%: 1)
DataSet<Tuple2<String, Integer>> joined2 = joined
    .join(coGrouped, JoinOperatorBase.JoinHint.REPARTITION_SORT_MERGE)
    .where(0)
    .equalTo("f0")
    .flatMap((FlatMapFunction<Tuple2<Tuple2<String, Integer>, Tuple2<String, Integer>>, Tuple2<String, Integer>>) (p, out) -> {
        for (int i = 0; i < p.f0.f1; i++) {
            out.collect(Tuple2.of(p.f0.f0, p.f1.f1));
        }
    })
    .returns(Types.TUPLE(Types.STRING, Types.INT));
// iteration: double the count field until all counts are at 32 or more
// result: (key, cnt), 16 * #keys records, all keys preserved, cnt = (6.25%: 64, 93.75%: 32)
IterativeDataSet<Tuple2<String, Integer>> initial = joined2.iterate(16);
DataSet<Tuple2<String, Integer>> iteration = initial.map(x -> Tuple2.of(x.f0, x.f1 * 2)).returns(Types.TUPLE(Types.STRING, Types.INT));
DataSet<Boolean> termination = iteration
    .flatMap((FlatMapFunction<Tuple2<String, Integer>, Boolean>) (x, out) -> {
        if (x.f1 < 32) {
            out.collect(false);
        }
    })
    .returns(Types.BOOLEAN);
DataSet<Tuple2<Integer, Integer>> result = initial
    .closeWith(iteration, termination)
    .groupBy(1)
    .reduceGroup((GroupReduceFunction<Tuple2<String, Integer>, Tuple2<Integer, Integer>>) (g, out) -> {
        int key = 0;
        int cnt = 0;
        for (Tuple2<String, Integer> r : g) {
            key = r.f1;
            cnt++;
        }
        out.collect(Tuple2.of(key, cnt));
    })
    .returns(Types.TUPLE(Types.INT, Types.INT))
    .map(x -> Tuple2.of(x.f0, x.f1 / (loadFactor * 128)))
    .returns(Types.TUPLE(Types.INT, Types.INT));
// sort and emit result
result.sortPartition(0, Order.ASCENDING).setParallelism(1)
    .writeAsText(outputPath, FileSystem.WriteMode.OVERWRITE).setParallelism(1);
env.execute();
}
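The groupBy(KeySelector) call in the pipeline above is the DataSet-API counterpart of keyBy. Here is a condensed sketch of the same group-then-reduce pattern in isolation; the class name and sample data are illustrative, not from the Flink sources.
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;

public class GroupByKeySelectorExample {

    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<String, Integer>> counts = env.fromElements(
            Tuple2.of("a", 1), Tuple2.of("b", 2), Tuple2.of("a", 3));
        counts
            .groupBy(new KeySelector<Tuple2<String, Integer>, String>() {
                @Override
                public String getKey(Tuple2<String, Integer> value) {
                    return value.f0; // group on the String field
                }
            })
            // Sum the counts per key; yields (a, 4) and (b, 2) for the sample data.
            .reduce((v1, v2) -> Tuple2.of(v1.f0, v1.f1 + v2.f1))
            .print();
    }
}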
Use of org.apache.flink.api.java.functions.KeySelector in project flink by apache.
The class TypeExtractorTest, method testValue.
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
public void testValue() {
// use getKeySelectorTypes()
KeySelector<?, ?> function = new KeySelector<StringValue, StringValue>() {
private static final long serialVersionUID = 1L;
@Override
public StringValue getKey(StringValue value) {
return null;
}
};
TypeInformation<?> ti = TypeExtractor.getKeySelectorTypes(
    function, (TypeInformation) TypeInformation.of(new TypeHint<StringValue>() {}));
Assert.assertFalse(ti.isBasicType());
Assert.assertFalse(ti.isTupleType());
Assert.assertTrue(ti instanceof ValueTypeInfo);
Assert.assertEquals(ti.getTypeClass(), StringValue.class);
// use getForClass()
Assert.assertTrue(TypeExtractor.getForClass(StringValue.class) instanceof ValueTypeInfo);
Assert.assertEquals(TypeExtractor.getForClass(StringValue.class).getTypeClass(), ti.getTypeClass());
// use getForObject()
StringValue v = new StringValue("Hello");
Assert.assertTrue(TypeExtractor.getForObject(v) instanceof ValueTypeInfo);
Assert.assertEquals(TypeExtractor.getForObject(v).getTypeClass(), ti.getTypeClass());
}
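For contrast with the ValueTypeInfo assertions above, here is a small sketch using the same TypeExtractor API with a selector (hypothetical, not from the test) whose key is a plain Integer, for which the extractor reports a basic type instead.
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.typeutils.TypeExtractor;

public class KeyTypeExample {

    public static void main(String[] args) {
        KeySelector<String, Integer> lengthSelector = new KeySelector<String, Integer>() {
            @Override
            public Integer getKey(String value) {
                return value.length();
            }
        };
        TypeInformation<Integer> keyType =
            TypeExtractor.getKeySelectorTypes(lengthSelector, BasicTypeInfo.STRING_TYPE_INFO);
        // Unlike the StringValue case in the test, this key type is a basic type.
        System.out.println(keyType + ", isBasicType=" + keyType.isBasicType());
    }
}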
Use of org.apache.flink.api.java.functions.KeySelector in project flink by apache.
The class GroupingTest, method testGroupSortKeyFields3.
@Test(expected = InvalidProgramException.class)
public void testGroupSortKeyFields3() {
final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
DataSet<Long> longDs = env.fromCollection(emptyLongData, BasicTypeInfo.LONG_TYPE_INFO);
// should not work: sortGroup() with a field position is not supported on groupings by KeySelector
longDs.groupBy(new KeySelector<Long, Long>() {
private static final long serialVersionUID = 1L;
@Override
public Long getKey(Long value) {
return value;
}
}).sortGroup(0, Order.ASCENDING);
}
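The supported alternative is to sort the groups with a KeySelector as well, using the sortGroup(KeySelector, Order) overload of the DataSet API. A sketch with illustrative data, not taken from the test:
import org.apache.flink.api.common.operators.Order;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.functions.KeySelector;

public class SortGroupExample {

    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Long> longDs = env.fromElements(3L, 1L, 4L, 1L, 5L);
        longDs
            .groupBy(new KeySelector<Long, Long>() {
                @Override
                public Long getKey(Long value) {
                    return value % 2; // group into odd and even values
                }
            })
            // Sorting by a KeySelector is valid here; sortGroup(0, ...) is not.
            .sortGroup(new KeySelector<Long, Long>() {
                @Override
                public Long getKey(Long value) {
                    return value;
                }
            }, Order.ASCENDING)
            .first(2)
            .print();
    }
}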
Use of org.apache.flink.api.java.functions.KeySelector in project flink by apache.
The class CEPMigrationTest, method testRestoreStartingNewPatternAfterMigration.
@Test
public void testRestoreStartingNewPatternAfterMigration() throws Exception {
KeySelector<Event, Integer> keySelector = new KeySelector<Event, Integer>() {
private static final long serialVersionUID = -4873366487571254798L;
@Override
public Integer getKey(Event value) throws Exception {
return value.getId();
}
};
final Event startEvent1 = new Event(42, "start", 1.0);
final SubEvent middleEvent1 = new SubEvent(42, "foo1", 1.0, 10.0);
final Event startEvent2 = new Event(42, "start", 5.0);
final SubEvent middleEvent2 = new SubEvent(42, "foo2", 2.0, 10.0);
final Event endEvent = new Event(42, "end", 1.0);
OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
    new KeyedOneInputStreamOperatorTestHarness<>(
        CepOperatorTestUtilities.getKeyedCepOperator(false, new NFAFactory()),
        keySelector,
        BasicTypeInfo.INT_TYPE_INFO);
try {
harness.setup();
harness.initializeState(OperatorSnapshotUtil.getResourceFilename("cep-migration-starting-new-pattern-flink" + migrateVersion + "-snapshot"));
harness.open();
harness.processElement(new StreamRecord<>(startEvent2, 5));
harness.processElement(new StreamRecord<Event>(middleEvent2, 6));
harness.processElement(new StreamRecord<>(endEvent, 7));
harness.processWatermark(new Watermark(20));
ConcurrentLinkedQueue<Object> result = harness.getOutput();
// watermark and 3 results
assertEquals(4, result.size());
Object resultObject1 = result.poll();
assertTrue(resultObject1 instanceof StreamRecord);
StreamRecord<?> resultRecord1 = (StreamRecord<?>) resultObject1;
assertTrue(resultRecord1.getValue() instanceof Map);
Object resultObject2 = result.poll();
assertTrue(resultObject2 instanceof StreamRecord);
StreamRecord<?> resultRecord2 = (StreamRecord<?>) resultObject2;
assertTrue(resultRecord2.getValue() instanceof Map);
Object resultObject3 = result.poll();
assertTrue(resultObject3 instanceof StreamRecord);
StreamRecord<?> resultRecord3 = (StreamRecord<?>) resultObject3;
assertTrue(resultRecord3.getValue() instanceof Map);
@SuppressWarnings("unchecked") Map<String, List<Event>> patternMap1 = (Map<String, List<Event>>) resultRecord1.getValue();
assertEquals(startEvent1, patternMap1.get("start").get(0));
assertEquals(middleEvent1, patternMap1.get("middle").get(0));
assertEquals(endEvent, patternMap1.get("end").get(0));
@SuppressWarnings("unchecked") Map<String, List<Event>> patternMap2 = (Map<String, List<Event>>) resultRecord2.getValue();
assertEquals(startEvent1, patternMap2.get("start").get(0));
assertEquals(middleEvent2, patternMap2.get("middle").get(0));
assertEquals(endEvent, patternMap2.get("end").get(0));
@SuppressWarnings("unchecked") Map<String, List<Event>> patternMap3 = (Map<String, List<Event>>) resultRecord3.getValue();
assertEquals(startEvent2, patternMap3.get("start").get(0));
assertEquals(middleEvent2, patternMap3.get("middle").get(0));
assertEquals(endEvent, patternMap3.get("end").get(0));
// and now go for a checkpoint with the new serializers
final Event startEvent3 = new Event(42, "start", 2.0);
final SubEvent middleEvent3 = new SubEvent(42, "foo", 1.0, 11.0);
final Event endEvent1 = new Event(42, "end", 2.0);
harness.processElement(new StreamRecord<Event>(startEvent3, 21));
harness.processElement(new StreamRecord<Event>(middleEvent3, 23));
// simulate snapshot/restore with some elements in internal sorting queue
OperatorSubtaskState snapshot = harness.snapshot(1L, 1L);
harness.close();
harness = new KeyedOneInputStreamOperatorTestHarness<>(
    CepOperatorTestUtilities.getKeyedCepOperator(false, new NFAFactory()),
    keySelector,
    BasicTypeInfo.INT_TYPE_INFO);
harness.setup();
harness.initializeState(snapshot);
harness.open();
harness.processElement(new StreamRecord<>(endEvent1, 25));
harness.processWatermark(new Watermark(50));
result = harness.getOutput();
// watermark and the result
assertEquals(2, result.size());
Object resultObject4 = result.poll();
assertTrue(resultObject4 instanceof StreamRecord);
StreamRecord<?> resultRecord4 = (StreamRecord<?>) resultObject4;
assertTrue(resultRecord4.getValue() instanceof Map);
@SuppressWarnings("unchecked") Map<String, List<Event>> patternMap4 = (Map<String, List<Event>>) resultRecord4.getValue();
assertEquals(startEvent3, patternMap4.get("start").get(0));
assertEquals(middleEvent3, patternMap4.get("middle").get(0));
assertEquals(endEvent1, patternMap4.get("end").get(0));
} finally {
harness.close();
}
}
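The same style of KeySelector that keys the test harness above would key a production CEP stream. Below is a sketch of that wiring, assuming the Event test class from the snippet with getId() and getName() accessors; the pipeline itself is hypothetical and not part of the test.
import java.util.List;
import java.util.Map;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.cep.CEP;
import org.apache.flink.cep.PatternSelectFunction;
import org.apache.flink.cep.pattern.Pattern;
import org.apache.flink.cep.pattern.conditions.SimpleCondition;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class CepKeyByExample {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<Event> events = env.fromElements(
            new Event(42, "start", 1.0), new Event(42, "end", 2.0));
        // Key by event id, exactly as the keySelector in the test does.
        DataStream<Event> keyed = events.keyBy(new KeySelector<Event, Integer>() {
            @Override
            public Integer getKey(Event value) {
                return value.getId();
            }
        });
        Pattern<Event, Event> pattern = Pattern.<Event>begin("start")
            .where(new SimpleCondition<Event>() {
                @Override
                public boolean filter(Event value) {
                    return "start".equals(value.getName());
                }
            });
        CEP.pattern(keyed, pattern)
            .select(new PatternSelectFunction<Event, Event>() {
                @Override
                public Event select(Map<String, List<Event>> match) {
                    return match.get("start").get(0);
                }
            })
            .print();
        env.execute("CEP keyBy example");
    }
}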