Use of org.apache.flink.api.java.typeutils.GenericTypeInfo in project beam by apache.
The class FlinkKeyGroupStateInternalsTest, method getKeyedStateBackend.
private KeyedStateBackend<ByteBuffer> getKeyedStateBackend(int numberOfKeyGroups, KeyGroupRange keyGroupRange) {
  MemoryStateBackend backend = new MemoryStateBackend();
  try {
    // GenericTypeInfo falls back to Kryo, which yields a serializer for the
    // ByteBuffer keys the Beam runner uses to key Flink state.
    AbstractKeyedStateBackend<ByteBuffer> keyedStateBackend =
        backend.createKeyedStateBackend(
            new DummyEnvironment("test", 1, 0),
            new JobID(),
            "test_op",
            new GenericTypeInfo<>(ByteBuffer.class).createSerializer(new ExecutionConfig()),
            numberOfKeyGroups,
            keyGroupRange,
            new KvStateRegistry().createTaskRegistry(new JobID(), new JobVertexID()));
    // Key the backend by the coder-encoded form of the key "1".
    keyedStateBackend.setCurrentKey(
        ByteBuffer.wrap(CoderUtils.encodeToByteArray(StringUtf8Coder.of(), "1")));
    return keyedStateBackend;
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
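The construction above boils down to one reusable step: GenericTypeInfo wraps a class that Flink's type extraction cannot analyze, and createSerializer returns a Kryo-backed serializer for it. A minimal sketch of just that step, under the same imports as the test (the helper name is illustrative):

private static TypeSerializer<ByteBuffer> byteBufferKeySerializer() {
  // GenericTypeInfo always resolves through Kryo for classes Flink cannot analyze.
  return new GenericTypeInfo<>(ByteBuffer.class).createSerializer(new ExecutionConfig());
}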
Use of org.apache.flink.api.java.typeutils.GenericTypeInfo in project beam by apache.
The class FlinkStateInternalsTest, method initStateInternals.
@Before
public void initStateInternals() {
  MemoryStateBackend backend = new MemoryStateBackend();
  try {
    AbstractKeyedStateBackend<ByteBuffer> keyedStateBackend =
        backend.createKeyedStateBackend(
            new DummyEnvironment("test", 1, 0),
            new JobID(),
            "test_op",
            new GenericTypeInfo<>(ByteBuffer.class).createSerializer(new ExecutionConfig()),
            1,
            new KeyGroupRange(0, 0),
            new KvStateRegistry().createTaskRegistry(new JobID(), new JobVertexID()));
    underTest = new FlinkStateInternals<>(keyedStateBackend, StringUtf8Coder.of());
    // All state access in the tests happens under the coder-encoded key "Hello".
    keyedStateBackend.setCurrentKey(
        ByteBuffer.wrap(CoderUtils.encodeToByteArray(StringUtf8Coder.of(), "Hello")));
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
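Once initStateInternals has run, tests can exercise underTest through Beam's StateInternals API. A hedged sketch of what such a test can look like (the tag id and the values are illustrative, not taken from FlinkStateInternalsTest):

@Test
public void testValueStateRoundTrip() {
  // Fetch a value state for the current key ("Hello") under the global namespace.
  ValueState<String> value =
      underTest.state(StateNamespaces.global(), StateTags.value("string", StringUtf8Coder.of()));
  value.write("42");
  assertThat(value.read(), equalTo("42"));
}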
Use of org.apache.flink.api.java.typeutils.GenericTypeInfo in project flink by apache.
The class KafkaITCase, method testTimestamps.
/**
 * Kafka 2.0 specific test, ensuring timestamps are properly written to and read from Kafka.
 */
@Test(timeout = 60000)
public void testTimestamps() throws Exception {
  final String topic = "tstopic";
  createTestTopic(topic, 3, 1);

  // ---------- Produce an event time stream into Kafka -------------------
  StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
  env.setParallelism(1);
  env.getConfig().setRestartStrategy(RestartStrategies.noRestart());
  DataStream<Long> streamWithTimestamps = env.addSource(new SourceFunction<Long>() {
    private static final long serialVersionUID = -2255115836471289626L;
    // volatile, because cancel() is called from a different thread than run()
    volatile boolean running = true;

    @Override
    public void run(SourceContext<Long> ctx) throws Exception {
      long i = 0;
      while (running) {
        // Emit each element with an event timestamp of twice its value.
        ctx.collectWithTimestamp(i, i * 2);
        if (i++ == 1110L) {
          running = false;
        }
      }
    }

    @Override
    public void cancel() {
      running = false;
    }
  });

  final TypeInformationSerializationSchema<Long> longSer =
      new TypeInformationSerializationSchema<>(Types.LONG, env.getConfig());
  FlinkKafkaProducer<Long> prod = new FlinkKafkaProducer<>(
      topic,
      new KeyedSerializationSchemaWrapper<>(longSer),
      standardProps,
      Optional.of(new FlinkKafkaPartitioner<Long>() {
        private static final long serialVersionUID = -6730989584364230617L;

        @Override
        public int partition(Long next, byte[] key, byte[] value, String targetTopic, int[] partitions) {
          // Spread records deterministically over the three partitions.
          return (int) (next % 3);
        }
      }));
  prod.setWriteTimestampToKafka(true);
  streamWithTimestamps.addSink(prod).setParallelism(3);
  env.execute("Produce some");

  // ---------- Consume stream from Kafka -------------------
  env = StreamExecutionEnvironment.getExecutionEnvironment();
  env.setParallelism(1);
  env.getConfig().setRestartStrategy(RestartStrategies.noRestart());
  FlinkKafkaConsumer<Long> kafkaSource =
      new FlinkKafkaConsumer<>(topic, new KafkaITCase.LimitedLongDeserializer(), standardProps);
  kafkaSource.assignTimestampsAndWatermarks(new AssignerWithPunctuatedWatermarks<Long>() {
    private static final long serialVersionUID = -4834111173247835189L;

    @Nullable
    @Override
    public Watermark checkAndGetNextWatermark(Long lastElement, long extractedTimestamp) {
      // Punctuate a watermark on every multiple of 11.
      if (lastElement % 11 == 0) {
        return new Watermark(lastElement);
      }
      return null;
    }

    @Override
    public long extractTimestamp(Long element, long previousElementTimestamp) {
      // Keep the timestamp that was read back from Kafka.
      return previousElementTimestamp;
    }
  });
  DataStream<Long> stream = env.addSource(kafkaSource);
  // GenericTypeInfo<Object> serves as a catch-all output type: the validating
  // operator's output is never consumed downstream.
  GenericTypeInfo<Object> objectTypeInfo = new GenericTypeInfo<>(Object.class);
  stream.transform("timestamp validating operator", objectTypeInfo, new TimestampValidatingOperator())
      .setParallelism(1);
  env.execute("Consume again");
  deleteTestTopic(topic);
}
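The TimestampValidatingOperator itself is not part of the excerpt. Since the producer stamps every element with timestamp i * 2, a validating operator only has to compare each record's timestamp against its value. A hypothetical sketch of such an operator (the class name and details are assumptions, not Flink's actual implementation):

// Hypothetical sketch; the real TimestampValidatingOperator is not shown above.
private static class TimestampCheckingOperator extends AbstractStreamOperator<Object>
    implements OneInputStreamOperator<Long, Object> {

  @Override
  public void processElement(StreamRecord<Long> element) {
    // The producer wrote timestamp = 2 * value, so every consumed record must match.
    if (element.getTimestamp() != element.getValue() * 2) {
      throw new RuntimeException("Unexpected timestamp " + element.getTimestamp()
          + " for value " + element.getValue());
    }
  }
}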
Use of org.apache.flink.api.java.typeutils.GenericTypeInfo in project flink by apache.
The class KryoWithCustomSerializersTest, method createSerializer.
@Override
protected <T> TypeSerializer<T> createSerializer(Class<T> type) {
  ExecutionConfig conf = new ExecutionConfig();
  conf.registerTypeWithKryoSerializer(LocalDate.class, LocalDateSerializer.class);
  // GenericTypeInfo resolves every class through Kryo, so the serializer
  // created here honors the registration above.
  TypeInformation<T> typeInfo = new GenericTypeInfo<>(type);
  return typeInfo.createSerializer(conf);
}
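Because GenericTypeInfo resolves every type through Kryo, the serializer returned here respects Kryo registrations on the ExecutionConfig. A hedged usage sketch of that guarantee (the assertion is illustrative):

ExecutionConfig conf = new ExecutionConfig();
conf.registerTypeWithKryoSerializer(LocalDate.class, LocalDateSerializer.class);
TypeSerializer<LocalDate> serializer =
    new GenericTypeInfo<>(LocalDate.class).createSerializer(conf);
// GenericTypeInfo never produces anything but a KryoSerializer.
assertTrue(serializer instanceof KryoSerializer);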
Use of org.apache.flink.api.java.typeutils.GenericTypeInfo in project flink by apache.
The class AbstractQueryableStateTestBase, method testCustomKryoSerializerHandling.
/**
 * This test checks that custom Kryo serializers are loaded with the correct classloader.
 */
@Test
public void testCustomKryoSerializerHandling() throws Exception {
  final Deadline deadline = Deadline.now().plus(TEST_TIMEOUT);
  final long numElements = 1;
  final String stateName = "teriberka";
  final String customSerializerClassName = "CustomKryo";
  final URLClassLoader userClassLoader = createLoaderWithCustomKryoSerializer(customSerializerClassName);

  StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
  env.setStateBackend(stateBackend);
  env.setParallelism(maxParallelism);
  // Very important, because the cluster is shared between tests and we don't
  // explicitly check that all slots are available before submitting.
  env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000L));
  // The custom serializer is not needed; it is used only to check that serialization works.
  env.getConfig().addDefaultKryoSerializer(Byte.class, (Serializer<?> & Serializable) createSerializer(userClassLoader));

  // Here we *force* using Kryo, to check that custom serializers are handled
  // correctly with respect to classloading.
  @SuppressWarnings({"rawtypes", "unchecked"})
  ValueStateDescriptor<Tuple2<Integer, Long>> valueState =
      new ValueStateDescriptor<>("any", new GenericTypeInfo(Tuple2.class));

  env.addSource(new TestAscendingValueSource(numElements))
      .keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
        private static final long serialVersionUID = 7662520075515707428L;

        @Override
        public Integer getKey(Tuple2<Integer, Long> value) {
          return value.f0;
        }
      })
      .asQueryableState(stateName, valueState);

  try (AutoCancellableJob autoCancellableJob = new AutoCancellableJob(deadline, clusterClient, env)) {
    final JobID jobId = autoCancellableJob.getJobId();
    final JobGraph jobGraph = autoCancellableJob.getJobGraph();
    // Make the jar containing the CustomKryo class visible to the cluster.
    jobGraph.setClasspaths(Arrays.asList(userClassLoader.getURLs()));
    clusterClient.submitJob(jobGraph).get();
    try {
      client.setUserClassLoader(userClassLoader);
      executeValueQuery(deadline, client, jobId, stateName, valueState, numElements);
    } finally {
      client.setUserClassLoader(null);
    }
  }
}
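The raw new GenericTypeInfo(Tuple2.class) is the crux of the test: Flink's type extraction would normally describe Tuple2 with a TupleTypeInfo and serialize it without Kryo, so the descriptor must bypass extraction to force Kryo. A sketch of the contrast (assuming Flink's Types factory methods):

// What type extraction would normally produce for Tuple2<Integer, Long>:
TypeInformation<Tuple2<Integer, Long>> tupleInfo = Types.TUPLE(Types.INT, Types.LONG);
// What the test uses instead, deliberately forcing Kryo (hence the raw type):
@SuppressWarnings({"rawtypes", "unchecked"})
TypeInformation<Tuple2<Integer, Long>> kryoInfo = new GenericTypeInfo(Tuple2.class);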