Use of com.amazonaws.services.schemaregistry.serializers.json.JsonDataWithSchema in project flink by apache.
Class GlueSchemaRegistryJsonKinesisITCase, method createSource.
private FlinkKinesisConsumer<JsonDataWithSchema> createSource() {
    Properties properties = KINESALITE.getContainerProperties();
    // Start reading from the beginning of the input stream.
    properties.setProperty(STREAM_INITIAL_POSITION, ConsumerConfigConstants.InitialPosition.TRIM_HORIZON.name());
    return new FlinkKinesisConsumer<>(
            INPUT_STREAM,
            new GlueSchemaRegistryJsonDeserializationSchema<>(JsonDataWithSchema.class, INPUT_STREAM, getConfigs()),
            properties);
}
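createSource() refers to a getConfigs() helper that is not reproduced on this page. Below is a minimal sketch of what such a helper could look like, assuming the configuration keys from com.amazonaws.services.schemaregistry.utils.AWSSchemaRegistryConstants; the region value and the auto-registration setting are illustrative assumptions, not the project's actual values.

// Sketch of a getConfigs() helper (assumed; not shown on this page).
private Map<String, Object> getConfigs() {
    Map<String, Object> configs = new HashMap<>();
    // Region hosting the Glue Schema Registry (illustrative value).
    configs.put(AWSSchemaRegistryConstants.AWS_REGION, "us-east-1");
    // Auto-register schemas so the test does not require pre-created schema definitions.
    configs.put(AWSSchemaRegistryConstants.SCHEMA_AUTO_REGISTRATION_SETTING, true);
    return configs;
}

The same configuration map is passed to both the deserialization schema above and the serialization schema used by the sink, so both sides of the test resolve schemas against the same registry settings.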
Use of com.amazonaws.services.schemaregistry.serializers.json.JsonDataWithSchema in project flink by apache.
Class GlueSchemaRegistryJsonKinesisITCase, method testGSRJsonGenericFormatWithFlink.
@Test
public void testGSRJsonGenericFormatWithFlink() throws Exception {
    // Publish the generated records, together with their JSON Schemas, to the input stream.
    List<JsonDataWithSchema> messages = getGenericRecords();
    for (JsonDataWithSchema msg : messages) {
        kinesisClient.sendMessage(msg.getSchema(), INPUT_STREAM, msg);
    }
    log.info("generated records");

    // Run a simple Kinesis source -> Kinesis sink pipeline asynchronously.
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    DataStream<JsonDataWithSchema> input = env.addSource(createSource());
    input.addSink(createSink());
    env.executeAsync();

    // Poll the output stream until all records arrive or the deadline expires.
    Deadline deadline = Deadline.fromNow(Duration.ofSeconds(60));
    List<Object> results = kinesisClient.readAllMessages(OUTPUT_STREAM);
    while (deadline.hasTimeLeft() && results.size() < messages.size()) {
        log.info("waiting for results..");
        Thread.sleep(1000);
        results = kinesisClient.readAllMessages(OUTPUT_STREAM);
    }
    log.info("results: {}", results);

    // Every record written to the input stream should come back out of the output stream.
    assertThat(results).containsExactlyInAnyOrderElementsOf(messages);
}
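The test depends on a getGenericRecords() helper that is not reproduced on this page. A minimal sketch follows, assuming JsonDataWithSchema is built via its JsonDataWithSchema.builder(schema, payload) factory from the Glue Schema Registry serializer library; the schema and payload literals here are hypothetical fixtures, not the project's actual test data.

// Sketch of a getGenericRecords() helper (assumed; not shown on this page).
private List<JsonDataWithSchema> getGenericRecords() {
    // Illustrative JSON Schema describing the payloads below.
    String schema =
            "{\"$schema\":\"http://json-schema.org/draft-04/schema#\","
                    + "\"type\":\"object\","
                    + "\"properties\":{\"name\":{\"type\":\"string\"},\"age\":{\"type\":\"number\"}}}";
    List<JsonDataWithSchema> records = new ArrayList<>();
    // Each record carries both the schema and a JSON payload conforming to it.
    records.add(JsonDataWithSchema.builder(schema, "{\"name\":\"Alice\",\"age\":30}").build());
    records.add(JsonDataWithSchema.builder(schema, "{\"name\":\"Bob\",\"age\":25}").build());
    return records;
}

Because each JsonDataWithSchema instance bundles the schema with the payload, the serialization schema on the sink side can register or look up the schema in the Glue Schema Registry per record, which is what the round-trip assertion at the end of the test exercises.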