Use of org.apache.flink.streaming.api.functions.timestamps.AscendingTimestampExtractor in project flink by apache.
From the class KafkaConsumerTestBase, method runCollectingSchemaTest:
/**
 * Test that ensures that a DeserializationSchema can emit multiple records via a Collector.
 *
 * @throws Exception
 */
public void runCollectingSchemaTest() throws Exception {
    final int elementCount = 20;
    final String topic = writeSequence("testCollectingSchema", elementCount, 1, 1);

    // Read the sequence back using the custom schema.
    final StreamExecutionEnvironment env1 = StreamExecutionEnvironment.getExecutionEnvironment();
    env1.setParallelism(1);
    env1.getConfig().setRestartStrategy(RestartStrategies.noRestart());

    Properties props = new Properties();
    props.putAll(standardProps);
    props.putAll(secureProps);

    DataStream<Tuple2<Integer, String>> fromKafka = env1.addSource(
            kafkaServer.getConsumer(topic, new CollectingDeserializationSchema(elementCount), props)
                    .assignTimestampsAndWatermarks(new AscendingTimestampExtractor<Tuple2<Integer, String>>() {

                        @Override
                        public long extractAscendingTimestamp(Tuple2<Integer, String> element) {
                            // The event timestamp is encoded at the head of the value string.
                            String string = element.f1;
                            return Long.parseLong(string.substring(0, string.length() - 1));
                        }
                    }));

    fromKafka.keyBy(t -> t.f0)
            .process(new KeyedProcessFunction<Integer, Tuple2<Integer, String>, Void>() {

                private boolean registered = false;

                @Override
                public void processElement(Tuple2<Integer, String> value, Context ctx, Collector<Void> out) throws Exception {
                    if (!registered) {
                        // Fires once the event-time watermark reaches the end of the sequence.
                        ctx.timerService().registerEventTimeTimer(elementCount - 2);
                        registered = true;
                    }
                }

                @Override
                public void onTimer(long timestamp, OnTimerContext ctx, Collector<Void> out) throws Exception {
                    // Reaching the timer means all records were consumed; signal success.
                    throw new SuccessException();
                }
            });

    tryExecute(env1, "Consume " + elementCount + " elements from Kafka");

    deleteTestTopic(topic);
}
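Note that AscendingTimestampExtractor has been deprecated since Flink 1.11 in favor of WatermarkStrategy. A minimal, self-contained sketch of the equivalent assignment follows; the class name and the inlined sample values are illustrative stand-ins, not part of the test above:

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class MonotonousTimestampsSketch {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Stand-in input; the real test reads these values from Kafka.
        DataStream<Tuple2<Integer, String>> source =
                env.fromElements(Tuple2.of(0, "0x"), Tuple2.of(0, "1x"), Tuple2.of(0, "2x"));

        // Same extraction logic as the deprecated AscendingTimestampExtractor above:
        // timestamps are known to increase monotonically, so no out-of-orderness
        // bound is needed.
        DataStream<Tuple2<Integer, String>> withTimestamps = source.assignTimestampsAndWatermarks(
                WatermarkStrategy.<Tuple2<Integer, String>>forMonotonousTimestamps()
                        .withTimestampAssigner((element, previousTimestamp) -> {
                            String s = element.f1;
                            return Long.parseLong(s.substring(0, s.length() - 1));
                        }));

        withTimestamps.print();
        env.execute("monotonous-timestamps-sketch");
    }
}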
Use of org.apache.flink.streaming.api.functions.timestamps.AscendingTimestampExtractor in project flink by apache.
From the class JdbcDynamicTableSinkITCase, method testUpsert:
@Test
public void testUpsert() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().enableObjectReuse();
    StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);

    // Field f0 doubles as the (monotonically increasing) event timestamp.
    Table t = tEnv.fromDataStream(
            get4TupleDataStream(env).assignTimestampsAndWatermarks(
                    new AscendingTimestampExtractor<Tuple4<Integer, Long, String, Timestamp>>() {

                        @Override
                        public long extractAscendingTimestamp(Tuple4<Integer, Long, String, Timestamp> element) {
                            return element.f0;
                        }
                    }),
            $("id"), $("num"), $("text"), $("ts"));
    tEnv.createTemporaryView("T", t);

    tEnv.executeSql(
            "CREATE TABLE upsertSink ("
                    + " cnt BIGINT,"
                    + " lencnt BIGINT,"
                    + " cTag INT,"
                    + " ts TIMESTAMP(3),"
                    + " PRIMARY KEY (cnt, cTag) NOT ENFORCED"
                    + ") WITH ("
                    + " 'connector'='jdbc',"
                    + " 'url'='" + DB_URL + "',"
                    + " 'table-name'='" + OUTPUT_TABLE1 + "',"
                    + " 'sink.buffer-flush.max-rows' = '2',"
                    + " 'sink.buffer-flush.interval' = '0',"
                    + " 'sink.max-retries' = '0'"
                    + ")");

    tEnv.executeSql(
            "INSERT INTO upsertSink \n"
                    + "SELECT cnt, COUNT(len) AS lencnt, cTag, MAX(ts) AS ts\n"
                    + "FROM (\n"
                    + " SELECT len, COUNT(id) as cnt, cTag, MAX(ts) AS ts\n"
                    + " FROM (SELECT id, CHAR_LENGTH(text) AS len, (CASE WHEN id > 0 THEN 1 ELSE 0 END) cTag, ts FROM T)\n"
                    + " GROUP BY len, cTag\n"
                    + ")\n"
                    + "GROUP BY cnt, cTag")
            .await();

    check(
            new Row[] {
                Row.of(1, 5, 1, Timestamp.valueOf("1970-01-01 00:00:00.006")),
                Row.of(7, 1, 1, Timestamp.valueOf("1970-01-01 00:00:00.021")),
                Row.of(9, 1, 1, Timestamp.valueOf("1970-01-01 00:00:00.015"))
            },
            DB_URL, OUTPUT_TABLE1, new String[] { "cnt", "lencnt", "cTag", "ts" });
}
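Since Flink 1.13, the stream-to-table conversion can also declare the event-time attribute through a Schema rather than the positional $("ts") mapping used above. A minimal sketch, assuming a stream (here called streamWithWatermarks, a hypothetical stand-in) that already carries record timestamps and watermarks, e.g. assigned via WatermarkStrategy:

import org.apache.flink.table.api.Schema;
import org.apache.flink.table.api.Table;

// Hypothetical variant of the conversion above (Flink 1.13+): expose the
// stream's record timestamps and watermarks as a rowtime column instead of
// mapping the tuple fields positionally.
Table t = tEnv.fromDataStream(
        streamWithWatermarks, // records already carry timestamps and watermarks
        Schema.newBuilder()
                .columnByMetadata("rowtime", "TIMESTAMP_LTZ(3)")
                .watermark("rowtime", "SOURCE_WATERMARK()")
                .build());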
Use of org.apache.flink.streaming.api.functions.timestamps.AscendingTimestampExtractor in project flink by apache.
From the class OracleTableSinkITCase, method testUpsert:
@Test
public void testUpsert() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().enableObjectReuse();
    StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);

    // Field f0 doubles as the (monotonically increasing) event timestamp.
    Table t = tEnv.fromDataStream(
            get4TupleDataStream(env).assignTimestampsAndWatermarks(
                    new AscendingTimestampExtractor<Tuple4<Integer, Long, String, Timestamp>>() {

                        @Override
                        public long extractAscendingTimestamp(Tuple4<Integer, Long, String, Timestamp> element) {
                            return element.f0;
                        }
                    }),
            $("id"), $("num"), $("text"), $("ts"));
    tEnv.createTemporaryView("T", t);

    tEnv.executeSql(
            "CREATE TABLE upsertSink ("
                    + " cnt DECIMAL(18,2),"
                    + " lencnt DECIMAL(18,2),"
                    + " cTag INT,"
                    + " ts TIMESTAMP(3),"
                    + " PRIMARY KEY (cnt, cTag) NOT ENFORCED"
                    + ") WITH ("
                    + " 'connector'='jdbc',"
                    + " 'url'='" + containerUrl + "',"
                    + " 'table-name'='" + OUTPUT_TABLE1 + "',"
                    + " 'sink.buffer-flush.max-rows' = '2',"
                    + " 'sink.buffer-flush.interval' = '0',"
                    + " 'sink.max-retries' = '0'"
                    + ")");

    tEnv.executeSql(
            "INSERT INTO upsertSink \n"
                    + "SELECT cnt, COUNT(len) AS lencnt, cTag, MAX(ts) AS ts\n"
                    + "FROM (\n"
                    + " SELECT len, COUNT(id) as cnt, cTag, MAX(ts) AS ts\n"
                    + " FROM (SELECT id, CHAR_LENGTH(text) AS len, (CASE WHEN id > 0 THEN 1 ELSE 0 END) cTag, ts FROM T)\n"
                    + " GROUP BY len, cTag\n"
                    + ")\n"
                    + "GROUP BY cnt, cTag")
            .await();

    check(
            new Row[] {
                Row.of(1, 5, 1, Timestamp.valueOf("1970-01-01 00:00:00.006")),
                Row.of(7, 1, 1, Timestamp.valueOf("1970-01-01 00:00:00.021")),
                Row.of(9, 1, 1, Timestamp.valueOf("1970-01-01 00:00:00.015"))
            },
            containerUrl, OUTPUT_TABLE1, new String[] { "cnt", "lencnt", "cTag", "ts" });
}
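Both upsert tests declare PRIMARY KEY (cnt, cTag) NOT ENFORCED because the nested aggregation emits an updating stream, and the JDBC connector writes in upsert mode when a primary key is defined. A hedged sketch for inspecting that changelog locally with toChangelogStream (available from Flink 1.13; tEnv, env, and the view T are as registered in the tests above):

import org.apache.flink.table.api.Table;

// Print the +I/-U/+U changelog rows the upsert sink would have to absorb.
Table result = tEnv.sqlQuery(
        "SELECT cnt, COUNT(len) AS lencnt, cTag, MAX(ts) AS ts "
                + "FROM ("
                + "  SELECT len, COUNT(id) AS cnt, cTag, MAX(ts) AS ts"
                + "  FROM (SELECT id, CHAR_LENGTH(text) AS len,"
                + "        (CASE WHEN id > 0 THEN 1 ELSE 0 END) cTag, ts FROM T)"
                + "  GROUP BY len, cTag"
                + ") GROUP BY cnt, cTag");
tEnv.toChangelogStream(result).print();
env.execute();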