Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
Class KinesisDataStreamsTableApiIT, method readAllOrdersFromKinesis.
private List<Order> readAllOrdersFromKinesis() throws Exception {
    Deadline deadline = Deadline.fromNow(Duration.ofSeconds(10));
    List<Order> orders;
    do {
        // Re-read the stream until all five expected orders have arrived or the deadline expires.
        orders = readMessagesFromStream(recordBytes -> fromJson(new String(recordBytes), Order.class));
    } while (deadline.hasTimeLeft() && orders.size() < 5);
    return orders;
}
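The same poll-until-deadline idiom recurs throughout these examples. The sketch below is a minimal, generic illustration of the Deadline API used here (fromNow, hasTimeLeft, timeLeft); the helper class DeadlineUtil, its method name, and the 100 ms sleep interval are assumptions for illustration, not code from the Flink project.

import java.time.Duration;
import java.util.function.Supplier;

import org.apache.flink.api.common.time.Deadline;

// Hypothetical helper: re-evaluates a condition until it holds or the deadline passes.
public final class DeadlineUtil {

    private DeadlineUtil() {}

    public static boolean waitUntil(Supplier<Boolean> condition, Duration timeout)
            throws InterruptedException {
        Deadline deadline = Deadline.fromNow(timeout);
        while (deadline.hasTimeLeft()) {
            if (condition.get()) {
                return true;
            }
            // Sleep briefly between attempts, never past the remaining time.
            Thread.sleep(Math.min(100L, Math.max(1L, deadline.timeLeft().toMillis())));
        }
        // One final check after the deadline has expired.
        return condition.get();
    }
}

With such a helper, the loop above could be expressed as waitUntil(() -> readOrders().size() >= 5, Duration.ofSeconds(10)), where readOrders stands in for the actual read call.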
Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
Class KinesisDataStreamsTableApiIT, method prepareStream.
private void prepareStream(String streamName) throws Exception {
    final RateLimiter rateLimiter =
            RateLimiterBuilder.newBuilder().withRate(1, SECONDS).withConstantThroughput().build();

    kinesisClient
            .createStream(CreateStreamRequest.builder().streamName(streamName).shardCount(1).build())
            .get();

    // Poll for the new stream at most once per second (enforced by the rate limiter),
    // failing if it has not appeared within one minute.
    Deadline deadline = Deadline.fromNow(Duration.ofMinutes(1));
    while (!rateLimiter.getWhenReady(() -> streamExists(streamName))) {
        if (deadline.isOverdue()) {
            throw new RuntimeException("Failed to create stream within time");
        }
    }
}
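The streamExists check used above is not shown in this snippet. The sketch below illustrates what such a helper could look like with the AWS SDK v2 asynchronous Kinesis client (DescribeStreamRequest and StreamStatus come from software.amazon.awssdk.services.kinesis.model); the method body is an assumption reconstructed from the call site, not code copied from the test.

// Hypothetical helper assumed by prepareStream: reports whether the stream has become ACTIVE.
private boolean streamExists(final String streamName) {
    try {
        return kinesisClient
                        .describeStream(
                                DescribeStreamRequest.builder().streamName(streamName).build())
                        .get()
                        .streamDescription()
                        .streamStatus()
                == StreamStatus.ACTIVE;
    } catch (Exception e) {
        // A stream that does not exist yet (or a transient failure) counts as "not ready".
        return false;
    }
}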
Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
Class KafkaContainerClient, method readMessages.
public <T> List<T> readMessages(
        int expectedNumMessages, String groupId, String topic, Deserializer<T> valueDeserializer)
        throws Exception {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, container.getBootstrapServers());
    props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    final List<T> messages = Collections.synchronizedList(new ArrayList<>(expectedNumMessages));
    try (Consumer<Bytes, T> consumer =
            new KafkaConsumer<>(props, new BytesDeserializer(), valueDeserializer)) {
        waitUntilTopicAvailableThenAssign(topic, consumer, Duration.ofSeconds(60));

        // Keep polling until getting expected number of messages
        final Deadline deadline = Deadline.fromNow(Duration.ofSeconds(120));
        while (deadline.hasTimeLeft() && messages.size() < expectedNumMessages) {
            LOG.info("Waiting for messages. Received {}/{}.", messages.size(), expectedNumMessages);
            ConsumerRecords<Bytes, T> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<Bytes, T> record : records) {
                messages.add(record.value());
            }
        }
        if (messages.size() != expectedNumMessages) {
            throw new IOException("Could not read expected number of messages.");
        }
        return messages;
    }
}
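A call site for this helper might look as follows. The sketch assumes the KafkaContainerClient is constructed around a Testcontainers KafkaContainer and that a topic named test-topic already holds five string-valued records; the constructor argument, topic, and group id shown here are illustrative assumptions.

// Hypothetical usage: read five String records from "test-topic" within the built-in deadlines.
KafkaContainerClient kafkaClient = new KafkaContainerClient(kafkaContainer);
List<String> values =
        kafkaClient.readMessages(5, "test-group", "test-topic", new StringDeserializer());

The readMessages call either returns exactly the requested number of records or throws, so the caller does not need its own timeout handling.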
Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
Class SQLClientHBaseITCase, method checkHBaseSinkResult.
private void checkHBaseSinkResult() throws Exception {
    boolean success = false;
    final Deadline deadline = Deadline.fromNow(Duration.ofSeconds(120));
    // Scan the sink table every 500 ms until it contains the six expected cells or the deadline expires.
    while (deadline.hasTimeLeft()) {
        final List<String> lines = hbase.scanTable("sink");
        if (lines.size() == 6) {
            success = true;
            assertThat(
                    lines.toArray(new String[0]),
                    arrayContainingInAnyOrder(
                            CoreMatchers.allOf(containsString("row1"), containsString("family1"), containsString("f1c1"), containsString("value1")),
                            CoreMatchers.allOf(containsString("row1"), containsString("family2"), containsString("f2c1"), containsString("v2")),
                            CoreMatchers.allOf(containsString("row1"), containsString("family2"), containsString("f2c2"), containsString("v3")),
                            CoreMatchers.allOf(containsString("row2"), containsString("family1"), containsString("f1c1"), containsString("value4")),
                            CoreMatchers.allOf(containsString("row2"), containsString("family2"), containsString("f2c1"), containsString("v5")),
                            CoreMatchers.allOf(containsString("row2"), containsString("family2"), containsString("f2c2"), containsString("v6"))));
            break;
        } else {
            LOG.info("The HBase sink table does not contain enough records, current {} records, left time: {}s", lines.size(), deadline.timeLeft().getSeconds());
        }
        Thread.sleep(500);
    }
    Assert.assertTrue("Did not get expected results before timeout.", success);
}
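The assertion above repeats the same allOf pattern for every expected cell. A small matcher factory, sketched below, could make that intent more readable; the helper cell is a hypothetical addition, not part of SQLClientHBaseITCase.

// Hypothetical matcher factory for one expected HBase cell (row, family, qualifier, value).
private static Matcher<String> cell(String row, String family, String qualifier, String value) {
    return CoreMatchers.allOf(
            containsString(row),
            containsString(family),
            containsString(qualifier),
            containsString(value));
}

With it, the assertion reduces to arrayContainingInAnyOrder(cell("row1", "family1", "f1c1", "value1"), ...).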
Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
Class AbstractQueryableStateTestBase, method testValueState.
/**
 * Tests a simple value state queryable state instance. Each source emits (subtaskIndex,
 * 0)..(subtaskIndex, numElements) tuples, which are then queried. The test succeeds once each
 * subtask index has been queried and returned the value numElements (the latest element updates
 * the state).
 */
@Test
public void testValueState() throws Exception {
    final Deadline deadline = Deadline.now().plus(TEST_TIMEOUT);
    final long numElements = 1024L;

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStateBackend(stateBackend);
    env.setParallelism(maxParallelism);
    // Very important, because cluster is shared between tests and we
    // don't explicitly check that all slots are available before
    // submitting.
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000L));

    DataStream<Tuple2<Integer, Long>> source = env.addSource(new TestAscendingValueSource(numElements));

    // Value state
    ValueStateDescriptor<Tuple2<Integer, Long>> valueState =
            new ValueStateDescriptor<>("any", source.getType());

    source.keyBy(
                    new KeySelector<Tuple2<Integer, Long>, Integer>() {
                        private static final long serialVersionUID = 7662520075515707428L;

                        @Override
                        public Integer getKey(Tuple2<Integer, Long> value) {
                            return value.f0;
                        }
                    })
            .asQueryableState("hakuna", valueState);

    try (AutoCancellableJob autoCancellableJob = new AutoCancellableJob(deadline, clusterClient, env)) {
        final JobID jobId = autoCancellableJob.getJobId();
        final JobGraph jobGraph = autoCancellableJob.getJobGraph();

        clusterClient.submitJob(jobGraph).get();

        executeValueQuery(deadline, client, jobId, "hakuna", valueState, numElements);
    }
}
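The executeValueQuery helper invoked at the end is defined elsewhere in AbstractQueryableStateTestBase and is not shown here. The sketch below illustrates what such a polling query can look like against QueryableStateClient.getKvState; the method name queryUntilComplete, its parameter list, and the 50 ms retry pause are assumptions for illustration, not the actual helper.

// Hypothetical sketch: for every key (subtask index), poll queryable state until the
// counter stored under that key reaches the expected value or the deadline expires.
private static void queryUntilComplete(
        Deadline deadline,
        QueryableStateClient client,
        JobID jobId,
        String queryName,
        ValueStateDescriptor<Tuple2<Integer, Long>> stateDescriptor,
        long expected,
        int maxParallelism) throws Exception {
    for (int key = 0; key < maxParallelism; key++) {
        boolean success = false;
        while (deadline.hasTimeLeft() && !success) {
            CompletableFuture<ValueState<Tuple2<Integer, Long>>> future =
                    client.getKvState(jobId, queryName, key, BasicTypeInfo.INT_TYPE_INFO, stateDescriptor);
            try {
                Tuple2<Integer, Long> value =
                        future.get(Math.max(1L, deadline.timeLeft().toMillis()), TimeUnit.MILLISECONDS)
                                .value();
                success = value != null && value.f1 == expected;
            } catch (Exception e) {
                // The state may not be registered yet; retry until the deadline expires.
            }
            if (!success) {
                Thread.sleep(50L);
            }
        }
        Assert.assertTrue("Did not observe the expected value for key " + key, success);
    }
}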