Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
The class SQLClientKafkaITCase, method checkCsvResultFile.
private void checkCsvResultFile() throws Exception {
    boolean success = false;
    final Deadline deadline = Deadline.fromNow(Duration.ofSeconds(120));
    while (deadline.hasTimeLeft()) {
        if (Files.exists(result)) {
            List<String> lines = readCsvResultFiles(result);
            if (lines.size() == 4) {
                success = true;
                assertThat(
                        lines.toArray(new String[0]),
                        arrayContainingInAnyOrder(
                                "2018-03-12 08:00:00.000,Alice,This was a warning.,2,Success constant folding.",
                                "2018-03-12 09:00:00.000,Bob,This was another warning.,1,Success constant folding.",
                                "2018-03-12 09:00:00.000,Steve,This was another info.,2,Success constant folding.",
                                "2018-03-12 09:00:00.000,Alice,This was a info.,1,Success constant folding."));
                break;
            } else {
                LOG.info(
                        "The target CSV {} does not contain enough records, current {} records, left time: {}s",
                        result,
                        lines.size(),
                        deadline.timeLeft().getSeconds());
            }
        } else {
            LOG.info("The target CSV {} does not exist now", result);
        }
        Thread.sleep(500);
    }
    Assert.assertTrue("Did not get expected results before timeout.", success);
}
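The pattern above (poll, sleep, re-check hasTimeLeft()) recurs throughout these examples. A minimal sketch of it as a reusable helper; the class name, waitUntil, and POLL_INTERVAL_MS are illustrative choices, not Flink API:

import java.time.Duration;
import java.util.function.BooleanSupplier;

import org.apache.flink.api.common.time.Deadline;

public final class DeadlinePolling {

    private static final long POLL_INTERVAL_MS = 500; // illustrative cadence

    /** Polls {@code condition} until it holds or {@code timeout} elapses. */
    static boolean waitUntil(BooleanSupplier condition, Duration timeout)
            throws InterruptedException {
        Deadline deadline = Deadline.fromNow(timeout);
        while (deadline.hasTimeLeft()) {
            if (condition.getAsBoolean()) {
                return true;
            }
            Thread.sleep(POLL_INTERVAL_MS);
        }
        // one final check so a slow poll interval cannot mask a late success
        return condition.getAsBoolean();
    }

    private DeadlinePolling() {}
}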
Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
The class KinesisFirehoseTableITTest, method readFromS3.
private List<Order> readFromS3() throws Exception {
    Deadline deadline = Deadline.fromNow(Duration.ofMinutes(1));
    List<S3Object> ordersObjects;
    List<Order> orders;
    do {
        Thread.sleep(1000);
        ordersObjects = listBucketObjects(s3AsyncClient, BUCKET_NAME);
        orders = readObjectsFromS3Bucket(
                s3AsyncClient,
                ordersObjects,
                BUCKET_NAME,
                responseBytes -> fromJson(new String(responseBytes.asByteArrayUnsafe()), Order.class));
    } while (deadline.hasTimeLeft() && orders.size() < NUM_ELEMENTS);
    return orders;
}
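Here the loop returns whatever was fetched last, so the caller must still check orders.size(): the do/while also exits when the deadline runs out, not only on success. A sketch of the same shape as a generic helper, assuming nothing beyond Deadline itself; fetchUntil and its parameters are illustrative:

import java.time.Duration;
import java.util.function.Predicate;
import java.util.function.Supplier;

import org.apache.flink.api.common.time.Deadline;

final class DeadlineFetch {

    /**
     * Repeatedly invokes {@code fetch} until {@code done} accepts the result or
     * {@code timeout} elapses; returns the last fetched value either way, so the
     * caller must validate it.
     */
    static <T> T fetchUntil(Supplier<T> fetch, Predicate<T> done, Duration timeout)
            throws InterruptedException {
        Deadline deadline = Deadline.fromNow(timeout);
        T result;
        do {
            Thread.sleep(1000); // matches the polling cadence of the test above
            result = fetch.get();
        } while (deadline.hasTimeLeft() && !done.test(result));
        return result;
    }

    private DeadlineFetch() {}
}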
Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
The class WebFrontendITCase, method testCancel.
@Test
public void testCancel() throws Exception {
    // this only works if there is no active job at this point
    assertTrue(getRunningJobs(CLUSTER.getClusterClient()).isEmpty());
    // create a task
    final JobVertex sender = new JobVertex("Sender");
    sender.setParallelism(2);
    sender.setInvokableClass(BlockingInvokable.class);
    final JobGraph jobGraph =
            JobGraphBuilder.newStreamingJobGraphBuilder()
                    .setJobName("Stoppable streaming test job")
                    .addJobVertex(sender)
                    .build();
    final JobID jid = jobGraph.getJobID();
    ClusterClient<?> clusterClient = CLUSTER.getClusterClient();
    clusterClient.submitJob(jobGraph).get();
    // wait for the job to show up
    while (getRunningJobs(CLUSTER.getClusterClient()).isEmpty()) {
        Thread.sleep(10);
    }
    // wait for tasks to be properly running
    BlockingInvokable.latch.await();
    final Duration testTimeout = Duration.ofMinutes(2);
    final Deadline deadline = Deadline.fromNow(testTimeout);
    try (HttpTestClient client = new HttpTestClient("localhost", getRestPort())) {
        // cancel the job
        client.sendPatchRequest("/jobs/" + jid + "/", deadline.timeLeft());
        HttpTestClient.SimpleHttpResponse response = client.getNextResponse(deadline.timeLeft());
        assertEquals(HttpResponseStatus.ACCEPTED, response.getStatus());
        assertEquals("application/json; charset=UTF-8", response.getType());
        assertEquals("{}", response.getContent());
    }
    // wait for cancellation to finish
    while (!getRunningJobs(CLUSTER.getClusterClient()).isEmpty()) {
        Thread.sleep(20);
    }
    // ensure we can access job details when it's finished (FLINK-4011)
    try (HttpTestClient client = new HttpTestClient("localhost", getRestPort())) {
        Duration timeout = Duration.ofSeconds(30);
        client.sendGetRequest("/jobs/" + jid + "/config", timeout);
        HttpTestClient.SimpleHttpResponse response = client.getNextResponse(timeout);
        assertEquals(HttpResponseStatus.OK, response.getStatus());
        assertEquals("application/json; charset=UTF-8", response.getType());
        assertEquals(
                "{\"jid\":\"" + jid + "\",\"name\":\"Stoppable streaming test job\","
                        + "\"execution-config\":{\"execution-mode\":\"PIPELINED\","
                        + "\"restart-strategy\":\"Cluster level default restart strategy\","
                        + "\"job-parallelism\":1,\"object-reuse-mode\":false,\"user-config\":{}}}",
                response.getContent());
    }
    BlockingInvokable.reset();
}
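Note how a single Deadline budgets several blocking calls: the PATCH request and the response read each receive deadline.timeLeft(), so time spent in one shrinks the window left for the other. A minimal sketch of that idiom; awaitBoth and its CompletableFuture operands are illustrative, not part of the test:

import java.time.Duration;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.time.Deadline;

final class SharedBudget {

    /** Waits for two futures sequentially under one overall time budget. */
    static void awaitBoth(CompletableFuture<?> first, CompletableFuture<?> second, Duration budget)
            throws Exception {
        Deadline deadline = Deadline.fromNow(budget);
        // whatever 'first' consumes is no longer available to 'second';
        // a non-positive remainder makes get() time out immediately
        first.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        second.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
    }

    private SharedBudget() {}
}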
Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
The class FlinkKinesisProducerTest, method testBackpressure.
/**
 * Test ensuring that the producer blocks if the queue limit is exceeded, until the queue length
 * drops below the limit; we set a timeout because the test will not finish if the logic is
 * broken.
 */
@Test(timeout = 10000)
public void testBackpressure() throws Throwable {
    final Deadline deadline = Deadline.fromNow(Duration.ofSeconds(10));
    final DummyFlinkKinesisProducer<String> producer =
            new DummyFlinkKinesisProducer<>(new SimpleStringSchema());
    producer.setQueueLimit(1);
    OneInputStreamOperatorTestHarness<String, Object> testHarness =
            new OneInputStreamOperatorTestHarness<>(new StreamSink<>(producer));
    testHarness.open();
    UserRecordResult result = mock(UserRecordResult.class);
    when(result.isSuccessful()).thenReturn(true);
    CheckedThread msg1 = new CheckedThread() {
        @Override
        public void go() throws Exception {
            testHarness.processElement(new StreamRecord<>("msg-1"));
        }
    };
    msg1.start();
    msg1.trySync(deadline.timeLeftIfAny().toMillis());
    assertFalse("Flush triggered before reaching queue limit", msg1.isAlive());
    // consume msg-1 so that the queue is empty again
    producer.getPendingRecordFutures().get(0).set(result);
    CheckedThread msg2 = new CheckedThread() {
        @Override
        public void go() throws Exception {
            testHarness.processElement(new StreamRecord<>("msg-2"));
        }
    };
    msg2.start();
    msg2.trySync(deadline.timeLeftIfAny().toMillis());
    assertFalse("Flush triggered before reaching queue limit", msg2.isAlive());
    CheckedThread moreElementsThread = new CheckedThread() {
        @Override
        public void go() throws Exception {
            // this should block until msg-2 is consumed
            testHarness.processElement(new StreamRecord<>("msg-3"));
            // this should block until msg-3 is consumed
            testHarness.processElement(new StreamRecord<>("msg-4"));
        }
    };
    moreElementsThread.start();
    assertTrue("Producer should still block, but doesn't", moreElementsThread.isAlive());
    // consume msg-2 from the queue, leaving msg-3 in the queue and msg-4 blocked
    while (producer.getPendingRecordFutures().size() < 2) {
        Thread.sleep(50);
    }
    producer.getPendingRecordFutures().get(1).set(result);
    assertTrue("Producer should still block, but doesn't", moreElementsThread.isAlive());
    // consume msg-3, so the blocked msg-4 can be inserted into the queue and the block is released
    while (producer.getPendingRecordFutures().size() < 3) {
        Thread.sleep(50);
    }
    producer.getPendingRecordFutures().get(2).set(result);
    moreElementsThread.trySync(deadline.timeLeftIfAny().toMillis());
    assertFalse("Producer still blocks although the queue is flushed", moreElementsThread.isAlive());
    producer.getPendingRecordFutures().get(3).set(result);
    testHarness.close();
}
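The trySync calls use timeLeftIfAny() rather than timeLeft(). In Flink's Deadline, timeLeft() returns the raw remainder, which can go negative once the deadline passes, while timeLeftIfAny() throws a TimeoutException when no time remains, turning a silently expired budget into a hard failure. A small demonstration sketch, assuming those semantics:

import java.time.Duration;
import java.util.concurrent.TimeoutException;

import org.apache.flink.api.common.time.Deadline;

final class TimeLeftDemo {
    public static void main(String[] args) throws Exception {
        Deadline deadline = Deadline.fromNow(Duration.ofMillis(50));
        Thread.sleep(100); // let the deadline expire

        // timeLeft() just reports the (now negative) remainder
        System.out.println("timeLeft: " + deadline.timeLeft().toMillis() + "ms");

        try {
            deadline.timeLeftIfAny(); // throws once the deadline has passed
        } catch (TimeoutException e) {
            System.out.println("timeLeftIfAny threw TimeoutException");
        }
    }
}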
Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
The class FlinkKinesisConsumerTest, method awaitRecordCount.
private void awaitRecordCount(ConcurrentLinkedQueue<? extends Object> queue, int count)
        throws Exception {
    Deadline deadline = Deadline.fromNow(Duration.ofSeconds(10));
    while (deadline.hasTimeLeft() && queue.size() < count) {
        Thread.sleep(10);
    }
    int received = queue.size();
    if (received < count) {
        Assert.fail(String.format("Timeout waiting for records, received %d/%d", received, count));
    }
}
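This last variant is the most defensive: it re-reads the queue size after the loop and fails with a message reporting the shortfall, since the loop can also exit on timeout. Flink's test utilities offer a comparable helper (CommonTestUtils.waitUntilCondition, whose exact signature varies across versions); a generalized sketch of the method above, with awaitAtLeast and the IntSupplier parameter as illustrative choices:

import java.time.Duration;
import java.util.function.IntSupplier;

import org.apache.flink.api.common.time.Deadline;
import org.junit.Assert;

final class AwaitAsserts {

    /**
     * Waits until {@code actual} reaches {@code expected} or {@code timeout} elapses,
     * then re-reads the value and asserts with a message reporting the shortfall.
     */
    static void awaitAtLeast(IntSupplier actual, int expected, Duration timeout)
            throws InterruptedException {
        Deadline deadline = Deadline.fromNow(timeout);
        while (deadline.hasTimeLeft() && actual.getAsInt() < expected) {
            Thread.sleep(10);
        }
        int received = actual.getAsInt();
        if (received < expected) {
            Assert.fail(String.format(
                    "Timeout waiting for records, received %d/%d", received, expected));
        }
    }

    private AwaitAsserts() {}
}

With this helper, the original method reduces to awaitAtLeast(queue::size, count, Duration.ofSeconds(10)).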