Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
From the class WebFrontendITCase, method testCancelYarn:
@Test
public void testCancelYarn() throws Exception {
    // this only works if there is no active job at this point
    assertTrue(getRunningJobs(CLUSTER.getClusterClient()).isEmpty());
    // Create a task
    final JobVertex sender = new JobVertex("Sender");
    sender.setParallelism(2);
    sender.setInvokableClass(BlockingInvokable.class);
    final JobGraph jobGraph = JobGraphTestUtils.streamingJobGraph(sender);
    final JobID jid = jobGraph.getJobID();
    ClusterClient<?> clusterClient = CLUSTER.getClusterClient();
    clusterClient.submitJob(jobGraph).get();
    // wait for the job to show up
    while (getRunningJobs(CLUSTER.getClusterClient()).isEmpty()) {
        Thread.sleep(10);
    }
    // wait for the tasks to be properly running
    BlockingInvokable.latch.await();
    final Duration testTimeout = Duration.ofMinutes(2);
    final Deadline deadline = Deadline.fromNow(testTimeout);
    try (HttpTestClient client = new HttpTestClient("localhost", getRestPort())) {
        // trigger cancellation through the yarn-cancel REST handler
        client.sendGetRequest("/jobs/" + jid + "/yarn-cancel", deadline.timeLeft());
        HttpTestClient.SimpleHttpResponse response = client.getNextResponse(deadline.timeLeft());
        assertEquals(HttpResponseStatus.ACCEPTED, response.getStatus());
        assertEquals("application/json; charset=UTF-8", response.getType());
        assertEquals("{}", response.getContent());
    }
    // wait for the cancellation to finish
    while (!getRunningJobs(CLUSTER.getClusterClient()).isEmpty()) {
        Thread.sleep(20);
    }
    BlockingInvokable.reset();
}
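All five snippets on this page share one idiom: build a Deadline from a Duration with Deadline.fromNow(...), then poll in a loop guarded by hasTimeLeft(), handing timeLeft() to any call that accepts a remaining timeout. A minimal sketch of that idiom follows; the awaitCondition helper, the BooleanSupplier condition, and the 50 ms poll interval are illustrative assumptions, not part of the Flink sources above.

import java.time.Duration;
import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

import org.apache.flink.api.common.time.Deadline;

// Hypothetical helper illustrating the polling idiom; not part of Flink itself.
static void awaitCondition(BooleanSupplier condition, Duration timeout)
        throws InterruptedException, TimeoutException {
    final Deadline deadline = Deadline.fromNow(timeout);
    while (!condition.getAsBoolean() && deadline.hasTimeLeft()) {
        Thread.sleep(50); // back off briefly between checks
    }
    if (!condition.getAsBoolean()) {
        throw new TimeoutException("Condition not met within " + timeout);
    }
}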
Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
From the class KinesisPubsubClient, method createTopic:
public void createTopic(String stream, int shards, Properties props) throws Exception {
    try {
        // delete the stream first if it already exists
        kinesisClient.describeStream(stream);
        kinesisClient.deleteStream(stream);
    } catch (ResourceNotFoundException rnfe) {
        // expected when the stream doesn't exist
    }
    kinesisClient.createStream(stream, shards);
    Deadline deadline = Deadline.fromNow(Duration.ofSeconds(5));
    while (deadline.hasTimeLeft()) {
        try {
            // sleep for a bit while the stream is being created
            Thread.sleep(250);
            if (kinesisClient.describeStream(stream).getStreamDescription().getShards().size() != shards) {
                // not fully created yet
                continue;
            }
            break;
        } catch (ResourceNotFoundException rnfe) {
            // not ready yet
        }
    }
}
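A hedged usage sketch for the method above, assuming the client is constructed from connection properties as in Flink's Kinesis connector tests. The endpoint and credential values are placeholders for a local Kinesis mock (for example Kinesalite), not configuration taken from the Flink sources.

import java.util.Properties;

// Placeholder connection settings for a local Kinesis mock; adjust to your setup.
Properties props = new Properties();
props.setProperty("aws.endpoint", "http://localhost:4567");
props.setProperty("aws.credentials.provider.basic.accesskeyid", "fakeAccessKeyId");
props.setProperty("aws.credentials.provider.basic.secretkey", "fakeSecretKey");

KinesisPubsubClient client = new KinesisPubsubClient(props);
// Recreates "test-stream" with two shards, then polls (up to 5 s) until both shards are visible.
client.createTopic("test-stream", 2, props);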
Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
From the class RecordEmitterTest, method testRetainMinAfterReachingLimit:
@Test
public void testRetainMinAfterReachingLimit() throws Exception {
    TestRecordEmitter emitter = new TestRecordEmitter();
    final TimestampedValue<String> one = new TimestampedValue<>("1", 1);
    final TimestampedValue<String> two = new TimestampedValue<>("2", 2);
    final TimestampedValue<String> three = new TimestampedValue<>("3", 3);
    final TimestampedValue<String> ten = new TimestampedValue<>("10", 10);
    final TimestampedValue<String> eleven = new TimestampedValue<>("11", 11);
    final TimestampedValue<String> twenty = new TimestampedValue<>("20", 20);
    final TimestampedValue<String> thirty = new TimestampedValue<>("30", 30);
    final RecordEmitter.RecordQueue<TimestampedValue> queue0 = emitter.getQueue(0);
    final RecordEmitter.RecordQueue<TimestampedValue> queue1 = emitter.getQueue(1);
    queue0.put(one);
    queue0.put(two);
    queue0.put(three);
    queue0.put(ten);
    queue0.put(eleven);
    queue1.put(twenty);
    queue1.put(thirty);
    emitter.setMaxLookaheadMillis(1);
    emitter.setCurrentWatermark(5);
    ExecutorService executor = Executors.newSingleThreadExecutor();
    executor.submit(emitter);
    try {
        // emits one record past the limit
        Deadline dl = Deadline.fromNow(Duration.ofSeconds(10));
        while (emitter.results.size() != 4 && dl.hasTimeLeft()) {
            Thread.sleep(10);
        }
        Assert.assertThat(emitter.results, Matchers.contains(one, two, three, ten));
        // advance the watermark, which emits the remaining record from queue0
        emitter.setCurrentWatermark(10);
        dl = Deadline.fromNow(Duration.ofSeconds(10));
        while (emitter.results.size() != 5 && dl.hasTimeLeft()) {
            Thread.sleep(10);
        }
        Assert.assertThat(emitter.results, Matchers.contains(one, two, three, ten, eleven));
    } finally {
        emitter.stop();
        executor.shutdownNow();
    }
}
Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
From the class ExecutionGraphTestUtils, method waitForAllExecutionsPredicate:
/**
 * Waits until all executions fulfill the given predicate.
 *
 * @param executionGraph for which to check the executions
 * @param executionPredicate predicate which is to be fulfilled
 * @param maxWaitMillis timeout for the wait operation
 * @throws TimeoutException if the executions did not reach the target state in time
 */
public static void waitForAllExecutionsPredicate(
        ExecutionGraph executionGraph,
        Predicate<AccessExecution> executionPredicate,
        long maxWaitMillis) throws TimeoutException {
    final Predicate<AccessExecutionGraph> allExecutionsPredicate = allExecutionsPredicate(executionPredicate);
    final Deadline deadline = Deadline.fromNow(Duration.ofMillis(maxWaitMillis));
    boolean predicateResult;
    do {
        predicateResult = allExecutionsPredicate.test(executionGraph);
        if (!predicateResult) {
            try {
                Thread.sleep(2L);
            } catch (InterruptedException ignored) {
                Thread.currentThread().interrupt();
            }
        }
    } while (!predicateResult && deadline.hasTimeLeft());
    if (!predicateResult) {
        throw new TimeoutException("Not all executions fulfilled the predicate in time.");
    }
}
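For example, a test can wait for every execution to reach the RUNNING state as sketched below. The executionGraph variable and the 5-second timeout are illustrative; AccessExecution#getState and ExecutionState are existing Flink runtime types.

import org.apache.flink.runtime.execution.ExecutionState;

// Wait up to 5 seconds until every execution in the graph reports RUNNING.
waitForAllExecutionsPredicate(
        executionGraph,
        execution -> execution.getState() == ExecutionState.RUNNING,
        5_000L);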
Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
From the class JobDispatcherITCase, method testRecoverFromCheckpointAfterLosingAndRegainingLeadership:
@Test
public void testRecoverFromCheckpointAfterLosingAndRegainingLeadership(@TempDir Path tmpPath) throws Exception {
    final Deadline deadline = Deadline.fromNow(TIMEOUT);
    final Configuration configuration = new Configuration();
    configuration.set(HighAvailabilityOptions.HA_MODE, HighAvailabilityMode.ZOOKEEPER.name());
    final TestingMiniClusterConfiguration clusterConfiguration =
            TestingMiniClusterConfiguration.newBuilder().setConfiguration(configuration).build();
    final EmbeddedHaServicesWithLeadershipControl haServices =
            new EmbeddedHaServicesWithLeadershipControl(TestingUtils.defaultExecutor());
    final Configuration newConfiguration =
            new Configuration(clusterConfiguration.getConfiguration());
    final long checkpointInterval = 100;
    final JobID jobID = generateAndPersistJobGraph(newConfiguration, checkpointInterval, tmpPath);
    final TestingMiniCluster.Builder clusterBuilder =
            TestingMiniCluster.newBuilder(clusterConfiguration)
                    .setHighAvailabilityServicesSupplier(() -> haServices)
                    .setDispatcherResourceManagerComponentFactorySupplier(
                            createJobModeDispatcherResourceManagerComponentFactorySupplier(newConfiguration));
    AtLeastOneCheckpointInvokable.reset();
    try (final MiniCluster cluster = clusterBuilder.build()) {
        // start mini cluster and submit the job
        cluster.start();
        AtLeastOneCheckpointInvokable.atLeastOneCheckpointCompleted.await();
        final CompletableFuture<JobResult> firstJobResult = cluster.requestJobResult(jobID);
        haServices.revokeDispatcherLeadership();
        // make sure the leadership is revoked to avoid race conditions
        Assertions.assertEquals(ApplicationStatus.UNKNOWN, firstJobResult.get().getApplicationStatus());
        haServices.grantDispatcherLeadership();
        // job is suspended, wait until it's running
        awaitJobStatus(cluster, jobID, JobStatus.RUNNING, deadline);
        CommonTestUtils.waitUntilCondition(
                () -> cluster.getArchivedExecutionGraph(jobID)
                                .get()
                                .getCheckpointStatsSnapshot()
                                .getLatestRestoredCheckpoint() != null,
                deadline);
    }
}
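The awaitJobStatus helper is not shown above. A plausible sketch of it, assuming it simply polls MiniCluster#getJobStatus under the same Deadline; this is an assumption about the helper, not the verbatim Flink source.

// Assumed shape of the awaitJobStatus helper used above; not verbatim Flink source.
private static void awaitJobStatus(
        MiniCluster cluster, JobID jobId, JobStatus expected, Deadline deadline)
        throws Exception {
    while (deadline.hasTimeLeft()) {
        if (cluster.getJobStatus(jobId).get() == expected) {
            return;
        }
        Thread.sleep(50); // poll until the dispatcher reports the expected status
    }
    throw new TimeoutException(
            "Job " + jobId + " did not reach status " + expected + " in time.");
}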