Use of org.apache.flink.test.junit5.InjectMiniCluster in project flink by apache.
From class TaskCancelAsyncProducerConsumerITCase, method testCancelAsyncProducerAndConsumer:
/**
* Tests that a task waiting on an async producer/consumer that is stuck in a blocking buffer
* request can be properly cancelled.
*
* <p>This is currently required for the Flink Kafka sources, which spawn a separate Thread
* consuming from Kafka and producing the intermediate streams in the spawned Thread instead of
* the main task Thread.
*/
@Test
public void testCancelAsyncProducerAndConsumer(@InjectMiniCluster MiniCluster flink)
        throws Exception {
    Deadline deadline = Deadline.now().plus(Duration.ofMinutes(2));

    // Job with async producer and consumer
    JobVertex producer = new JobVertex("AsyncProducer");
    producer.setParallelism(1);
    producer.setInvokableClass(AsyncProducer.class);

    JobVertex consumer = new JobVertex("AsyncConsumer");
    consumer.setParallelism(1);
    consumer.setInvokableClass(AsyncConsumer.class);
    consumer.connectNewDataSetAsInput(
            producer, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

    SlotSharingGroup slot = new SlotSharingGroup();
    producer.setSlotSharingGroup(slot);
    consumer.setSlotSharingGroup(slot);

    JobGraph jobGraph = JobGraphTestUtils.streamingJobGraph(producer, consumer);

    // Submit job and wait until running
    flink.runDetached(jobGraph);

    FutureUtils.retrySuccessfulWithDelay(
                    () -> flink.getJobStatus(jobGraph.getJobID()),
                    Time.milliseconds(10),
                    deadline,
                    status -> status == JobStatus.RUNNING,
                    TestingUtils.defaultScheduledExecutor())
            .get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);

    boolean producerBlocked = false;
    for (int i = 0; i < 50; i++) {
        Thread thread = ASYNC_PRODUCER_THREAD;

        if (thread != null && thread.isAlive()) {
            StackTraceElement[] stackTrace = thread.getStackTrace();
            producerBlocked = isInBlockingBufferRequest(stackTrace);
        }

        if (producerBlocked) {
            break;
        } else {
            // Retry
            Thread.sleep(500L);
        }
    }

    // Verify that async producer is in blocking request
    assertTrue(
            "Producer thread is not blocked: "
                    + Arrays.toString(ASYNC_PRODUCER_THREAD.getStackTrace()),
            producerBlocked);

    boolean consumerWaiting = false;
    for (int i = 0; i < 50; i++) {
        Thread thread = ASYNC_CONSUMER_THREAD;

        if (thread != null && thread.isAlive()) {
            consumerWaiting = thread.getState() == Thread.State.WAITING;
        }

        if (consumerWaiting) {
            break;
        } else {
            // Retry
            Thread.sleep(500L);
        }
    }

    // Verify that async consumer is in blocking request
    assertTrue("Consumer thread is not blocked.", consumerWaiting);

    flink.cancelJob(jobGraph.getJobID())
            .get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);

    // Wait until the job is canceled
    FutureUtils.retrySuccessfulWithDelay(
                    () -> flink.getJobStatus(jobGraph.getJobID()),
                    Time.milliseconds(10),
                    deadline,
                    status -> status == JobStatus.CANCELED,
                    TestingUtils.defaultScheduledExecutor())
            .get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);

    // Verify the expected Exceptions
    assertNotNull(ASYNC_PRODUCER_EXCEPTION);
    assertEquals(CancelTaskException.class, ASYNC_PRODUCER_EXCEPTION.getClass());

    assertNotNull(ASYNC_CONSUMER_EXCEPTION);
    assertEquals(IllegalStateException.class, ASYNC_CONSUMER_EXCEPTION.getClass());
}
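
The @InjectMiniCluster parameter above is resolved by a JUnit 5 extension registered on the test class. Below is a minimal sketch of that setup, assuming the MiniClusterExtension from flink-test-utils is used; the class name MyMiniClusterITCase and the configuration values are illustrative and not taken from the Flink sources quoted here.

import org.apache.flink.runtime.minicluster.MiniCluster;
import org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration;
import org.apache.flink.test.junit5.InjectMiniCluster;
import org.apache.flink.test.junit5.MiniClusterExtension;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;

class MyMiniClusterITCase {

    // The extension starts a MiniCluster for the test class and resolves
    // @InjectMiniCluster parameters. Configuration values are illustrative.
    @RegisterExtension
    static final MiniClusterExtension MINI_CLUSTER =
            new MiniClusterExtension(
                    new MiniClusterResourceConfiguration.Builder()
                            .setNumberTaskManagers(1)
                            .setNumberSlotsPerTaskManager(2)
                            .build());

    @Test
    void testWithInjectedMiniCluster(@InjectMiniCluster MiniCluster miniCluster)
            throws Exception {
        // The injected MiniCluster can be used to submit job graphs, query job
        // status, or cancel jobs, as in the tests quoted on this page.
    }
}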
Use of org.apache.flink.test.junit5.InjectMiniCluster in project flink by apache.
From class AbstractHAJobRunITCase, method testJobExecutionInHaMode:
@Test
public void testJobExecutionInHaMode(@InjectMiniCluster MiniCluster flinkCluster)
        throws Exception {
    final JobGraph jobGraph = JobGraphTestUtils.singleNoOpJobGraph();

    // Providing a timeout helps make the test fail in case some issue occurred
    // while initializing the cluster
    flinkCluster.submitJob(jobGraph).get(30, TimeUnit.SECONDS);

    final Deadline deadline = Deadline.fromNow(Duration.ofSeconds(30));
    final JobStatus jobStatus =
            FutureUtils.retrySuccessfulWithDelay(
                            () -> flinkCluster.getJobStatus(jobGraph.getJobID()),
                            Time.milliseconds(10),
                            deadline,
                            status -> flinkCluster.isRunning() && status == JobStatus.FINISHED,
                            TestingUtils.defaultScheduledExecutor())
                    .get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
    assertThat(jobStatus).isEqualTo(JobStatus.FINISHED);

    runAfterJobTermination();
}
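
Both tests poll the injected MiniCluster with FutureUtils.retrySuccessfulWithDelay until the job reaches a target status. That shared pattern could be factored into a small helper such as the sketch below; waitForJobStatus is a hypothetical name and not part of the Flink sources quoted here (imports for MiniCluster, JobID, JobStatus, Deadline, Time, FutureUtils, TestingUtils, and TimeUnit are assumed).

// Hypothetical helper: repeatedly query the job status until the expected
// state is reached or the deadline expires.
private static JobStatus waitForJobStatus(
        MiniCluster miniCluster, JobID jobId, JobStatus expected, Deadline deadline)
        throws Exception {
    return FutureUtils.retrySuccessfulWithDelay(
                    () -> miniCluster.getJobStatus(jobId),
                    Time.milliseconds(10),
                    deadline,
                    status -> status == expected,
                    TestingUtils.defaultScheduledExecutor())
            .get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
}

With such a helper, the wait-until-RUNNING and wait-until-CANCELED steps in the first test reduce to waitForJobStatus(flink, jobGraph.getJobID(), JobStatus.RUNNING, deadline) and its CANCELED counterpart; the HA test additionally checks flinkCluster.isRunning() in its predicate, so it would need a slightly more general variant.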