use of org.apache.flink.runtime.minicluster.MiniCluster in project flink by apache.
the class FileExecutionGraphInfoStoreTest, method testPutSuspendedJobOnClusterShutdown.
/**
 * Tests that a session cluster can terminate gracefully when jobs are still running.
 */
@Test
public void testPutSuspendedJobOnClusterShutdown() throws Exception {
    File rootDir = temporaryFolder.newFolder();
    try (final MiniCluster miniCluster =
            new ExecutionGraphInfoStoreTestUtils.PersistingMiniCluster(
                    new MiniClusterConfiguration.Builder().build(), rootDir)) {
        miniCluster.start();
        final JobVertex vertex = new JobVertex("blockingVertex");
        // The adaptive scheduler expects that every vertex has a configured parallelism
        vertex.setParallelism(1);
        vertex.setInvokableClass(
                ExecutionGraphInfoStoreTestUtils.SignallingBlockingNoOpInvokable.class);
        final JobGraph jobGraph = JobGraphTestUtils.streamingJobGraph(vertex);
        miniCluster.submitJob(jobGraph);
        ExecutionGraphInfoStoreTestUtils.SignallingBlockingNoOpInvokable.LATCH.await();
    }
}
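The test above never waits for the job to finish; it only checks that shutdown works while the job is still running. For comparison, here is a minimal sketch of the plain MiniCluster lifecycle, assuming the same test utilities (JobGraphTestUtils and the JUnit imports used in the snippets on this page) are available; it is an illustrative example, not part of the Flink test suite:

@Test
public void runNoOpJobOnPlainMiniCluster() throws Exception {
    final MiniClusterConfiguration clusterConfiguration =
            new MiniClusterConfiguration.Builder()
                    .setNumTaskManagers(1)
                    .setNumSlotsPerTaskManager(1)
                    .build();
    // try-with-resources shuts the cluster down even if the job fails
    try (MiniCluster miniCluster = new MiniCluster(clusterConfiguration)) {
        miniCluster.start();
        // executeJobBlocking submits the job and waits for its terminal result
        miniCluster.executeJobBlocking(JobGraphTestUtils.singleNoOpJobGraph());
    }
}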
use of org.apache.flink.runtime.minicluster.MiniCluster in project flink by apache.
the class AbstractHAJobRunITCase, method testJobExecutionInHaMode.
@Test
public void testJobExecutionInHaMode(@InjectMiniCluster MiniCluster flinkCluster) throws Exception {
    final JobGraph jobGraph = JobGraphTestUtils.singleNoOpJobGraph();
    // providing a timeout helps make the test fail in case some issue occurred while
    // initializing the cluster
    flinkCluster.submitJob(jobGraph).get(30, TimeUnit.SECONDS);
    final Deadline deadline = Deadline.fromNow(Duration.ofSeconds(30));
    final JobStatus jobStatus =
            FutureUtils.retrySuccessfulWithDelay(
                            () -> flinkCluster.getJobStatus(jobGraph.getJobID()),
                            Time.milliseconds(10),
                            deadline,
                            status -> flinkCluster.isRunning() && status == JobStatus.FINISHED,
                            TestingUtils.defaultScheduledExecutor())
                    .get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
    assertThat(jobStatus).isEqualTo(JobStatus.FINISHED);
    runAfterJobTermination();
}
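The retry-polling above deliberately re-checks flinkCluster.isRunning() so that transient leader changes during HA recovery do not fail the test. Where that is not a concern, a simplified, hypothetical variant could block on the job's terminal result instead (JobResult lives in org.apache.flink.runtime.jobmaster); this is a sketch, not the method used by the Flink test:

@Test
public void testJobExecutionViaJobResult(@InjectMiniCluster MiniCluster flinkCluster) throws Exception {
    final JobGraph jobGraph = JobGraphTestUtils.singleNoOpJobGraph();
    flinkCluster.submitJob(jobGraph).get(30, TimeUnit.SECONDS);
    // requestJobResult completes once the job reaches a globally terminal state
    final JobResult jobResult =
            flinkCluster.requestJobResult(jobGraph.getJobID()).get(30, TimeUnit.SECONDS);
    assertThat(jobResult.isSuccess()).isTrue();
}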
use of org.apache.flink.runtime.minicluster.MiniCluster in project flink by apache.
the class ShuffleMasterTest, method testShuffleMasterLifeCycle.
@Test
public void testShuffleMasterLifeCycle() throws Exception {
    try (MiniCluster cluster = new MiniCluster(createClusterConfiguration(false))) {
        cluster.start();
        cluster.executeJobBlocking(createJobGraph());
    }
    assertTrue(TestShuffleMaster.currentInstance.get().closed.get());
    String[] expectedPartitionEvents =
            new String[] {
                PARTITION_REGISTRATION_EVENT,
                PARTITION_REGISTRATION_EVENT,
                EXTERNAL_PARTITION_RELEASE_EVENT,
                EXTERNAL_PARTITION_RELEASE_EVENT
            };
    assertArrayEquals(expectedPartitionEvents, TestShuffleMaster.partitionEvents.toArray());
}
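The createClusterConfiguration(...) helper is not shown in this listing. For context, a MiniCluster picks up a custom shuffle implementation through the ShuffleServiceOptions.SHUFFLE_SERVICE_FACTORY_CLASS option; the sketch below is a hypothetical illustration (the factory class name is a placeholder, not necessarily what ShuffleMasterTest uses):

final Configuration configuration = new Configuration();
// plug a custom ShuffleServiceFactory (and thus a custom ShuffleMaster) into the cluster
configuration.setString(
        ShuffleServiceOptions.SHUFFLE_SERVICE_FACTORY_CLASS,
        "org.example.MyShuffleServiceFactory"); // placeholder class name
final MiniClusterConfiguration clusterConfiguration =
        new MiniClusterConfiguration.Builder()
                .setNumTaskManagers(1)
                .setNumSlotsPerTaskManager(2)
                .setConfiguration(configuration)
                .build();
try (MiniCluster cluster = new MiniCluster(clusterConfiguration)) {
    cluster.start();
    // jobs executed on this cluster now register and release partitions
    // through the configured shuffle service
}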
use of org.apache.flink.runtime.minicluster.MiniCluster in project flink by apache.
the class MemoryExecutionGraphInfoStoreTest, method testPutSuspendedJobOnClusterShutdown.
/**
 * Tests that a session cluster can terminate gracefully when jobs are still running.
 */
@Test
public void testPutSuspendedJobOnClusterShutdown() throws Exception {
    Configuration configuration = new Configuration();
    configuration.set(JobManagerOptions.JOB_STORE_TYPE, JobManagerOptions.JobStoreType.Memory);
    try (final MiniCluster miniCluster =
            new ExecutionGraphInfoStoreTestUtils.PersistingMiniCluster(
                    new MiniClusterConfiguration.Builder()
                            .setConfiguration(configuration)
                            .build())) {
        miniCluster.start();
        final JobVertex vertex = new JobVertex("blockingVertex");
        // The adaptive scheduler expects that every vertex has a configured parallelism
        vertex.setParallelism(1);
        vertex.setInvokableClass(
                ExecutionGraphInfoStoreTestUtils.SignallingBlockingNoOpInvokable.class);
        final JobGraph jobGraph = JobGraphTestUtils.streamingJobGraph(vertex);
        miniCluster.submitJob(jobGraph);
        ExecutionGraphInfoStoreTestUtils.SignallingBlockingNoOpInvokable.LATCH.await();
    }
}
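Beyond choosing the store type, the execution graph info store can be bounded through the standard job-store options. The following lines are a hedged sketch of how the same Configuration could be extended; these options exist in JobManagerOptions, but the test above does not set them:

Configuration configuration = new Configuration();
configuration.set(JobManagerOptions.JOB_STORE_TYPE, JobManagerOptions.JobStoreType.Memory);
// keep archived jobs for one hour at most
configuration.set(JobManagerOptions.JOB_STORE_EXPIRATION_TIME, 3600L);
// retain no more than 100 archived jobs
configuration.set(JobManagerOptions.JOB_STORE_MAX_CAPACITY, 100);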
use of org.apache.flink.runtime.minicluster.MiniCluster in project flink by apache.
the class CoordinatorEventsExactlyOnceITCase, method startMiniCluster.
@BeforeClass
public static void startMiniCluster() throws Exception {
    final Configuration config = new Configuration();
    config.setString(RestOptions.BIND_PORT, "0");
    final MiniClusterConfiguration clusterCfg =
            new MiniClusterConfiguration.Builder()
                    .setNumTaskManagers(2)
                    .setNumSlotsPerTaskManager(1)
                    .setConfiguration(config)
                    .build();
    miniCluster = new MiniCluster(clusterCfg);
    miniCluster.start();
}
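The @BeforeClass setup implies a matching teardown elsewhere in the test class; a minimal sketch, assuming JUnit 4's @AfterClass and the static miniCluster field shown above, could look like this:

@AfterClass
public static void shutdownMiniCluster() throws Exception {
    if (miniCluster != null) {
        // close() cancels running jobs and releases all cluster resources
        miniCluster.close();
    }
}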