Use of org.apache.flink.runtime.highavailability.nonha.embedded.EmbeddedJobResultStore in project flink by apache.
From the class DispatcherCleanupITCase, method testCleanupNotCancellable.
@Test
public void testCleanupNotCancellable() throws Exception {
    final JobGraph jobGraph = createJobGraph();
    final JobID jobId = jobGraph.getJobID();

    final JobResultStore jobResultStore = new EmbeddedJobResultStore();
    jobResultStore.createDirtyResult(
            new JobResultEntry(TestingJobResultStore.createSuccessfulJobResult(jobId)));
    haServices.setJobResultStore(jobResultStore);

    // Instantiates JobManagerRunner
    final CompletableFuture<Void> jobManagerRunnerCleanupFuture = new CompletableFuture<>();
    final AtomicReference<JobManagerRunner> jobManagerRunnerEntry = new AtomicReference<>();
    final JobManagerRunnerRegistry jobManagerRunnerRegistry =
            TestingJobManagerRunnerRegistry.newSingleJobBuilder(jobManagerRunnerEntry)
                    .withLocalCleanupAsyncFunction(
                            (actualJobId, executor) -> jobManagerRunnerCleanupFuture)
                    .build();

    final Dispatcher dispatcher =
            createTestingDispatcherBuilder()
                    .setJobManagerRunnerRegistry(jobManagerRunnerRegistry)
                    .build();
    dispatcher.start();
    toTerminate.add(dispatcher);

    CommonTestUtils.waitUntilCondition(
            () -> jobManagerRunnerEntry.get() != null,
            Deadline.fromNow(Duration.ofSeconds(10)),
            "JobManagerRunner wasn't loaded in time.");
    assertThat(
            "The JobResultStore should have this job still marked as dirty.",
            haServices.getJobResultStore().hasDirtyJobResultEntry(jobId),
            CoreMatchers.is(true));

    final DispatcherGateway dispatcherGateway = dispatcher.getSelfGateway(DispatcherGateway.class);
    try {
        dispatcherGateway.cancelJob(jobId, TIMEOUT).get();
        Assert.fail("Should fail because cancelling the cleanup is not allowed.");
    } catch (ExecutionException e) {
        assertThat(e, FlinkMatchers.containsCause(JobCancellationFailedException.class));
    }

    jobManagerRunnerCleanupFuture.complete(null);
    CommonTestUtils.waitUntilCondition(
            () -> haServices.getJobResultStore().hasCleanJobResultEntry(jobId),
            Deadline.fromNow(Duration.ofSeconds(60)),
            "The JobResultStore should have this job marked as clean now.");
}
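The test above exercises the full lifecycle of a job result entry: it is created as dirty once the job reaches a globally terminal state and only becomes clean after cleanup has finished. As a standalone illustration, the following is a minimal sketch that drives an in-memory EmbeddedJobResultStore through that lifecycle. The method name is invented for this page, markResultAsClean is assumed from the synchronous JobResultStore interface these tests compile against, and the remaining calls all appear in the examples on this page.

@Test
public void jobResultStoreDirtyToCleanLifecycleSketch() throws Exception {
    final JobID jobId = new JobID();
    final JobResultStore store = new EmbeddedJobResultStore();

    // 1) Persist the global job result as "dirty": the job has finished,
    //    but its artifacts have not been cleaned up yet.
    store.createDirtyResult(
            new JobResultEntry(TestingJobResultStore.createSuccessfulJobResult(jobId)));
    assertThat(store.hasDirtyJobResultEntry(jobId)).isTrue();
    assertThat(store.hasCleanJobResultEntry(jobId)).isFalse();

    // 2) After cleanup completes, mark the entry as clean; a clean entry is what
    //    lets the Dispatcher reject a re-submission of the same job as a duplicate.
    store.markResultAsClean(jobId);
    assertThat(store.hasDirtyJobResultEntry(jobId)).isFalse();
    assertThat(store.hasCleanJobResultEntry(jobId)).isTrue();
}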
Use of org.apache.flink.runtime.highavailability.nonha.embedded.EmbeddedJobResultStore in project flink by apache.
From the class ApplicationDispatcherBootstrapITCase, method testDirtyJobResultRecoveryInApplicationMode.
@Test
public void testDirtyJobResultRecoveryInApplicationMode() throws Exception {
    final Deadline deadline = Deadline.fromNow(TIMEOUT);
    final Configuration configuration = new Configuration();
    configuration.set(HighAvailabilityOptions.HA_MODE, HighAvailabilityMode.ZOOKEEPER.name());
    configuration.set(DeploymentOptions.TARGET, EmbeddedExecutor.NAME);
    configuration.set(ClientOptions.CLIENT_RETRY_PERIOD, Duration.ofMillis(100));
    final TestingMiniClusterConfiguration clusterConfiguration =
            TestingMiniClusterConfiguration.newBuilder().setConfiguration(configuration).build();

    // having a dirty entry in the JobResultStore should make the
    // ApplicationDispatcherBootstrap implementation fail to submit the job
    final JobResultStore jobResultStore = new EmbeddedJobResultStore();
    jobResultStore.createDirtyResult(
            new JobResultEntry(
                    TestingJobResultStore.createSuccessfulJobResult(
                            ApplicationDispatcherBootstrap.ZERO_JOB_ID)));
    final EmbeddedHaServicesWithLeadershipControl haServices =
            new EmbeddedHaServicesWithLeadershipControl(TestingUtils.defaultExecutor()) {
                @Override
                public JobResultStore getJobResultStore() {
                    return jobResultStore;
                }
            };

    final TestingMiniCluster.Builder clusterBuilder =
            TestingMiniCluster.newBuilder(clusterConfiguration)
                    .setHighAvailabilityServicesSupplier(() -> haServices)
                    .setDispatcherResourceManagerComponentFactorySupplier(
                            createApplicationModeDispatcherResourceManagerComponentFactorySupplier(
                                    clusterConfiguration.getConfiguration(),
                                    ErrorHandlingSubmissionJob.createPackagedProgram()));
    try (final MiniCluster cluster = clusterBuilder.build()) {
        // start mini cluster and submit the job
        cluster.start();
        // the cluster should shut down automatically once the application completes
        awaitClusterStopped(cluster, deadline);
    }

    FlinkAssertions.assertThatChainOfCauses(ErrorHandlingSubmissionJob.getSubmissionException())
            .as(
                    "The job's main method shouldn't have succeeded due to a"
                            + " DuplicateJobSubmissionException.")
            .hasAtLeastOneElementOfType(DuplicateJobSubmissionException.class);
    assertThat(jobResultStore.hasDirtyJobResultEntry(ApplicationDispatcherBootstrap.ZERO_JOB_ID))
            .isFalse();
    assertThat(jobResultStore.hasCleanJobResultEntry(ApplicationDispatcherBootstrap.ZERO_JOB_ID))
            .isTrue();
}
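ApplicationDispatcherBootstrap.ZERO_JOB_ID is the fixed JobID that application mode uses for the bootstrapped job when high availability is enabled, which is why a pre-existing dirty entry for that ID both surfaces as a DuplicateJobSubmissionException on re-submission and is still cleaned up during recovery. The in-memory EmbeddedJobResultStore used here is only suitable for tests and non-HA setups; in an HA deployment the results are persisted to a file system instead. Below is a hedged configuration sketch, with the option keys assumed from JobResultStoreOptions and the storage path purely illustrative; verify both against the Flink version in use.

// Hedged sketch: configure a file-system-backed JobResultStore for an HA deployment
// instead of the in-memory EmbeddedJobResultStore used in the tests above.
// The option keys are assumptions taken from JobResultStoreOptions.
final Configuration configuration = new Configuration();
// Where dirty/clean job result entries are persisted across Dispatcher failovers.
configuration.setString("job-result-store.storage-path", "hdfs:///flink/ha/job-result-store");
// Keep the clean marker files around instead of deleting them on commit.
configuration.setString("job-result-store.delete-on-commit", "false");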
Use of org.apache.flink.runtime.highavailability.nonha.embedded.EmbeddedJobResultStore in project flink by apache.
From the class JobMasterServiceLeadershipRunnerTest, method setup.
@Before
public void setup() {
    leaderElectionService = new TestingLeaderElectionService();
    jobResultStore = new EmbeddedJobResultStore();
    fatalErrorHandler = new TestingFatalErrorHandler();
}
Use of org.apache.flink.runtime.highavailability.nonha.embedded.EmbeddedJobResultStore in project flink by apache.
From the class AbstractDispatcherTest, method setUp.
@Before
public void setUp() throws Exception {
    heartbeatServices = new HeartbeatServices(1000L, 10000L);

    haServices = new TestingHighAvailabilityServices();
    haServices.setCheckpointRecoveryFactory(new StandaloneCheckpointRecoveryFactory());
    haServices.setResourceManagerLeaderRetriever(new SettableLeaderRetrievalService());
    haServices.setJobGraphStore(new StandaloneJobGraphStore());
    haServices.setJobResultStore(new EmbeddedJobResultStore());

    configuration = new Configuration();
    blobServer = new BlobServer(configuration, temporaryFolder.newFolder(), new VoidBlobStore());
}
Use of org.apache.flink.runtime.highavailability.nonha.embedded.EmbeddedJobResultStore in project flink by apache.
From the class DefaultDispatcherRunnerITCase, method setup.
@Before
public void setup() {
    dispatcherRunnerFactory =
            DefaultDispatcherRunnerFactory.createSessionRunner(SessionDispatcherFactory.INSTANCE);
    jobGraph = createJobGraph();
    dispatcherLeaderElectionService = new TestingLeaderElectionService();
    fatalErrorHandler = new TestingFatalErrorHandler();
    jobGraphStore = TestingJobGraphStore.newBuilder().build();
    jobResultStore = new EmbeddedJobResultStore();
    partialDispatcherServices =
            TestingPartialDispatcherServices.builder()
                    .withFatalErrorHandler(fatalErrorHandler)
                    .build(blobServerResource.getBlobServer(), new Configuration());
}