Use of org.apache.flink.core.execution.JobClient in project flink by apache.
Class StreamContextEnvironment, method executeAsync.
@Override
public JobClient executeAsync(StreamGraph streamGraph) throws Exception {
    validateAllowedExecution();
    final JobClient jobClient = super.executeAsync(streamGraph);
    if (!suppressSysout) {
        System.out.println("Job has been submitted with JobID " + jobClient.getJobID());
    }
    return jobClient;
}
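For context, a hypothetical caller-side sketch (not part of the Flink sources) of how the JobClient returned by executeAsync might be used; the class and job names are illustrative:

import org.apache.flink.core.execution.JobClient;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class ExecuteAsyncSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.fromElements(1, 2, 3).print(); // a sink is required before submission

        // Non-blocking submission; the returned JobClient is a handle to the submitted job.
        JobClient jobClient = env.executeAsync("execute-async-sketch");

        System.out.println("Submitted with JobID " + jobClient.getJobID());
        System.out.println("Status: " + jobClient.getJobStatus().get());
    }
}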
Use of org.apache.flink.core.execution.JobClient in project flink by apache.
Class StreamContextEnvironment, method execute.
@Override
public JobExecutionResult execute(StreamGraph streamGraph) throws Exception {
    checkNotAllowedConfigurations();
    final JobClient jobClient = executeAsync(streamGraph);
    final List<JobListener> jobListeners = getJobListeners();
    try {
        final JobExecutionResult jobExecutionResult = getJobExecutionResult(jobClient);
        jobListeners.forEach(jobListener -> jobListener.onJobExecuted(jobExecutionResult, null));
        return jobExecutionResult;
    } catch (Throwable t) {
        jobListeners.forEach(
                jobListener ->
                        jobListener.onJobExecuted(null, ExceptionUtils.stripExecutionException(t)));
        ExceptionUtils.rethrowException(t);
        // never reached, only make javac happy
        return null;
    }
}
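Because execute forwards the result (or the stripped failure) to every registered JobListener, a small listener sketch may be useful; the class name is hypothetical, and the callback signatures are those of org.apache.flink.core.execution.JobListener:

import javax.annotation.Nullable;
import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.core.execution.JobClient;
import org.apache.flink.core.execution.JobListener;

public class LoggingJobListener implements JobListener {

    @Override
    public void onJobSubmitted(@Nullable JobClient jobClient, @Nullable Throwable throwable) {
        if (throwable == null) {
            System.out.println("Submitted job " + jobClient.getJobID());
        } else {
            System.err.println("Submission failed: " + throwable.getMessage());
        }
    }

    @Override
    public void onJobExecuted(
            @Nullable JobExecutionResult jobExecutionResult, @Nullable Throwable throwable) {
        if (throwable == null) {
            System.out.println("Job finished in " + jobExecutionResult.getNetRuntime() + " ms");
        } else {
            System.err.println("Job failed: " + throwable.getMessage());
        }
    }
}

Such a listener would be registered up front, for example via env.registerJobListener(new LoggingJobListener()), so that the onJobSubmitted and onJobExecuted callbacks shown in these snippets fire.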
Use of org.apache.flink.core.execution.JobClient in project flink by apache.
Class AbstractSessionClusterExecutor, method execute.
@Override
public CompletableFuture<JobClient> execute(
        @Nonnull final Pipeline pipeline,
        @Nonnull final Configuration configuration,
        @Nonnull final ClassLoader userCodeClassloader)
        throws Exception {
    final JobGraph jobGraph = PipelineExecutorUtils.getJobGraph(pipeline, configuration);
    try (final ClusterDescriptor<ClusterID> clusterDescriptor =
            clusterClientFactory.createClusterDescriptor(configuration)) {
        final ClusterID clusterID = clusterClientFactory.getClusterId(configuration);
        checkState(clusterID != null);
        final ClusterClientProvider<ClusterID> clusterClientProvider =
                clusterDescriptor.retrieve(clusterID);
        ClusterClient<ClusterID> clusterClient = clusterClientProvider.getClusterClient();
        return clusterClient
                .submitJob(jobGraph)
                .thenApplyAsync(
                        FunctionUtils.uncheckedFunction(
                                jobId -> {
                                    ClientUtils.waitUntilJobInitializationFinished(
                                            () -> clusterClient.getJobStatus(jobId).get(),
                                            () -> clusterClient.requestJobResult(jobId).get(),
                                            userCodeClassloader);
                                    return jobId;
                                }))
                .thenApplyAsync(
                        jobID ->
                                (JobClient)
                                        new ClusterClientJobClientAdapter<>(
                                                clusterClientProvider, jobID, userCodeClassloader))
                .whenCompleteAsync((ignored1, ignored2) -> clusterClient.close());
    }
}
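To show this session-cluster path end to end, here is a hypothetical caller-side sketch (not from the Flink sources); the socket host and port are placeholders, and it assumes an execution.target pointing at an existing session cluster so that this executor, and therefore a ClusterClientJobClientAdapter, sits behind the returned JobClient:

import org.apache.flink.api.common.JobStatus;
import org.apache.flink.core.execution.JobClient;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class SessionClusterSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Long-running placeholder pipeline; host and port are illustrative only.
        env.socketTextStream("localhost", 9999).print();

        // Submission goes through AbstractSessionClusterExecutor#execute shown above.
        JobClient jobClient = env.executeAsync("session-cluster-sketch");

        JobStatus status = jobClient.getJobStatus().get();
        System.out.println("Job " + jobClient.getJobID() + " is " + status);

        // Detached control: cancel the job through the same JobClient handle
        // (assuming it is still running at this point).
        jobClient.cancel().get();
    }
}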
Use of org.apache.flink.core.execution.JobClient in project flink by apache.
Class ContextEnvironment, method execute.
@Override
public JobExecutionResult execute(String jobName) throws Exception {
    final JobClient jobClient = executeAsync(jobName);
    final List<JobListener> jobListeners = getJobListeners();
    try {
        final JobExecutionResult jobExecutionResult = getJobExecutionResult(jobClient);
        jobListeners.forEach(jobListener -> jobListener.onJobExecuted(jobExecutionResult, null));
        return jobExecutionResult;
    } catch (Throwable t) {
        jobListeners.forEach(
                jobListener ->
                        jobListener.onJobExecuted(null, ExceptionUtils.stripExecutionException(t)));
        ExceptionUtils.rethrowException(t);
        // never reached, only make javac happy
        return null;
    }
}
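The same blocking behaviour can be reproduced by hand on top of executeAsync; a minimal sketch, assuming a Flink version in which JobClient#getJobExecutionResult takes no arguments (older releases expected the user class loader as a parameter):

import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.core.execution.JobClient;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class BlockingResultSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.fromElements(1, 2, 3).print();

        JobClient jobClient = env.executeAsync("blocking-result-sketch");

        // Block until the job terminates, much as ContextEnvironment#execute does internally.
        JobExecutionResult result = jobClient.getJobExecutionResult().get();
        System.out.println("Net runtime: " + result.getNetRuntime() + " ms");
    }
}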
Use of org.apache.flink.core.execution.JobClient in project flink by apache.
Class ExecutionEnvironment, method executeAsync.
/**
 * Triggers the program execution asynchronously. The environment will execute all parts of the
 * program that have resulted in a "sink" operation. Sink operations are for example printing
 * results ({@link DataSet#print()}), writing results (e.g. {@link DataSet#writeAsText(String)},
 * {@link DataSet#write(org.apache.flink.api.common.io.FileOutputFormat, String)}), or other
 * generic data sinks created with {@link
 * DataSet#output(org.apache.flink.api.common.io.OutputFormat)}.
 *
 * <p>The program execution will be logged and displayed with the given job name.
 *
 * @return A {@link JobClient} that can be used to communicate with the submitted job, completed
 *     once the submission has succeeded.
 * @throws Exception Thrown, if the program submission fails.
 */
@PublicEvolving
public JobClient executeAsync(String jobName) throws Exception {
    checkNotNull(
            configuration.get(DeploymentOptions.TARGET),
            "No execution.target specified in your configuration file.");
    final Plan plan = createProgramPlan(jobName);
    final PipelineExecutorFactory executorFactory =
            executorServiceLoader.getExecutorFactory(configuration);
    checkNotNull(
            executorFactory,
            "Cannot find compatible factory for specified execution.target (=%s)",
            configuration.get(DeploymentOptions.TARGET));
    CompletableFuture<JobClient> jobClientFuture =
            executorFactory
                    .getExecutor(configuration)
                    .execute(plan, configuration, userClassloader);
    try {
        JobClient jobClient = jobClientFuture.get();
        jobListeners.forEach(jobListener -> jobListener.onJobSubmitted(jobClient, null));
        return jobClient;
    } catch (Throwable t) {
        jobListeners.forEach(jobListener -> jobListener.onJobSubmitted(null, t));
        ExceptionUtils.rethrow(t);
        // make javac happy, this code path will not be reached
        return null;
    }
}
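From the caller's side, a minimal DataSet-API sketch of this method (the class and job names are illustrative; DiscardingOutputFormat merely provides the required sink operation):

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.DiscardingOutputFormat;
import org.apache.flink.core.execution.JobClient;

public class BatchExecuteAsyncSketch {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        DataSet<Long> numbers = env.generateSequence(1, 1000);
        numbers.output(new DiscardingOutputFormat<>()); // the sink operation required before submission

        // Asynchronous submission; registered JobListeners receive onJobSubmitted.
        JobClient jobClient = env.executeAsync("batch-execute-async-sketch");
        System.out.println("Submitted with JobID " + jobClient.getJobID());
    }
}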