Use of org.apache.flink.api.common.JobSubmissionResult in project flink by apache.
Example: the executeAndSavepoint method of the SavepointMigrationTestBase class.
/**
 * Submits the job built on {@code env} in detached mode, polls the job's accumulators
 * until all expected values are reached (signalling the job is in the desired state),
 * then triggers a savepoint and moves it to {@code savepointPath}.
 *
 * @param env the environment whose stream graph is submitted as a job
 * @param savepointPath target file location the triggered savepoint is moved to
 * @param expectedAccumulators accumulator name/value pairs that must all be reached
 *        before the savepoint is triggered
 * @throws Exception if submission, accumulator polling, the savepoint trigger, or
 *         moving the savepoint file fails; the test also fails if the deadline
 *         expires before the accumulators match
 */
@SafeVarargs
protected final void executeAndSavepoint(StreamExecutionEnvironment env, String savepointPath, Tuple2<String, Integer>... expectedAccumulators) throws Exception {
	// Retrieve the job manager gateway of the testing cluster.
	ActorGateway jobManager = Await.result(cluster.leaderGateway().future(), DEADLINE.timeLeft());

	// Submit the job in detached mode; progress is observed via accumulators below.
	JobGraph jobGraph = env.getStreamGraph().getJobGraph();
	JobSubmissionResult jobSubmissionResult = cluster.submitJobDetached(jobGraph);
	LOG.info("Submitted job {} and waiting...", jobSubmissionResult.getJobID());

	StandaloneClusterClient clusterClient = new StandaloneClusterClient(cluster.configuration());
	boolean done = false;
	try {
		// Poll until every expected accumulator value is reached or the deadline expires.
		while (DEADLINE.hasTimeLeft()) {
			Thread.sleep(100);
			Map<String, Object> accumulators = clusterClient.getAccumulators(jobSubmissionResult.getJobID());
			if (allAccumulatorsReached(accumulators, expectedAccumulators)) {
				done = true;
				break;
			}
		}
	} finally {
		// Fix: release the client's resources; the original leaked the client.
		clusterClient.shutdown();
	}
	if (!done) {
		fail("Did not see the expected accumulator results within time limit.");
	}

	LOG.info("Triggering savepoint.");
	// Flink 1.2 protocol: TriggerSavepoint takes an optional target directory.
	// (The Flink 1.1 variant used TriggerSavepoint(jobId) without the Option and
	// required retrieving the savepoint from the testing job manager afterwards.)
	final Future<Object> savepointResultFuture = jobManager.ask(new JobManagerMessages.TriggerSavepoint(jobSubmissionResult.getJobID(), Option.<String>empty()), DEADLINE.timeLeft());

	Object savepointResult = Await.result(savepointResultFuture, DEADLINE.timeLeft());
	if (savepointResult instanceof JobManagerMessages.TriggerSavepointFailure) {
		fail("Error drawing savepoint: " + ((JobManagerMessages.TriggerSavepointFailure) savepointResult).cause());
	} else if (!(savepointResult instanceof JobManagerMessages.TriggerSavepointSuccess)) {
		// Fix: fail with a clear message instead of the ClassCastException the
		// unconditional cast below would otherwise raise on an unexpected reply.
		fail("Unexpected savepoint trigger response: " + savepointResult);
	}

	// The job manager stores the savepoint under a temporary path; move it to the
	// location the test asked for.
	final String jobmanagerSavepointPath = ((JobManagerMessages.TriggerSavepointSuccess) savepointResult).savepointPath();
	LOG.info("Saved savepoint: {}", jobmanagerSavepointPath);

	FileUtils.moveFile(new File(new URI(jobmanagerSavepointPath).getPath()), new File(savepointPath));
}

/**
 * Returns true iff every expected accumulator is present in {@code accumulators}
 * and has reached its expected value.
 */
private static boolean allAccumulatorsReached(Map<String, Object> accumulators, Tuple2<String, Integer>[] expected) {
	for (Tuple2<String, Integer> acc : expected) {
		Integer numFinished = (Integer) accumulators.get(acc.f0);
		if (numFinished == null || !numFinished.equals(acc.f1)) {
			return false;
		}
	}
	return true;
}
Use of org.apache.flink.api.common.JobSubmissionResult in project flink by apache.
Example: the submitJob method of the YarnClusterClientV2 class.
/**
 * Submits the given job graph by creating a YARN application and starting an
 * application master for it.
 *
 * <p>Attach mode is not supported yet, so only a bare {@link JobSubmissionResult}
 * carrying the job ID is returned.
 *
 * @param jobGraph the job to submit
 * @param classLoader the user-code class loader (currently unused here)
 * @return the submission result holding the job ID
 * @throws ProgramInvocationException if the application master could not be
 *         started or the application did not reach the RUNNING state
 */
@Override
protected JobSubmissionResult submitJob(JobGraph jobGraph, ClassLoader classLoader) throws ProgramInvocationException {
	try {
		// Create the application via yarnClient and start the application master for it.
		final YarnClientApplication yarnApplication = yarnClient.createApplication();
		ApplicationReport report = this.clusterDescriptor.startAppMaster(jobGraph, yarnClient, yarnApplication);
		// Enum comparison with == is null-safe and idiomatic.
		if (report.getYarnApplicationState() == YarnApplicationState.RUNNING) {
			appId = report.getApplicationId();
			trackingURL = report.getTrackingUrl();
			logAndSysout("Please refer to " + getWebInterfaceURL() + " for the running status of job " + jobGraph.getJobID().toString());
			//TODO: not support attach mode now
			return new JobSubmissionResult(jobGraph.getJobID());
		} else {
			throw new ProgramInvocationException("Fail to submit the job.");
		}
	} catch (ProgramInvocationException e) {
		// Fix: do not re-wrap our own exception type; the broad catch below used
		// to swallow its message and replace it with a generic one.
		throw e;
	} catch (Exception e) {
		// Fix: chain the caught exception itself, not e.getCause() — the cause is
		// frequently null, which destroyed the stack trace and root cause.
		throw new ProgramInvocationException("Fail to submit the job", e);
	}
}
Aggregations