Use of org.apache.flink.core.execution.JobClient in project zeppelin by apache.
From the class Flink114Shims, method executeMultipleInsertInto:
@Override
public boolean executeMultipleInsertInto(String jobName, Object tblEnv, InterpreterContext context) throws Exception {
    JobClient jobClient = statementSetMap.get(context.getParagraphId()).execute().getJobClient().get();
    while (!jobClient.getJobStatus().get().isTerminalState()) {
        LOGGER.debug("Wait for job to finish");
        Thread.sleep(1000 * 5);
    }
    if (jobClient.getJobStatus().get() == JobStatus.CANCELED) {
        context.out.write("Job is cancelled.\n");
        return false;
    }
    return true;
}
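The polling pattern above can be lifted into a standalone helper. A minimal sketch, assuming only the JobClient API shown here; the class name, helper name, and poll interval parameter are hypothetical:

import org.apache.flink.api.common.JobStatus;
import org.apache.flink.core.execution.JobClient;

public final class JobStatusPolling {

    // Block until the job reaches a terminal state, polling at a fixed interval,
    // then return that terminal status so the caller can branch on it.
    public static JobStatus awaitTerminalStatus(JobClient jobClient, long pollIntervalMillis)
            throws Exception {
        JobStatus status = jobClient.getJobStatus().get();
        while (!status.isTerminalState()) {
            Thread.sleep(pollIntervalMillis);
            status = jobClient.getJobStatus().get();
        }
        return status;
    }

    private JobStatusPolling() {
    }
}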
Use of org.apache.flink.core.execution.JobClient in project flink by apache.
From the class TableEnvironmentImpl, method executeInternal:
private TableResultInternal executeInternal(List<Transformation<?>> transformations, List<String> sinkIdentifierNames) {
    final String defaultJobName = "insert-into_" + String.join(",", sinkIdentifierNames);
    Pipeline pipeline = execEnv.createPipeline(transformations, tableConfig.getConfiguration(), defaultJobName);
    try {
        JobClient jobClient = execEnv.executeAsync(pipeline);
        final List<Column> columns = new ArrayList<>();
        Long[] affectedRowCounts = new Long[transformations.size()];
        for (int i = 0; i < transformations.size(); ++i) {
            // use sink identifier name as field name
            columns.add(Column.physical(sinkIdentifierNames.get(i), DataTypes.BIGINT()));
            affectedRowCounts[i] = -1L;
        }
        return TableResultImpl.builder()
                .jobClient(jobClient)
                .resultKind(ResultKind.SUCCESS_WITH_CONTENT)
                .schema(ResolvedSchema.of(columns))
                .resultProvider(new InsertResultProvider(affectedRowCounts).setJobClient(jobClient))
                .build();
    } catch (Exception e) {
        throw new TableException("Failed to execute sql", e);
    }
}
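From the caller's side, the JobClient attached to the TableResult is what makes the asynchronous INSERT observable. A minimal sketch of consuming it, assuming an existing TableEnvironment; the class name, method name, and table names are hypothetical placeholders:

import org.apache.flink.core.execution.JobClient;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.TableResult;

public final class InsertIntoExample {

    // Submit an INSERT statement and use the attached JobClient to observe it.
    public static void runInsert(TableEnvironment tableEnv) throws Exception {
        TableResult result =
                tableEnv.executeSql("INSERT INTO sink_table SELECT * FROM source_table");

        // The JobClient set by executeInternal above is exposed through the result.
        result.getJobClient().ifPresent(
                jobClient -> System.out.println("Submitted with JobID " + jobClient.getJobID()));

        // Block until the INSERT job reaches a terminal state.
        result.await();
    }

    private InsertIntoExample() {
    }
}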
Use of org.apache.flink.core.execution.JobClient in project flink by apache.
From the class ContextEnvironment, method executeAsync:
@Override
public JobClient executeAsync(String jobName) throws Exception {
    validateAllowedExecution();
    final JobClient jobClient = super.executeAsync(jobName);
    if (!suppressSysout) {
        System.out.println("Job has been submitted with JobID " + jobClient.getJobID());
    }
    LOG.info("Job has been submitted with JobID {}", jobClient.getJobID());
    return jobClient;
}
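From user code, executeAsync returns as soon as the job is submitted, and the final JobExecutionResult can still be fetched through the returned JobClient. A minimal sketch, assuming Flink 1.12+ where JobClient.getJobExecutionResult() takes no arguments; the class name, job name, and the throwaway pipeline are hypothetical:

import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.core.execution.JobClient;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public final class ExecuteAsyncExample {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.fromElements(1, 2, 3).print();

        // Detached submission: returns once the job has been submitted.
        JobClient jobClient = env.executeAsync("execute-async-example");
        System.out.println("Job has been submitted with JobID " + jobClient.getJobID());

        // Optionally block for the final result anyway.
        JobExecutionResult result = jobClient.getJobExecutionResult().get();
        System.out.println("Net runtime: " + result.getNetRuntime() + " ms");
    }
}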
Use of org.apache.flink.core.execution.JobClient in project flink by apache.
From the class AdaptiveSchedulerITCase, method testStopWithSavepointNoError:
@Test
public void testStopWithSavepointNoError() throws Exception {
    StreamExecutionEnvironment env = getEnvWithSource(StopWithSavepointTestBehavior.NO_FAILURE);
    DummySource.resetForParallelism(PARALLELISM);
    JobClient client = env.executeAsync();
    DummySource.awaitRunning();
    final File savepointDirectory = tempFolder.newFolder("savepoint");
    final String savepoint = client
            .stopWithSavepoint(false, savepointDirectory.getAbsolutePath(), SavepointFormatType.CANONICAL)
            .get();
    assertThat(savepoint, containsString(savepointDirectory.getAbsolutePath()));
    assertThat(client.getJobStatus().get(), is(JobStatus.FINISHED));
}
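For comparison, JobClient can also take a savepoint without stopping the job. A minimal sketch, assuming Flink 1.15+ where the SavepointFormatType overload of triggerSavepoint exists; the class name, method name, and directory path are hypothetical:

import org.apache.flink.core.execution.JobClient;
import org.apache.flink.core.execution.SavepointFormatType;

public final class TriggerSavepointExample {

    // Trigger a savepoint while the job keeps running; returns the savepoint path.
    public static String triggerCanonicalSavepoint(JobClient jobClient) throws Exception {
        return jobClient
                .triggerSavepoint("/tmp/savepoints", SavepointFormatType.CANONICAL)
                .get();
    }

    private TriggerSavepointExample() {
    }
}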
Use of org.apache.flink.core.execution.JobClient in project flink by apache.
From the class AdaptiveSchedulerITCase, method testStopWithSavepointFailOnStop:
@Test
public void testStopWithSavepointFailOnStop() throws Exception {
    StreamExecutionEnvironment env = getEnvWithSource(StopWithSavepointTestBehavior.FAIL_ON_CHECKPOINT_COMPLETE);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 0L));
    DummySource.resetForParallelism(PARALLELISM);
    JobClient client = env.executeAsync();
    DummySource.awaitRunning();
    try {
        client.stopWithSavepoint(false, tempFolder.newFolder("savepoint").getAbsolutePath(), SavepointFormatType.CANONICAL).get();
        fail("Expect exception");
    } catch (ExecutionException e) {
        assertThat(e, containsCause(FlinkException.class));
    }
    // expect job to run again (maybe restart)
    CommonTestUtils.waitUntilCondition(
            () -> client.getJobStatus().get() == JobStatus.RUNNING,
            Deadline.fromNow(Duration.of(1, ChronoUnit.MINUTES)));
}
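The same wait utility covers other terminal transitions. A minimal sketch, reusing the test's own imports and CommonTestUtils.waitUntilCondition call, that cancels the job instead and waits for CANCELED; the helper name cancelAndAwait is hypothetical:

// Assumes the same imports as the test above; cancelAndAwait is a hypothetical helper.
private static void cancelAndAwait(JobClient client) throws Exception {
    // cancel() resolves when cancellation is acknowledged, not when the job is done,
    // so poll the status until it actually reaches CANCELED.
    client.cancel().get();
    CommonTestUtils.waitUntilCondition(
            () -> client.getJobStatus().get() == JobStatus.CANCELED,
            Deadline.fromNow(Duration.of(1, ChronoUnit.MINUTES)));
}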