Use of org.apache.flink.core.execution.JobClient in project flink by apache: class AdaptiveSchedulerITCase, method testStopWithSavepointFailOnCheckpoint.
@Test
public void testStopWithSavepointFailOnCheckpoint() throws Exception {
    StreamExecutionEnvironment env =
            getEnvWithSource(StopWithSavepointTestBehavior.FAIL_ON_CHECKPOINT);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 0L));
    DummySource.resetForParallelism(PARALLELISM);
    JobClient client = env.executeAsync();
    DummySource.awaitRunning();
    try {
        // the source fails the checkpoint backing the savepoint, so the
        // stop-with-savepoint operation must fail
        client.stopWithSavepoint(
                        false,
                        tempFolder.newFolder("savepoint").getAbsolutePath(),
                        SavepointFormatType.CANONICAL)
                .get();
        fail("Expect exception");
    } catch (ExecutionException e) {
        assertThat(e, containsCause(FlinkException.class));
    }
    // expect job to run again (maybe restart)
    CommonTestUtils.waitUntilCondition(
            () -> client.getJobStatus().get() == JobStatus.RUNNING,
            Deadline.fromNow(Duration.of(1, ChronoUnit.MINUTES)));
}
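The getEnvWithSource helper is part of the test class but not of this excerpt. Judging by the second example below, which builds the same pipeline inline, a minimal sketch might look like this (the behavior-taking DummySource constructor and the PARALLELISM constant are taken from the surrounding tests; this is a reconstruction, not the verbatim Flink helper):

// Hypothetical sketch of the helper used above: wire a DummySource with the
// requested stop-with-savepoint behavior into a discarding pipeline.
private static StreamExecutionEnvironment getEnvWithSource(
        StopWithSavepointTestBehavior behavior) {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(PARALLELISM);
    env.addSource(new DummySource(behavior)).addSink(new DiscardingSink<>());
    return env;
}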
Use of org.apache.flink.core.execution.JobClient in project flink by apache: class AdaptiveSchedulerITCase, method testStopWithSavepointFailOnFirstSavepointSucceedOnSecond.
@Test
public void testStopWithSavepointFailOnFirstSavepointSucceedOnSecond() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0L));
    env.setParallelism(PARALLELISM);
    env.addSource(new DummySource(StopWithSavepointTestBehavior.FAIL_ON_FIRST_CHECKPOINT_ONLY))
            .addSink(new DiscardingSink<>());
    DummySource.resetForParallelism(PARALLELISM);
    JobClient client = env.executeAsync();
    DummySource.awaitRunning();
    DummySource.resetForParallelism(PARALLELISM);
    final File savepointDirectory = tempFolder.newFolder("savepoint");
    try {
        client.stopWithSavepoint(
                        false,
                        savepointDirectory.getAbsolutePath(),
                        SavepointFormatType.CANONICAL)
                .get();
        fail("Expect failure of operation");
    } catch (ExecutionException e) {
        assertThat(e, containsCause(FlinkException.class));
    }
    DummySource.awaitRunning();
    // ensure failed savepoint files have been removed from the directory.
    // We execute this in a retry loop with a timeout, because the savepoint deletion happens
    // asynchronously and is not bound to the job lifecycle. See FLINK-22493 for more details.
    CommonTestUtils.waitUntilCondition(
            () -> isDirectoryEmpty(savepointDirectory),
            Deadline.fromNow(Duration.ofSeconds(10)));
    // trigger second savepoint
    final String savepoint =
            client.stopWithSavepoint(
                            false,
                            savepointDirectory.getAbsolutePath(),
                            SavepointFormatType.CANONICAL)
                    .get();
    assertThat(savepoint, containsString(savepointDirectory.getAbsolutePath()));
}
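The isDirectoryEmpty helper is likewise omitted from this excerpt. One plausible implementation using java.nio (a sketch, not the actual Flink code; requires java.nio.file.Files, java.nio.file.Path, and java.util.stream.Stream):

// Hypothetical sketch: the directory counts as empty once all files of the
// failed savepoint attempt have been cleaned up asynchronously.
private static boolean isDirectoryEmpty(File directory) throws IOException {
    try (Stream<Path> entries = Files.list(directory.toPath())) {
        return !entries.findAny().isPresent();
    }
}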
Use of org.apache.flink.core.execution.JobClient in project flink by apache: class ReactiveModeITCase, method testScaleUpOnAdditionalTaskManager.
/** Test that a job scales up when a TaskManager gets added to the cluster. */
@Test
public void testScaleUpOnAdditionalTaskManager() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    final DataStream<String> input = env.addSource(new DummySource());
    input.addSink(new DiscardingSink<>());
    final JobClient jobClient = env.executeAsync();
    waitUntilParallelismForVertexReached(
            miniClusterResource.getRestClusterClient(),
            jobClient.getJobID(),
            NUMBER_SLOTS_PER_TASK_MANAGER * INITIAL_NUMBER_TASK_MANAGERS);
    // scale up to 2 TaskManagers:
    miniClusterResource.getMiniCluster().startTaskManager();
    waitUntilParallelismForVertexReached(
            miniClusterResource.getRestClusterClient(),
            jobClient.getJobID(),
            NUMBER_SLOTS_PER_TASK_MANAGER * (INITIAL_NUMBER_TASK_MANAGERS + 1));
}
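waitUntilParallelismForVertexReached is another test-class helper the excerpt omits. A sketch of the idea, assuming the RestClusterClient#getJobDetails REST call and the JobDetailsInfo response type (a hypothetical reconstruction, not the verbatim Flink helper):

// Hypothetical sketch: poll the REST API until a vertex of the job reports
// the expected parallelism, reusing the waitUntilCondition/Deadline pattern
// from the tests above.
private static void waitUntilParallelismForVertexReached(
        RestClusterClient<?> restClusterClient, JobID jobId, int targetParallelism)
        throws Exception {
    CommonTestUtils.waitUntilCondition(
            () -> {
                JobDetailsInfo details = restClusterClient.getJobDetails(jobId).get();
                return details.getJobVertexInfos().stream()
                        .anyMatch(v -> v.getParallelism() == targetParallelism);
            },
            Deadline.fromNow(Duration.ofMinutes(1)));
}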
Use of org.apache.flink.core.execution.JobClient in project flink by apache: class ReactiveModeITCase, method testScaleLimitByMaxParallelism.
/**
 * Users can set a maxParallelism, and Reactive Mode must not run the job with a parallelism
 * higher than that maxParallelism.
 */
@Test
public void testScaleLimitByMaxParallelism() throws Exception {
    // test preparation: ensure we have 2 TaskManagers running
    startAdditionalTaskManager();
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    // set maxParallelism = 1 and assert the job never runs with more than one subtask
    final DataStream<String> input =
            env.addSource(new FailOnParallelExecutionSource()).setMaxParallelism(1);
    input.addSink(new DiscardingSink<>());
    final JobClient jobClient = env.executeAsync();
    waitUntilParallelismForVertexReached(
            miniClusterResource.getRestClusterClient(), jobClient.getJobID(), 1);
}
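FailOnParallelExecutionSource is defined inside ReactiveModeITCase and not shown here. A sketch of a source with that contract (a hypothetical reconstruction: fail fast if the source is ever deployed with more than one parallel subtask):

// Hypothetical sketch of a source that refuses to run in parallel.
private static class FailOnParallelExecutionSource
        extends RichParallelSourceFunction<String> {
    private volatile boolean running = true;

    @Override
    public void open(Configuration parameters) {
        if (getRuntimeContext().getNumberOfParallelSubtasks() > 1) {
            throw new IllegalStateException(
                    "This source must not run with a parallelism higher than 1");
        }
    }

    @Override
    public void run(SourceContext<String> ctx) throws Exception {
        while (running) {
            synchronized (ctx.getCheckpointLock()) {
                ctx.collect("test");
            }
            Thread.sleep(50);
        }
    }

    @Override
    public void cancel() {
        running = false;
    }
}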
Use of org.apache.flink.core.execution.JobClient in project flink by apache: class TableEnvironmentImpl, method executeQueryOperation.
private TableResultInternal executeQueryOperation(QueryOperation operation) {
    CollectModifyOperation sinkOperation = new CollectModifyOperation(operation);
    List<Transformation<?>> transformations =
            translate(Collections.singletonList(sinkOperation));
    final String defaultJobName = "collect";
    Pipeline pipeline =
            execEnv.createPipeline(
                    transformations, tableConfig.getConfiguration(), defaultJobName);
    try {
        JobClient jobClient = execEnv.executeAsync(pipeline);
        ResultProvider resultProvider = sinkOperation.getSelectResultProvider();
        resultProvider.setJobClient(jobClient);
        return TableResultImpl.builder()
                .jobClient(jobClient)
                .resultKind(ResultKind.SUCCESS_WITH_CONTENT)
                .schema(operation.getResolvedSchema())
                .resultProvider(resultProvider)
                .setPrintStyle(
                        PrintStyle.tableauWithTypeInferredColumnWidths(
                                // sinkOperation.getConsumedDataType() handles legacy types
                                DataTypeUtils.expandCompositeTypeToSchema(
                                        sinkOperation.getConsumedDataType()),
                                resultProvider.getRowDataStringConverter(),
                                PrintStyle.DEFAULT_MAX_COLUMN_WIDTH,
                                false,
                                isStreamingMode))
                .build();
    } catch (Exception e) {
        throw new TableException("Failed to execute sql", e);
    }
}
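For context, executeQueryOperation is the internal path behind collecting a SELECT result, so the JobClient ends up on the returned TableResult. A hypothetical caller-side sketch (source_table is a placeholder for any registered table):

TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
// a SELECT routed through executeSql ends up in executeQueryOperation,
// so the returned TableResult carries the JobClient of the "collect" job
TableResult result = tableEnv.executeSql("SELECT * FROM source_table");
result.getJobClient().ifPresent(
        jobClient -> System.out.println("collect job: " + jobClient.getJobID()));
try (CloseableIterator<Row> it = result.collect()) {
    while (it.hasNext()) {
        System.out.println(it.next());
    }
}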