Example usage of org.apache.flink.util.FlinkException in the Apache Flink project:
class RestClusterClientTest, method testDisposeSavepoint.
@Test
public void testDisposeSavepoint() throws Exception {
    // Savepoint path to dispose and the failure injected into the test handlers.
    final String savepointPath = "foobar";
    final String expectedErrorMessage = "Test exception.";
    final FlinkException expectedError = new FlinkException(expectedErrorMessage);

    final TestSavepointDisposalHandlers disposalHandlers =
            new TestSavepointDisposalHandlers(savepointPath);
    final TestSavepointDisposalHandlers.TestSavepointDisposalTriggerHandler triggerHandler =
            disposalHandlers.new TestSavepointDisposalTriggerHandler();
    // The status handler answers the three disposal requests in order:
    // success, a serialized user-level failure, and a plain failure.
    final TestSavepointDisposalHandlers.TestSavepointDisposalStatusHandler statusHandler =
            disposalHandlers.new TestSavepointDisposalStatusHandler(
                    OptionalFailure.of(AsynchronousOperationInfo.complete()),
                    OptionalFailure.of(
                            AsynchronousOperationInfo.completeExceptional(
                                    new SerializedThrowable(expectedError))),
                    OptionalFailure.ofFailure(expectedError));

    try (TestRestServerEndpoint restServerEndpoint =
            createRestServerEndpoint(statusHandler, triggerHandler)) {
        final RestClusterClient<?> client =
                createRestClusterClient(restServerEndpoint.getServerAddress().getPort());
        try {
            // 1) Successful disposal completes with an acknowledge.
            assertThat(client.disposeSavepoint(savepointPath).get(), is(Acknowledge.get()));

            // 2) A user-level failure is propagated with its original message.
            final CompletableFuture<Acknowledge> failingDisposal =
                    client.disposeSavepoint(savepointPath);
            try {
                failingDisposal.get();
                fail("Expected an exception");
            } catch (ExecutionException ee) {
                assertThat(
                        ExceptionUtils.findThrowableWithMessage(ee, expectedErrorMessage)
                                .isPresent(),
                        is(true));
            }

            // 3) A framework-level failure surfaces as a RestClientException.
            try {
                client.disposeSavepoint(savepointPath).get();
                fail("Expected an exception.");
            } catch (ExecutionException ee) {
                assertThat(
                        ExceptionUtils.findThrowable(ee, RestClientException.class).isPresent(),
                        is(true));
            }
        } finally {
            client.close();
        }
    }
}
Example usage of org.apache.flink.util.FlinkException in the Apache Flink project:
class DefaultPackagedProgramRetrieverTest, method testFailIfJobDirDoesNotHaveEntryClass.
@Test
public void testFailIfJobDirDoesNotHaveEntryClass() {
    // The retriever is pointed at a directory that does not contain the requested
    // job class, so creation is expected to fail with a FlinkException.
    final String jobClassName = testJobEntryClassClasspathProvider.getJobClassName();
    try {
        DefaultPackagedProgramRetriever.create(
                noEntryClassClasspathProvider.getDirectory(),
                jobClassName,
                ClasspathProvider.parametersForTestJob("suffix"),
                new Configuration());
        fail("This case should throw exception !");
    } catch (FlinkException e) {
        // The error message must name the missing job class.
        assertThat(
                e,
                FlinkMatchers.containsMessage(
                        String.format(
                                "Could not find the provided job class (%s) in the user lib directory.",
                                jobClassName)));
    }
}
Example usage of org.apache.flink.util.FlinkException in the Apache Flink project:
class KafkaConnectorOptionsUtil, method initializePartitioner.
/**
 * Instantiates a {@link FlinkKafkaPartitioner} from its fully qualified class name.
 *
 * @param name fully qualified class name of the partitioner
 * @param classLoader class loader used to resolve and instantiate the class
 * @return a new partitioner instance of the requested class
 * @throws ValidationException if the class cannot be found, does not extend
 *     {@link FlinkKafkaPartitioner}, or cannot be instantiated
 */
private static <T> FlinkKafkaPartitioner<T> initializePartitioner(String name, ClassLoader classLoader) {
    try {
        // Resolve the class first so that a type mismatch produces a dedicated,
        // descriptive validation error before any instantiation is attempted.
        final Class<?> partitionerClass = Class.forName(name, true, classLoader);
        if (!FlinkKafkaPartitioner.class.isAssignableFrom(partitionerClass)) {
            throw new ValidationException(
                    String.format(
                            "Sink partitioner class '%s' should extend from the required class %s",
                            name, FlinkKafkaPartitioner.class.getName()));
        }
        @SuppressWarnings("unchecked")
        final FlinkKafkaPartitioner<T> partitioner =
                InstantiationUtil.instantiate(name, FlinkKafkaPartitioner.class, classLoader);
        return partitioner;
    } catch (ClassNotFoundException | FlinkException e) {
        // Resolution and instantiation failures are both reported as validation errors.
        throw new ValidationException(
                String.format("Could not find and instantiate partitioner class '%s'", name), e);
    }
}
Example usage of org.apache.flink.util.FlinkException in the Apache Flink project:
class AdaptiveScheduler, method closeAsync.
@Override
public CompletableFuture<Void> closeAsync() {
    LOG.debug("Closing the AdaptiveScheduler. Trying to suspend the current job execution.");

    // Suspending the current state is expected to drive the scheduler into the
    // terminal Finished state before any cleanup begins.
    state.suspend(new FlinkException("AdaptiveScheduler is being stopped."));
    Preconditions.checkState(
            state instanceof Finished,
            "Scheduler state should be finished after calling state.suspend.");

    backgroundTask.abort();

    // Stop the checkpoint services only once the background task has terminated.
    final CompletableFuture<Void> checkpointServicesStopFuture =
            FutureUtils.runAfterwardsAsync(
                    backgroundTask.getTerminationFuture(),
                    () -> stopCheckpointServicesSafely(jobTerminationFuture.get()),
                    getMainThreadExecutor());

    // Close the checkpoints cleaner after the checkpoint services have been stopped.
    return FutureUtils.composeAfterwards(
            checkpointServicesStopFuture, checkpointsCleaner::closeAsync);
}
Example usage of org.apache.flink.util.FlinkException in the Apache Flink project:
class SchedulerBase, method closeAsync.
@Override
public CompletableFuture<Void> closeAsync() {
    mainThreadExecutor.assertRunningInMainThread();

    final FlinkException suspensionCause = new FlinkException("Scheduler is being stopped.");

    // Once the execution graph has terminated, shut down the checkpoint services on
    // the main thread, then close the checkpoints cleaner.
    final CompletableFuture<Void> checkpointServicesShutdownFuture =
            FutureUtils.composeAfterwards(
                    executionGraph
                            .getTerminationFuture()
                            .thenAcceptAsync(
                                    this::shutDownCheckpointServices, getMainThreadExecutor()),
                    checkpointsCleaner::closeAsync);
    FutureUtils.assertNoException(checkpointServicesShutdownFuture);

    incrementVersionsOfAllVertices();
    cancelAllPendingSlotRequestsInternal();
    executionGraph.suspend(suspensionCause);
    operatorCoordinatorHandler.disposeAllOperatorCoordinators();

    return checkpointServicesShutdownFuture;
}
Aggregations