Use of java.util.concurrent.CompletionException in the Apache Flink project.
Method triggerCheckpoint of class SchedulerBase.
/**
 * Triggers a manual checkpoint for the job and returns the external pointer of the
 * resulting completed checkpoint.
 *
 * <p>Must be called from the main thread; the result handler also runs on the main
 * thread executor.
 *
 * @return future completing with the checkpoint's external pointer; completes
 *     exceptionally (wrapped in a {@link CompletionException}) if triggering fails
 * @throws IllegalStateException if the job has no checkpoint coordinator, i.e. it is
 *     not a streaming job
 */
@Override
public CompletableFuture<String> triggerCheckpoint() {
    mainThreadExecutor.assertRunningInMainThread();

    final JobID jobID = jobGraph.getJobID();
    final CheckpointCoordinator coordinator = executionGraph.getCheckpointCoordinator();
    if (coordinator == null) {
        throw new IllegalStateException(String.format("Job %s is not a streaming job.", jobID));
    }

    log.info("Triggering a manual checkpoint for job {}.", jobID);

    final CompletableFuture<String> externalPointerFuture =
            coordinator.triggerCheckpoint(false).thenApply(CompletedCheckpoint::getExternalPointer);

    // Re-throw any failure wrapped in a CompletionException so the returned future
    // completes exceptionally; successful results are passed through unchanged.
    return externalPointerFuture.handleAsync(
            (externalPointer, failure) -> {
                if (failure != null) {
                    throw new CompletionException(failure);
                }
                return externalPointer;
            },
            mainThreadExecutor);
}
Use of java.util.concurrent.CompletionException in the Apache Flink project.
Method triggerSavepoint of class SchedulerBase.
/**
 * Triggers a savepoint for the job, optionally cancelling the job once the savepoint
 * has completed successfully.
 *
 * <p>For a cancel-with-savepoint the periodic checkpoint scheduler is stopped before
 * the savepoint is triggered and restarted if the savepoint fails, so that regular
 * checkpointing resumes while the job keeps running.
 *
 * @param targetDirectory directory the savepoint is written to
 * @param cancelJob whether the job should be cancelled after a successful savepoint
 * @param formatType binary format type of the savepoint
 * @return future completing with the savepoint's external pointer; completes
 *     exceptionally (wrapped in a CompletionException) if the savepoint fails
 */
@Override
public CompletableFuture<String> triggerSavepoint(final String targetDirectory, final boolean cancelJob, final SavepointFormatType formatType) {
mainThreadExecutor.assertRunningInMainThread();
final CheckpointCoordinator checkpointCoordinator = executionGraph.getCheckpointCoordinator();
// Fail fast if the savepoint preconditions do not hold (delegated check).
StopWithSavepointTerminationManager.checkSavepointActionPreconditions(checkpointCoordinator, targetDirectory, getJobId(), log);
log.info("Triggering {}savepoint for job {}.", cancelJob ? "cancel-with-" : "", jobGraph.getJobID());
if (cancelJob) {
// Suspend periodic checkpoints so none interleave with the savepoint/cancellation.
stopCheckpointScheduler();
}
return checkpointCoordinator.triggerSavepoint(targetDirectory, formatType).thenApply(CompletedCheckpoint::getExternalPointer).handleAsync((path, throwable) -> {
if (throwable != null) {
if (cancelJob) {
// Savepoint failed but the job keeps running: re-enable periodic checkpointing.
startCheckpointScheduler();
}
// Re-throw so the returned future completes exceptionally.
throw new CompletionException(throwable);
} else if (cancelJob) {
// Savepoint succeeded; now cancel the job as requested.
log.info("Savepoint stored in {}. Now cancelling {}.", path, jobGraph.getJobID());
cancel();
}
return path;
}, mainThreadExecutor);
}
Use of java.util.concurrent.CompletionException in the Apache Flink project.
Method triggerCheckpoint of class StateWithExecutionGraph.
/**
 * Triggers a checkpoint for the job and returns the external pointer of the completed
 * checkpoint.
 *
 * @return future completing with the checkpoint's external pointer; completes
 *     exceptionally (wrapped in a {@link CompletionException}) if triggering fails
 * @throws IllegalStateException if the job has no checkpoint coordinator, i.e. it is
 *     not a streaming job
 */
CompletableFuture<String> triggerCheckpoint() {
    final JobID jobId = executionGraph.getJobID();
    final CheckpointCoordinator coordinator = executionGraph.getCheckpointCoordinator();

    if (coordinator == null) {
        throw new IllegalStateException(String.format("Job %s is not a streaming job.", jobId));
    }

    logger.info("Triggering a checkpoint for job {}.", jobId);

    // Unwrap the external pointer on success; on failure re-wrap the cause so the
    // returned future completes exceptionally. The handler runs on the main thread.
    return coordinator
            .triggerCheckpoint(false)
            .thenApply(CompletedCheckpoint::getExternalPointer)
            .handleAsync(
                    (externalPointer, failure) -> {
                        if (failure != null) {
                            throw new CompletionException(failure);
                        }
                        return externalPointer;
                    },
                    context.getMainThreadExecutor());
}
Use of java.util.concurrent.CompletionException in the Apache Flink project.
Method testConcurrentExecutionConfigSerialization of class ParameterToolTest.
/**
 * Verifies that the ParameterTool can be serialized and accessed concurrently from
 * several threads at the same time. Regression test for FLINK-7943.
 */
@Test
public void testConcurrentExecutionConfigSerialization() throws ExecutionException, InterruptedException {
    // Build 10 random "--key value" argument pairs.
    final int numInputs = 10;
    final Collection<String> arguments = new ArrayList<>(numInputs);
    for (int i = 0; i < numInputs; i++) {
        arguments.add("--" + UUID.randomUUID());
        arguments.add(UUID.randomUUID().toString());
    }

    final ParameterTool parameterTool =
            (ParameterTool) createParameterToolFromArgs(arguments.toArray(new String[0]));

    final int numThreads = 5;
    final int numSerializations = 100;
    final ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    final Collection<CompletableFuture<Void>> serializationFutures = new ArrayList<>(numSerializations);
    try {
        // Run the serialization rounds concurrently on the shared thread pool.
        for (int run = 0; run < numSerializations; run++) {
            serializationFutures.add(
                    CompletableFuture.runAsync(
                            () -> {
                                try {
                                    serializeDeserialize(parameterTool);
                                } catch (Exception e) {
                                    // Surface checked exceptions through the future.
                                    throw new CompletionException(e);
                                }
                            },
                            executorService));
        }

        // Wait for every round; a failure in any of them fails the test via get().
        for (final CompletableFuture<Void> serializationFuture : serializationFutures) {
            serializationFuture.get();
        }
    } finally {
        executorService.shutdownNow();
        executorService.awaitTermination(1000L, TimeUnit.MILLISECONDS);
    }
}
Use of java.util.concurrent.CompletionException in the Apache Flink project.
Method releaseAndTryRemoveAll of class KubernetesStateHandleStore.
/**
 * Removes all state handle entries from the ConfigMap and discards the associated state.
 *
 * <p>Entries whose value cannot be deserialized are logged and left untouched in the
 * ConfigMap; all other entries matching the key filter are removed, and their state
 * handles are discarded only after the ConfigMap update has succeeded.
 *
 * @throws Exception when removing the keys or discarding the state failed
 */
@Override
public void releaseAndTryRemoveAll() throws Exception {
// Handles that were deserialized successfully; discarded after the update succeeds.
final List<RetrievableStateHandle<T>> validStateHandles = new ArrayList<>();
kubeClient.checkAndUpdateConfigMap(configMapName, c -> {
if (isValidOperation(c)) {
// Mutate a copy so entries can be dropped while streaming the original data.
final Map<String, String> updateData = new HashMap<>(c.getData());
c.getData().entrySet().stream().filter(entry -> configMapKeyFilter.test(entry.getKey())).forEach(entry -> {
try {
validStateHandles.add(deserializeObject(entry.getValue()));
updateData.remove(entry.getKey());
} catch (IOException e) {
// Corrupted entries are skipped: they stay in updateData and are only logged.
LOG.warn("ConfigMap {} contained corrupted data. Ignoring the key {}.", configMapName, entry.getKey());
}
});
// Replace the ConfigMap data with the remaining (non-matching or corrupted) entries.
c.getData().clear();
c.getData().putAll(updateData);
return Optional.of(c);
}
// Signal "no update" to checkAndUpdateConfigMap.
return Optional.empty();
}).whenComplete((succeed, ignore) -> {
// NOTE(review): if checkAndUpdateConfigMap completed exceptionally, 'succeed' is
// null and unboxing it below throws NPE inside this callback; per
// CompletableFuture.whenComplete semantics the original failure still propagates
// from get(), but an explicit null check would be cleaner — confirm.
if (succeed) {
// Only discard state after the ConfigMap update went through, collecting all
// failures into one suppressed-exception chain.
Exception exception = null;
for (RetrievableStateHandle<T> stateHandle : validStateHandles) {
try {
stateHandle.discardState();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
}
if (exception != null) {
// Fail the future; surfaces as an ExecutionException from get() below.
throw new CompletionException(new KubernetesException("Could not properly remove all state handles.", exception));
}
}
}).get();
}
End of aggregated usage examples.