Usage of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in the Apache Flink project:
class SchedulingPipelinedRegionComputeUtil, method buildOutEdgesDesc.
/**
 * Builds, for every region, the list of indices of the regions that consume its
 * reconnectable result partitions. The result is index-aligned with {@code regionList}
 * and acts as a per-region adjacency list.
 *
 * @param vertexToRegion mapping from each scheduling vertex to the region containing it
 * @param regionList all regions; the returned outer list is parallel to this list
 * @param executionVertexRetriever resolves an {@link ExecutionVertexID} to its vertex
 * @return per-region lists of consumer-region indices (duplicates are possible)
 */
private static List<List<Integer>> buildOutEdgesDesc(
        final Map<SchedulingExecutionVertex, Set<SchedulingExecutionVertex>> vertexToRegion,
        final List<Set<SchedulingExecutionVertex>> regionList,
        final Function<ExecutionVertexID, ? extends SchedulingExecutionVertex> executionVertexRetriever) {

    // Record each region's position; identity semantics are deliberate because the
    // regions are mutable sets whose hash codes could change.
    final Map<Set<SchedulingExecutionVertex>, Integer> indexByRegion = new IdentityHashMap<>();
    for (int regionIdx = 0; regionIdx < regionList.size(); regionIdx++) {
        indexByRegion.put(regionList.get(regionIdx), regionIdx);
    }

    final List<List<Integer>> outEdges = new ArrayList<>(regionList.size());
    for (Set<SchedulingExecutionVertex> region : regionList) {
        final List<Integer> consumerRegionIndices = new ArrayList<>();
        for (SchedulingExecutionVertex vertex : region) {
            for (SchedulingResultPartition producedResult : vertex.getProducedResults()) {
                // Only reconnectable result types can create edges between regions.
                if (!producedResult.getResultType().isReconnectable()) {
                    continue;
                }
                final Optional<ConsumerVertexGroup> consumerGroup =
                        producedResult.getConsumerVertexGroup();
                if (!consumerGroup.isPresent()) {
                    continue;
                }
                for (ExecutionVertexID consumerId : consumerGroup.get()) {
                    final SchedulingExecutionVertex consumer =
                            executionVertexRetriever.apply(consumerId);
                    // A consumer outside the known regions cannot be merged; skip the
                    // rest of this consumer group entirely.
                    if (!vertexToRegion.containsKey(consumer)) {
                        break;
                    }
                    if (!region.contains(consumer)) {
                        consumerRegionIndices.add(
                                indexByRegion.get(vertexToRegion.get(consumer)));
                    }
                }
            }
        }
        outEdges.add(consumerRegionIndices);
    }
    return outEdges;
}
Usage of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in the Apache Flink project:
class DefaultScheduler, method restartTasksWithDelay.
/**
 * Schedules a delayed restart of the vertices named in the failure handling result.
 * The affected vertices are versioned first so that stale deployments can be detected,
 * then cancelled; the actual restart runs on the main thread after cancellation
 * completes and the configured restart delay has elapsed.
 *
 * @param failureHandlingResult describes which vertices to restart, the restart delay,
 *     and whether this is a global failure
 */
private void restartTasksWithDelay(final FailureHandlingResult failureHandlingResult) {
    final Set<ExecutionVertexID> verticesToRestart = failureHandlingResult.getVerticesToRestart();

    // Bump the version of every affected vertex; outdated deployments compare against
    // these versions and refuse to proceed.
    final Set<ExecutionVertexVersion> vertexVersions =
            new HashSet<>(
                    executionVertexVersioner.recordVertexModifications(verticesToRestart).values());
    final boolean isGlobalRecovery = failureHandlingResult.isGlobalFailure();

    addVerticesToRestartPending(verticesToRestart);

    final CompletableFuture<?> cancelFuture = cancelTasksAsync(verticesToRestart);

    // Snapshot now, while the current execution attempts are still the failed ones.
    final FailureHandlingResultSnapshot snapshot =
            FailureHandlingResultSnapshot.create(
                    failureHandlingResult,
                    id -> this.getExecutionVertex(id).getCurrentExecutionAttempt());

    final Runnable restartAction =
            () -> {
                archiveFromFailureHandlingResult(snapshot);
                restartTasks(vertexVersions, isGlobalRecovery);
            };

    delayExecutor.schedule(
            () ->
                    FutureUtils.assertNoException(
                            cancelFuture.thenRunAsync(restartAction, getMainThreadExecutor())),
            failureHandlingResult.getRestartDelayMS(),
            TimeUnit.MILLISECONDS);
}
Usage of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in the Apache Flink project:
class DefaultScheduler, method deployOrHandleError.
/**
 * Creates a completion callback that deploys the vertex of the given deployment handle,
 * or routes a preceding failure to the task-deployment failure handler. The deployment
 * is silently skipped when the vertex version recorded in the handle has been
 * superseded by a newer modification.
 *
 * @param deploymentHandle carries the vertex and the version required for deployment
 * @return a {@link BiFunction} suitable for {@code CompletableFuture.handle}
 */
private BiFunction<Object, Throwable, Void> deployOrHandleError(
        final DeploymentHandle deploymentHandle) {
    final ExecutionVertexVersion requiredVertexVersion =
            deploymentHandle.getRequiredVertexVersion();
    final ExecutionVertexID executionVertexId = requiredVertexVersion.getExecutionVertexId();

    return (ignored, throwable) -> {
        // A newer modification of this vertex makes the pending deployment stale.
        if (executionVertexVersioner.isModified(requiredVertexVersion)) {
            log.debug(
                    "Refusing to deploy execution vertex {} because this deployment was "
                            + "superseded by another deployment",
                    executionVertexId);
            return null;
        }

        if (throwable != null) {
            handleTaskDeploymentFailure(executionVertexId, throwable);
        } else {
            deployTaskSafe(executionVertexId);
        }
        return null;
    };
}
Usage of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in the Apache Flink project:
class DefaultPreferredLocationsRetriever, method getInputLocationFutures.
/**
 * Collects the task manager location futures of the given producers, skipping any
 * producer in the ignore set. If more than {@code MAX_DISTINCT_LOCATIONS_TO_CONSIDER}
 * locations accumulate, an empty collection is returned instead, because waiting on
 * that many futures could stall scheduling for too long.
 *
 * @param producersToIgnore producers whose locations must not be considered
 * @param producers the producer vertices whose locations are requested
 * @return the collected location futures, or an empty collection if the limit is hit
 */
private Collection<CompletableFuture<TaskManagerLocation>> getInputLocationFutures(
        final Set<ExecutionVertexID> producersToIgnore,
        final Collection<ExecutionVertexID> producers) {
    final Collection<CompletableFuture<TaskManagerLocation>> locationFutures = new ArrayList<>();
    for (ExecutionVertexID producer : producers) {
        if (producersToIgnore.contains(producer)) {
            continue;
        }
        inputsLocationsRetriever.getTaskManagerLocation(producer).ifPresent(locationFutures::add);
        // Bail out once too many distinct locations pile up — waiting for all of them
        // to complete could take a long time.
        if (locationFutures.size() > MAX_DISTINCT_LOCATIONS_TO_CONSIDER) {
            return Collections.emptyList();
        }
    }
    return locationFutures;
}
Usage of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in the Apache Flink project:
class SchedulerBase, method getInvolvedExecutionJobVertices.
/**
 * Maps a set of execution vertex IDs to the distinct job vertices they belong to.
 *
 * @param executionVertices the execution vertex IDs to resolve
 * @return the set of {@link ExecutionJobVertex} instances covering the given vertices
 */
private Set<ExecutionJobVertex> getInvolvedExecutionJobVertices(
        final Set<ExecutionVertexID> executionVertices) {
    final Set<ExecutionJobVertex> involvedJobVertices = new HashSet<>();
    // The set naturally de-duplicates vertices that share a job vertex.
    executionVertices.forEach(
            id -> involvedJobVertices.add(getExecutionVertex(id).getJobVertex()));
    return involvedJobVertices;
}
Aggregations