Use of org.apache.flink.runtime.scheduler.DefaultVertexParallelismStore in project flink by apache:
the class AdaptiveScheduler, method computeReactiveModeVertexParallelismStore.
/**
* Creates the parallelism store for a set of vertices, optionally with a flag to leave the
* vertex parallelism unchanged. If the flag is set, the parallelisms must be valid for
* execution.
*
* <p>We need to set parallelism to the max possible value when requesting resources, but when
* executing the graph we should respect what we are actually given.
*
* @param vertices The vertices to store parallelism information for
* @param defaultMaxParallelismFunc a function for computing a default max parallelism if none
*     is specified on a given vertex
* @param adjustParallelism whether to scale each vertex's parallelism up to its max parallelism
* @return The parallelism store.
*/
@VisibleForTesting
static VertexParallelismStore computeReactiveModeVertexParallelismStore(
        Iterable<JobVertex> vertices,
        Function<JobVertex, Integer> defaultMaxParallelismFunc,
        boolean adjustParallelism) {
    DefaultVertexParallelismStore store = new DefaultVertexParallelismStore();

    for (JobVertex vertex : vertices) {
        // if no max parallelism was configured by the user, we calculate and set a default
        final int maxParallelism =
                vertex.getMaxParallelism() == JobVertex.MAX_PARALLELISM_DEFAULT
                        ? defaultMaxParallelismFunc.apply(vertex)
                        : vertex.getMaxParallelism();

        // If the parallelism has already been adjusted, respect what has been configured in
        // the vertex. Otherwise, scale it to the max parallelism to attempt to be "as
        // parallel as possible".
        final int parallelism;
        if (adjustParallelism) {
            parallelism = maxParallelism;
        } else {
            parallelism = vertex.getParallelism();
        }
        VertexParallelismInformation parallelismInfo =
                new DefaultVertexParallelismInfo(
                        parallelism,
                        maxParallelism,
                        // Allow rescaling only to a max parallelism that is not lower than
                        // the one declared here; this prevents requesting more resources
                        // based on the computed default, when actually fewer are necessary.
                        (newMax) ->
                                newMax >= maxParallelism
                                        ? Optional.empty()
                                        : Optional.of(
                                                "Cannot lower max parallelism in Reactive mode."));

        store.setParallelismInfo(vertex.getID(), parallelismInfo);
    }

    return store;
}
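For context, the following is a minimal sketch (not taken from the Flink sources) of how this helper might be exercised from a test. Since the method is package-private and annotated @VisibleForTesting, the sketch assumes it lives in the same package as AdaptiveScheduler; the class name ReactiveParallelismStoreSketch and the fixed default of 128 are illustrative stand-ins, and the expected values in the comments follow from the logic above.

import java.util.Collections;
import java.util.function.Function;

import org.apache.flink.runtime.jobgraph.JobVertex;
import org.apache.flink.runtime.scheduler.VertexParallelismInformation;
import org.apache.flink.runtime.scheduler.VertexParallelismStore;

class ReactiveParallelismStoreSketch {

    static void demo() {
        // A vertex with a user-configured parallelism of 2 and no explicit max parallelism.
        JobVertex vertex = new JobVertex("map");
        vertex.setParallelism(2);

        // Illustrative stand-in for the scheduler's default max parallelism function.
        Function<JobVertex, Integer> defaultMax = v -> 128;

        // adjustParallelism = true: the parallelism is scaled up to the max parallelism,
        // which is what Reactive mode uses when requesting resources.
        VertexParallelismStore forRequesting =
                AdaptiveScheduler.computeReactiveModeVertexParallelismStore(
                        Collections.singletonList(vertex), defaultMax, true);
        VertexParallelismInformation requestInfo =
                forRequesting.getParallelismInfo(vertex.getID());
        // requestInfo.getParallelism() == 128, requestInfo.getMaxParallelism() == 128

        // adjustParallelism = false: the configured parallelism of 2 is kept for execution,
        // while the max parallelism still comes from the default function.
        VertexParallelismStore forExecution =
                AdaptiveScheduler.computeReactiveModeVertexParallelismStore(
                        Collections.singletonList(vertex), defaultMax, false);
        // forExecution.getParallelismInfo(vertex.getID()).getParallelism() == 2
    }
}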