Use of org.apache.flink.api.common.operators.ResourceSpec in project flink by apache.
From the class StreamingJobGraphGeneratorTest, the method testManagedMemoryFractionForUnknownResourceSpec, which checks the managed memory fractions written into each operator's StreamConfig when every vertex uses ResourceSpec.UNKNOWN.
@Test
public void testManagedMemoryFractionForUnknownResourceSpec() throws Exception {
    final ResourceSpec resource = ResourceSpec.UNKNOWN;
    final List<ResourceSpec> resourceSpecs = Arrays.asList(resource, resource, resource, resource);
    final Configuration taskManagerConfig = new Configuration() {
        {
            set(TaskManagerOptions.MANAGED_MEMORY_CONSUMER_WEIGHTS, new HashMap<String, String>() {
                {
                    put(TaskManagerOptions.MANAGED_MEMORY_CONSUMER_NAME_DATAPROC, "6");
                    put(TaskManagerOptions.MANAGED_MEMORY_CONSUMER_NAME_PYTHON, "4");
                }
            });
        }
    };

    final List<Map<ManagedMemoryUseCase, Integer>> operatorScopeManagedMemoryUseCaseWeights = new ArrayList<>();
    final List<Set<ManagedMemoryUseCase>> slotScopeManagedMemoryUseCases = new ArrayList<>();

    // source: batch
    operatorScopeManagedMemoryUseCaseWeights.add(Collections.singletonMap(ManagedMemoryUseCase.OPERATOR, 1));
    slotScopeManagedMemoryUseCases.add(Collections.emptySet());

    // map1: batch, python
    operatorScopeManagedMemoryUseCaseWeights.add(Collections.singletonMap(ManagedMemoryUseCase.OPERATOR, 1));
    slotScopeManagedMemoryUseCases.add(Collections.singleton(ManagedMemoryUseCase.PYTHON));

    // map2: python
    operatorScopeManagedMemoryUseCaseWeights.add(Collections.emptyMap());
    slotScopeManagedMemoryUseCases.add(Collections.singleton(ManagedMemoryUseCase.PYTHON));

    // map3: batch
    operatorScopeManagedMemoryUseCaseWeights.add(Collections.singletonMap(ManagedMemoryUseCase.OPERATOR, 1));
    slotScopeManagedMemoryUseCases.add(Collections.emptySet());

    // slotSharingGroup1 contains batch and python use cases:
    //   v1(source[batch] -> map1[batch, python]), v2(map2[python])
    // slotSharingGroup2 contains the batch use case only: v3(map3[batch])
    final JobGraph jobGraph = createJobGraphForManagedMemoryFractionTest(
            resourceSpecs, operatorScopeManagedMemoryUseCaseWeights, slotScopeManagedMemoryUseCases);
    final JobVertex vertex1 = jobGraph.getVerticesSortedTopologicallyFromSources().get(0);
    final JobVertex vertex2 = jobGraph.getVerticesSortedTopologicallyFromSources().get(1);
    final JobVertex vertex3 = jobGraph.getVerticesSortedTopologicallyFromSources().get(2);

    final StreamConfig sourceConfig = new StreamConfig(vertex1.getConfiguration());
    verifyFractions(sourceConfig, 0.6 / 2, 0.0, 0.0, taskManagerConfig);

    final StreamConfig map1Config = Iterables.getOnlyElement(
            sourceConfig
                    .getTransitiveChainedTaskConfigs(StreamingJobGraphGeneratorTest.class.getClassLoader())
                    .values());
    verifyFractions(map1Config, 0.6 / 2, 0.4, 0.0, taskManagerConfig);

    final StreamConfig map2Config = new StreamConfig(vertex2.getConfiguration());
    verifyFractions(map2Config, 0.0, 0.4, 0.0, taskManagerConfig);

    final StreamConfig map3Config = new StreamConfig(vertex3.getConfiguration());
    verifyFractions(map3Config, 1.0, 0.0, 0.0, taskManagerConfig);
}
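The expected fractions in the assertions above follow from the configured consumer weights (DATAPROC = 6, PYTHON = 4). In slotSharingGroup1 both use cases are present, so the operator-scope (batch) consumers share 6 / (6 + 4) = 0.6 of the slot's managed memory and the python use case gets 0.4; the 0.6 is then split evenly between the two operators that declared an OPERATOR use case. The following is a minimal, self-contained sketch of that arithmetic; it is not Flink's internal fraction-calculation code, and the class name is made up for illustration.

// Hypothetical illustration of the weight arithmetic behind the expected fractions above;
// this is NOT the StreamingJobGraphGenerator implementation.
public class ManagedMemoryFractionArithmetic {

    public static void main(String[] args) {
        // Consumer weights from taskManagerConfig: DATAPROC (batch operators) = 6, PYTHON = 4.
        final double dataprocWeight = 6;
        final double pythonWeight = 4;

        // slotSharingGroup1 declares both use cases, so its managed memory is first split
        // between them by weight.
        final double operatorScopeShare = dataprocWeight / (dataprocWeight + pythonWeight); // 0.6
        final double pythonShare = pythonWeight / (dataprocWeight + pythonWeight);          // 0.4

        // The operator-scope share is divided among the operators that declared an OPERATOR
        // use case, proportionally to their weights (source = 1, map1 = 1).
        final double sourceFraction = operatorScopeShare * 1.0 / (1 + 1); // 0.3, i.e. 0.6 / 2
        final double map1Fraction = operatorScopeShare * 1.0 / (1 + 1);   // 0.3, i.e. 0.6 / 2
        // map2 declared only the slot-scope PYTHON use case.
        final double map2PythonFraction = pythonShare;                    // 0.4

        // slotSharingGroup2 declares only the batch use case, so map3 gets the whole slot.
        final double map3Fraction = 1.0;

        System.out.printf("source=%.2f map1=%.2f map2(python)=%.2f map3=%.2f%n",
                sourceFraction, map1Fraction, map2PythonFraction, map3Fraction);
    }
}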
Use of org.apache.flink.api.common.operators.ResourceSpec in project flink by apache.
From the class StreamGraph, the method createIterationSourceAndSink, which creates the co-located head and tail nodes of an iteration and assigns parallelism, max parallelism, and resources to them.
public Tuple2<StreamNode, StreamNode> createIterationSourceAndSink(
        int loopId, int sourceId, int sinkId, long timeout, int parallelism, int maxParallelism,
        ResourceSpec minResources, ResourceSpec preferredResources) {
    final String coLocationGroup = "IterationCoLocationGroup-" + loopId;

    StreamNode source = this.addNode(sourceId, null, coLocationGroup, StreamIterationHead.class,
            null, ITERATION_SOURCE_NAME_PREFIX + "-" + loopId);
    sources.add(source.getId());
    setParallelism(source.getId(), parallelism);
    setMaxParallelism(source.getId(), maxParallelism);
    setResources(source.getId(), minResources, preferredResources);

    StreamNode sink = this.addNode(sinkId, null, coLocationGroup, StreamIterationTail.class,
            null, ITERATION_SINK_NAME_PREFIX + "-" + loopId);
    sinks.add(sink.getId());
    setParallelism(sink.getId(), parallelism);
    setMaxParallelism(sink.getId(), parallelism);

    // The tail node is always in the same slot sharing group with the head node
    // so that they can share resources (they do not use non-sharable resources,
    // i.e. managed memory). There is no contract on how the resources should be
    // divided for head and tail nodes at the moment. To be simple, we assign all
    // resources to the head node and set the tail node resources to be zero if
    // resources are specified.
    final ResourceSpec tailResources =
            minResources.equals(ResourceSpec.UNKNOWN) ? ResourceSpec.UNKNOWN : ResourceSpec.ZERO;
    setResources(sink.getId(), tailResources, tailResources);

    iterationSourceSinkPairs.add(new Tuple2<>(source, sink));

    this.vertexIDtoBrokerID.put(source.getId(), "broker-" + loopId);
    this.vertexIDtoBrokerID.put(sink.getId(), "broker-" + loopId);
    this.vertexIDtoLoopTimeout.put(source.getId(), timeout);
    this.vertexIDtoLoopTimeout.put(sink.getId(), timeout);

    return new Tuple2<>(source, sink);
}
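The branch on minResources in the method above keeps the head/tail pair consistent: if resources were never specified, the tail also stays UNKNOWN; otherwise the tail is pinned to ZERO so the co-located pair does not request the slot's resources twice. Below is a minimal sketch of that decision; the helper name, class name, and example ResourceSpec values are hypothetical and not part of StreamGraph's API.

// Hypothetical sketch of the tail-node resource decision shown above.
import org.apache.flink.api.common.operators.ResourceSpec;

public class IterationTailResourceSketch {

    // Same ternary as in createIterationSourceAndSink: unknown stays unknown,
    // otherwise the tail gets zero and the head keeps the specified resources.
    static ResourceSpec tailResourcesFor(ResourceSpec headMinResources) {
        return headMinResources.equals(ResourceSpec.UNKNOWN)
                ? ResourceSpec.UNKNOWN
                : ResourceSpec.ZERO;
    }

    public static void main(String[] args) {
        // Case 1: no resources specified -> head and tail both remain UNKNOWN.
        System.out.println(tailResourcesFor(ResourceSpec.UNKNOWN));

        // Case 2: resources specified (example: 1 CPU core, 100 MB task heap) -> the tail
        // becomes ZERO so the co-located pair does not double-count the slot's resources.
        ResourceSpec specified = ResourceSpec.newBuilder(1.0, 100).build();
        System.out.println(tailResourcesFor(specified));
    }
}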