use of org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup in project flink by apache.
the class SsgNetworkMemoryCalculationUtilsTest method testGenerateEnrichedResourceProfile.
@Test
public void testGenerateEnrichedResourceProfile() throws Exception {
    SlotSharingGroup slotSharingGroup0 = new SlotSharingGroup();
    slotSharingGroup0.setResourceProfile(DEFAULT_RESOURCE);
    SlotSharingGroup slotSharingGroup1 = new SlotSharingGroup();
    slotSharingGroup1.setResourceProfile(DEFAULT_RESOURCE);

    createExecutionGraphAndEnrichNetworkMemory(
            Arrays.asList(slotSharingGroup0, slotSharingGroup0, slotSharingGroup1));

    // slotSharingGroup0 hosts two vertices, so its network memory is the sum of
    // both per-vertex shuffle memory requirements; slotSharingGroup1 hosts one.
    assertEquals(
            new MemorySize(
                    TestShuffleMaster.computeRequiredShuffleMemoryBytes(0, 2)
                            + TestShuffleMaster.computeRequiredShuffleMemoryBytes(1, 6)),
            slotSharingGroup0.getResourceProfile().getNetworkMemory());
    assertEquals(
            new MemorySize(TestShuffleMaster.computeRequiredShuffleMemoryBytes(5, 0)),
            slotSharingGroup1.getResourceProfile().getNetworkMemory());
}
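For context, the enrichment this test exercises amounts to summing the shuffle memory of every vertex in a group and writing the total back into the group's ResourceProfile. A minimal sketch of that idea, where shuffleMemoryOf is a hypothetical stand-in for the shuffle master's per-vertex estimate:

// Hypothetical sketch of the enrichment: sum per-vertex network memory over a group.
// shuffleMemoryOf(...) is a made-up helper, not Flink's API.
MemorySize total = MemorySize.ZERO;
for (JobVertexID vertexId : slotSharingGroup.getJobVertexIds()) {
    total = total.add(shuffleMemoryOf(vertexId));
}
slotSharingGroup.setResourceProfile(
        ResourceProfile.newBuilder().setNetworkMemory(total).build());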
use of org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup in project flink by apache.
the class PartialConsumePipelinedResultTest method testPartialConsumePipelinedResultReceiver.
/**
 * Tests a fix for FLINK-1930.
 *
 * <p>When consuming a pipelined result only partially, it is possible that local channels
 * release the buffer pool, which is associated with the result partition, too early. If the
 * producer is still producing data when this happens, it runs into an IllegalStateException,
 * because of the destroyed buffer pool.
 *
 * @see <a href="https://issues.apache.org/jira/browse/FLINK-1930">FLINK-1930</a>
 */
@Test
public void testPartialConsumePipelinedResultReceiver() throws Exception {
    final JobVertex sender = new JobVertex("Sender");
    sender.setInvokableClass(SlowBufferSender.class);
    sender.setParallelism(PARALLELISM);

    final JobVertex receiver = new JobVertex("Receiver");
    receiver.setInvokableClass(SingleBufferReceiver.class);
    receiver.setParallelism(PARALLELISM);

    // The partition needs to be pipelined, otherwise the original issue does not occur,
    // because the sender and receiver are not online at the same time.
    receiver.connectNewDataSetAsInput(
            sender, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

    final JobGraph jobGraph = JobGraphTestUtils.streamingJobGraph(sender, receiver);

    final SlotSharingGroup slotSharingGroup = new SlotSharingGroup();
    sender.setSlotSharingGroup(slotSharingGroup);
    receiver.setSlotSharingGroup(slotSharingGroup);

    MINI_CLUSTER_RESOURCE.getMiniCluster().executeJobBlocking(jobGraph);
}
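The shared group is what makes the consumer-side channels local in the first place: with POINTWISE wiring and one slot sharing group, sender and receiver subtasks can share a slot. For contrast, a hedged illustration (not part of the test) of how distinct groups would remove that co-location guarantee:

// Illustrative contrast only: distinct groups force the vertices into separate
// slots, so local channels between sender and receiver are no longer guaranteed.
sender.setSlotSharingGroup(new SlotSharingGroup());
receiver.setSlotSharingGroup(new SlotSharingGroup());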
use of org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup in project flink by apache.
the class StreamingJobGraphGenerator method buildVertexRegionSlotSharingGroups.
/**
* Maps a vertex to its region slot sharing group. If {@link
* StreamGraph#isAllVerticesInSameSlotSharingGroupByDefault()} returns true, all regions will be
* in the same slot sharing group.
*/
private Map<JobVertexID, SlotSharingGroup> buildVertexRegionSlotSharingGroups() {
    final Map<JobVertexID, SlotSharingGroup> vertexRegionSlotSharingGroups = new HashMap<>();

    final SlotSharingGroup defaultSlotSharingGroup = new SlotSharingGroup();
    streamGraph
            .getSlotSharingGroupResource(StreamGraphGenerator.DEFAULT_SLOT_SHARING_GROUP)
            .ifPresent(defaultSlotSharingGroup::setResourceProfile);

    final boolean allRegionsInSameSlotSharingGroup =
            streamGraph.isAllVerticesInSameSlotSharingGroupByDefault();

    final Iterable<DefaultLogicalPipelinedRegion> regions =
            DefaultLogicalTopology.fromJobGraph(jobGraph).getAllPipelinedRegions();
    for (DefaultLogicalPipelinedRegion region : regions) {
        final SlotSharingGroup regionSlotSharingGroup;
        if (allRegionsInSameSlotSharingGroup) {
            regionSlotSharingGroup = defaultSlotSharingGroup;
        } else {
            regionSlotSharingGroup = new SlotSharingGroup();
            streamGraph
                    .getSlotSharingGroupResource(StreamGraphGenerator.DEFAULT_SLOT_SHARING_GROUP)
                    .ifPresent(regionSlotSharingGroup::setResourceProfile);
        }

        for (LogicalVertex vertex : region.getVertices()) {
            vertexRegionSlotSharingGroups.put(vertex.getId(), regionSlotSharingGroup);
        }
    }
    return vertexRegionSlotSharingGroups;
}
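A hedged sketch of how such a mapping is typically consumed when finalizing the job graph; the iteration over jobGraph.getVertices() is an assumption about where the map gets applied, not code from this class:

// Assumed consumption of the mapping: assign each vertex its region's group.
Map<JobVertexID, SlotSharingGroup> groups = buildVertexRegionSlotSharingGroups();
for (JobVertex vertex : jobGraph.getVertices()) {
    SlotSharingGroup group = groups.get(vertex.getID());
    if (group != null) {
        vertex.setSlotSharingGroup(group);
    }
}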
use of org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup in project flink by apache.
the class StreamingJobGraphGenerator method setManagedMemoryFractionForSlotSharingGroup.
private static void setManagedMemoryFractionForSlotSharingGroup(
        final SlotSharingGroup slotSharingGroup,
        final Map<JobVertexID, Integer> vertexHeadOperators,
        final Map<JobVertexID, Set<Integer>> vertexOperators,
        final Map<Integer, StreamConfig> operatorConfigs,
        final Map<Integer, Map<Integer, StreamConfig>> vertexChainedConfigs,
        final java.util.function.Function<Integer, Map<ManagedMemoryUseCase, Integer>> operatorScopeManagedMemoryUseCaseWeightsRetriever,
        final java.util.function.Function<Integer, Set<ManagedMemoryUseCase>> slotScopeManagedMemoryUseCasesRetriever) {
    final Set<Integer> groupOperatorIds = slotSharingGroup.getJobVertexIds().stream()
            .flatMap((vid) -> vertexOperators.get(vid).stream())
            .collect(Collectors.toSet());
    final Map<ManagedMemoryUseCase, Integer> groupOperatorScopeUseCaseWeights = groupOperatorIds.stream()
            .flatMap((oid) -> operatorScopeManagedMemoryUseCaseWeightsRetriever.apply(oid).entrySet().stream())
            .collect(Collectors.groupingBy(Map.Entry::getKey, Collectors.summingInt(Map.Entry::getValue)));
    final Set<ManagedMemoryUseCase> groupSlotScopeUseCases = groupOperatorIds.stream()
            .flatMap((oid) -> slotScopeManagedMemoryUseCasesRetriever.apply(oid).stream())
            .collect(Collectors.toSet());
    for (JobVertexID jobVertexID : slotSharingGroup.getJobVertexIds()) {
        for (int operatorNodeId : vertexOperators.get(jobVertexID)) {
            final StreamConfig operatorConfig = operatorConfigs.get(operatorNodeId);
            final Map<ManagedMemoryUseCase, Integer> operatorScopeUseCaseWeights =
                    operatorScopeManagedMemoryUseCaseWeightsRetriever.apply(operatorNodeId);
            final Set<ManagedMemoryUseCase> slotScopeUseCases =
                    slotScopeManagedMemoryUseCasesRetriever.apply(operatorNodeId);
            setManagedMemoryFractionForOperator(operatorScopeUseCaseWeights, slotScopeUseCases,
                    groupOperatorScopeUseCaseWeights, groupSlotScopeUseCases, operatorConfig);
        }
        // need to refresh the chained task configs because they are serialized
        final int headOperatorNodeId = vertexHeadOperators.get(jobVertexID);
        final StreamConfig vertexConfig = operatorConfigs.get(headOperatorNodeId);
        vertexConfig.setTransitiveChainedTaskConfigs(vertexChainedConfigs.get(headOperatorNodeId));
    }
}
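Conceptually, the fraction that setManagedMemoryFractionForOperator derives for an operator-scope use case is the operator's declared weight over the group-wide sum computed above. A self-contained illustration of that arithmetic (fractionOf is illustrative, not Flink's API):

// Illustrative only: the share of a use case's managed memory one operator gets.
static double fractionOf(
        ManagedMemoryUseCase useCase,
        Map<ManagedMemoryUseCase, Integer> operatorWeights,
        Map<ManagedMemoryUseCase, Integer> groupWeights) {
    int operatorWeight = operatorWeights.getOrDefault(useCase, 0);
    int groupWeight = groupWeights.getOrDefault(useCase, 0);
    // No weight declared at group level means the operator receives no share.
    return groupWeight == 0 ? 0.0 : (double) operatorWeight / groupWeight;
}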
use of org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup in project flink by apache.
the class JobRecoveryITCase method createjobGraph.
private JobGraph createjobGraph(boolean slotSharingEnabled) throws IOException {
    final JobVertex sender = new JobVertex("Sender");
    sender.setParallelism(PARALLELISM);
    sender.setInvokableClass(TestingAbstractInvokables.Sender.class);

    final JobVertex receiver = new JobVertex("Receiver");
    receiver.setParallelism(PARALLELISM);
    receiver.setInvokableClass(FailingOnceReceiver.class);
    FailingOnceReceiver.reset();

    if (slotSharingEnabled) {
        final SlotSharingGroup slotSharingGroup = new SlotSharingGroup();
        receiver.setSlotSharingGroup(slotSharingGroup);
        sender.setSlotSharingGroup(slotSharingGroup);
    }

    receiver.connectNewDataSetAsInput(
            sender, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

    final ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0L));

    return JobGraphBuilder.newStreamingJobGraphBuilder()
            .addJobVertices(Arrays.asList(sender, receiver))
            .setJobName(getClass().getSimpleName())
            .setExecutionConfig(executionConfig)
            .build();
}
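A hedged usage sketch: the surrounding ITCase presumably builds the graph in both modes and runs each on the mini cluster, mirroring the blocking-execution pattern from PartialConsumePipelinedResultTest above; the driver code itself is an assumption:

// Assumed driver code: exercise recovery with and without slot sharing.
miniCluster.executeJobBlocking(createjobGraph(true));
miniCluster.executeJobBlocking(createjobGraph(false));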