Use of org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup in project flink by apache.
The class Execution, method allocateSlotForExecution:
public Future<SimpleSlot> allocateSlotForExecution(SlotProvider slotProvider, boolean queued) throws IllegalExecutionStateException {
    checkNotNull(slotProvider);

    final SlotSharingGroup sharingGroup = vertex.getJobVertex().getSlotSharingGroup();
    final CoLocationConstraint locationConstraint = vertex.getLocationConstraint();

    // sanity check
    if (locationConstraint != null && sharingGroup == null) {
        throw new IllegalStateException("Trying to schedule with co-location constraint but without slot sharing allowed.");
    }

    // this method only works if the execution is in the state 'CREATED'
    if (transitionState(CREATED, SCHEDULED)) {
        ScheduledUnit toSchedule = locationConstraint == null
                ? new ScheduledUnit(this, sharingGroup)
                : new ScheduledUnit(this, sharingGroup, locationConstraint);
        return slotProvider.allocateSlot(toSchedule, queued);
    } else {
        // call race, already deployed, or already done
        throw new IllegalExecutionStateException(this, CREATED, state);
    }
}
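For context, the sharing group that this scheduler code reads from the vertex is normally assigned further up the stack. In the DataStream API, users pin operators to a named group via slotSharingGroup(String), and that name is later translated into a runtime SlotSharingGroup on the generated JobVertex. The sketch below is illustrative only: the pipeline and the group name "group-a" are made up, and it assumes the standard Flink DataStream API rather than the runtime classes shown in these snippets.

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class SlotSharingGroupSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.fromElements(1, 2, 3)
                // operators carrying the same group name may be scheduled into the same slot;
                // "group-a" is a hypothetical name chosen for this sketch
                .map(i -> i * 2).slotSharingGroup("group-a")
                .print();
        env.execute("slot-sharing-sketch");
    }
}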
Use of org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup in project flink by apache.
The class JobGraphGenerator, method compileJobGraph:
public JobGraph compileJobGraph(OptimizedPlan program, JobID jobId) {
    if (program == null) {
        throw new NullPointerException("Program is null, did you called " + "ExecutionEnvironment.execute()");
    }
    if (jobId == null) {
        jobId = JobID.generate();
    }

    this.vertices = new HashMap<PlanNode, JobVertex>();
    this.chainedTasks = new HashMap<PlanNode, TaskInChain>();
    this.chainedTasksInSequence = new ArrayList<TaskInChain>();
    this.auxVertices = new ArrayList<JobVertex>();
    this.iterations = new HashMap<IterationPlanNode, IterationDescriptor>();
    this.iterationStack = new ArrayList<IterationPlanNode>();
    this.sharingGroup = new SlotSharingGroup();

    // this starts the traversal that generates the job graph
    program.accept(this);

    // sanity check that we are not somehow in an iteration at the end
    if (this.currentIteration != null) {
        throw new CompilerException("The graph translation ended prematurely, leaving an unclosed iteration.");
    }

    // finalize the iterations
    for (IterationDescriptor iteration : this.iterations.values()) {
        if (iteration.getIterationNode() instanceof BulkIterationPlanNode) {
            finalizeBulkIteration(iteration);
        } else if (iteration.getIterationNode() instanceof WorksetIterationPlanNode) {
            finalizeWorksetIteration(iteration);
        } else {
            throw new CompilerException();
        }
    }

    // have the chained tasks write their configs into their parents' configurations
    for (TaskInChain tic : this.chainedTasksInSequence) {
        TaskConfig t = new TaskConfig(tic.getContainingVertex().getConfiguration());
        t.addChainedTask(tic.getChainedTask(), tic.getTaskConfig(), tic.getTaskName());
    }

    // ----- attach the additional info to the job vertices, for display in the runtime monitor
    attachOperatorNamesAndDescriptions();

    for (JobVertex vertex : this.auxVertices) {
        vertex.setSlotSharingGroup(sharingGroup);
    }

    final Map<String, DistributedCache.DistributedCacheEntry> userArtifacts =
            JobGraphUtils.prepareUserArtifactEntries(
                    program.getOriginalPlan().getCachedFiles().stream()
                            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)),
                    jobId);

    // create the job graph object
    final JobGraph graph;
    try {
        graph = JobGraphBuilder.newBatchJobGraphBuilder()
                .setJobId(jobId)
                .setJobName(program.getJobName())
                .setExecutionConfig(program.getOriginalPlan().getExecutionConfig())
                .addJobVertices(vertices.values())
                .addJobVertices(auxVertices)
                .addUserArtifacts(userArtifacts)
                .build();
    } catch (IOException e) {
        throw new CompilerException("Could not serialize the ExecutionConfig." + "This indicates that non-serializable types (like custom serializers) were registered");
    }

    // release all references again
    this.vertices = null;
    this.chainedTasks = null;
    this.chainedTasksInSequence = null;
    this.auxVertices = null;
    this.iterations = null;
    this.iterationStack = null;

    // return job graph
    return graph;
}
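The batch compiler above funnels every generated vertex into the single sharingGroup field, so all subtasks of the compiled plan may share slots. A minimal sketch of that effect, using only classes and methods that already appear in these snippets (the vertex names and the import paths, which follow the Flink runtime packages these examples come from, are the only assumptions):

import org.apache.flink.runtime.jobgraph.JobVertex;
import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup;

public class SingleGroupSketch {
    public static void main(String[] args) {
        // one group instance shared by every vertex, mirroring compileJobGraph
        SlotSharingGroup sharingGroup = new SlotSharingGroup();

        JobVertex mapper = new JobVertex("mapper");
        JobVertex reducer = new JobVertex("reducer");
        mapper.setSlotSharingGroup(sharingGroup);
        reducer.setSlotSharingGroup(sharingGroup);

        // both vertices now reference the same group, so their subtasks may share slots
        System.out.println(mapper.getSlotSharingGroup() == reducer.getSlotSharingGroup()); // true
    }
}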
Use of org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup in project flink by apache.
The class FileBufferReaderITCase, method createJobGraph:
private static JobGraph createJobGraph() {
    final SlotSharingGroup group1 = new SlotSharingGroup();
    final SlotSharingGroup group2 = new SlotSharingGroup();

    final JobVertex source = new JobVertex("source");
    source.setInvokableClass(TestSourceInvokable.class);
    source.setParallelism(parallelism);
    source.setSlotSharingGroup(group1);

    final JobVertex sink = new JobVertex("sink");
    sink.setInvokableClass(TestSinkInvokable.class);
    sink.setParallelism(parallelism);
    sink.setSlotSharingGroup(group2);

    sink.connectNewDataSetAsInput(source, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

    return JobGraphTestUtils.batchJobGraph(source, sink);
}
Use of org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup in project flink by apache.
The class TaskExecutorITCase, method createJobGraph:
private JobGraph createJobGraph(int parallelism) {
    final JobVertex sender = new JobVertex("Sender");
    sender.setParallelism(parallelism);
    sender.setInvokableClass(TestingAbstractInvokables.Sender.class);

    final JobVertex receiver = new JobVertex("Blocking receiver");
    receiver.setParallelism(parallelism);
    receiver.setInvokableClass(BlockingOperator.class);
    BlockingOperator.reset();

    receiver.connectNewDataSetAsInput(sender, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

    final SlotSharingGroup slotSharingGroup = new SlotSharingGroup();
    sender.setSlotSharingGroup(slotSharingGroup);
    receiver.setSlotSharingGroup(slotSharingGroup);

    return JobGraphTestUtils.streamingJobGraph(sender, receiver);
}
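Because sender and receiver share one SlotSharingGroup, each slot can host one sender subtask and one receiver subtask, so this pipelined job fits into parallelism slots rather than twice that number. The runtime SlotSharingGroup also keeps track of which vertices were assigned to it; the following sketch assumes that setSlotSharingGroup registers the vertex ID with the group and that getJobVertexIds() exposes the registered IDs, both of which are assumptions not shown in the snippets above.

import org.apache.flink.runtime.jobgraph.JobVertex;
import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup;

public class GroupMembershipSketch {
    public static void main(String[] args) {
        SlotSharingGroup group = new SlotSharingGroup();
        JobVertex sender = new JobVertex("Sender");
        JobVertex receiver = new JobVertex("Blocking receiver");

        // assumption: setSlotSharingGroup also registers the vertex ID with the group
        sender.setSlotSharingGroup(group);
        receiver.setSlotSharingGroup(group);

        // assumption: getJobVertexIds() returns the registered JobVertexIDs (two entries here)
        System.out.println(group.getJobVertexIds().size());
    }
}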
Use of org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup in project flink by apache.
The class NetworkStackThroughputITCase, method createJobGraph:
private JobGraph createJobGraph(int dataVolumeGb, boolean useForwarder, boolean isSlowSender, boolean isSlowReceiver, int numSubtasks) {
    SlotSharingGroup sharingGroup = new SlotSharingGroup();
    final List<JobVertex> jobVertices = new ArrayList<>();

    JobVertex producer = new JobVertex("Speed Test Producer");
    producer.setSlotSharingGroup(sharingGroup);
    producer.setInvokableClass(SpeedTestProducer.class);
    producer.setParallelism(numSubtasks);
    producer.getConfiguration().setInteger(DATA_VOLUME_GB_CONFIG_KEY, dataVolumeGb);
    producer.getConfiguration().setBoolean(IS_SLOW_SENDER_CONFIG_KEY, isSlowSender);
    jobVertices.add(producer);

    JobVertex forwarder = null;
    if (useForwarder) {
        forwarder = new JobVertex("Speed Test Forwarder");
        forwarder.setSlotSharingGroup(sharingGroup);
        forwarder.setInvokableClass(SpeedTestForwarder.class);
        forwarder.setParallelism(numSubtasks);
        jobVertices.add(forwarder);
    }

    JobVertex consumer = new JobVertex("Speed Test Consumer");
    consumer.setSlotSharingGroup(sharingGroup);
    consumer.setInvokableClass(SpeedTestConsumer.class);
    consumer.setParallelism(numSubtasks);
    consumer.getConfiguration().setBoolean(IS_SLOW_RECEIVER_CONFIG_KEY, isSlowReceiver);
    jobVertices.add(consumer);

    if (useForwarder) {
        forwarder.connectNewDataSetAsInput(producer, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
        consumer.connectNewDataSetAsInput(forwarder, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    } else {
        consumer.connectNewDataSetAsInput(producer, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    }

    return JobGraphTestUtils.streamingJobGraph(jobVertices.toArray(new JobVertex[0]));
}
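Here all vertices sit in one shared group, so the throughput test needs only numSubtasks slots in total: with slot sharing, a pipeline's slot demand is the maximum parallelism across the grouped vertices rather than the sum. A back-of-the-envelope sketch of that difference (the parallelism values are illustrative, not taken from the test above):

public class SlotDemandSketch {
    public static void main(String[] args) {
        // illustrative parallelism values
        int producerParallelism = 4;
        int forwarderParallelism = 4;
        int consumerParallelism = 4;

        // with one shared group: slots needed = max parallelism of the grouped vertices
        int withSharing = Math.max(producerParallelism, Math.max(forwarderParallelism, consumerParallelism));
        // with a separate group per vertex: slots needed = sum of the parallelisms
        int withoutSharing = producerParallelism + forwarderParallelism + consumerParallelism;

        System.out.println("slots with sharing: " + withSharing);       // 4
        System.out.println("slots without sharing: " + withoutSharing); // 12
    }
}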