Use of org.apache.flink.runtime.jobmanager.scheduler.Scheduler in project flink by apache.
From the class PointwisePatternTest, method testNTo2N:
@Test
public void testNTo2N() throws Exception {
    final int N = 41;

    JobVertex v1 = new JobVertex("vertex1");
    JobVertex v2 = new JobVertex("vertex2");

    v1.setParallelism(N);
    v2.setParallelism(2 * N);

    v1.setInvokableClass(AbstractInvokable.class);
    v2.setInvokableClass(AbstractInvokable.class);

    v2.connectNewDataSetAsInput(v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

    List<JobVertex> ordered = new ArrayList<JobVertex>(Arrays.asList(v1, v2));

    // jobId, jobName, and cfg are fields of the enclosing test class
    ExecutionGraph eg = new ExecutionGraph(
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        jobId,
        jobName,
        cfg,
        new SerializedValue<>(new ExecutionConfig()),
        AkkaUtils.getDefaultTimeout(),
        new NoRestartStrategy(),
        new Scheduler(TestingUtils.defaultExecutionContext()));

    try {
        eg.attachJobGraph(ordered);
    } catch (JobException e) {
        e.printStackTrace();
        fail("Job failed with exception: " + e.getMessage());
    }

    ExecutionJobVertex target = eg.getAllVertices().get(v2.getID());

    for (ExecutionVertex ev : target.getTaskVertices()) {
        assertEquals(1, ev.getNumberOfInputs());

        ExecutionEdge[] inEdges = ev.getInputEdges(0);
        assertEquals(1, inEdges.length);

        // consumer subtask i must read from producer partition i / 2
        assertEquals(ev.getParallelSubtaskIndex() / 2, inEdges[0].getSource().getPartitionNumber());
    }
}
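The final assertion encodes the expected POINTWISE fan-out for an N-to-2N connection: every producer partition feeds two consecutive consumer subtasks, so consumer subtask i reads producer partition i / 2. A minimal standalone sketch of that mapping (plain Java, not taken from the Flink sources):

class PointwiseNTo2NMappingSketch {
    public static void main(String[] args) {
        final int producerParallelism = 41;
        final int consumerParallelism = 2 * producerParallelism;

        for (int subtask = 0; subtask < consumerParallelism; subtask++) {
            // mirrors the assertion above: ev.getParallelSubtaskIndex() / 2
            int sourcePartition = subtask / 2;
            System.out.println("consumer subtask " + subtask + " <- producer partition " + sourcePartition);
        }
    }
}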
Use of org.apache.flink.runtime.jobmanager.scheduler.Scheduler in project flink by apache.
From the class VertexSlotSharingTest, method testAssignSlotSharingGroup:
/*
 * Test setup:
 * - v1 is isolated, no slot sharing
 * - v2 and v3 (not connected) share slots
 * - v4 and v5 (connected) share slots
 */
@Test
public void testAssignSlotSharingGroup() {
    try {
        JobVertex v1 = new JobVertex("v1");
        JobVertex v2 = new JobVertex("v2");
        JobVertex v3 = new JobVertex("v3");
        JobVertex v4 = new JobVertex("v4");
        JobVertex v5 = new JobVertex("v5");

        v1.setParallelism(4);
        v2.setParallelism(5);
        v3.setParallelism(7);
        v4.setParallelism(1);
        v5.setParallelism(11);

        v1.setInvokableClass(AbstractInvokable.class);
        v2.setInvokableClass(AbstractInvokable.class);
        v3.setInvokableClass(AbstractInvokable.class);
        v4.setInvokableClass(AbstractInvokable.class);
        v5.setInvokableClass(AbstractInvokable.class);

        v2.connectNewDataSetAsInput(v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
        v5.connectNewDataSetAsInput(v4, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

        SlotSharingGroup jg1 = new SlotSharingGroup();
        v2.setSlotSharingGroup(jg1);
        v3.setSlotSharingGroup(jg1);

        SlotSharingGroup jg2 = new SlotSharingGroup();
        v4.setSlotSharingGroup(jg2);
        v5.setSlotSharingGroup(jg2);

        List<JobVertex> vertices = new ArrayList<JobVertex>(Arrays.asList(v1, v2, v3, v4, v5));

        ExecutionGraph eg = new ExecutionGraph(
            TestingUtils.defaultExecutor(),
            TestingUtils.defaultExecutor(),
            new JobID(),
            "test job",
            new Configuration(),
            new SerializedValue<>(new ExecutionConfig()),
            AkkaUtils.getDefaultTimeout(),
            new NoRestartStrategy(),
            new Scheduler(TestingUtils.defaultExecutionContext()));

        eg.attachJobGraph(vertices);

        // verify the slot sharing group assignments

        // v1 is isolated and has no slot sharing group
        assertNull(eg.getJobVertex(v1.getID()).getSlotSharingGroup());

        // v2 and v3 are shared
        SlotSharingGroup group1 = eg.getJobVertex(v2.getID()).getSlotSharingGroup();
        assertNotNull(group1);
        assertEquals(group1, eg.getJobVertex(v3.getID()).getSlotSharingGroup());
        assertEquals(2, group1.getJobVertexIds().size());
        assertTrue(group1.getJobVertexIds().contains(v2.getID()));
        assertTrue(group1.getJobVertexIds().contains(v3.getID()));

        // v4 and v5 are shared
        SlotSharingGroup group2 = eg.getJobVertex(v4.getID()).getSlotSharingGroup();
        assertNotNull(group2);
        assertEquals(group2, eg.getJobVertex(v5.getID()).getSlotSharingGroup());
        assertEquals(2, group2.getJobVertexIds().size());
        assertTrue(group2.getJobVertexIds().contains(v4.getID()));
        assertTrue(group2.getJobVertexIds().contains(v5.getID()));
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
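For context, JobVertex#setSlotSharingGroup is what records the vertex in the group, which is why the test can assert membership through getJobVertexIds. A small hedged sketch of just that interaction, reusing only the APIs already visible above (the assumption that the vertex ID is registered immediately, rather than at attachJobGraph time, is mine):

SlotSharingGroup group = new SlotSharingGroup();
JobVertex a = new JobVertex("a");
JobVertex b = new JobVertex("b");

// assigning the group is assumed to record each vertex ID as a member
a.setSlotSharingGroup(group);
b.setSlotSharingGroup(group);

assertEquals(2, group.getJobVertexIds().size());
assertTrue(group.getJobVertexIds().contains(a.getID()));
assertTrue(group.getJobVertexIds().contains(b.getID()));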
Use of org.apache.flink.runtime.jobmanager.scheduler.Scheduler in project flink by apache.
From the class ExecutionVertexCancelTest, method testActionsWhileCancelling:
@Test
public void testActionsWhileCancelling() {
    try {
        final JobVertexID jid = new JobVertexID();
        final ExecutionJobVertex ejv = getExecutionVertex(jid);

        // scheduling while canceling is an illegal state transition (tolerated without an exception)
        try {
            ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0], AkkaUtils.getDefaultTimeout());
            setVertexState(vertex, ExecutionState.CANCELING);

            Scheduler scheduler = mock(Scheduler.class);
            vertex.scheduleForExecution(scheduler, false);
        } catch (Exception e) {
            fail("should not throw an exception");
        }

        // deploying while in canceling state is illegal (should immediately go to canceled)
        try {
            ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0], AkkaUtils.getDefaultTimeout());
            setVertexState(vertex, ExecutionState.CANCELING);

            Instance instance = getInstance(new ActorTaskManagerGateway(DummyActorGateway.INSTANCE));
            SimpleSlot slot = instance.allocateSimpleSlot(new JobID());

            vertex.deployToSlot(slot);
            fail("Method should throw an exception");
        } catch (IllegalStateException e) {
            // that is what we expect
        }

        // fail while canceling
        {
            ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0], AkkaUtils.getDefaultTimeout());

            Instance instance = getInstance(new ActorTaskManagerGateway(DummyActorGateway.INSTANCE));
            SimpleSlot slot = instance.allocateSimpleSlot(new JobID());

            setVertexResource(vertex, slot);
            setVertexState(vertex, ExecutionState.CANCELING);

            Exception failureCause = new Exception("test exception");
            vertex.fail(failureCause);

            // failing during cancellation resolves to CANCELED and releases the slot
            assertEquals(ExecutionState.CANCELED, vertex.getExecutionState());
            assertTrue(slot.isReleased());
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Use of org.apache.flink.runtime.jobmanager.scheduler.Scheduler in project flink by apache.
From the class ExecutionGraphTestUtils, method getExecutionVertex:
public static ExecutionJobVertex getExecutionVertex(JobVertexID id, ScheduledExecutorService executor) throws Exception {
    JobVertex ajv = new JobVertex("TestVertex", id);
    ajv.setInvokableClass(mock(AbstractInvokable.class).getClass());

    ExecutionGraph graph = new ExecutionGraph(
        executor,
        executor,
        new JobID(),
        "test job",
        new Configuration(),
        new SerializedValue<>(new ExecutionConfig()),
        AkkaUtils.getDefaultTimeout(),
        new NoRestartStrategy(),
        new Scheduler(ExecutionContext$.MODULE$.fromExecutor(executor)));

    ExecutionJobVertex ejv = spy(new ExecutionJobVertex(graph, ajv, 1, AkkaUtils.getDefaultTimeout()));

    // neutralize the lifecycle callbacks so tests can drive state transitions
    // on individual vertices without side effects on the enclosing graph
    Answer<Void> noop = new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) {
            return null;
        }
    };
    doAnswer(noop).when(ejv).vertexCancelled(Matchers.anyInt());
    doAnswer(noop).when(ejv).vertexFailed(Matchers.anyInt(), Matchers.any(Throwable.class));
    doAnswer(noop).when(ejv).vertexFinished(Matchers.anyInt());

    return ejv;
}
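A typical call site, mirroring how the cancel tests above obtain their vertices, might look like the following sketch (assuming TestingUtils.defaultExecutor() satisfies the ScheduledExecutorService parameter):

final JobVertexID jid = new JobVertexID();
final ExecutionJobVertex ejv = getExecutionVertex(jid, TestingUtils.defaultExecutor());

// create subtask 0 of the mocked job vertex and drive it through a state transition
ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0], AkkaUtils.getDefaultTimeout());
setVertexState(vertex, ExecutionState.CANCELING);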
Use of org.apache.flink.runtime.jobmanager.scheduler.Scheduler in project flink by apache.
From the class ExecutionVertexCancelTest, method testScheduleOrDeployAfterCancel:
// --------------------------------------------------------------------------------------------
//  Actions after a vertex has been canceled or while canceling
// --------------------------------------------------------------------------------------------

@Test
public void testScheduleOrDeployAfterCancel() {
    try {
        final JobVertexID jid = new JobVertexID();
        final ExecutionJobVertex ejv = getExecutionVertex(jid);

        final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0], AkkaUtils.getDefaultTimeout());
        setVertexState(vertex, ExecutionState.CANCELED);

        assertEquals(ExecutionState.CANCELED, vertex.getExecutionState());

        // 1)
        // scheduling after being canceled should be tolerated (no exception) because
        // it can occur as the result of races
        {
            Scheduler scheduler = mock(Scheduler.class);
            vertex.scheduleForExecution(scheduler, false);

            assertEquals(ExecutionState.CANCELED, vertex.getExecutionState());
        }

        // 2)
        // deploying into a slot after being canceled must throw, so that the
        // scheduler (or any caller) knows that the slot should be released
        try {
            Instance instance = getInstance(new ActorTaskManagerGateway(DummyActorGateway.INSTANCE));
            SimpleSlot slot = instance.allocateSimpleSlot(new JobID());

            vertex.deployToSlot(slot);
            fail("Method should throw an exception");
        } catch (IllegalStateException e) {
            assertEquals(ExecutionState.CANCELED, vertex.getExecutionState());
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
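The contract these tests pin down is that a caller that loses the race against cancellation has to clean up the slot itself. A hedged sketch of that caller-side pattern (slot.releaseSlot() is assumed to be the release call available on SimpleSlot in this Flink version):

try {
    vertex.deployToSlot(slot);
} catch (IllegalStateException e) {
    // the vertex was canceled concurrently; give the slot back so it is not leaked
    slot.releaseSlot();
}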