Use of org.apache.flink.runtime.executiongraph.restart.NoRestartStrategy in project flink by apache.
The class ExecutionGraphConstructionTest, method testCannotConnectWrongOrder:
@Test
public void testCannotConnectWrongOrder() throws Exception {
    final JobID jobId = new JobID();
    final String jobName = "Test Job Sample Name";
    final Configuration cfg = new Configuration();

    JobVertex v1 = new JobVertex("vertex1");
    JobVertex v2 = new JobVertex("vertex2");
    JobVertex v3 = new JobVertex("vertex3");
    JobVertex v4 = new JobVertex("vertex4");
    JobVertex v5 = new JobVertex("vertex5");

    v1.setParallelism(5);
    v2.setParallelism(7);
    v3.setParallelism(2);
    v4.setParallelism(11);
    v5.setParallelism(4);

    v1.setInvokableClass(AbstractInvokable.class);
    v2.setInvokableClass(AbstractInvokable.class);
    v3.setInvokableClass(AbstractInvokable.class);
    v4.setInvokableClass(AbstractInvokable.class);
    v5.setInvokableClass(AbstractInvokable.class);

    v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    v4.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    v4.connectNewDataSetAsInput(v3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    v5.connectNewDataSetAsInput(v4, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    v5.connectNewDataSetAsInput(v3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);

    List<JobVertex> ordered = new ArrayList<JobVertex>(Arrays.asList(v1, v2, v3, v5, v4));

    ExecutionGraph eg = new ExecutionGraph(
        TestingUtils.defaultExecutor(), TestingUtils.defaultExecutor(),
        jobId, jobName, cfg,
        new SerializedValue<>(new ExecutionConfig()), AkkaUtils.getDefaultTimeout(),
        new NoRestartStrategy(), new Scheduler(TestingUtils.defaultExecutionContext()));

    try {
        eg.attachJobGraph(ordered);
        fail("Attached wrong jobgraph");
    } catch (JobException e) {
        // expected
    }
}
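For contrast, a minimal sketch (not part of the original test): listing the vertices in a valid topological order, with v4 before its consumer v5, is expected to succeed. The variable eg2 is a hypothetical second ExecutionGraph assumed to be built with the same constructor arguments as eg above.

// Hypothetical contrast case: all inputs are listed before their consumers,
// so attachJobGraph should complete without throwing a JobException.
List<JobVertex> topological = new ArrayList<JobVertex>(Arrays.asList(v1, v2, v3, v4, v5));
eg2.attachJobGraph(topological);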
Use of org.apache.flink.runtime.executiongraph.restart.NoRestartStrategy in project flink by apache.
The class ExecutionGraphDeploymentTest, method testNoResourceAvailableFailure:
/**
 * Tests that a blocking batch job fails if there are not enough resources left to schedule the
 * succeeding tasks. This test case is related to [FLINK-4296] where finished producing tasks
 * swallow the fail exception when scheduling a consumer task.
 */
@Test
public void testNoResourceAvailableFailure() throws Exception {
    final JobID jobId = new JobID();

    JobVertex v1 = new JobVertex("source");
    JobVertex v2 = new JobVertex("sink");

    int dop1 = 1;
    int dop2 = 1;

    v1.setParallelism(dop1);
    v2.setParallelism(dop2);

    v1.setInvokableClass(BatchTask.class);
    v2.setInvokableClass(BatchTask.class);

    v2.connectNewDataSetAsInput(v1, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);

    Scheduler scheduler = new Scheduler(TestingUtils.directExecutionContext());
    for (int i = 0; i < dop1; i++) {
        scheduler.newInstanceAvailable(ExecutionGraphTestUtils.getInstance(
            new ActorTaskManagerGateway(
                new ExecutionGraphTestUtils.SimpleActorGateway(TestingUtils.directExecutionContext()))));
    }

    // execution graph that executes actions synchronously
    ExecutionGraph eg = new ExecutionGraph(
        new DirectScheduledExecutorService(), TestingUtils.defaultExecutor(),
        jobId, "failing test job", new Configuration(),
        new SerializedValue<>(new ExecutionConfig()), AkkaUtils.getDefaultTimeout(),
        new NoRestartStrategy(), scheduler);
    eg.setQueuedSchedulingAllowed(false);

    List<JobVertex> ordered = Arrays.asList(v1, v2);
    eg.attachJobGraph(ordered);

    assertEquals(dop1, scheduler.getNumberOfAvailableSlots());

    // schedule, this triggers mock deployment
    eg.scheduleForExecution();

    ExecutionAttemptID attemptID =
        eg.getJobVertex(v1.getID()).getTaskVertices()[0].getCurrentExecutionAttempt().getAttemptId();
    eg.updateState(new TaskExecutionState(jobId, attemptID, ExecutionState.RUNNING));
    eg.updateState(new TaskExecutionState(jobId, attemptID, ExecutionState.FINISHED, null));

    assertEquals(JobStatus.FAILED, eg.getState());
}
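A possible follow-up assertion, not part of the snippet above: because NoRestartStrategy suppresses any restart, the scheduling failure for the consumer task should be recorded on the graph once it reaches FAILED. This assumes the ExecutionGraph#getFailureCause() accessor available in this Flink version.

// Hypothetical extra check: with no restart attempted, a failure cause must be set.
assertNotNull(eg.getFailureCause());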
Use of org.apache.flink.runtime.executiongraph.restart.NoRestartStrategy in project flink by apache.
The class ExecutionGraphDeploymentTest, method setupExecution:
private Tuple2<ExecutionGraph, Map<ExecutionAttemptID, Execution>> setupExecution(
        JobVertex v1, int dop1, JobVertex v2, int dop2) throws Exception {
    final JobID jobId = new JobID();

    v1.setParallelism(dop1);
    v2.setParallelism(dop2);
    v1.setInvokableClass(BatchTask.class);
    v2.setInvokableClass(BatchTask.class);

    Scheduler scheduler = new Scheduler(TestingUtils.defaultExecutionContext());
    for (int i = 0; i < dop1 + dop2; i++) {
        scheduler.newInstanceAvailable(ExecutionGraphTestUtils.getInstance(
            new ActorTaskManagerGateway(
                new ExecutionGraphTestUtils.SimpleActorGateway(TestingUtils.directExecutionContext()))));
    }

    // execution graph that executes actions synchronously
    ExecutionGraph eg = new ExecutionGraph(
        new DirectScheduledExecutorService(), TestingUtils.defaultExecutor(),
        jobId, "some job", new Configuration(),
        new SerializedValue<>(new ExecutionConfig()), AkkaUtils.getDefaultTimeout(),
        new NoRestartStrategy(), scheduler);
    eg.setQueuedSchedulingAllowed(false);

    List<JobVertex> ordered = Arrays.asList(v1, v2);
    eg.attachJobGraph(ordered);

    assertEquals(dop1 + dop2, scheduler.getNumberOfAvailableSlots());

    // schedule, this triggers mock deployment
    eg.scheduleForExecution();

    Map<ExecutionAttemptID, Execution> executions = eg.getRegisteredExecutions();
    assertEquals(dop1 + dop2, executions.size());

    return new Tuple2<>(eg, executions);
}
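A hypothetical caller of this helper might look like the sketch below; the vertex names and parallelism values are illustrative, and the concrete values vary across the tests in ExecutionGraphDeploymentTest.

// Hypothetical usage of setupExecution: build two vertices, set up the graph,
// and verify that one Execution is registered per parallel subtask (7 + 6 = 13).
JobVertex producer = new JobVertex("producer");
JobVertex consumer = new JobVertex("consumer");
Tuple2<ExecutionGraph, Map<ExecutionAttemptID, Execution>> result = setupExecution(producer, 7, consumer, 6);
assertEquals(13, result.f1.size());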
Use of org.apache.flink.runtime.executiongraph.restart.NoRestartStrategy in project flink by apache.
The class PointwisePatternTest, method testNTo2N:
@Test
public void testNTo2N() throws Exception {
    final int N = 41;

    JobVertex v1 = new JobVertex("vertex1");
    JobVertex v2 = new JobVertex("vertex2");

    v1.setParallelism(N);
    v2.setParallelism(2 * N);

    v1.setInvokableClass(AbstractInvokable.class);
    v2.setInvokableClass(AbstractInvokable.class);

    v2.connectNewDataSetAsInput(v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

    List<JobVertex> ordered = new ArrayList<JobVertex>(Arrays.asList(v1, v2));

    ExecutionGraph eg = new ExecutionGraph(
        TestingUtils.defaultExecutor(), TestingUtils.defaultExecutor(),
        jobId, jobName, cfg,
        new SerializedValue<>(new ExecutionConfig()), AkkaUtils.getDefaultTimeout(),
        new NoRestartStrategy(), new Scheduler(TestingUtils.defaultExecutionContext()));

    try {
        eg.attachJobGraph(ordered);
    } catch (JobException e) {
        e.printStackTrace();
        fail("Job failed with exception: " + e.getMessage());
    }

    ExecutionJobVertex target = eg.getAllVertices().get(v2.getID());

    for (ExecutionVertex ev : target.getTaskVertices()) {
        assertEquals(1, ev.getNumberOfInputs());

        ExecutionEdge[] inEdges = ev.getInputEdges(0);
        assertEquals(1, inEdges.length);
        assertEquals(ev.getParallelSubtaskIndex() / 2, inEdges[0].getSource().getPartitionNumber());
    }
}
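In concrete terms, the final assertion pins down the POINTWISE mapping for an N-to-2N connection: with N = 41 the sink has 82 subtasks, subtasks 0 and 1 both read source partition 0, subtasks 2 and 3 read partition 1, and so on up to subtasks 80 and 81 reading partition 40, so every source partition is consumed by exactly two consecutive sink subtasks.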
Use of org.apache.flink.runtime.executiongraph.restart.NoRestartStrategy in project flink by apache.
The class VertexSlotSharingTest, method testAssignSlotSharingGroup:
/*
* Test setup:
* - v1 is isolated, no slot sharing
* - v2 and v3 (not connected) share slots
* - v4 and v5 (connected) share slots
*/
@Test
public void testAssignSlotSharingGroup() {
    try {
        JobVertex v1 = new JobVertex("v1");
        JobVertex v2 = new JobVertex("v2");
        JobVertex v3 = new JobVertex("v3");
        JobVertex v4 = new JobVertex("v4");
        JobVertex v5 = new JobVertex("v5");

        v1.setParallelism(4);
        v2.setParallelism(5);
        v3.setParallelism(7);
        v4.setParallelism(1);
        v5.setParallelism(11);

        v1.setInvokableClass(AbstractInvokable.class);
        v2.setInvokableClass(AbstractInvokable.class);
        v3.setInvokableClass(AbstractInvokable.class);
        v4.setInvokableClass(AbstractInvokable.class);
        v5.setInvokableClass(AbstractInvokable.class);

        v2.connectNewDataSetAsInput(v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
        v5.connectNewDataSetAsInput(v4, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

        SlotSharingGroup jg1 = new SlotSharingGroup();
        v2.setSlotSharingGroup(jg1);
        v3.setSlotSharingGroup(jg1);

        SlotSharingGroup jg2 = new SlotSharingGroup();
        v4.setSlotSharingGroup(jg2);
        v5.setSlotSharingGroup(jg2);

        List<JobVertex> vertices = new ArrayList<JobVertex>(Arrays.asList(v1, v2, v3, v4, v5));

        ExecutionGraph eg = new ExecutionGraph(
            TestingUtils.defaultExecutor(), TestingUtils.defaultExecutor(),
            new JobID(), "test job", new Configuration(),
            new SerializedValue<>(new ExecutionConfig()), AkkaUtils.getDefaultTimeout(),
            new NoRestartStrategy(), new Scheduler(TestingUtils.defaultExecutionContext()));
        eg.attachJobGraph(vertices);

        // verify that the slot sharing groups are assigned as configured above
        SlotSharingGroup group1 = null;
        SlotSharingGroup group2 = null;

        // verify that v1 tasks have no slot sharing group
        assertNull(eg.getJobVertex(v1.getID()).getSlotSharingGroup());

        // v2 and v3 are shared
        group1 = eg.getJobVertex(v2.getID()).getSlotSharingGroup();
        assertNotNull(group1);
        assertEquals(group1, eg.getJobVertex(v3.getID()).getSlotSharingGroup());

        assertEquals(2, group1.getJobVertexIds().size());
        assertTrue(group1.getJobVertexIds().contains(v2.getID()));
        assertTrue(group1.getJobVertexIds().contains(v3.getID()));

        // v4 and v5 are shared
        group2 = eg.getJobVertex(v4.getID()).getSlotSharingGroup();
        assertNotNull(group2);
        assertEquals(group2, eg.getJobVertex(v5.getID()).getSlotSharingGroup());

        assertEquals(2, group2.getJobVertexIds().size());
        assertTrue(group2.getJobVertexIds().contains(v4.getID()));
        assertTrue(group2.getJobVertexIds().contains(v5.getID()));
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
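A hypothetical variation on the setup above, shown only as a sketch: giving v1 its own explicit SlotSharingGroup before attaching the job graph would make getSlotSharingGroup() return a non-null group for v1 as well, containing exactly one vertex id.

// Hypothetical variation (not part of the original test): isolate v1 in its own group.
SlotSharingGroup solo = new SlotSharingGroup();
v1.setSlotSharingGroup(solo);
// After eg.attachJobGraph(vertices), eg.getJobVertex(v1.getID()).getSlotSharingGroup()
// would then be 'solo' with v1.getID() as its only member.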