Use of org.apache.flink.runtime.executiongraph.restart.NoRestartStrategy in project flink by apache.
Class ExecutionStateProgressTest, method testAccumulatedStateFinished:
@Test
public void testAccumulatedStateFinished() {
    try {
        final JobID jid = new JobID();
        final JobVertexID vid = new JobVertexID();

        JobVertex ajv = new JobVertex("TestVertex", vid);
        ajv.setParallelism(3);
        ajv.setInvokableClass(mock(AbstractInvokable.class).getClass());

        ExecutionGraph graph = new ExecutionGraph(
            TestingUtils.defaultExecutor(),
            TestingUtils.defaultExecutor(),
            jid,
            "test job",
            new Configuration(),
            new SerializedValue<>(new ExecutionConfig()),
            AkkaUtils.getDefaultTimeout(),
            new NoRestartStrategy(),
            new Scheduler(TestingUtils.defaultExecutionContext()));

        graph.attachJobGraph(Collections.singletonList(ajv));
        setGraphStatus(graph, JobStatus.RUNNING);

        ExecutionJobVertex ejv = graph.getJobVertex(vid);

        // mock resources and mock taskmanager
        for (ExecutionVertex ee : ejv.getTaskVertices()) {
            SimpleSlot slot = getInstance(
                new ActorTaskManagerGateway(
                    new SimpleActorGateway(TestingUtils.defaultExecutionContext())))
                .allocateSimpleSlot(jid);
            ee.deployToSlot(slot);
        }

        // finish all
        for (ExecutionVertex ee : ejv.getTaskVertices()) {
            ee.executionFinished();
        }

        assertTrue(ejv.isInFinalState());
        assertEquals(JobStatus.FINISHED, graph.getState());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
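The same ExecutionGraph constructor call, with NoRestartStrategy as the restart strategy, recurs in every example below. As a side note, the boilerplate could be pulled into a shared helper; the following is only a sketch, the helper name newTestExecutionGraph is hypothetical and not part of the Flink test code:

// Hypothetical helper (sketch), assuming the same imports as the tests above.
private static ExecutionGraph newTestExecutionGraph(JobID jobId, String jobName) throws Exception {
    // NoRestartStrategy means a failed execution is never restarted; the job fails terminally.
    return new ExecutionGraph(
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        jobId,
        jobName,
        new Configuration(),
        new SerializedValue<>(new ExecutionConfig()),
        AkkaUtils.getDefaultTimeout(),
        new NoRestartStrategy(),
        new Scheduler(TestingUtils.defaultExecutionContext()));
}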
Use of org.apache.flink.runtime.executiongraph.restart.NoRestartStrategy in project flink by apache.
Class ExecutionGraphConstructionTest, method testCreateSimpleGraphBipartite:
/**
 * Creates a JobGraph of the following form:
 *
 * <pre>
 *     v1 ---> v2 ---> \
 *                      \
 *                       v4 ---> \
 *                      /         \
 *     v3 ------------>/           v5
 *       \                        /
 *        ---------------------->/
 * </pre>
 */
@Test
public void testCreateSimpleGraphBipartite() throws Exception {
    final JobID jobId = new JobID();
    final String jobName = "Test Job Sample Name";
    final Configuration cfg = new Configuration();

    JobVertex v1 = new JobVertex("vertex1");
    JobVertex v2 = new JobVertex("vertex2");
    JobVertex v3 = new JobVertex("vertex3");
    JobVertex v4 = new JobVertex("vertex4");
    JobVertex v5 = new JobVertex("vertex5");

    v1.setParallelism(5);
    v2.setParallelism(7);
    v3.setParallelism(2);
    v4.setParallelism(11);
    v5.setParallelism(4);

    v1.setInvokableClass(AbstractInvokable.class);
    v2.setInvokableClass(AbstractInvokable.class);
    v3.setInvokableClass(AbstractInvokable.class);
    v4.setInvokableClass(AbstractInvokable.class);
    v5.setInvokableClass(AbstractInvokable.class);

    v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    v4.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    v4.connectNewDataSetAsInput(v3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    v5.connectNewDataSetAsInput(v4, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    v5.connectNewDataSetAsInput(v3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);

    List<JobVertex> ordered = new ArrayList<JobVertex>(Arrays.asList(v1, v2, v3, v4, v5));

    ExecutionGraph eg = new ExecutionGraph(
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        jobId,
        jobName,
        cfg,
        new SerializedValue<>(new ExecutionConfig()),
        AkkaUtils.getDefaultTimeout(),
        new NoRestartStrategy(),
        new Scheduler(TestingUtils.defaultExecutionContext()));

    try {
        eg.attachJobGraph(ordered);
    } catch (JobException e) {
        e.printStackTrace();
        fail("Job failed with exception: " + e.getMessage());
    }

    verifyTestGraph(eg, jobId, v1, v2, v3, v4, v5);
}
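The body of verifyTestGraph is not included in this listing. Purely as an illustration, a hedged sketch of the kind of structural check such a method might perform, using only calls that already appear above (getJobVertex, getTaskVertices, getParallelism), could look like this; it is not the actual verification code from Flink:

// Sketch: one ExecutionJobVertex per JobVertex, with as many task vertices
// as the configured parallelism of that vertex.
for (JobVertex jv : Arrays.asList(v1, v2, v3, v4, v5)) {
    ExecutionJobVertex ejv = eg.getJobVertex(jv.getID());
    assertNotNull(ejv);
    assertEquals(jv.getParallelism(), ejv.getTaskVertices().length);
}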
Use of org.apache.flink.runtime.executiongraph.restart.NoRestartStrategy in project flink by apache.
Class ExecutionGraphConstructionTest, method testMoreThanOneConsumerForIntermediateResult:
@Test
public void testMoreThanOneConsumerForIntermediateResult() {
    try {
        final JobID jobId = new JobID();
        final String jobName = "Test Job Sample Name";
        final Configuration cfg = new Configuration();

        JobVertex v1 = new JobVertex("vertex1");
        JobVertex v2 = new JobVertex("vertex2");
        JobVertex v3 = new JobVertex("vertex3");

        v1.setParallelism(5);
        v2.setParallelism(7);
        v3.setParallelism(2);

        IntermediateDataSet result = v1.createAndAddResultDataSet(ResultPartitionType.PIPELINED);
        v2.connectDataSetAsInput(result, DistributionPattern.ALL_TO_ALL);
        v3.connectDataSetAsInput(result, DistributionPattern.ALL_TO_ALL);

        List<JobVertex> ordered = new ArrayList<JobVertex>(Arrays.asList(v1, v2, v3));

        ExecutionGraph eg = new ExecutionGraph(
            TestingUtils.defaultExecutor(),
            TestingUtils.defaultExecutor(),
            jobId,
            jobName,
            cfg,
            new SerializedValue<>(new ExecutionConfig()),
            AkkaUtils.getDefaultTimeout(),
            new NoRestartStrategy(),
            new Scheduler(TestingUtils.defaultExecutionContext()));

        try {
            eg.attachJobGraph(ordered);
            fail("Should not be possible");
        } catch (RuntimeException e) {
            // expected
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
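The test documents that, in this version of Flink, attachJobGraph rejects an IntermediateDataSet that is wired into more than one consumer. A minimal sketch of the supported alternative, under the assumption that each consumer gets its own result data set and reusing only the calls shown above:

// Supported wiring (sketch): one result data set per consumer of v1.
IntermediateDataSet resultForV2 = v1.createAndAddResultDataSet(ResultPartitionType.PIPELINED);
IntermediateDataSet resultForV3 = v1.createAndAddResultDataSet(ResultPartitionType.PIPELINED);
v2.connectDataSetAsInput(resultForV2, DistributionPattern.ALL_TO_ALL);
v3.connectDataSetAsInput(resultForV3, DistributionPattern.ALL_TO_ALL);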
Use of org.apache.flink.runtime.executiongraph.restart.NoRestartStrategy in project flink by apache.
Class ExecutionGraphConstructionTest, method testCoLocationConstraintCreation:
@Test
public void testCoLocationConstraintCreation() {
    try {
        final JobID jobId = new JobID();
        final String jobName = "Co-Location Constraint Sample Job";
        final Configuration cfg = new Configuration();

        // simple group of two, cyclic
        JobVertex v1 = new JobVertex("vertex1");
        JobVertex v2 = new JobVertex("vertex2");
        v1.setParallelism(6);
        v2.setParallelism(4);
        v1.setInvokableClass(AbstractInvokable.class);
        v2.setInvokableClass(AbstractInvokable.class);

        SlotSharingGroup sl1 = new SlotSharingGroup();
        v1.setSlotSharingGroup(sl1);
        v2.setSlotSharingGroup(sl1);
        v2.setStrictlyCoLocatedWith(v1);
        v1.setStrictlyCoLocatedWith(v2);

        // complex forked dependency pattern
        JobVertex v3 = new JobVertex("vertex3");
        JobVertex v4 = new JobVertex("vertex4");
        JobVertex v5 = new JobVertex("vertex5");
        JobVertex v6 = new JobVertex("vertex6");
        JobVertex v7 = new JobVertex("vertex7");

        v3.setParallelism(3);
        v4.setParallelism(3);
        v5.setParallelism(3);
        v6.setParallelism(3);
        v7.setParallelism(3);

        v3.setInvokableClass(AbstractInvokable.class);
        v4.setInvokableClass(AbstractInvokable.class);
        v5.setInvokableClass(AbstractInvokable.class);
        v6.setInvokableClass(AbstractInvokable.class);
        v7.setInvokableClass(AbstractInvokable.class);

        SlotSharingGroup sl2 = new SlotSharingGroup();
        v3.setSlotSharingGroup(sl2);
        v4.setSlotSharingGroup(sl2);
        v5.setSlotSharingGroup(sl2);
        v6.setSlotSharingGroup(sl2);
        v7.setSlotSharingGroup(sl2);

        v4.setStrictlyCoLocatedWith(v3);
        v5.setStrictlyCoLocatedWith(v4);
        v6.setStrictlyCoLocatedWith(v3);
        v3.setStrictlyCoLocatedWith(v7);

        // isolated vertex
        JobVertex v8 = new JobVertex("vertex8");
        v8.setParallelism(2);
        v8.setInvokableClass(AbstractInvokable.class);

        JobGraph jg = new JobGraph(jobId, jobName, v1, v2, v3, v4, v5, v6, v7, v8);

        ExecutionGraph eg = new ExecutionGraph(
            TestingUtils.defaultExecutor(),
            TestingUtils.defaultExecutor(),
            jobId,
            jobName,
            cfg,
            new SerializedValue<>(new ExecutionConfig()),
            AkkaUtils.getDefaultTimeout(),
            new NoRestartStrategy(),
            new Scheduler(TestingUtils.defaultExecutionContext()));

        eg.attachJobGraph(jg.getVerticesSortedTopologicallyFromSources());
        // check the v1 / v2 co-location hints (assumes parallelism(v1) >= parallelism(v2))
        {
            ExecutionVertex[] v1s = eg.getJobVertex(v1.getID()).getTaskVertices();
            ExecutionVertex[] v2s = eg.getJobVertex(v2.getID()).getTaskVertices();

            Set<CoLocationConstraint> all = new HashSet<CoLocationConstraint>();

            for (int i = 0; i < v2.getParallelism(); i++) {
                assertNotNull(v1s[i].getLocationConstraint());
                assertNotNull(v2s[i].getLocationConstraint());
                assertTrue(v1s[i].getLocationConstraint() == v2s[i].getLocationConstraint());
                all.add(v1s[i].getLocationConstraint());
            }

            for (int i = v2.getParallelism(); i < v1.getParallelism(); i++) {
                assertNotNull(v1s[i].getLocationConstraint());
                all.add(v1s[i].getLocationConstraint());
            }

            assertEquals("not all co location constraints are distinct", v1.getParallelism(), all.size());
        }
        // check the co-location hints of the forked group v3 - v7
        // (subtasks with the same index must share one constraint)
        {
            ExecutionVertex[] v3s = eg.getJobVertex(v3.getID()).getTaskVertices();
            ExecutionVertex[] v4s = eg.getJobVertex(v4.getID()).getTaskVertices();
            ExecutionVertex[] v5s = eg.getJobVertex(v5.getID()).getTaskVertices();
            ExecutionVertex[] v6s = eg.getJobVertex(v6.getID()).getTaskVertices();
            ExecutionVertex[] v7s = eg.getJobVertex(v7.getID()).getTaskVertices();

            Set<CoLocationConstraint> all = new HashSet<CoLocationConstraint>();

            for (int i = 0; i < v3.getParallelism(); i++) {
                assertNotNull(v3s[i].getLocationConstraint());
                assertTrue(v3s[i].getLocationConstraint() == v4s[i].getLocationConstraint());
                assertTrue(v4s[i].getLocationConstraint() == v5s[i].getLocationConstraint());
                assertTrue(v5s[i].getLocationConstraint() == v6s[i].getLocationConstraint());
                assertTrue(v6s[i].getLocationConstraint() == v7s[i].getLocationConstraint());
                all.add(v3s[i].getLocationConstraint());
            }

            assertEquals("not all co location constraints are distinct", v3.getParallelism(), all.size());
        }
        // check that v8 has no co-location hints
        {
            ExecutionVertex[] v8s = eg.getJobVertex(v8.getID()).getTaskVertices();

            for (int i = 0; i < v8.getParallelism(); i++) {
                assertNull(v8s[i].getLocationConstraint());
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
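Distilled from the assertions above: vertices placed in one SlotSharingGroup and chained with setStrictlyCoLocatedWith end up with subtasks of the same index sharing one CoLocationConstraint. A minimal sketch of that setup, reusing only the calls shown in the test (vertex names head/tail are illustrative):

// Minimal co-location setup (sketch): same slot sharing group, then strict co-location.
JobVertex head = new JobVertex("head");
JobVertex tail = new JobVertex("tail");
head.setParallelism(3);
tail.setParallelism(3);
head.setInvokableClass(AbstractInvokable.class);
tail.setInvokableClass(AbstractInvokable.class);

SlotSharingGroup group = new SlotSharingGroup();
head.setSlotSharingGroup(group);
tail.setSlotSharingGroup(group);
tail.setStrictlyCoLocatedWith(head); // tail subtask i must run where head subtask i runs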
Use of org.apache.flink.runtime.executiongraph.restart.NoRestartStrategy in project flink by apache.
Class ExecutionGraphConstructionTest, method testSetupInputSplits:
@Test
public void testSetupInputSplits() {
    try {
        final InputSplit[] emptySplits = new InputSplit[0];

        InputSplitAssigner assigner1 = mock(InputSplitAssigner.class);
        InputSplitAssigner assigner2 = mock(InputSplitAssigner.class);

        @SuppressWarnings("unchecked")
        InputSplitSource<InputSplit> source1 = mock(InputSplitSource.class);
        @SuppressWarnings("unchecked")
        InputSplitSource<InputSplit> source2 = mock(InputSplitSource.class);

        when(source1.createInputSplits(Matchers.anyInt())).thenReturn(emptySplits);
        when(source2.createInputSplits(Matchers.anyInt())).thenReturn(emptySplits);
        when(source1.getInputSplitAssigner(emptySplits)).thenReturn(assigner1);
        when(source2.getInputSplitAssigner(emptySplits)).thenReturn(assigner2);

        final JobID jobId = new JobID();
        final String jobName = "Test Job Sample Name";
        final Configuration cfg = new Configuration();

        JobVertex v1 = new JobVertex("vertex1");
        JobVertex v2 = new JobVertex("vertex2");
        JobVertex v3 = new JobVertex("vertex3");
        JobVertex v4 = new JobVertex("vertex4");
        JobVertex v5 = new JobVertex("vertex5");

        v1.setParallelism(5);
        v2.setParallelism(7);
        v3.setParallelism(2);
        v4.setParallelism(11);
        v5.setParallelism(4);

        v1.setInvokableClass(AbstractInvokable.class);
        v2.setInvokableClass(AbstractInvokable.class);
        v3.setInvokableClass(AbstractInvokable.class);
        v4.setInvokableClass(AbstractInvokable.class);
        v5.setInvokableClass(AbstractInvokable.class);

        v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
        v4.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
        v4.connectNewDataSetAsInput(v3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
        v5.connectNewDataSetAsInput(v4, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
        v5.connectNewDataSetAsInput(v3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);

        v3.setInputSplitSource(source1);
        v5.setInputSplitSource(source2);

        List<JobVertex> ordered = new ArrayList<JobVertex>(Arrays.asList(v1, v2, v3, v4, v5));

        ExecutionGraph eg = new ExecutionGraph(
            TestingUtils.defaultExecutor(),
            TestingUtils.defaultExecutor(),
            jobId,
            jobName,
            cfg,
            new SerializedValue<>(new ExecutionConfig()),
            AkkaUtils.getDefaultTimeout(),
            new NoRestartStrategy(),
            new Scheduler(TestingUtils.defaultExecutionContext()));

        try {
            eg.attachJobGraph(ordered);
        } catch (JobException e) {
            e.printStackTrace();
            fail("Job failed with exception: " + e.getMessage());
        }

        assertEquals(assigner1, eg.getAllVertices().get(v3.getID()).getSplitAssigner());
        assertEquals(assigner2, eg.getAllVertices().get(v5.getID()).getSplitAssigner());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
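The final assertions show that attaching the job graph queries each vertex's InputSplitSource and exposes the resulting InputSplitAssigner on the ExecutionJobVertex via getSplitAssigner(). As a possible extension, and only a sketch assuming org.mockito.Mockito.verify is statically imported like the mock and when calls above, the interaction with the sources could be asserted explicitly:

// Sketch: verify that attachJobGraph queried the split sources and their assigners.
verify(source1).createInputSplits(Matchers.anyInt());
verify(source2).createInputSplits(Matchers.anyInt());
verify(source1).getInputSplitAssigner(emptySplits);
verify(source2).getInputSplitAssigner(emptySplits);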