Use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.
The class ZooKeeperDefaultDispatcherRunnerTest, method createJobGraphWithBlobs:
private JobGraph createJobGraphWithBlobs() throws IOException {
    final JobVertex vertex = new JobVertex("test vertex");
    vertex.setInvokableClass(NoOpInvokable.class);
    vertex.setParallelism(1);
    final JobGraph jobGraph = JobGraphTestUtils.streamingJobGraph(vertex);
    // register a small permanent blob that stands in for a user JAR
    final PermanentBlobKey permanentBlobKey =
            blobServer.putPermanent(jobGraph.getJobID(), new byte[256]);
    jobGraph.addUserJarBlobKey(permanentBlobKey);
    return jobGraph;
}
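For context, the blob registered above can later be read back from the BlobServer through its key. A minimal sketch, assuming the blobServer field from the test, the JobGraph returned by the helper above, JobGraph#getUserJarBlobKeys, BlobServer#getFile, and a statically imported assertEquals:

// Sketch only: retrieve the permanent blob stored for the job via its key.
final JobGraph jobGraph = createJobGraphWithBlobs();
final PermanentBlobKey blobKey = jobGraph.getUserJarBlobKeys().get(0);
final File blobFile = blobServer.getFile(jobGraph.getJobID(), blobKey);
assertEquals(256, blobFile.length()); // the 256-byte payload stored above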
Use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.
The class AllVerticesIteratorTest, method testAllVertices:
@Test
public void testAllVertices() {
    try {
        JobVertex v1 = new JobVertex("v1");
        JobVertex v2 = new JobVertex("v2");
        JobVertex v3 = new JobVertex("v3");
        JobVertex v4 = new JobVertex("v4");
        v1.setInvokableClass(AbstractInvokable.class);
        v2.setInvokableClass(AbstractInvokable.class);
        v3.setInvokableClass(AbstractInvokable.class);
        v4.setInvokableClass(AbstractInvokable.class);
        v1.setParallelism(1);
        v2.setParallelism(7);
        v3.setParallelism(3);
        v4.setParallelism(2);
        ExecutionGraph eg = ExecutionGraphTestUtils.createSimpleTestGraph(v1, v2, v3, v4);
        ExecutionJobVertex ejv1 = eg.getJobVertex(v1.getID());
        ExecutionJobVertex ejv2 = eg.getJobVertex(v2.getID());
        ExecutionJobVertex ejv3 = eg.getJobVertex(v3.getID());
        ExecutionJobVertex ejv4 = eg.getJobVertex(v4.getID());
        AllVerticesIterator iter =
                new AllVerticesIterator(Arrays.asList(ejv1, ejv2, ejv3, ejv4).iterator());
        int numReturned = 0;
        while (iter.hasNext()) {
            // a second hasNext() call must not advance the iterator
            iter.hasNext();
            Assert.assertNotNull(iter.next());
            numReturned++;
        }
        // one execution vertex per parallel subtask: 1 + 7 + 3 + 2 = 13
        Assert.assertEquals(13, numReturned);
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail(e.getMessage());
    }
}
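The expected count of 13 is simply the sum of the four parallelism values. As a cross-check, a minimal sketch (assuming ExecutionJobVertex#getTaskVertices returns one ExecutionVertex per parallel subtask, with java.util.Arrays and java.util.stream.Stream imported) that computes the same total without the iterator:

// Sketch only: flatten the task vertices of every job vertex and count them.
long total =
        Stream.of(ejv1, ejv2, ejv3, ejv4)
                .flatMap(ejv -> Arrays.stream(ejv.getTaskVertices()))
                .count();
Assert.assertEquals(13, total); // 1 + 7 + 3 + 2 parallel subtasks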
Use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.
The class StateAssignmentOperationTest, method testChannelStateAssignmentDownscalingTwoDifferentGates:
@Test
public void testChannelStateAssignmentDownscalingTwoDifferentGates()
        throws JobException, JobExecutionException {
    JobVertex upstream1 = createJobVertex(new OperatorID(), 2);
    JobVertex upstream2 = createJobVertex(new OperatorID(), 2);
    JobVertex downstream = createJobVertex(new OperatorID(), 2);
    List<OperatorID> operatorIds =
            Stream.of(upstream1, upstream2, downstream)
                    .map(v -> v.getOperatorIDs().get(0).getGeneratedOperatorID())
                    .collect(Collectors.toList());
    Map<OperatorID, OperatorState> states = buildOperatorStates(operatorIds, 3);
    connectVertices(upstream1, downstream, ARBITRARY, RANGE);
    connectVertices(upstream2, downstream, ROUND_ROBIN, ROUND_ROBIN);
    Map<OperatorID, ExecutionJobVertex> vertices =
            toExecutionVertices(upstream1, upstream2, downstream);
    new StateAssignmentOperation(0, new HashSet<>(vertices.values()), states, false).assignStates();
    assertEquals(
            new InflightDataRescalingDescriptor(
                    array(
                            gate(to(0, 1), mappings(to(0, 2), to(1)), set(1), RESCALING),
                            gate(to(0, 2), mappings(to(0, 2), to(1)), emptySet(), RESCALING))),
            getAssignedState(vertices.get(operatorIds.get(2)), operatorIds.get(2), 0)
                    .getInputRescalingDescriptor());
}
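The connectVertices helper invoked above is not reproduced on this page. A minimal sketch of how such a helper could look, assuming it wires the two vertices with an all-to-all edge and attaches the given SubtaskStateMapper instances to that edge; the exact body in the Flink sources may differ:

// Sketch under the stated assumptions, not the verbatim Flink helper.
private void connectVertices(
        JobVertex upstream,
        JobVertex downstream,
        SubtaskStateMapper upstreamMapper,
        SubtaskStateMapper downstreamMapper) {
    JobEdge edge =
            downstream.connectNewDataSetAsInput(
                    upstream, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    edge.setUpstreamSubtaskStateMapper(upstreamMapper);
    edge.setDownstreamSubtaskStateMapper(downstreamMapper);
}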
Use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.
The class StateAssignmentOperationTest, method createJobVertex:
private JobVertex createJobVertex(
        OperatorID operatorID, OperatorID userDefinedOperatorId, int parallelism) {
    JobVertex jobVertex =
            new JobVertex(
                    operatorID.toHexString(),
                    new JobVertexID(),
                    singletonList(OperatorIDPair.of(operatorID, userDefinedOperatorId)));
    jobVertex.setInvokableClass(NoOpInvokable.class);
    jobVertex.setParallelism(parallelism);
    return jobVertex;
}
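The test above also calls a two-argument createJobVertex(OperatorID, int) overload that is not shown on this page. A plausible minimal sketch, assuming it reuses the generated operator ID as the user-defined ID and delegates to the three-argument variant above:

// Assumption: delegates to the three-argument overload shown above.
private JobVertex createJobVertex(OperatorID operatorID, int parallelism) {
    return createJobVertex(operatorID, operatorID, parallelism);
}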
Use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.
The class StateAssignmentOperationTest, method toExecutionVertices:
private Map<OperatorID, ExecutionJobVertex> toExecutionVertices(JobVertex... jobVertices)
        throws JobException, JobExecutionException {
    JobGraph jobGraph = JobGraphTestUtils.streamingJobGraph(jobVertices);
    ExecutionGraph eg = TestingDefaultExecutionGraphBuilder.newBuilder().setJobGraph(jobGraph).build();
    return Arrays.stream(jobVertices)
            .collect(
                    Collectors.toMap(
                            jobVertex -> jobVertex.getOperatorIDs().get(0).getGeneratedOperatorID(),
                            jobVertex -> {
                                try {
                                    return eg.getJobVertex(jobVertex.getID());
                                } catch (Exception e) {
                                    throw new RuntimeException(e);
                                }
                            }));
}