Example usage of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in the Apache Flink project (flink by apache).
From the class ExecutionGraphTestUtils, method verifyGeneratedExecutionJobVertex.
// ------------------------------------------------------------------------
// graph vertex verifications
// ------------------------------------------------------------------------
/**
 * Verifies the generated {@link ExecutionJobVertex} for a given {@link JobVertex} in a {@link
 * ExecutionGraph}: basic properties, produced data sets, and the inputs of every generated
 * {@link ExecutionVertex}.
 *
 * @param executionGraph the generated execution graph
 * @param originJobVertex the vertex to verify for
 * @param inputJobVertices upstream vertices of the verified vertex, used to check inputs of
 *     generated vertex; {@code null} means the vertex must have no inputs
 * @param outputJobVertices downstream vertices of the verified vertex, used to check produced
 *     data sets of generated vertex; {@code null} means the vertex must produce no data sets
 */
public static void verifyGeneratedExecutionJobVertex(
        ExecutionGraph executionGraph,
        JobVertex originJobVertex,
        @Nullable List<JobVertex> inputJobVertices,
        @Nullable List<JobVertex> outputJobVertices) {

    ExecutionJobVertex ejv = executionGraph.getAllVertices().get(originJobVertex.getID());
    assertNotNull(ejv);

    // verify basic properties
    assertEquals(originJobVertex.getParallelism(), ejv.getParallelism());
    assertEquals(executionGraph.getJobID(), ejv.getJobId());
    assertEquals(originJobVertex.getID(), ejv.getJobVertexId());
    assertEquals(originJobVertex, ejv.getJobVertex());

    // verify produced data sets
    if (outputJobVertices == null) {
        assertEquals(0, ejv.getProducedDataSets().length);
    } else {
        assertEquals(outputJobVertices.size(), ejv.getProducedDataSets().length);
        for (int i = 0; i < outputJobVertices.size(); i++) {
            assertEquals(
                    originJobVertex.getProducedDataSets().get(i).getId(),
                    ejv.getProducedDataSets()[i].getId());
            // BUGFIX: index with i, not 0 — previously only the partition count of the
            // first produced data set was verified, regardless of the loop variable.
            assertEquals(
                    originJobVertex.getParallelism(),
                    ejv.getProducedDataSets()[i].getPartitions().length);
        }
    }

    // verify task vertices for their basic properties and their inputs
    assertEquals(originJobVertex.getParallelism(), ejv.getTaskVertices().length);

    int subtaskIndex = 0;
    for (ExecutionVertex ev : ejv.getTaskVertices()) {
        assertEquals(executionGraph.getJobID(), ev.getJobId());
        assertEquals(originJobVertex.getID(), ev.getJobvertexId());

        assertEquals(originJobVertex.getParallelism(), ev.getTotalNumberOfParallelSubtasks());
        assertEquals(subtaskIndex, ev.getParallelSubtaskIndex());

        if (inputJobVertices == null) {
            assertEquals(0, ev.getNumberOfInputs());
        } else {
            assertEquals(inputJobVertices.size(), ev.getNumberOfInputs());

            for (int i = 0; i < inputJobVertices.size(); i++) {
                // Each consumed partition group is expected to cover all partitions of the
                // i-th input, in ascending partition-number order.
                ConsumedPartitionGroup consumedPartitionGroup = ev.getConsumedPartitionGroup(i);
                assertEquals(
                        inputJobVertices.get(i).getParallelism(), consumedPartitionGroup.size());

                int expectedPartitionNum = 0;
                for (IntermediateResultPartitionID consumedPartitionId : consumedPartitionGroup) {
                    assertEquals(expectedPartitionNum, consumedPartitionId.getPartitionNumber());
                    expectedPartitionNum++;
                }
            }
        }

        subtaskIndex++;
    }
}
Example usage of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in the Apache Flink project (flink by apache).
From the class ExecutionGraphVariousFailuesTest, method testFailingNotifyPartitionDataAvailable.
/**
 * Tests that a failing notifyPartitionDataAvailable call with a non-existing execution attempt
 * id, will not fail the execution graph.
 */
@Test
public void testFailingNotifyPartitionDataAvailable() throws Exception {
    final SchedulerBase scheduler =
            SchedulerTestingUtils.newSchedulerBuilder(
                            JobGraphTestUtils.emptyJobGraph(),
                            ComponentMainThreadExecutorServiceAdapter.forMainThread())
                    .build();
    scheduler.startScheduling();

    final ExecutionGraph executionGraph = scheduler.getExecutionGraph();
    assertEquals(JobStatus.RUNNING, executionGraph.getState());
    ExecutionGraphTestUtils.switchAllVerticesToRunning(executionGraph);

    // Build a partition id whose producing execution attempt is unknown to the scheduler.
    final ResultPartitionID unknownPartitionId =
            new ResultPartitionID(new IntermediateResultPartitionID(), new ExecutionAttemptID());

    try {
        scheduler.notifyPartitionDataAvailable(unknownPartitionId);
        fail("Error expected.");
    } catch (IllegalStateException e) {
        // expected: the producer execution attempt cannot be found
        assertThat(e.getMessage(), containsString("Cannot find execution for execution Id"));
    }

    // The failed notification must not have transitioned the job out of RUNNING.
    assertEquals(JobStatus.RUNNING, executionGraph.getState());
}
Example usage of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in the Apache Flink project (flink by apache).
From the class ExecutionVertexTest, method testResetForNewExecutionReleasesPartitions.
/** Tests that resetting a finished execution vertex for a new execution releases its partitions. */
@Test
public void testResetForNewExecutionReleasesPartitions() throws Exception {
    // Wire a producer -> consumer topology over a blocking intermediate result.
    final JobVertex producer = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex consumer = ExecutionGraphTestUtils.createNoOpVertex(1);
    consumer.connectNewDataSetAsInput(
            producer, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);

    // Captures the partitions for which tracking is stopped and a release is triggered.
    final CompletableFuture<Collection<ResultPartitionID>> releasedPartitions =
            new CompletableFuture<>();
    final TestingJobMasterPartitionTracker tracker = new TestingJobMasterPartitionTracker();
    tracker.setStopTrackingAndReleasePartitionsConsumer(releasedPartitions::complete);

    final JobGraph jobGraph = JobGraphTestUtils.streamingJobGraph(producer, consumer);
    final SchedulerBase scheduler =
            SchedulerTestingUtils.newSchedulerBuilder(
                            jobGraph, ComponentMainThreadExecutorServiceAdapter.forMainThread())
                    .setPartitionTracker(tracker)
                    .build();
    scheduler.startScheduling();

    final ExecutionJobVertex producerVertex = scheduler.getExecutionJobVertex(producer.getID());
    final Execution producerAttempt =
            producerVertex.getTaskVertices()[0].getCurrentExecutionAttempt();

    // Neither scheduling nor finishing alone releases the blocking partition.
    assertFalse(releasedPartitions.isDone());
    producerAttempt.markFinished();
    assertFalse(releasedPartitions.isDone());

    // Resetting the producer for a new execution must trigger the release.
    for (ExecutionVertex vertex : producerVertex.getTaskVertices()) {
        vertex.resetForNewExecution();
    }

    final IntermediateResultPartitionID partitionId =
            producerVertex.getProducedDataSets()[0].getPartitions()[0].getPartitionId();
    final ResultPartitionID expectedReleasedPartition =
            producerAttempt
                    .getResultPartitionDeploymentDescriptor(partitionId)
                    .get()
                    .getShuffleDescriptor()
                    .getResultPartitionID();
    assertThat(releasedPartitions.get()).contains(expectedReleasedPartition);
}
Example usage of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in the Apache Flink project (flink by apache).
From the class PointwisePatternTest, method testHighToLow.
/**
 * Checks the pointwise wiring from a high-parallelism upstream to a low-parallelism downstream:
 * every downstream task consumes a balanced share of partitions and every upstream partition is
 * consumed exactly once.
 */
private void testHighToLow(int highDop, int lowDop) throws Exception {
    if (highDop < lowDop) {
        throw new IllegalArgumentException();
    }

    // Each downstream task consumes at least `factor` partitions; if the parallelisms do
    // not divide evenly, some tasks consume one extra partition.
    final int factor = highDop / lowDop;
    final int delta = highDop % lowDop == 0 ? 0 : 1;

    final ExecutionJobVertex target = setUpExecutionGraphAndGetDownstreamVertex(highDop, lowDop);

    // Counts how often each upstream partition number is consumed downstream.
    final int[] consumptionCount = new int[highDop];

    for (ExecutionVertex task : target.getTaskVertices()) {
        assertEquals(1, task.getNumberOfInputs());

        final List<IntermediateResultPartitionID> consumed = new ArrayList<>();
        for (ConsumedPartitionGroup group : task.getAllConsumedPartitionGroups()) {
            for (IntermediateResultPartitionID id : group) {
                consumed.add(id);
            }
        }

        final int numConsumed = consumed.size();
        assertTrue(numConsumed >= factor && numConsumed <= factor + delta);

        for (IntermediateResultPartitionID id : consumed) {
            consumptionCount[id.getPartitionNumber()]++;
        }
    }

    // Pointwise pattern: each upstream partition feeds exactly one downstream task.
    for (int count : consumptionCount) {
        assertEquals(1, count);
    }
}
Example usage of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in the Apache Flink project (flink by apache).
From the class PointwisePatternTest, method testConnections.
/**
 * Verify the connections between upstream result partitions and downstream vertices.
 */
private void testConnections(
        int sourceParallelism, int targetParallelism, int[][] expectedConsumedPartitionNumber)
        throws Exception {
    final ExecutionJobVertex target =
            setUpExecutionGraphAndGetDownstreamVertex(sourceParallelism, targetParallelism);

    final ExecutionVertex[] tasks = target.getTaskVertices();
    for (int taskIndex = 0; taskIndex < tasks.length; taskIndex++) {
        final ConsumedPartitionGroup group = tasks[taskIndex].getConsumedPartitionGroup(0);
        final int[] expectedPartitionNumbers = expectedConsumedPartitionNumber[taskIndex];
        assertEquals(expectedPartitionNumbers.length, group.size());

        // The group must yield exactly the expected partition numbers, in order.
        int position = 0;
        for (IntermediateResultPartitionID partitionId : group) {
            assertEquals(expectedPartitionNumbers[position++], partitionId.getPartitionNumber());
        }
    }
}
End of aggregated usage examples.