Use of org.apache.flink.runtime.executiongraph.ExecutionGraph in project flink by apache.
In the class FixedDelayRestartStrategyTest, the method testFixedDelayRestartStrategy:
@Test
public void testFixedDelayRestartStrategy() {
    int numberRestarts = 10;
    long restartDelay = 10;
    FixedDelayRestartStrategy fixedDelayRestartStrategy =
            new FixedDelayRestartStrategy(numberRestarts, restartDelay);
    ExecutionGraph executionGraph = mock(ExecutionGraph.class);
    when(executionGraph.getFutureExecutor())
            .thenReturn(ExecutionContext$.MODULE$.fromExecutor(MoreExecutors.directExecutor()));
    while (fixedDelayRestartStrategy.canRestart()) {
        fixedDelayRestartStrategy.restart(executionGraph);
    }
    Mockito.verify(executionGraph, Mockito.times(numberRestarts)).restart();
}
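For context, the strategy exercised by this test is what a user requests through Flink's public RestartStrategies API. A minimal sketch of that user-facing counterpart, mirroring the test's values (the class name FixedDelayRestartExample is ours, not Flink's):

import java.util.concurrent.TimeUnit;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class FixedDelayRestartExample {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // At most 10 restart attempts, with 10 ms between attempts, as in the test above.
        env.setRestartStrategy(
                RestartStrategies.fixedDelayRestart(10, Time.of(10, TimeUnit.MILLISECONDS)));
    }
}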
Use of org.apache.flink.runtime.executiongraph.ExecutionGraph in project flink by apache.
In the class CreatingExecutionGraph, the method handleExecutionGraphCreation:
private void handleExecutionGraphCreation(
        @Nullable ExecutionGraphWithVertexParallelism executionGraphWithVertexParallelism,
        @Nullable Throwable throwable) {
    if (throwable != null) {
        logger.info(
                "Failed to go from {} to {} because the ExecutionGraph creation failed.",
                CreatingExecutionGraph.class.getSimpleName(),
                Executing.class.getSimpleName(),
                throwable);
        context.goToFinished(context.getArchivedExecutionGraph(JobStatus.FAILED, throwable));
    } else {
        for (ExecutionVertex vertex :
                executionGraphWithVertexParallelism.executionGraph.getAllExecutionVertices()) {
            vertex.getCurrentExecutionAttempt().transitionState(ExecutionState.SCHEDULED);
        }
        final AssignmentResult result =
                context.tryToAssignSlots(executionGraphWithVertexParallelism);
        if (result.isSuccess()) {
            logger.debug(
                    "Successfully reserved and assigned the required slots for the ExecutionGraph.");
            final ExecutionGraph executionGraph = result.getExecutionGraph();
            final ExecutionGraphHandler executionGraphHandler =
                    new ExecutionGraphHandler(
                            executionGraph,
                            getLogger(),
                            context.getIOExecutor(),
                            context.getMainThreadExecutor());
            // Operator coordinators outlive the current state, so we need to use the context as a
            // global failure handler.
            final OperatorCoordinatorHandler operatorCoordinatorHandler =
                    operatorCoordinatorHandlerFactory.create(executionGraph, context);
            operatorCoordinatorHandler.initializeOperatorCoordinators(context.getMainThreadExecutor());
            operatorCoordinatorHandler.startAllOperatorCoordinators();
            context.goToExecuting(
                    result.getExecutionGraph(),
                    executionGraphHandler,
                    operatorCoordinatorHandler,
                    Collections.emptyList());
        } else {
            logger.debug("Failed to reserve and assign the required slots. Waiting for new resources.");
            context.goToWaitingForResources();
        }
    }
}
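The (result, throwable) parameter pair indicates this method is meant to serve as a CompletableFuture completion callback. A sketch of how such a handler is typically attached, assuming a creation future field and a main-thread executor on the context (both names are illustrative, not necessarily Flink's exact ones):

// Hypothetical wiring; the future's field name is an assumption for illustration.
executionGraphWithVertexParallelismFuture.whenCompleteAsync(
        this::handleExecutionGraphCreation,
        context.getMainThreadExecutor());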
Use of org.apache.flink.runtime.executiongraph.ExecutionGraph in project flink by apache.
In the class ExecutionGraphToInputsLocationsRetrieverAdapterTest, the method testGetNonExistingExecutionVertexWillThrowException:
/**
 * Tests that getting the task manager location of a non-existing execution throws an
 * exception.
 */
@Test
public void testGetNonExistingExecutionVertexWillThrowException() throws Exception {
    final JobVertex jobVertex = ExecutionGraphTestUtils.createNoOpVertex(1);
    final ExecutionGraph eg = ExecutionGraphTestUtils.createSimpleTestGraph(jobVertex);
    final ExecutionGraphToInputsLocationsRetrieverAdapter inputsLocationsRetriever =
            new ExecutionGraphToInputsLocationsRetrieverAdapter(eg);
    ExecutionVertexID invalidExecutionVertexId = new ExecutionVertexID(new JobVertexID(), 0);
    try {
        inputsLocationsRetriever.getTaskManagerLocation(invalidExecutionVertexId);
        fail("Should throw exception if execution vertex doesn't exist!");
    } catch (IllegalStateException expected) {
        // expect this exception
    }
}
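On JUnit 4.13 or newer, the try/fail/catch idiom above can be stated more compactly with assertThrows; an equivalent sketch:

// import static org.junit.Assert.assertThrows;
assertThrows(
        IllegalStateException.class,
        () -> inputsLocationsRetriever.getTaskManagerLocation(invalidExecutionVertexId));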
Use of org.apache.flink.runtime.executiongraph.ExecutionGraph in project flink by apache.
In the class ExecutionGraphToInputsLocationsRetrieverAdapterTest, the method testGetEmptyTaskManagerLocationIfVertexNotScheduled:
/**
 * Tests that an empty task manager location is returned if the vertex is not scheduled.
 */
@Test
public void testGetEmptyTaskManagerLocationIfVertexNotScheduled() throws Exception {
    final JobVertex jobVertex = ExecutionGraphTestUtils.createNoOpVertex(1);
    final ExecutionGraph eg = ExecutionGraphTestUtils.createSimpleTestGraph(jobVertex);
    final ExecutionGraphToInputsLocationsRetrieverAdapter inputsLocationsRetriever =
            new ExecutionGraphToInputsLocationsRetrieverAdapter(eg);
    ExecutionVertexID executionVertexId = new ExecutionVertexID(jobVertex.getID(), 0);
    Optional<CompletableFuture<TaskManagerLocation>> taskManagerLocation =
            inputsLocationsRetriever.getTaskManagerLocation(executionVertexId);
    assertFalse(taskManagerLocation.isPresent());
}
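The Optional-of-future return type encodes two levels of absence: an empty Optional means the vertex has not been scheduled yet, while a present but uncompleted future means it is scheduled but its location is not yet known. A caller-side sketch (the println consumer is purely illustrative):

inputsLocationsRetriever
        .getTaskManagerLocation(executionVertexId)
        .ifPresent(locationFuture ->
                locationFuture.thenAccept(location ->
                        System.out.println("Vertex runs on " + location)));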
Use of org.apache.flink.runtime.executiongraph.ExecutionGraph in project flink by apache.
In the class ExecutionGraphToInputsLocationsRetrieverAdapterTest, the method testGetConsumedResultPartitionsProducers:
/**
 * Tests that the producers of consumed result partitions can be retrieved.
 */
@Test
public void testGetConsumedResultPartitionsProducers() throws Exception {
    final JobVertex producer1 = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex producer2 = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex consumer = ExecutionGraphTestUtils.createNoOpVertex(1);
    consumer.connectNewDataSetAsInput(
            producer1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    consumer.connectNewDataSetAsInput(
            producer2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    final ExecutionGraph eg =
            ExecutionGraphTestUtils.createSimpleTestGraph(producer1, producer2, consumer);
    final ExecutionGraphToInputsLocationsRetrieverAdapter inputsLocationsRetriever =
            new ExecutionGraphToInputsLocationsRetrieverAdapter(eg);
    ExecutionVertexID evIdOfProducer1 = new ExecutionVertexID(producer1.getID(), 0);
    ExecutionVertexID evIdOfProducer2 = new ExecutionVertexID(producer2.getID(), 0);
    ExecutionVertexID evIdOfConsumer = new ExecutionVertexID(consumer.getID(), 0);
    Collection<Collection<ExecutionVertexID>> producersOfProducer1 =
            inputsLocationsRetriever.getConsumedResultPartitionsProducers(evIdOfProducer1);
    Collection<Collection<ExecutionVertexID>> producersOfProducer2 =
            inputsLocationsRetriever.getConsumedResultPartitionsProducers(evIdOfProducer2);
    Collection<Collection<ExecutionVertexID>> producersOfConsumer =
            inputsLocationsRetriever.getConsumedResultPartitionsProducers(evIdOfConsumer);
    assertThat(producersOfProducer1, is(empty()));
    assertThat(producersOfProducer2, is(empty()));
    assertThat(producersOfConsumer, hasSize(2));
    assertThat(producersOfConsumer, hasItem(Collections.singletonList(evIdOfProducer1)));
    assertThat(producersOfConsumer, hasItem(Collections.singletonList(evIdOfProducer2)));
}
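The nested collection asserted above has one inner collection per consumed result, each listing that result's producer vertices. A sketch of how a caller would walk this structure:

for (Collection<ExecutionVertexID> producersOfOneConsumedResult : producersOfConsumer) {
    for (ExecutionVertexID producerId : producersOfOneConsumedResult) {
        // For the consumer above, producerId is evIdOfProducer1 or evIdOfProducer2.
    }
}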