Use of org.apache.flink.runtime.executiongraph.restart.NoRestartStrategy in project flink by apache.
The class ExecutionGraphDeploymentTest, method testBuildDeploymentDescriptor: builds a four-vertex job graph on an ExecutionGraph configured with NoRestartStrategy, deploys one subtask of v2 to a test slot, and checks the fields of the resulting TaskDeploymentDescriptor.
@Test
public void testBuildDeploymentDescriptor() {
    try {
        final JobID jobId = new JobID();

        final JobVertexID jid1 = new JobVertexID();
        final JobVertexID jid2 = new JobVertexID();
        final JobVertexID jid3 = new JobVertexID();
        final JobVertexID jid4 = new JobVertexID();

        JobVertex v1 = new JobVertex("v1", jid1);
        JobVertex v2 = new JobVertex("v2", jid2);
        JobVertex v3 = new JobVertex("v3", jid3);
        JobVertex v4 = new JobVertex("v4", jid4);

        v1.setParallelism(10);
        v2.setParallelism(10);
        v3.setParallelism(10);
        v4.setParallelism(10);

        v1.setInvokableClass(BatchTask.class);
        v2.setInvokableClass(BatchTask.class);
        v3.setInvokableClass(BatchTask.class);
        v4.setInvokableClass(BatchTask.class);

        // topology: v1 -> v2 -> {v3, v4}, all connections pipelined all-to-all
        v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
        v3.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
        v4.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);

        ExecutionGraph eg = new ExecutionGraph(
            TestingUtils.defaultExecutor(), TestingUtils.defaultExecutor(),
            jobId, "some job", new Configuration(),
            new SerializedValue<>(new ExecutionConfig()),
            AkkaUtils.getDefaultTimeout(),
            new NoRestartStrategy(),
            new Scheduler(TestingUtils.defaultExecutionContext()));

        List<JobVertex> ordered = Arrays.asList(v1, v2, v3, v4);
        eg.attachJobGraph(ordered);

        ExecutionJobVertex ejv = eg.getAllVertices().get(jid2);
        ExecutionVertex vertex = ejv.getTaskVertices()[3];

        ExecutionGraphTestUtils.SimpleActorGateway instanceGateway =
            new ExecutionGraphTestUtils.SimpleActorGateway(TestingUtils.directExecutionContext());

        final Instance instance = getInstance(new ActorTaskManagerGateway(instanceGateway));
        final SimpleSlot slot = instance.allocateSimpleSlot(jobId);

        assertEquals(ExecutionState.CREATED, vertex.getExecutionState());

        // deploying the vertex sends a TaskDeploymentDescriptor to the test gateway
        vertex.deployToSlot(slot);

        assertEquals(ExecutionState.DEPLOYING, vertex.getExecutionState());

        TaskDeploymentDescriptor descr = instanceGateway.lastTDD;
        assertNotNull(descr);

        JobInformation jobInformation =
            descr.getSerializedJobInformation().deserializeValue(getClass().getClassLoader());
        TaskInformation taskInformation =
            descr.getSerializedTaskInformation().deserializeValue(getClass().getClassLoader());

        assertEquals(jobId, jobInformation.getJobId());
        assertEquals(jid2, taskInformation.getJobVertexId());
        assertEquals(3, descr.getSubtaskIndex());
        assertEquals(10, taskInformation.getNumberOfSubtasks());
        assertEquals(BatchTask.class.getName(), taskInformation.getInvokableClassName());
        assertEquals("v2", taskInformation.getTaskName());

        Collection<ResultPartitionDeploymentDescriptor> producedPartitions = descr.getProducedPartitions();
        Collection<InputGateDeploymentDescriptor> consumedPartitions = descr.getInputGates();

        // v2's results are consumed by v3 and v4 (2 produced partitions);
        // v2 itself consumes only v1 (1 input gate)
        assertEquals(2, producedPartitions.size());
        assertEquals(1, consumedPartitions.size());

        Iterator<ResultPartitionDeploymentDescriptor> iteratorProducedPartitions = producedPartitions.iterator();
        Iterator<InputGateDeploymentDescriptor> iteratorConsumedPartitions = consumedPartitions.iterator();

        assertEquals(10, iteratorProducedPartitions.next().getNumberOfSubpartitions());
        assertEquals(10, iteratorProducedPartitions.next().getNumberOfSubpartitions());
        assertEquals(10, iteratorConsumedPartitions.next().getInputChannelDeploymentDescriptors().length);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Use of org.apache.flink.runtime.executiongraph.restart.NoRestartStrategy in project flink by apache.
The class ExecutionGraphCheckpointCoordinatorTest, method createExecutionGraphAndEnableCheckpointing: a test helper that builds an ExecutionGraph with a NoRestartStrategy and enables checkpointing using the given checkpoint ID counter and completed-checkpoint store.
private ExecutionGraph createExecutionGraphAndEnableCheckpointing(
        CheckpointIDCounter counter,
        CompletedCheckpointStore store) throws Exception {

    ExecutionGraph executionGraph = new ExecutionGraph(
        TestingUtils.defaultExecutor(), TestingUtils.defaultExecutor(),
        new JobID(), "test", new Configuration(),
        new SerializedValue<>(new ExecutionConfig()), Time.days(1L),
        new NoRestartStrategy(),
        Collections.<BlobKey>emptyList(), Collections.<URL>emptyList(),
        new Scheduler(TestingUtils.defaultExecutionContext()),
        ClassLoader.getSystemClassLoader(), new UnregisteredMetricsGroup());

    executionGraph.enableCheckpointing(
        100, 100, 100, 1,
        ExternalizedCheckpointSettings.none(),
        Collections.<ExecutionJobVertex>emptyList(),
        Collections.<ExecutionJobVertex>emptyList(),
        Collections.<ExecutionJobVertex>emptyList(),
        counter, store, null, null,
        CheckpointStatsTrackerTest.createTestTracker());

    JobVertex jobVertex = new JobVertex("MockVertex");
    jobVertex.setInvokableClass(AbstractInvokable.class);
    executionGraph.attachJobGraph(Collections.singletonList(jobVertex));

    return executionGraph;
}
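The snippet above shows only the helper itself. As a hedged sketch (the call site is not taken from the Flink sources), it could be invoked from a test using the standalone in-memory counter and store implementations that also appear in the ArchivedExecutionGraphTest example further down this page:

// Hypothetical call site for the helper above; StandaloneCheckpointIDCounter and
// StandaloneCompletedCheckpointStore are the in-memory implementations used later
// on this page, so no external checkpoint storage is needed.
ExecutionGraph graph = createExecutionGraphAndEnableCheckpointing(
    new StandaloneCheckpointIDCounter(),
    new StandaloneCompletedCheckpointStore(1));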
Use of org.apache.flink.runtime.executiongraph.restart.NoRestartStrategy in project flink by apache.
The class ExecutionGraphRestartTest, method testNoManualRestart: verifies that an ExecutionGraph configured with NoRestartStrategy stays in JobStatus.FAILED even when restart() is invoked explicitly.
@Test
public void testNoManualRestart() throws Exception {
    NoRestartStrategy restartStrategy = new NoRestartStrategy();
    Tuple2<ExecutionGraph, Instance> executionGraphInstanceTuple = createExecutionGraph(restartStrategy);
    ExecutionGraph eg = executionGraphInstanceTuple.f0;

    eg.getAllExecutionVertices().iterator().next().fail(new Exception("Test Exception"));

    // complete the cancellation of all vertices so the job reaches a terminal state
    for (ExecutionVertex vertex : eg.getAllExecutionVertices()) {
        vertex.getCurrentExecutionAttempt().cancelingComplete();
    }

    assertEquals(JobStatus.FAILED, eg.getState());

    // This should not restart the graph.
    eg.restart();

    assertEquals(JobStatus.FAILED, eg.getState());
}
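The expectation behind this test is simply that NoRestartStrategy never permits a restart. As a minimal sketch, assuming the RestartStrategy interface of this Flink version exposes canRestart() (an assumption, not shown in the snippet above), the core property can be stated as:

// Sketch only (assumption: RestartStrategy#canRestart() exists in this Flink version):
// NoRestartStrategy reports that it cannot restart, matching the test's expectation
// that the graph remains FAILED.
RestartStrategy strategy = new NoRestartStrategy();
assertFalse(strategy.canRestart());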
Use of org.apache.flink.runtime.executiongraph.restart.NoRestartStrategy in project flink by apache.
The class ExecutionGraphSignalsTest, method prepare: sets up an ExecutionGraph with a NoRestartStrategy over mocked ExecutionJobVertex instances (injected via PowerMockito) as the fixture for the signal-handling tests.
@Before
public void prepare() throws Exception {
    final JobID jobId = new JobID();
    final String jobName = "Test Job Sample Name";
    final Configuration cfg = new Configuration();

    assert (mockEJV.length == 5);

    JobVertex v1 = new JobVertex("vertex1");
    JobVertex v2 = new JobVertex("vertex2");
    JobVertex v3 = new JobVertex("vertex3");
    JobVertex v4 = new JobVertex("vertex4");
    JobVertex v5 = new JobVertex("vertex5");

    for (int i = 0; i < mockEJV.length; ++i) {
        mockEJV[i] = mock(ExecutionJobVertex.class);

        this.mockEV[i] = new ExecutionVertex[dop[i]];
        for (int j = 0; j < dop[i]; ++j) {
            this.mockEV[i][j] = mock(ExecutionVertex.class);
        }

        when(mockEJV[i].getProducedDataSets()).thenReturn(new IntermediateResult[0]);
        when(mockEJV[i].getTaskVertices()).thenReturn(this.mockEV[i]);
    }

    PowerMockito.whenNew(ExecutionJobVertex.class).withArguments(
        any(ExecutionGraph.class), same(v1), any(Integer.class).intValue(),
        any(FiniteDuration.class), any(Long.class).longValue()).thenReturn(mockEJV[0]);
    PowerMockito.whenNew(ExecutionJobVertex.class).withArguments(
        any(ExecutionGraph.class), same(v2), any(Integer.class).intValue(),
        any(FiniteDuration.class), any(Long.class).longValue()).thenReturn(mockEJV[1]);
    PowerMockito.whenNew(ExecutionJobVertex.class).withArguments(
        any(ExecutionGraph.class), same(v3), any(Integer.class).intValue(),
        any(FiniteDuration.class), any(Long.class).longValue()).thenReturn(mockEJV[2]);
    PowerMockito.whenNew(ExecutionJobVertex.class).withArguments(
        any(ExecutionGraph.class), same(v4), any(Integer.class).intValue(),
        any(FiniteDuration.class), any(Long.class).longValue()).thenReturn(mockEJV[3]);
    PowerMockito.whenNew(ExecutionJobVertex.class).withArguments(
        any(ExecutionGraph.class), same(v5), any(Integer.class).intValue(),
        any(FiniteDuration.class), any(Long.class).longValue()).thenReturn(mockEJV[4]);

    v1.setParallelism(dop[0]);
    v2.setParallelism(dop[1]);
    v3.setParallelism(dop[2]);
    v4.setParallelism(dop[3]);
    v5.setParallelism(dop[4]);

    v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    mockNumberOfInputs(1, 0);
    v4.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    mockNumberOfInputs(3, 1);
    v4.connectNewDataSetAsInput(v3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    mockNumberOfInputs(3, 2);
    v5.connectNewDataSetAsInput(v4, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    mockNumberOfInputs(4, 3);
    v5.connectNewDataSetAsInput(v3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    mockNumberOfInputs(4, 2);

    List<JobVertex> ordered = new ArrayList<JobVertex>(Arrays.asList(v1, v2, v3, v4, v5));

    eg = new ExecutionGraph(
        TestingUtils.defaultExecutor(), TestingUtils.defaultExecutor(),
        jobId, jobName, cfg,
        new SerializedValue<>(new ExecutionConfig()),
        AkkaUtils.getDefaultTimeout(),
        new NoRestartStrategy(),
        new Scheduler(TestingUtils.defaultExecutionContext()));

    eg.attachJobGraph(ordered);

    f = eg.getClass().getDeclaredField("state");
    f.setAccessible(true);
}
Use of org.apache.flink.runtime.executiongraph.restart.NoRestartStrategy in project flink by apache.
The class ArchivedExecutionGraphTest, method setupExecutionGraph: builds a runtime ExecutionGraph with a NoRestartStrategy, enables checkpointing, and fails one execution on purpose so the archived graph has state worth asserting on later.
@BeforeClass
public static void setupExecutionGraph() throws Exception {
    // -------------------------------------------------------------------------------------------------------------
    // Setup
    // -------------------------------------------------------------------------------------------------------------
    v1ID = new JobVertexID();
    v2ID = new JobVertexID();

    JobVertex v1 = new JobVertex("v1", v1ID);
    JobVertex v2 = new JobVertex("v2", v2ID);
    v1.setParallelism(1);
    v2.setParallelism(2);
    v1.setInvokableClass(AbstractInvokable.class);
    v2.setInvokableClass(AbstractInvokable.class);

    List<JobVertex> vertices = new ArrayList<JobVertex>(Arrays.asList(v1, v2));

    ExecutionConfig config = new ExecutionConfig();
    config.setExecutionMode(ExecutionMode.BATCH_FORCED);
    config.setRestartStrategy(new RestartStrategies.NoRestartStrategyConfiguration());
    config.setParallelism(4);
    config.enableObjectReuse();
    config.setGlobalJobParameters(new TestJobParameters());

    runtimeGraph = new ExecutionGraph(
        TestingUtils.defaultExecutor(), TestingUtils.defaultExecutor(),
        new JobID(), "test job", new Configuration(),
        new SerializedValue<>(config),
        AkkaUtils.getDefaultTimeout(),
        new NoRestartStrategy(),
        mock(SlotProvider.class));

    runtimeGraph.attachJobGraph(vertices);

    List<ExecutionJobVertex> jobVertices = new ArrayList<>();
    jobVertices.add(runtimeGraph.getJobVertex(v1ID));
    jobVertices.add(runtimeGraph.getJobVertex(v2ID));

    CheckpointStatsTracker statsTracker = new CheckpointStatsTracker(
        0, jobVertices, mock(JobSnapshottingSettings.class), new UnregisteredMetricsGroup());

    runtimeGraph.enableCheckpointing(
        100, 100, 100, 1,
        ExternalizedCheckpointSettings.none(),
        Collections.<ExecutionJobVertex>emptyList(),
        Collections.<ExecutionJobVertex>emptyList(),
        Collections.<ExecutionJobVertex>emptyList(),
        new StandaloneCheckpointIDCounter(),
        new StandaloneCompletedCheckpointStore(1),
        null, null, statsTracker);

    Map<String, Accumulator<?, ?>> userAccumulators = new HashMap<>();
    userAccumulators.put("userAcc", new LongCounter(64));

    Execution executionWithAccumulators =
        runtimeGraph.getJobVertex(v1ID).getTaskVertices()[0].getCurrentExecutionAttempt();

    runtimeGraph.getJobVertex(v2ID).getTaskVertices()[0].getCurrentExecutionAttempt()
        .fail(new RuntimeException("This exception was thrown on purpose."));
}
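The RestartStrategies.NoRestartStrategyConfiguration set on the ExecutionConfig above is the user-facing counterpart of the runtime NoRestartStrategy. As a minimal sketch of how a job disables restarts through the public API (standard configuration code from org.apache.flink.api.common.restartstrategy, not taken from this test):

// Disable restart attempts for a job via the public ExecutionConfig API;
// RestartStrategies.noRestart() produces the same NoRestartStrategyConfiguration
// that the test above sets explicitly.
ExecutionConfig userConfig = new ExecutionConfig();
userConfig.setRestartStrategy(RestartStrategies.noRestart());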