Example usage of org.apache.flink.runtime.scheduler.SchedulerBase in the Apache Flink project.
From class DefaultExecutionGraphDeploymentTest, method testRegistrationOfExecutionsFinishing:
/**
 * Verifies that executions are deregistered from the execution graph's registry once they are
 * marked FINISHED.
 */
@Test
public void testRegistrationOfExecutionsFinishing() throws Exception {
    // Declaring "throws Exception" lets JUnit report the full stack trace on failure.
    // The previous try/catch with printStackTrace() + fail(e.getMessage()) discarded the
    // trace and could even fail(null) for exceptions without a message.
    final JobVertexID jid1 = new JobVertexID();
    final JobVertexID jid2 = new JobVertexID();
    JobVertex v1 = new JobVertex("v1", jid1);
    JobVertex v2 = new JobVertex("v2", jid2);

    SchedulerBase scheduler = setupScheduler(v1, 7650, v2, 2350);

    // Snapshot the registered executions first: markFinished() mutates the registry,
    // so we must not iterate the live values() view while finishing.
    Collection<Execution> executions =
            new ArrayList<>(scheduler.getExecutionGraph().getRegisteredExecutions().values());

    for (Execution e : executions) {
        e.markFinished();
    }

    // Every finished execution must have been removed from the registry.
    assertEquals(0, scheduler.getExecutionGraph().getRegisteredExecutions().size());
}
Example usage of org.apache.flink.runtime.scheduler.SchedulerBase in the Apache Flink project.
From class DefaultExecutionGraphDeploymentTest, method testAccumulatorsAndMetricsStorage:
/**
 * Verifies that {@link Execution#completeCancelling(Map, IOMetrics, boolean)} and {@link
 * Execution#markFailed(Throwable, boolean, Map, IOMetrics, boolean, boolean)} store the given
 * accumulators and metrics correctly.
 */
@Test
public void testAccumulatorsAndMetricsStorage() throws Exception {
    final JobVertexID jid1 = new JobVertexID();
    final JobVertexID jid2 = new JobVertexID();
    JobVertex v1 = new JobVertex("v1", jid1);
    JobVertex v2 = new JobVertex("v2", jid2);

    SchedulerBase scheduler = setupScheduler(v1, 1, v2, 1);
    Map<ExecutionAttemptID, Execution> executions =
            scheduler.getExecutionGraph().getRegisteredExecutions();

    IOMetrics ioMetrics = new IOMetrics(0, 0, 0, 0);
    Map<String, Accumulator<?, ?>> accumulators = Collections.emptyMap();

    // Cancellation path: metrics/accumulators handed to completeCancelling must stick.
    Execution cancelledExecution = executions.values().iterator().next();
    cancelledExecution.cancel();
    cancelledExecution.completeCancelling(accumulators, ioMetrics, false);

    assertEquals(ioMetrics, cancelledExecution.getIOMetrics());
    assertEquals(accumulators, cancelledExecution.getUserAccumulators());

    // Failure path: a fresh iterator is taken here, after the cancellation above
    // completed — presumably yielding the remaining registered execution.
    Execution failedExecution = executions.values().iterator().next();
    failedExecution.markFailed(new Throwable(), false, accumulators, ioMetrics, false, true);

    assertEquals(ioMetrics, failedExecution.getIOMetrics());
    assertEquals(accumulators, failedExecution.getUserAccumulators());
}
Example usage of org.apache.flink.runtime.scheduler.SchedulerBase in the Apache Flink project.
From class DefaultExecutionGraphDeploymentTest, method testAccumulatorsAndMetricsForwarding:
/**
 * Verifies that {@link SchedulerNG#updateTaskExecutionState(TaskExecutionState)} updates the
 * accumulators and metrics for an execution that failed or was canceled.
 */
@Test
public void testAccumulatorsAndMetricsForwarding() throws Exception {
final JobVertexID jid1 = new JobVertexID();
final JobVertexID jid2 = new JobVertexID();
JobVertex v1 = new JobVertex("v1", jid1);
JobVertex v2 = new JobVertex("v2", jid2);
// One subtask per vertex, so exactly two executions end up registered.
SchedulerBase scheduler = setupScheduler(v1, 1, v2, 1);
ExecutionGraph graph = scheduler.getExecutionGraph();
// Live view of the registry; entries disappear as executions reach terminal states.
Map<ExecutionAttemptID, Execution> executions = graph.getRegisteredExecutions();
// verify behavior for canceled executions
Execution execution1 = executions.values().iterator().next();
IOMetrics ioMetrics = new IOMetrics(0, 0, 0, 0);
Map<String, Accumulator<?, ?>> accumulators = new HashMap<>();
accumulators.put("acc", new IntCounter(4));
AccumulatorSnapshot accumulatorSnapshot = new AccumulatorSnapshot(graph.getJobID(), execution1.getAttemptId(), accumulators);
TaskExecutionState state = new TaskExecutionState(execution1.getAttemptId(), ExecutionState.CANCELED, null, accumulatorSnapshot, ioMetrics);
// Routes the terminal state through the scheduler, which should forward the
// snapshot's accumulators and metrics onto execution1.
scheduler.updateTaskExecutionState(state);
assertEquals(ioMetrics, execution1.getIOMetrics());
assertNotNull(execution1.getUserAccumulators());
assertEquals(4, execution1.getUserAccumulators().get("acc").getLocalValue());
// verify behavior for failed executions
// NOTE(review): this re-runs iterator().next() on the same live map — it appears to
// rely on the CANCELED update above deregistering execution1, so this yields the
// other execution. TODO confirm against getRegisteredExecutions() semantics.
Execution execution2 = executions.values().iterator().next();
IOMetrics ioMetrics2 = new IOMetrics(0, 0, 0, 0);
Map<String, Accumulator<?, ?>> accumulators2 = new HashMap<>();
accumulators2.put("acc", new IntCounter(8));
AccumulatorSnapshot accumulatorSnapshot2 = new AccumulatorSnapshot(graph.getJobID(), execution2.getAttemptId(), accumulators2);
TaskExecutionState state2 = new TaskExecutionState(execution2.getAttemptId(), ExecutionState.FAILED, null, accumulatorSnapshot2, ioMetrics2);
scheduler.updateTaskExecutionState(state2);
assertEquals(ioMetrics2, execution2.getIOMetrics());
assertNotNull(execution2.getUserAccumulators());
assertEquals(8, execution2.getUserAccumulators().get("acc").getLocalValue());
}
Example usage of org.apache.flink.runtime.scheduler.SchedulerBase in the Apache Flink project.
From class AdaptiveSchedulerTest, method testComputeVertexParallelismStoreForExecutionInReactiveMode:
/**
 * Verifies that the parallelism store built for REACTIVE execution mode reports each vertex's
 * configured parallelism and max parallelism unchanged.
 */
@Test
public void testComputeVertexParallelismStoreForExecutionInReactiveMode() {
    JobVertex source = createNoOpVertex("v1", 1, 50);
    JobVertex sink = createNoOpVertex("v2", 50, 50);
    JobGraph jobGraph = streamingJobGraph(source, sink);

    VertexParallelismStore store =
            AdaptiveScheduler.computeVertexParallelismStoreForExecution(
                    jobGraph,
                    SchedulerExecutionMode.REACTIVE,
                    SchedulerBase::getDefaultMaxParallelism);

    jobGraph.getVertices()
            .forEach(
                    vertex -> {
                        VertexParallelismInformation info =
                                store.getParallelismInfo(vertex.getID());
                        assertThat(info.getParallelism()).isEqualTo(vertex.getParallelism());
                        assertThat(info.getMaxParallelism())
                                .isEqualTo(vertex.getMaxParallelism());
                    });
}
Example usage of org.apache.flink.runtime.scheduler.SchedulerBase in the Apache Flink project.
From class AdaptiveSchedulerTest, method testComputeVertexParallelismStoreForExecutionInDefaultMode:
/**
 * Verifies that the parallelism store built with no execution mode (default) reports each
 * vertex's configured parallelism and max parallelism unchanged.
 */
@Test
public void testComputeVertexParallelismStoreForExecutionInDefaultMode() {
    JobVertex source = createNoOpVertex("v1", 1, 50);
    JobVertex sink = createNoOpVertex("v2", 50, 50);
    JobGraph jobGraph = streamingJobGraph(source, sink);

    // null execution mode selects the default (non-reactive) store computation
    VertexParallelismStore store =
            AdaptiveScheduler.computeVertexParallelismStoreForExecution(
                    jobGraph, null, SchedulerBase::getDefaultMaxParallelism);

    jobGraph.getVertices()
            .forEach(
                    vertex -> {
                        VertexParallelismInformation info =
                                store.getParallelismInfo(vertex.getID());
                        assertThat(info.getParallelism()).isEqualTo(vertex.getParallelism());
                        assertThat(info.getMaxParallelism())
                                .isEqualTo(vertex.getMaxParallelism());
                    });
}
Aggregations