Example 1 with IntCounter

Use of org.apache.flink.api.common.accumulators.IntCounter in project flink by apache.

From class ExecutionGraphDeploymentTest, method testAccumulatorsAndMetricsForwarding:

/**
 * Verifies that {@link ExecutionGraph#updateState(TaskExecutionState)} updates the accumulators
 * and metrics for an execution that failed or was canceled.
 */
@Test
public void testAccumulatorsAndMetricsForwarding() throws Exception {
    final JobVertexID jid1 = new JobVertexID();
    final JobVertexID jid2 = new JobVertexID();
    JobVertex v1 = new JobVertex("v1", jid1);
    JobVertex v2 = new JobVertex("v2", jid2);
    Tuple2<ExecutionGraph, Map<ExecutionAttemptID, Execution>> graphAndExecutions = setupExecution(v1, 1, v2, 1);
    ExecutionGraph graph = graphAndExecutions.f0;
    // verify behavior for canceled executions
    Execution execution1 = graphAndExecutions.f1.values().iterator().next();
    IOMetrics ioMetrics = new IOMetrics(0, 0, 0, 0, 0, 0.0, 0.0, 0.0, 0.0, 0.0);
    Map<String, Accumulator<?, ?>> accumulators = new HashMap<>();
    accumulators.put("acc", new IntCounter(4));
    AccumulatorSnapshot accumulatorSnapshot = new AccumulatorSnapshot(graph.getJobID(), execution1.getAttemptId(), accumulators);
    TaskExecutionState state = new TaskExecutionState(graph.getJobID(), execution1.getAttemptId(), ExecutionState.CANCELED, null, accumulatorSnapshot, ioMetrics);
    graph.updateState(state);
    assertEquals(ioMetrics, execution1.getIOMetrics());
    assertNotNull(execution1.getUserAccumulators());
    assertEquals(4, execution1.getUserAccumulators().get("acc").getLocalValue());
    // verify behavior for failed executions
    Execution execution2 = graphAndExecutions.f1.values().iterator().next();
    IOMetrics ioMetrics2 = new IOMetrics(0, 0, 0, 0, 0, 0.0, 0.0, 0.0, 0.0, 0.0);
    Map<String, Accumulator<?, ?>> accumulators2 = new HashMap<>();
    accumulators2.put("acc", new IntCounter(8));
    AccumulatorSnapshot accumulatorSnapshot2 = new AccumulatorSnapshot(graph.getJobID(), execution2.getAttemptId(), accumulators2);
    TaskExecutionState state2 = new TaskExecutionState(graph.getJobID(), execution2.getAttemptId(), ExecutionState.FAILED, null, accumulatorSnapshot2, ioMetrics2);
    graph.updateState(state2);
    assertEquals(ioMetrics2, execution2.getIOMetrics());
    assertNotNull(execution2.getUserAccumulators());
    assertEquals(8, execution2.getUserAccumulators().get("acc").getLocalValue());
}
Also used : Accumulator(org.apache.flink.api.common.accumulators.Accumulator) HashMap(java.util.HashMap) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) TaskExecutionState(org.apache.flink.runtime.taskmanager.TaskExecutionState) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) AccumulatorSnapshot(org.apache.flink.runtime.accumulators.AccumulatorSnapshot) IntCounter(org.apache.flink.api.common.accumulators.IntCounter) Map(java.util.Map) HashMap(java.util.HashMap) Test(org.junit.Test)
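
For reference, a minimal standalone sketch of the IntCounter calls this test leans on: constructing the counter with an initial value (as in new IntCounter(4) above), adding to it, reading the local value that the assertions compare, and merging another instance the way per-subtask counters are combined. This is not part of the Flink test suite, only an illustration of the API surface used in the example.

import org.apache.flink.api.common.accumulators.IntCounter;

public class IntCounterSketch {

    public static void main(String[] args) {
        // Start at 4, mirroring new IntCounter(4) in the test above.
        IntCounter counter = new IntCounter(4);

        // add() increments the value held locally by this instance.
        counter.add(3);

        // getLocalValue() returns the locally accumulated value: 7 here.
        System.out.println("local value = " + counter.getLocalValue());

        // merge() folds another accumulator's local value into this one;
        // this is how counters from different subtasks end up as one job-wide result.
        IntCounter other = new IntCounter(8);
        counter.merge(other);

        // Prints 15 (7 + 8).
        System.out.println("merged value = " + counter.getLocalValue());
    }
}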

Example 2 with IntCounter

Use of org.apache.flink.api.common.accumulators.IntCounter in project flink by apache.

From class SubtaskExecutionAttemptAccumulatorsHandlerTest, method testHandleRequest:

@Test
public void testHandleRequest() throws Exception {
    // Instantiate the handler.
    final RestHandlerConfiguration restHandlerConfiguration = RestHandlerConfiguration.fromConfiguration(new Configuration());
    final SubtaskExecutionAttemptAccumulatorsHandler handler = new SubtaskExecutionAttemptAccumulatorsHandler(() -> null, Time.milliseconds(100L), Collections.emptyMap(), SubtaskExecutionAttemptAccumulatorsHeaders.getInstance(), new DefaultExecutionGraphCache(restHandlerConfiguration.getTimeout(), Time.milliseconds(restHandlerConfiguration.getRefreshInterval())), TestingUtils.defaultExecutor());
    // Instantiate an empty request.
    final HandlerRequest<EmptyRequestBody> request = HandlerRequest.create(EmptyRequestBody.getInstance(), new SubtaskAttemptMessageParameters());
    final Map<String, OptionalFailure<Accumulator<?, ?>>> userAccumulators = new HashMap<>(3);
    userAccumulators.put("IntCounter", OptionalFailure.of(new IntCounter(10)));
    userAccumulators.put("LongCounter", OptionalFailure.of(new LongCounter(100L)));
    userAccumulators.put("Failure", OptionalFailure.ofFailure(new FlinkRuntimeException("Test")));
    // Build the expected result.
    final StringifiedAccumulatorResult[] accumulatorResults = StringifiedAccumulatorResult.stringifyAccumulatorResults(userAccumulators);
    final int attemptNum = 1;
    final int subtaskIndex = 2;
    // Instantiate the execution under test.
    final ArchivedExecution execution = new ArchivedExecution(accumulatorResults, null, new ExecutionAttemptID(), attemptNum, ExecutionState.FINISHED, null, null, null, subtaskIndex, new long[ExecutionState.values().length]);
    // Invoke the tested method.
    final SubtaskExecutionAttemptAccumulatorsInfo accumulatorsInfo = handler.handleRequest(request, execution);
    final ArrayList<UserAccumulator> userAccumulatorList = new ArrayList<>(userAccumulators.size());
    for (StringifiedAccumulatorResult accumulatorResult : accumulatorResults) {
        userAccumulatorList.add(new UserAccumulator(accumulatorResult.getName(), accumulatorResult.getType(), accumulatorResult.getValue()));
    }
    final SubtaskExecutionAttemptAccumulatorsInfo expected = new SubtaskExecutionAttemptAccumulatorsInfo(subtaskIndex, attemptNum, execution.getAttemptId().toString(), userAccumulatorList);
    // Verify.
    assertEquals(expected, accumulatorsInfo);
}
Also used : RestHandlerConfiguration(org.apache.flink.runtime.rest.handler.RestHandlerConfiguration) ExecutionAttemptID(org.apache.flink.runtime.executiongraph.ExecutionAttemptID) Configuration(org.apache.flink.configuration.Configuration) RestHandlerConfiguration(org.apache.flink.runtime.rest.handler.RestHandlerConfiguration) HashMap(java.util.HashMap) SubtaskAttemptMessageParameters(org.apache.flink.runtime.rest.messages.job.SubtaskAttemptMessageParameters) StringifiedAccumulatorResult(org.apache.flink.runtime.accumulators.StringifiedAccumulatorResult) ArrayList(java.util.ArrayList) ArchivedExecution(org.apache.flink.runtime.executiongraph.ArchivedExecution) SubtaskExecutionAttemptAccumulatorsInfo(org.apache.flink.runtime.rest.messages.job.SubtaskExecutionAttemptAccumulatorsInfo) EmptyRequestBody(org.apache.flink.runtime.rest.messages.EmptyRequestBody) LongCounter(org.apache.flink.api.common.accumulators.LongCounter) UserAccumulator(org.apache.flink.runtime.rest.messages.job.UserAccumulator) FlinkRuntimeException(org.apache.flink.util.FlinkRuntimeException) DefaultExecutionGraphCache(org.apache.flink.runtime.rest.handler.legacy.DefaultExecutionGraphCache) OptionalFailure(org.apache.flink.util.OptionalFailure) IntCounter(org.apache.flink.api.common.accumulators.IntCounter) Test(org.junit.Test)
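
In a real job, accumulators like the ones stringified above are registered from user code and only later surface through snapshots and REST handlers. As context, here is a minimal sketch of that user-facing side, assuming the classic RichFunction accumulator API (getRuntimeContext().addAccumulator); the function and accumulator names are made up for illustration, not taken from the Flink sources.

import org.apache.flink.api.common.accumulators.IntCounter;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.configuration.Configuration;

// Hypothetical user function that counts processed records in an IntCounter accumulator.
public class RecordCountingMapper extends RichMapFunction<String, String> {

    // Local accumulator instance held by this parallel subtask.
    private final IntCounter recordCount = new IntCounter();

    @Override
    public void open(Configuration parameters) {
        // Register the accumulator under a name; the runtime later snapshots and
        // forwards it (compare the AccumulatorSnapshot handling in Example 1).
        getRuntimeContext().addAccumulator("recordCount", recordCount);
    }

    @Override
    public String map(String value) {
        recordCount.add(1);
        return value;
    }
}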

Example 3 with IntCounter

Use of org.apache.flink.api.common.accumulators.IntCounter in project flink by apache.

From class DefaultExecutionGraphDeploymentTest, method testAccumulatorsAndMetricsForwarding:

/**
 * Verifies that {@link SchedulerNG#updateTaskExecutionState(TaskExecutionState)} updates the
 * accumulators and metrics for an execution that failed or was canceled.
 */
@Test
public void testAccumulatorsAndMetricsForwarding() throws Exception {
    final JobVertexID jid1 = new JobVertexID();
    final JobVertexID jid2 = new JobVertexID();
    JobVertex v1 = new JobVertex("v1", jid1);
    JobVertex v2 = new JobVertex("v2", jid2);
    SchedulerBase scheduler = setupScheduler(v1, 1, v2, 1);
    ExecutionGraph graph = scheduler.getExecutionGraph();
    Map<ExecutionAttemptID, Execution> executions = graph.getRegisteredExecutions();
    // verify behavior for canceled executions
    Execution execution1 = executions.values().iterator().next();
    IOMetrics ioMetrics = new IOMetrics(0, 0, 0, 0);
    Map<String, Accumulator<?, ?>> accumulators = new HashMap<>();
    accumulators.put("acc", new IntCounter(4));
    AccumulatorSnapshot accumulatorSnapshot = new AccumulatorSnapshot(graph.getJobID(), execution1.getAttemptId(), accumulators);
    TaskExecutionState state = new TaskExecutionState(execution1.getAttemptId(), ExecutionState.CANCELED, null, accumulatorSnapshot, ioMetrics);
    scheduler.updateTaskExecutionState(state);
    assertEquals(ioMetrics, execution1.getIOMetrics());
    assertNotNull(execution1.getUserAccumulators());
    assertEquals(4, execution1.getUserAccumulators().get("acc").getLocalValue());
    // verify behavior for failed executions
    Execution execution2 = executions.values().iterator().next();
    IOMetrics ioMetrics2 = new IOMetrics(0, 0, 0, 0);
    Map<String, Accumulator<?, ?>> accumulators2 = new HashMap<>();
    accumulators2.put("acc", new IntCounter(8));
    AccumulatorSnapshot accumulatorSnapshot2 = new AccumulatorSnapshot(graph.getJobID(), execution2.getAttemptId(), accumulators2);
    TaskExecutionState state2 = new TaskExecutionState(execution2.getAttemptId(), ExecutionState.FAILED, null, accumulatorSnapshot2, ioMetrics2);
    scheduler.updateTaskExecutionState(state2);
    assertEquals(ioMetrics2, execution2.getIOMetrics());
    assertNotNull(execution2.getUserAccumulators());
    assertEquals(8, execution2.getUserAccumulators().get("acc").getLocalValue());
}
Also used : Accumulator(org.apache.flink.api.common.accumulators.Accumulator) HashMap(java.util.HashMap) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) TaskExecutionState(org.apache.flink.runtime.taskmanager.TaskExecutionState) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) SchedulerBase(org.apache.flink.runtime.scheduler.SchedulerBase) AccumulatorSnapshot(org.apache.flink.runtime.accumulators.AccumulatorSnapshot) IntCounter(org.apache.flink.api.common.accumulators.IntCounter) Test(org.junit.Test)

Example 4 with IntCounter

Use of org.apache.flink.api.common.accumulators.IntCounter in project flink by apache.

From class StringifiedAccumulatorResultTest, method stringifyingResultsShouldIncorporateAccumulatorLocalValueDirectly:

@Test
public void stringifyingResultsShouldIncorporateAccumulatorLocalValueDirectly() {
    final String name = "a";
    final int targetValue = 314159;
    final IntCounter acc = new IntCounter();
    acc.add(targetValue);
    final Map<String, OptionalFailure<Accumulator<?, ?>>> accumulatorMap = new HashMap<>();
    accumulatorMap.put(name, OptionalFailure.of(acc));
    final StringifiedAccumulatorResult[] results = StringifiedAccumulatorResult.stringifyAccumulatorResults(accumulatorMap);
    assertEquals(1, results.length);
    final StringifiedAccumulatorResult firstResult = results[0];
    assertEquals(name, firstResult.getName());
    assertEquals("IntCounter", firstResult.getType());
    assertEquals(Integer.toString(targetValue), firstResult.getValue());
}
Also used : HashMap(java.util.HashMap) OptionalFailure(org.apache.flink.util.OptionalFailure) IntCounter(org.apache.flink.api.common.accumulators.IntCounter) Test(org.junit.Test)
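
Examples 2 and 4 both feed accumulators through StringifiedAccumulatorResult.stringifyAccumulatorResults. The sketch below (not taken from the Flink sources) combines the two cases shown there, a successful IntCounter and an entry wrapped with OptionalFailure.ofFailure, and prints the resulting name/type/value triples. The exact text of the failed entry's value depends on the Flink version, so it is only printed here, not asserted.

import java.util.HashMap;
import java.util.Map;

import org.apache.flink.api.common.accumulators.Accumulator;
import org.apache.flink.api.common.accumulators.IntCounter;
import org.apache.flink.runtime.accumulators.StringifiedAccumulatorResult;
import org.apache.flink.util.FlinkRuntimeException;
import org.apache.flink.util.OptionalFailure;

public class StringifySketch {

    public static void main(String[] args) {
        Map<String, OptionalFailure<Accumulator<?, ?>>> accumulators = new HashMap<>();

        // A successful accumulator, as in Example 4.
        accumulators.put("records", OptionalFailure.of(new IntCounter(42)));

        // A failed accumulator, as in Example 2.
        accumulators.put("broken", OptionalFailure.ofFailure(new FlinkRuntimeException("Test")));

        StringifiedAccumulatorResult[] results =
                StringifiedAccumulatorResult.stringifyAccumulatorResults(accumulators);

        for (StringifiedAccumulatorResult result : results) {
            // For "records" this prints type "IntCounter" and value "42";
            // for "broken" the value describes the failure rather than a number.
            System.out.println(result.getName() + " / " + result.getType() + " / " + result.getValue());
        }
    }
}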

Aggregations

HashMap (java.util.HashMap) 4
IntCounter (org.apache.flink.api.common.accumulators.IntCounter) 4
Test (org.junit.Test) 4
Accumulator (org.apache.flink.api.common.accumulators.Accumulator) 2
AccumulatorSnapshot (org.apache.flink.runtime.accumulators.AccumulatorSnapshot) 2
JobVertex (org.apache.flink.runtime.jobgraph.JobVertex) 2
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID) 2
TaskExecutionState (org.apache.flink.runtime.taskmanager.TaskExecutionState) 2
OptionalFailure (org.apache.flink.util.OptionalFailure) 2
ArrayList (java.util.ArrayList) 1
Map (java.util.Map) 1
LongCounter (org.apache.flink.api.common.accumulators.LongCounter) 1
Configuration (org.apache.flink.configuration.Configuration) 1
StringifiedAccumulatorResult (org.apache.flink.runtime.accumulators.StringifiedAccumulatorResult) 1
ArchivedExecution (org.apache.flink.runtime.executiongraph.ArchivedExecution) 1
ExecutionAttemptID (org.apache.flink.runtime.executiongraph.ExecutionAttemptID) 1
RestHandlerConfiguration (org.apache.flink.runtime.rest.handler.RestHandlerConfiguration) 1
DefaultExecutionGraphCache (org.apache.flink.runtime.rest.handler.legacy.DefaultExecutionGraphCache) 1
EmptyRequestBody (org.apache.flink.runtime.rest.messages.EmptyRequestBody) 1
SubtaskAttemptMessageParameters (org.apache.flink.runtime.rest.messages.job.SubtaskAttemptMessageParameters) 1