
Example 26 with JobExecutionResult

Use of org.apache.flink.api.common.JobExecutionResult in project flink by apache.

The class AccumulatorITCase, method postSubmit.

@Override
protected void postSubmit() throws Exception {
    compareResultsByLinesInMemory(EXPECTED, resultPath);
    // Test accumulator results
    System.out.println("Accumulator results:");
    JobExecutionResult res = this.result;
    System.out.println(AccumulatorHelper.getResultsFormated(res.getAllAccumulatorResults()));
    Assert.assertEquals(Integer.valueOf(3), (Integer) res.getAccumulatorResult("num-lines"));
    Assert.assertEquals(Double.valueOf(getParallelism()), (Double) res.getAccumulatorResult("open-close-counter"));
    // Test histogram (words per line distribution)
    Map<Integer, Integer> dist = Maps.newHashMap();
    dist.put(1, 1);
    dist.put(2, 1);
    dist.put(3, 1);
    Assert.assertEquals(dist, res.getAccumulatorResult("words-per-line"));
    // Test distinct words (custom accumulator)
    Set<StringValue> distinctWords = Sets.newHashSet();
    distinctWords.add(new StringValue("one"));
    distinctWords.add(new StringValue("two"));
    distinctWords.add(new StringValue("three"));
    Assert.assertEquals(distinctWords, res.getAccumulatorResult("distinct-words"));
}
Also used: JobExecutionResult (org.apache.flink.api.common.JobExecutionResult), StringValue (org.apache.flink.types.StringValue)
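
For context, the following is a minimal sketch of the job-side counterpart to these assertions: registering and updating accumulators such as num-lines and words-per-line from a RichFlatMapFunction. The class name CountingFlatMap and the function body are illustrative assumptions, not the actual AccumulatorITCase job; only the accumulator names mirror the test above.

import org.apache.flink.api.common.accumulators.Histogram;
import org.apache.flink.api.common.accumulators.IntCounter;
import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.util.Collector;

// Illustrative sketch only; not the actual AccumulatorITCase job.
public class CountingFlatMap extends RichFlatMapFunction<String, String> {

    private final IntCounter numLines = new IntCounter();
    private final Histogram wordsPerLine = new Histogram();

    @Override
    public void open(Configuration parameters) {
        // Register the accumulators under the names that the test later queries
        // via JobExecutionResult#getAccumulatorResult.
        getRuntimeContext().addAccumulator("num-lines", numLines);
        getRuntimeContext().addAccumulator("words-per-line", wordsPerLine);
    }

    @Override
    public void flatMap(String line, Collector<String> out) {
        // One increment per input line, and one histogram entry per line length in words.
        numLines.add(1);
        String[] words = line.split(" ");
        wordsPerLine.add(words.length);
        for (String word : words) {
            out.collect(word);
        }
    }
}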

Example 27 with JobExecutionResult

Use of org.apache.flink.api.common.JobExecutionResult in project flink by apache.

The class MiscellaneousIssuesITCase, method testAccumulatorsAfterNoOp.

@Test
public void testAccumulatorsAfterNoOp() {
    final String ACC_NAME = "test_accumulator";
    try {
        ExecutionEnvironment env = ExecutionEnvironment.createRemoteEnvironment("localhost", cluster.getLeaderRPCPort());
        env.setParallelism(6);
        env.getConfig().disableSysoutLogging();
        env.generateSequence(1, 1000000).rebalance().flatMap(new RichFlatMapFunction<Long, Long>() {

            private LongCounter counter;

            @Override
            public void open(Configuration parameters) {
                counter = getRuntimeContext().getLongCounter(ACC_NAME);
            }

            @Override
            public void flatMap(Long value, Collector<Long> out) {
                counter.add(1L);
            }
        }).output(new DiscardingOutputFormat<Long>());
        // Each of the 1,000,000 records increments the counter once, so the merged
        // accumulator reported in the JobExecutionResult must equal 1000000.
        JobExecutionResult result = env.execute();
        assertEquals(1000000L, result.getAllAccumulatorResults().get(ACC_NAME));
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used: JobExecutionResult (org.apache.flink.api.common.JobExecutionResult), ExecutionEnvironment (org.apache.flink.api.java.ExecutionEnvironment), Configuration (org.apache.flink.configuration.Configuration), RichFlatMapFunction (org.apache.flink.api.common.functions.RichFlatMapFunction), Collector (org.apache.flink.util.Collector), LongCounter (org.apache.flink.api.common.accumulators.LongCounter), ProgramInvocationException (org.apache.flink.client.program.ProgramInvocationException), Test (org.junit.Test)
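
As a usage note, JobExecutionResult also offers a typed accessor, so the raw getAllAccumulatorResults().get(...) lookup above can be replaced with getAccumulatorResult. The helper below is an illustrative sketch (the class and method names are assumptions, not part of the Flink test) that reads the same kind of LongCounter result along with the job's net runtime.

import org.apache.flink.api.common.JobExecutionResult;

// Illustrative helper, not part of MiscellaneousIssuesITCase.
public class AccumulatorReadExample {

    static long readCounter(JobExecutionResult result, String accumulatorName) {
        // getAccumulatorResult is generic and casts the stored value for the caller;
        // a LongCounter accumulator yields a Long here.
        Long count = result.getAccumulatorResult(accumulatorName);
        // getNetRuntime reports the job's net runtime in milliseconds.
        System.out.println("Job " + result.getJobID() + " ran for " + result.getNetRuntime() + " ms");
        return count == null ? 0L : count;
    }
}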

Example 28 with JobExecutionResult

Use of org.apache.flink.api.common.JobExecutionResult in project flink by apache.

The class JobMaster, method jobStatusChanged.

private void jobStatusChanged(final JobStatus newJobStatus, long timestamp, final Throwable error) {
    validateRunsInMainThread();
    final JobID jobID = executionGraph.getJobID();
    final String jobName = executionGraph.getJobName();
    log.info("Status of job {} ({}) changed to {}.", jobID, jobName, newJobStatus, error);
    if (newJobStatus.isGloballyTerminalState()) {
        switch(newJobStatus) {
            case FINISHED:
                try {
                    // TODO get correct job duration
                    // job done, let's get the accumulators
                    Map<String, Object> accumulatorResults = executionGraph.getAccumulators();
                    JobExecutionResult result = new JobExecutionResult(jobID, 0L, accumulatorResults);
                    jobCompletionActions.jobFinished(result);
                } catch (Exception e) {
                    log.error("Cannot fetch final accumulators for job {} ({})", jobName, jobID, e);
                    final JobExecutionException exception = new JobExecutionException(jobID, "Failed to retrieve accumulator results. " + "The job is registered as 'FINISHED (successful), but this notification describes " + "a failure, since the resulting accumulators could not be fetched.", e);
                    jobCompletionActions.jobFailed(exception);
                }
                break;
            case CANCELED:
                {
                    final JobExecutionException exception = new JobExecutionException(jobID, "Job was cancelled.", new Exception("The job was cancelled"));
                    jobCompletionActions.jobFailed(exception);
                    break;
                }
            case FAILED:
                {
                    final Throwable unpackedError = SerializedThrowable.get(error, userCodeLoader);
                    final JobExecutionException exception = new JobExecutionException(jobID, "Job execution failed.", unpackedError);
                    jobCompletionActions.jobFailed(exception);
                    break;
                }
            default:
                // this can happen only if the enum is buggy
                throw new IllegalStateException(newJobStatus.toString());
        }
    }
}
Also used: JobExecutionResult (org.apache.flink.api.common.JobExecutionResult), JobExecutionException (org.apache.flink.runtime.client.JobExecutionException), SerializedThrowable (org.apache.flink.runtime.util.SerializedThrowable), JobID (org.apache.flink.api.common.JobID), TimeoutException (java.util.concurrent.TimeoutException), CheckpointException (org.apache.flink.runtime.checkpoint.CheckpointException), LeaderIdMismatchException (org.apache.flink.runtime.highavailability.LeaderIdMismatchException), PartitionProducerDisposedException (org.apache.flink.runtime.jobmanager.PartitionProducerDisposedException), IOException (java.io.IOException)
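
To round out the picture, the sketch below shows what a completion callback consuming this JobExecutionResult might do with it, for example logging the accumulator map copied out of the ExecutionGraph. The class JobFinishedLogger and its body are hypothetical; they merely stand in for the jobCompletionActions callback invoked above, whose real implementation lives elsewhere in the Flink runtime.

import java.util.Map;
import org.apache.flink.api.common.JobExecutionResult;

// Hypothetical consumer of the JobExecutionResult built in the FINISHED branch above.
public class JobFinishedLogger {

    static void onJobFinished(JobExecutionResult result) {
        System.out.println("Job " + result.getJobID() + " finished.");
        // Iterate the accumulator map that the JobMaster assembled from the ExecutionGraph.
        for (Map.Entry<String, Object> entry : result.getAllAccumulatorResults().entrySet()) {
            System.out.println("  " + entry.getKey() + " = " + entry.getValue());
        }
    }
}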

Aggregations

JobExecutionResult (org.apache.flink.api.common.JobExecutionResult): 28
ExecutionEnvironment (org.apache.flink.api.java.ExecutionEnvironment): 10
ParameterTool (org.apache.flink.api.java.utils.ParameterTool): 7
ProgramParametrizationException (org.apache.flink.client.program.ProgramParametrizationException): 7
NumberFormat (java.text.NumberFormat): 6
JDKRandomGeneratorFactory (org.apache.flink.graph.generator.random.JDKRandomGeneratorFactory): 6
LongValue (org.apache.flink.types.LongValue): 6
NullValue (org.apache.flink.types.NullValue): 6
Graph (org.apache.flink.graph.Graph): 5
GraphCsvReader (org.apache.flink.graph.GraphCsvReader): 5
LongValueToUnsignedIntValue (org.apache.flink.graph.asm.translate.translators.LongValueToUnsignedIntValue): 5
RMatGraph (org.apache.flink.graph.generator.RMatGraph): 5
RandomGenerableFactory (org.apache.flink.graph.generator.random.RandomGenerableFactory): 5
IntValue (org.apache.flink.types.IntValue): 5
StringValue (org.apache.flink.types.StringValue): 5
Test (org.junit.Test): 5
IOException (java.io.IOException): 4
DataSet (org.apache.flink.api.java.DataSet): 4
ProgramInvocationException (org.apache.flink.client.program.ProgramInvocationException): 3
GraphAnalytic (org.apache.flink.graph.GraphAnalytic): 3