Use of org.apache.flink.api.common.JobExecutionResult in project flink by apache.
The class DataSetUtils, method checksumHashCode.
// --------------------------------------------------------------------------------------------
//  Checksum
// --------------------------------------------------------------------------------------------

/**
 * Convenience method to get the count (number of elements) of a DataSet
 * as well as the checksum (sum over element hashes).
 *
 * @return A ChecksumHashCode that represents the count and checksum of elements in the data set.
 * @deprecated replaced with {@code org.apache.flink.graph.asm.dataset.ChecksumHashCode} in Gelly
 */
@Deprecated
public static <T> Utils.ChecksumHashCode checksumHashCode(DataSet<T> input) throws Exception {
    final String id = new AbstractID().toString();

    input.output(new Utils.ChecksumHashCodeHelper<T>(id)).name("ChecksumHashCode");

    JobExecutionResult res = input.getExecutionEnvironment().execute();
    return res.<Utils.ChecksumHashCode>getAccumulatorResult(id);
}
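For context, a minimal sketch of how this deprecated helper is typically invoked from a driver program. The class name and the generated sequence are illustrative; note that checksumHashCode() submits a job of its own via execute():

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.Utils;
import org.apache.flink.api.java.utils.DataSetUtils;

public class ChecksumExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Long> numbers = env.generateSequence(1, 100);

        // Triggers its own job execution and reads the accumulator back
        // from the resulting JobExecutionResult.
        Utils.ChecksumHashCode checksum = DataSetUtils.checksumHashCode(numbers);
        System.out.println("count = " + checksum.getCount()
                + ", checksum = " + checksum.getChecksum());
    }
}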
Use of org.apache.flink.api.common.JobExecutionResult in project beam by apache.
The class FlinkRunner, method run.
@Override
public PipelineResult run(Pipeline pipeline) {
    logWarningIfPCollectionViewHasNonDeterministicKeyCoder(pipeline);

    MetricsEnvironment.setMetricsSupported(true);

    LOG.info("Executing pipeline using FlinkRunner.");

    FlinkPipelineExecutionEnvironment env = new FlinkPipelineExecutionEnvironment(options);

    LOG.info("Translating pipeline to Flink program.");
    env.translate(this, pipeline);

    JobExecutionResult result;
    try {
        LOG.info("Starting execution of Flink program.");
        result = env.executePipeline();
    } catch (Exception e) {
        LOG.error("Pipeline execution failed", e);
        throw new RuntimeException("Pipeline execution failed", e);
    }

    if (result instanceof DetachedEnvironment.DetachedJobExecutionResult) {
        LOG.info("Pipeline submitted in Detached mode");
        return new FlinkDetachedRunnerResult();
    } else {
        LOG.info("Execution finished in {} msecs", result.getNetRuntime());

        Map<String, Object> accumulators = result.getAllAccumulatorResults();
        if (accumulators != null && !accumulators.isEmpty()) {
            LOG.info("Final accumulator values:");
            for (Map.Entry<String, Object> entry : result.getAllAccumulatorResults().entrySet()) {
                LOG.info("{} : {}", entry.getKey(), entry.getValue());
            }
        }

        return new FlinkRunnerResult(accumulators, result.getNetRuntime());
    }
}
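A minimal sketch of how a user pipeline reaches this run(...) method; the class name and the trivial Create source are illustrative:

import org.apache.beam.runners.flink.FlinkPipelineOptions;
import org.apache.beam.runners.flink.FlinkRunner;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.PipelineResult;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.transforms.Create;

public class RunOnFlink {
    public static void main(String[] args) {
        FlinkPipelineOptions options = PipelineOptionsFactory.fromArgs(args)
                .as(FlinkPipelineOptions.class);
        options.setRunner(FlinkRunner.class);

        Pipeline pipeline = Pipeline.create(options);
        pipeline.apply(Create.of(1, 2, 3));

        // pipeline.run() dispatches to FlinkRunner.run(...) shown above.
        PipelineResult result = pipeline.run();
        result.waitUntilFinish();
    }
}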
Use of org.apache.flink.api.common.JobExecutionResult in project flink by apache.
The class CliFrontend, method executeProgram.
// --------------------------------------------------------------------------------------------
//  Interaction with programs and JobManager
// --------------------------------------------------------------------------------------------

protected int executeProgram(PackagedProgram program, ClusterClient client, int parallelism) {
    logAndSysout("Starting execution of program");

    JobSubmissionResult result;
    try {
        result = client.run(program, parallelism);
    } catch (ProgramParametrizationException e) {
        return handleParametrizationException(e);
    } catch (ProgramMissingJobException e) {
        return handleMissingJobException();
    } catch (ProgramInvocationException e) {
        return handleError(e);
    } finally {
        program.deleteExtractedLibraries();
    }

    if (null == result) {
        logAndSysout("No JobSubmissionResult returned, please make sure you called "
                + "ExecutionEnvironment.execute()");
        return 1;
    }

    if (result.isJobExecutionResult()) {
        logAndSysout("Program execution finished");
        JobExecutionResult execResult = result.getJobExecutionResult();
        System.out.println("Job with JobID " + execResult.getJobID() + " has finished.");
        System.out.println("Job Runtime: " + execResult.getNetRuntime() + " ms");
        Map<String, Object> accumulatorsResult = execResult.getAllAccumulatorResults();
        if (accumulatorsResult.size() > 0) {
            System.out.println("Accumulator Results: ");
            System.out.println(AccumulatorHelper.getResultsFormated(accumulatorsResult));
        }
    } else {
        logAndSysout("Job has been submitted with JobID " + result.getJobID());
    }

    return 0;
}
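The branch on isJobExecutionResult() is the key detail here: an attached submission carries a full JobExecutionResult, while a detached one only carries the JobID. A minimal sketch of that distinction as a hypothetical helper (the method itself is not part of Flink):

import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.api.common.JobSubmissionResult;

// Hypothetical helper mirroring the branch above.
static String describe(JobSubmissionResult result) {
    if (result.isJobExecutionResult()) {
        // Attached mode: runtime and accumulators are available.
        JobExecutionResult exec = result.getJobExecutionResult();
        return "finished in " + exec.getNetRuntime() + " ms";
    }
    // Detached mode: only the JobID is known at this point.
    return "submitted as " + result.getJobID();
}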
Use of org.apache.flink.api.common.JobExecutionResult in project flink by apache.
The class EmptyFieldsCountAccumulator, method main.
public static void main(final String[] args) throws Exception {
    final ParameterTool params = ParameterTool.fromArgs(args);

    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

    // make parameters available in the web interface
    env.getConfig().setGlobalJobParameters(params);

    // get the data set
    final DataSet<StringTriple> file = getDataSet(env, params);

    // filter lines with empty fields
    final DataSet<StringTriple> filteredLines = file.filter(new EmptyFieldFilter());

    // Here, we could do further processing with the filtered lines...
    JobExecutionResult result;

    // output the filtered lines
    if (params.has("output")) {
        filteredLines.writeAsCsv(params.get("output"));
        // execute program
        result = env.execute("Accumulator example");
    } else {
        System.out.println("Printing result to stdout. Use --output to specify output path.");
        filteredLines.print();
        result = env.getLastJobExecutionResult();
    }

    // get the accumulator result via its registration key
    final List<Integer> emptyFields = result.getAccumulatorResult(EMPTY_FIELD_ACCUMULATOR);
    System.out.format("Number of detected empty fields per column: %s\n", emptyFields);
}
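The EMPTY_FIELD_ACCUMULATOR read above must have been registered on the worker side, inside EmptyFieldFilter. A minimal sketch of that registration pattern, using a simplified filter with Flink's built-in IntCounter rather than the example's custom per-column accumulator (the class and key names here are hypothetical):

import org.apache.flink.api.common.accumulators.IntCounter;
import org.apache.flink.api.common.functions.RichFilterFunction;
import org.apache.flink.configuration.Configuration;

// Simplified stand-in for EmptyFieldFilter: counts rejected records.
public class CountingFilter extends RichFilterFunction<String> {
    public static final String EMPTY_LINE_ACCUMULATOR = "empty-lines"; // hypothetical key

    private final IntCounter emptyLines = new IntCounter();

    @Override
    public void open(Configuration parameters) {
        // Register under the key the driver later passes to getAccumulatorResult().
        getRuntimeContext().addAccumulator(EMPTY_LINE_ACCUMULATOR, emptyLines);
    }

    @Override
    public boolean filter(String line) {
        boolean empty = line.trim().isEmpty();
        if (empty) {
            emptyLines.add(1);
        }
        return !empty;
    }
}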
Use of org.apache.flink.api.common.JobExecutionResult in project flink by apache.
The class DataSet, method count.
/**
 * Convenience method to get the count (number of elements) of a DataSet.
 *
 * @return A long integer that represents the number of elements in the data set.
 */
public long count() throws Exception {
    final String id = new AbstractID().toString();

    output(new Utils.CountHelper<T>(id)).name("count()");

    JobExecutionResult res = getExecutionEnvironment().execute();
    return res.<Long>getAccumulatorResult(id);
}
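A minimal sketch of the call site (the class name is illustrative). Like checksumHashCode(), count() is eager: it submits a job, blocks until completion, and reads the count back from the JobExecutionResult's accumulators:

import org.apache.flink.api.java.ExecutionEnvironment;

public class CountExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // Triggers a full job execution on its own.
        long n = env.fromElements("a", "b", "c").count();
        System.out.println("elements: " + n);
    }
}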