Use of org.apache.flink.api.common.InvalidProgramException in project flink by apache.
From the class CollectionExecutor, the method executeUnaryOperator:
private <IN, OUT> List<OUT> executeUnaryOperator(SingleInputOperator<?, ?, ?> operator, int superStep) throws Exception {
    Operator<?> inputOp = operator.getInput();
    if (inputOp == null) {
        throw new InvalidProgramException("The unary operation " + operator.getName() + " has no input.");
    }

    @SuppressWarnings("unchecked")
    List<IN> inputData = (List<IN>) execute(inputOp, superStep);

    @SuppressWarnings("unchecked")
    SingleInputOperator<IN, OUT, ?> typedOp = (SingleInputOperator<IN, OUT, ?>) operator;

    // build the runtime context and compute broadcast variables, if necessary
    TaskInfo taskInfo = new TaskInfo(typedOp.getName(), 1, 0, 1, 0);
    RuntimeUDFContext ctx;
    MetricGroup metrics = new UnregisteredMetricsGroup();

    if (RichFunction.class.isAssignableFrom(typedOp.getUserCodeWrapper().getUserCodeClass())) {
        ctx = superStep == 0
            ? new RuntimeUDFContext(taskInfo, classLoader, executionConfig, cachedFiles, accumulators, metrics)
            : new IterationRuntimeUDFContext(taskInfo, classLoader, executionConfig, cachedFiles, accumulators, metrics);

        for (Map.Entry<String, Operator<?>> bcInputs : operator.getBroadcastInputs().entrySet()) {
            List<?> bcData = execute(bcInputs.getValue());
            ctx.setBroadcastVariable(bcInputs.getKey(), bcData);
        }
    } else {
        ctx = null;
    }

    return typedOp.executeOnCollections(inputData, ctx, executionConfig);
}
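The RichFunction branch above is what makes broadcast variables work during collection execution: only rich functions get a RuntimeUDFContext, so only they can read broadcast variables. A minimal sketch (ours, with hypothetical variable names, not from the Flink sources) that exercises this path in a collection environment:

// runs through CollectionExecutor, i.e. the method shown above
ExecutionEnvironment env = ExecutionEnvironment.createCollectionsEnvironment();

DataSet<Integer> input = env.fromElements(1, 2, 3);
DataSet<Integer> toBroadcast = env.fromElements(10);

input.map(new RichMapFunction<Integer, Integer>() {
    private int offset;

    @Override
    public void open(Configuration parameters) {
        // delivered via ctx.setBroadcastVariable(...) in executeUnaryOperator
        offset = getRuntimeContext().<Integer>getBroadcastVariable("offset").get(0);
    }

    @Override
    public Integer map(Integer value) {
        return value + offset;
    }
}).withBroadcastSet(toBroadcast, "offset").print(); // prints 11, 12, 13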
Use of org.apache.flink.api.common.InvalidProgramException in project flink by apache.
From the class CollectionExecutor, the method executeBulkIteration:
@SuppressWarnings("unchecked")
private <T> List<T> executeBulkIteration(BulkIterationBase<?> iteration) throws Exception {
    Operator<?> inputOp = iteration.getInput();
    if (inputOp == null) {
        throw new InvalidProgramException("The iteration " + iteration.getName() + " has no input (initial partial solution).");
    }
    if (iteration.getNextPartialSolution() == null) {
        throw new InvalidProgramException("The iteration " + iteration.getName() + " has no next partial solution defined (is not closed).");
    }

    List<T> inputData = (List<T>) execute(inputOp);

    // get the operators that are iterative
    Set<Operator<?>> dynamics = new LinkedHashSet<Operator<?>>();
    DynamicPathCollector dynCollector = new DynamicPathCollector(dynamics);
    iteration.getNextPartialSolution().accept(dynCollector);
    if (iteration.getTerminationCriterion() != null) {
        iteration.getTerminationCriterion().accept(dynCollector);
    }

    // register the aggregators
    for (AggregatorWithName<?> a : iteration.getAggregators().getAllRegisteredAggregators()) {
        aggregators.put(a.getName(), a.getAggregator());
    }

    String convCriterionAggName = iteration.getAggregators().getConvergenceCriterionAggregatorName();
    ConvergenceCriterion<Value> convCriterion = (ConvergenceCriterion<Value>) iteration.getAggregators().getConvergenceCriterion();

    List<T> currentResult = inputData;
    final int maxIterations = iteration.getMaximumNumberOfIterations();

    for (int superstep = 1; superstep <= maxIterations; superstep++) {
        // set the input to the current partial solution
        this.intermediateResults.put(iteration.getPartialSolution(), currentResult);

        // set the superstep number
        iterationSuperstep = superstep;

        // grab the current iteration result
        currentResult = (List<T>) execute(iteration.getNextPartialSolution(), superstep);

        // evaluate the termination criterion
        if (iteration.getTerminationCriterion() != null) {
            execute(iteration.getTerminationCriterion(), superstep);
        }

        // evaluate the aggregator convergence criterion
        if (convCriterion != null && convCriterionAggName != null) {
            Value v = aggregators.get(convCriterionAggName).getAggregate();
            if (convCriterion.isConverged(superstep, v)) {
                break;
            }
        }

        // clear the dynamic results
        for (Operator<?> o : dynamics) {
            intermediateResults.remove(o);
        }

        // set the previous iteration's aggregates and reset the aggregators
        for (Map.Entry<String, Aggregator<?>> e : aggregators.entrySet()) {
            previousAggregates.put(e.getKey(), e.getValue().getAggregate());
            e.getValue().reset();
        }
    }

    previousAggregates.clear();
    aggregators.clear();
    return currentResult;
}
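On the API side, the superstep loop above is driven by a bulk iteration such as the following sketch (ours, with hypothetical values): the filter plays the role of iteration.getTerminationCriterion(), and the iteration stops once its result is empty or the maximum superstep count is reached.

ExecutionEnvironment env = ExecutionEnvironment.createCollectionsEnvironment();

IterativeDataSet<Long> loop = env.fromElements(0L).iterate(100); // at most 100 supersteps

DataSet<Long> next = loop.map(new MapFunction<Long, Long>() {
    @Override
    public Long map(Long v) {
        return v + 1L;
    }
});

// termination criterion: keep iterating while any element is still below 10
DataSet<Long> stillRunning = next.filter(new FilterFunction<Long>() {
    @Override
    public boolean filter(Long v) {
        return v < 10L;
    }
});

loop.closeWith(next, stillRunning).print(); // prints 10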
Use of org.apache.flink.api.common.InvalidProgramException in project flink by apache.
From the class PartitionITCase, the method testRangePartitionInIteration:
@Test(expected = InvalidProgramException.class)
public void testRangePartitionInIteration() throws Exception {
    // does not apply for collection execution
    if (super.mode == TestExecutionMode.COLLECTION) {
        throw new InvalidProgramException("Does not apply for collection execution");
    }

    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSource<Long> source = env.generateSequence(0, 10000);

    DataSet<Tuple2<Long, String>> tuples = source.map(new MapFunction<Long, Tuple2<Long, String>>() {
        @Override
        public Tuple2<Long, String> map(Long v) throws Exception {
            return new Tuple2<>(v, Long.toString(v));
        }
    });

    DeltaIteration<Tuple2<Long, String>, Tuple2<Long, String>> it = tuples.iterateDelta(tuples, 10, 0);

    // verify that range partitioning is not allowed in an iteration
    DataSet<Tuple2<Long, String>> body = it.getWorkset()
        .partitionByRange(1)
        .join(it.getSolutionSet())
        .where(0).equalTo(0)
        .projectFirst(0).projectSecond(1);

    DataSet<Tuple2<Long, String>> result = it.closeWith(body, body);

    // should fail
    result.collect();
}
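Range partitioning is rejected here because it needs a sampling pass over the data to build a range distribution, and that pass cannot be repeated per superstep inside an iteration. Swapping in a hash partition, which needs no sampling, should make the same body compile; a sketch (ours) under that assumption:

DataSet<Tuple2<Long, String>> body = it.getWorkset()
    .partitionByHash(1) // no sampling pass required, so this is permitted inside the iteration
    .join(it.getSolutionSet())
    .where(0).equalTo(0)
    .projectFirst(0).projectSecond(1);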
Use of org.apache.flink.api.common.InvalidProgramException in project flink by apache.
From the class DeltaIterationTranslationTest, the method testRejectWhenSolutionSetKeysDontMatchCoGroup:
@Test
public void testRejectWhenSolutionSetKeysDontMatchCoGroup() {
    try {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        @SuppressWarnings("unchecked")
        DataSet<Tuple3<Double, Long, String>> initialSolutionSet = env.fromElements(new Tuple3<Double, Long, String>(3.44, 5L, "abc"));

        @SuppressWarnings("unchecked")
        DataSet<Tuple2<Double, String>> initialWorkSet = env.fromElements(new Tuple2<Double, String>(1.23, "abc"));

        DeltaIteration<Tuple3<Double, Long, String>, Tuple2<Double, String>> iteration = initialSolutionSet.iterateDelta(initialWorkSet, 10, 1);

        try {
            iteration.getWorkset().coGroup(iteration.getSolutionSet()).where(1).equalTo(2).with(new SolutionWorksetCoGroup1());
            fail("Accepted invalid program.");
        } catch (InvalidProgramException e) {
            // all good!
        }

        try {
            iteration.getSolutionSet().coGroup(iteration.getWorkset()).where(2).equalTo(1).with(new SolutionWorksetCoGroup2());
            fail("Accepted invalid program.");
        } catch (InvalidProgramException e) {
            // all good!
        }
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail(e.getMessage());
    }
}
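Both rejected coGroups use key positions that do not reference the solution set key declared in iterateDelta(initialWorkSet, 10, 1), namely field 1 (the Long). A sketch (ours, with hypothetical tuple types chosen so the key types line up, since the workset above has no Long field) of the shape that should be accepted:

DataSet<Tuple2<Long, String>> solution = env.fromElements(new Tuple2<>(5L, "abc"));
DataSet<Tuple2<Long, String>> workset = env.fromElements(new Tuple2<>(5L, "def"));

// solution set keyed on field 0
DeltaIteration<Tuple2<Long, String>, Tuple2<Long, String>> it = solution.iterateDelta(workset, 10, 0);

// accepted: equalTo(0) on the solution set side matches the declared key
it.getWorkset()
    .coGroup(it.getSolutionSet())
    .where(0).equalTo(0)
    .with(new CoGroupFunction<Tuple2<Long, String>, Tuple2<Long, String>, Tuple2<Long, String>>() {
        @Override
        public void coGroup(Iterable<Tuple2<Long, String>> ws, Iterable<Tuple2<Long, String>> ss, Collector<Tuple2<Long, String>> out) {
            for (Tuple2<Long, String> t : ws) {
                out.collect(t);
            }
        }
    });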
Use of org.apache.flink.api.common.InvalidProgramException in project flink by apache.
From the class WorksetIterationsJavaApiCompilerTest, the method testRejectPlanIfSolutionSetKeysAndJoinKeysDontMatch:
@Test
public void testRejectPlanIfSolutionSetKeysAndJoinKeysDontMatch() {
    try {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(DEFAULT_PARALLELISM);

        @SuppressWarnings("unchecked")
        DataSet<Tuple3<Long, Long, Long>> solutionSetInput = env.fromElements(new Tuple3<Long, Long, Long>(1L, 2L, 3L)).name("Solution Set");

        @SuppressWarnings("unchecked")
        DataSet<Tuple3<Long, Long, Long>> worksetInput = env.fromElements(new Tuple3<Long, Long, Long>(1L, 2L, 3L)).name("Workset");

        @SuppressWarnings("unchecked")
        DataSet<Tuple3<Long, Long, Long>> invariantInput = env.fromElements(new Tuple3<Long, Long, Long>(1L, 2L, 3L)).name("Invariant Input");

        DeltaIteration<Tuple3<Long, Long, Long>, Tuple3<Long, Long, Long>> iter = solutionSetInput.iterateDelta(worksetInput, 100, 1, 2);

        DataSet<Tuple3<Long, Long, Long>> result = iter.getWorkset()
            .join(invariantInput)
            .where(1, 2).equalTo(1, 2)
            .with(new JoinFunction<Tuple3<Long, Long, Long>, Tuple3<Long, Long, Long>, Tuple3<Long, Long, Long>>() {
                public Tuple3<Long, Long, Long> join(Tuple3<Long, Long, Long> first, Tuple3<Long, Long, Long> second) {
                    return first;
                }
            });

        try {
            // the solution set join keys (0, 2) do not match the declared iteration keys (1, 2)
            result.join(iter.getSolutionSet())
                .where(1, 0).equalTo(0, 2)
                .with(new JoinFunction<Tuple3<Long, Long, Long>, Tuple3<Long, Long, Long>, Tuple3<Long, Long, Long>>() {
                    public Tuple3<Long, Long, Long> join(Tuple3<Long, Long, Long> first, Tuple3<Long, Long, Long> second) {
                        return second;
                    }
                });
            fail("The join should be rejected because its keys do not match the solution set keys.");
        } catch (InvalidProgramException e) {
            // expected!
        }
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail("Test errored: " + e.getMessage());
    }
}
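Only the key positions on the solution set side are constrained: they must equal the keys declared in iterateDelta(worksetInput, 100, 1, 2). A sketch (ours, not part of the test) of the variant that should be accepted:

// accepted: equalTo(1, 2) on the solution set side matches the declared iteration keys
result.join(iter.getSolutionSet())
    .where(1, 0).equalTo(1, 2)
    .with(new JoinFunction<Tuple3<Long, Long, Long>, Tuple3<Long, Long, Long>, Tuple3<Long, Long, Long>>() {
        public Tuple3<Long, Long, Long> join(Tuple3<Long, Long, Long> first, Tuple3<Long, Long, Long> second) {
            return second;
        }
    });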