Use of org.apache.flink.api.common.ExecutionConfig in project flink by apache.
In the class ConnectedComponentsCoGroupTest, method testWorksetConnectedComponents.
@Test
public void testWorksetConnectedComponents() {
    Plan plan = getConnectedComponentsCoGroupPlan();
    plan.setExecutionConfig(new ExecutionConfig());
    OptimizedPlan optPlan = compileNoStats(plan);
    OptimizerPlanNodeResolver or = getOptimizerPlanNodeResolver(optPlan);

    if (PRINT_PLAN) {
        PlanJSONDumpGenerator dumper = new PlanJSONDumpGenerator();
        String json = dumper.getOptimizerPlanAsJSON(optPlan);
        System.out.println(json);
    }

    SourcePlanNode vertexSource = or.getNode(VERTEX_SOURCE);
    SourcePlanNode edgesSource = or.getNode(EDGES_SOURCE);
    SinkPlanNode sink = or.getNode(SINK);
    WorksetIterationPlanNode iter = or.getNode(ITERATION_NAME);
    DualInputPlanNode neighborsJoin = or.getNode(JOIN_NEIGHBORS_MATCH);
    DualInputPlanNode cogroup = or.getNode(MIN_ID_AND_UPDATE);

    // --------------------------------------------------------------------
    // Plan validation:
    //
    // We expect the plan to go with a sort-merge join, because the CoGroup
    // sorts and the join in the successive iteration can re-exploit the sorting.
    // --------------------------------------------------------------------

    // test all drivers
    Assert.assertEquals(DriverStrategy.NONE, sink.getDriverStrategy());
    Assert.assertEquals(DriverStrategy.NONE, vertexSource.getDriverStrategy());
    Assert.assertEquals(DriverStrategy.NONE, edgesSource.getDriverStrategy());
    Assert.assertEquals(DriverStrategy.INNER_MERGE, neighborsJoin.getDriverStrategy());
    Assert.assertEquals(set0, neighborsJoin.getKeysForInput1());
    Assert.assertEquals(set0, neighborsJoin.getKeysForInput2());
    Assert.assertEquals(DriverStrategy.CO_GROUP, cogroup.getDriverStrategy());
    Assert.assertEquals(set0, cogroup.getKeysForInput1());
    Assert.assertEquals(set0, cogroup.getKeysForInput2());

    // test all the shipping strategies
    Assert.assertEquals(ShipStrategyType.FORWARD, sink.getInput().getShipStrategy());
    Assert.assertEquals(ShipStrategyType.PARTITION_HASH, iter.getInitialSolutionSetInput().getShipStrategy());
    Assert.assertEquals(set0, iter.getInitialSolutionSetInput().getShipStrategyKeys());
    Assert.assertEquals(ShipStrategyType.PARTITION_HASH, iter.getInitialWorksetInput().getShipStrategy());
    Assert.assertEquals(set0, iter.getInitialWorksetInput().getShipStrategyKeys());
    // workset
    Assert.assertEquals(ShipStrategyType.FORWARD, neighborsJoin.getInput1().getShipStrategy());
    // edges
    Assert.assertEquals(ShipStrategyType.PARTITION_HASH, neighborsJoin.getInput2().getShipStrategy());
    Assert.assertEquals(set0, neighborsJoin.getInput2().getShipStrategyKeys());
    Assert.assertTrue(neighborsJoin.getInput2().getTempMode().isCached());
    // min id
    Assert.assertEquals(ShipStrategyType.PARTITION_HASH, cogroup.getInput1().getShipStrategy());
    // solution set
    Assert.assertEquals(ShipStrategyType.FORWARD, cogroup.getInput2().getShipStrategy());

    // test all the local strategies
    Assert.assertEquals(LocalStrategy.NONE, sink.getInput().getLocalStrategy());
    Assert.assertEquals(LocalStrategy.NONE, iter.getInitialSolutionSetInput().getLocalStrategy());
    // the sort for the neighbor join in the first iteration is pushed out of the loop
    Assert.assertEquals(LocalStrategy.SORT, iter.getInitialWorksetInput().getLocalStrategy());
    // workset
    Assert.assertEquals(LocalStrategy.NONE, neighborsJoin.getInput1().getLocalStrategy());
    // edges
    Assert.assertEquals(LocalStrategy.SORT, neighborsJoin.getInput2().getLocalStrategy());
    Assert.assertEquals(LocalStrategy.SORT, cogroup.getInput1().getLocalStrategy());
    // solution set
    Assert.assertEquals(LocalStrategy.NONE, cogroup.getInput2().getLocalStrategy());

    // check the caches
    Assert.assertTrue(TempMode.CACHED == neighborsJoin.getInput2().getTempMode());

    JobGraphGenerator jgg = new JobGraphGenerator();
    jgg.compileJobGraph(optPlan);
}
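The test above attaches a default ExecutionConfig to the Plan before handing it to the optimizer. As a condensed sketch of the same compile path, the config can also carry non-default settings; the setParallelism call below is purely illustrative and not part of the original test, while getConnectedComponentsCoGroupPlan() and compileNoStats() are the same helpers used above.

// Sketch only: same compile path as the test above, with an illustrative ExecutionConfig setting.
Plan plan = getConnectedComponentsCoGroupPlan();

ExecutionConfig config = new ExecutionConfig();
config.setParallelism(4);    // illustrative setting, not in the original test
plan.setExecutionConfig(config);

OptimizedPlan optimized = compileNoStats(plan);

// generate the job graph from the optimized plan, as the test does at the end
new JobGraphGenerator().compileJobGraph(optimized);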
Use of org.apache.flink.api.common.ExecutionConfig in project flink by apache.
In the class KMeansSingleStepTest, method testCompileKMeansSingleStepWithOutStats.
@Test
public void testCompileKMeansSingleStepWithOutStats() {
    Plan p = getKMeansPlan();
    p.setExecutionConfig(new ExecutionConfig());
    OptimizedPlan plan = compileNoStats(p);
    checkPlan(plan);
}
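The same pattern works with any ExecutionConfig settings. A hedged variation of the test above that enables object reuse before compiling might look as follows; the test name and the enableObjectReuse() call are illustrative additions, not part of the original test class.

@Test
public void testCompileKMeansSingleStepWithObjectReuse() {    // hypothetical variant
    Plan p = getKMeansPlan();
    ExecutionConfig config = new ExecutionConfig();
    config.enableObjectReuse();    // illustrative, not in the original test
    p.setExecutionConfig(config);
    OptimizedPlan plan = compileNoStats(p);
    checkPlan(plan);
}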
Use of org.apache.flink.api.common.ExecutionConfig in project flink by apache.
In the class MiniClusterITCase, method getSimpleJob.
private static JobGraph getSimpleJob() throws IOException {
    JobVertex task = new JobVertex("Test task");
    task.setParallelism(1);
    task.setMaxParallelism(1);
    task.setInvokableClass(NoOpInvokable.class);

    JobGraph jg = new JobGraph(new JobID(), "Test Job", task);
    jg.setAllowQueuedScheduling(true);
    jg.setScheduleMode(ScheduleMode.EAGER);

    ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));
    jg.setExecutionConfig(executionConfig);

    return jg;
}
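A hedged variation of the helper above: the same JobGraph can be built with a restart strategy that never restarts, which is useful when a test expects the job to fail permanently. The method name is hypothetical; the classes are the same as above, and setExecutionConfig serializes the config, which is why IOException is declared.

private static JobGraph getNonRestartingJob() throws IOException {    // hypothetical variant
    JobVertex task = new JobVertex("Test task");
    task.setParallelism(1);
    task.setMaxParallelism(1);
    task.setInvokableClass(NoOpInvokable.class);

    JobGraph jg = new JobGraph(new JobID(), "Test Job (no restart)", task);

    ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setRestartStrategy(RestartStrategies.noRestart());    // never restart on failure
    jg.setExecutionConfig(executionConfig);    // serializes the config, hence the IOException
    return jg;
}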
Use of org.apache.flink.api.common.ExecutionConfig in project flink by apache.
In the class SourceFunctionUtil, method runSourceFunction.
public static <T extends Serializable> List<T> runSourceFunction(SourceFunction<T> sourceFunction) throws Exception {
    final List<T> outputs = new ArrayList<T>();

    if (sourceFunction instanceof RichFunction) {
        AbstractStreamOperator<?> operator = mock(AbstractStreamOperator.class);
        when(operator.getExecutionConfig()).thenReturn(new ExecutionConfig());

        RuntimeContext runtimeContext = new StreamingRuntimeContext(
                operator,
                new MockEnvironment("MockTask", 3 * 1024 * 1024, new MockInputSplitProvider(), 1024),
                new HashMap<String, Accumulator<?, ?>>());

        ((RichFunction) sourceFunction).setRuntimeContext(runtimeContext);
        ((RichFunction) sourceFunction).open(new Configuration());
    }

    try {
        SourceFunction.SourceContext<T> ctx = new CollectingSourceContext<T>(new Object(), outputs);
        sourceFunction.run(ctx);
    } catch (Exception e) {
        throw new RuntimeException("Cannot invoke source.", e);
    }
    return outputs;
}
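A usage sketch for the utility above (the bounded source function here is made up for illustration): the helper runs the source synchronously and returns whatever was collected through the SourceContext. Note that runSourceFunction declares throws Exception, so the caller must handle or propagate it.

// Sketch: run a simple, hypothetical bounded source through the utility.
List<Long> emitted = SourceFunctionUtil.runSourceFunction(new SourceFunction<Long>() {

    @Override
    public void run(SourceContext<Long> ctx) {
        for (long i = 0; i < 3; i++) {
            ctx.collect(i);
        }
    }

    @Override
    public void cancel() {
        // nothing to do for a bounded source
    }
});
// emitted now contains [0, 1, 2]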
Use of org.apache.flink.api.common.ExecutionConfig in project flink by apache.
In the class CEPOperatorTest, method testKeyedAdvancingTimeWithoutElements.
/**
 * Tests that the internal time of a CEP operator advances only when watermarks are received.
 * See FLINK-5033.
 */
@Test
public void testKeyedAdvancingTimeWithoutElements() throws Exception {
    final KeySelector<Event, Integer> keySelector = new TestKeySelector();
    final Event startEvent = new Event(42, "start", 1.0);
    final long watermarkTimestamp1 = 5L;
    final long watermarkTimestamp2 = 13L;

    final Map<String, Event> expectedSequence = new HashMap<>(2);
    expectedSequence.put("start", startEvent);

    OneInputStreamOperatorTestHarness<Event, Either<Tuple2<Map<String, Event>, Long>, Map<String, Event>>> harness =
            new KeyedOneInputStreamOperatorTestHarness<>(
                    new TimeoutKeyedCEPPatternOperator<>(
                            Event.createTypeSerializer(),
                            false,
                            keySelector,
                            IntSerializer.INSTANCE,
                            new NFAFactory(true),
                            true),
                    keySelector,
                    BasicTypeInfo.INT_TYPE_INFO);

    try {
        harness.setup(new KryoSerializer<>(
                (Class<Either<Tuple2<Map<String, Event>, Long>, Map<String, Event>>>) (Object) Either.class,
                new ExecutionConfig()));
        harness.open();

        harness.processElement(new StreamRecord<>(startEvent, 3L));
        harness.processWatermark(new Watermark(watermarkTimestamp1));
        harness.processWatermark(new Watermark(watermarkTimestamp2));

        Queue<Object> result = harness.getOutput();
        assertEquals(3L, result.size());

        Object watermark1 = result.poll();
        assertTrue(watermark1 instanceof Watermark);
        assertEquals(watermarkTimestamp1, ((Watermark) watermark1).getTimestamp());

        Object resultObject = result.poll();
        assertTrue(resultObject instanceof StreamRecord);
        StreamRecord<Either<Tuple2<Map<String, Event>, Long>, Map<String, Event>>> streamRecord =
                (StreamRecord<Either<Tuple2<Map<String, Event>, Long>, Map<String, Event>>>) resultObject;
        assertTrue(streamRecord.getValue() instanceof Either.Left);

        Either.Left<Tuple2<Map<String, Event>, Long>, Map<String, Event>> left =
                (Either.Left<Tuple2<Map<String, Event>, Long>, Map<String, Event>>) streamRecord.getValue();
        Tuple2<Map<String, Event>, Long> leftResult = left.left();
        assertEquals(watermarkTimestamp2, (long) leftResult.f1);
        assertEquals(expectedSequence, leftResult.f0);

        Object watermark2 = result.poll();
        assertTrue(watermark2 instanceof Watermark);
        assertEquals(watermarkTimestamp2, ((Watermark) watermark2).getTimestamp());
    } finally {
        harness.close();
    }
}
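The ExecutionConfig passed to the KryoSerializer in harness.setup(...) is what carries Kryo-related settings into the serializer. As a hedged sketch, a pre-configured ExecutionConfig could be used instead of the default one; the registerKryoType call below is illustrative and not part of the original test.

// Sketch: build the same serializer from a pre-configured ExecutionConfig.
ExecutionConfig config = new ExecutionConfig();
config.registerKryoType(Event.class);    // illustrative registration, not in the original test

KryoSerializer<Either<Tuple2<Map<String, Event>, Long>, Map<String, Event>>> serializer =
        new KryoSerializer<>(
                (Class<Either<Tuple2<Map<String, Event>, Long>, Map<String, Event>>>) (Object) Either.class,
                config);
// this serializer could then be passed to harness.setup(...)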