Use of org.apache.flink.optimizer.plan.SourcePlanNode in project flink by apache.
From the class PropertyDataSourceTest, method checkSinglePartitionedSource2.
@Test
public void checkSinglePartitionedSource2() {
    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple2<Long, String>> data = env.readCsvFile("/some/path").types(Long.class, String.class);
    data.getSplitDataProperties().splitsPartitionedBy(1, 0);
    data.output(new DiscardingOutputFormat<Tuple2<Long, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(0, 1)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.ANY_PARTITIONING);
    Assert.assertTrue(lprops.getGroupedFields() == null);
    Assert.assertTrue(lprops.getOrdering() == null);
}
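The snippets on this page show only the test methods; the imports and the surrounding class are stripped. Below is a minimal sketch of the context they assume: PropertyDataSourceTest extends the optimizer's CompilerTestBase, which supplies the compileNoStats(Plan) helper and the DEFAULT_PARALLELISM constant. The import list is reconstructed from the code above, and exact package names may vary between Flink versions.

// Reconstructed context for the snippets on this page (a sketch, not copied from the original file).
import org.apache.flink.api.common.Plan;
import org.apache.flink.api.common.operators.Order;
import org.apache.flink.api.common.operators.util.FieldSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.DiscardingOutputFormat;
import org.apache.flink.api.java.operators.DataSource;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.optimizer.dataproperties.GlobalProperties;
import org.apache.flink.optimizer.dataproperties.LocalProperties;
import org.apache.flink.optimizer.dataproperties.PartitioningProperty;
import org.apache.flink.optimizer.plan.NAryUnionPlanNode;
import org.apache.flink.optimizer.plan.OptimizedPlan;
import org.apache.flink.optimizer.plan.SinkPlanNode;
import org.apache.flink.optimizer.plan.SourcePlanNode;
import org.apache.flink.optimizer.util.CompilerTestBase;
import org.junit.Assert;
import org.junit.Test;

// CompilerTestBase provides compileNoStats(Plan) and DEFAULT_PARALLELISM used throughout these tests.
public class PropertyDataSourceTest extends CompilerTestBase {
    // the test methods shown on this page go here
}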
Use of org.apache.flink.optimizer.plan.SourcePlanNode in project flink by apache.
From the class PropertyDataSourceTest, method checkSinglePartitionedGroupedSource8.
@Test
public void checkSinglePartitionedGroupedSource8() {
    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple3<Long, SomePojo, String>> data = env.fromCollection(tuple3PojoData, tuple3PojoType);
    data.getSplitDataProperties()
        .splitsPartitionedBy("f1")
        .splitsGroupedBy("f1.stringField");
    data.output(new DiscardingOutputFormat<Tuple3<Long, SomePojo, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(1, 2, 3)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.ANY_PARTITIONING);
    Assert.assertTrue(lprops.getGroupedFields() == null);
    Assert.assertTrue(lprops.getOrdering() == null);
}
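The POJO-based tests rely on fixtures defined elsewhere in the class: SomePojo, tuple3PojoData, and tuple3PojoType. Below is a hedged sketch of their assumed shape. The field names of SomePojo are an assumption, but the asserted field sets only work out if the POJO flattens to three fields, so that Tuple3<Long, SomePojo, String> occupies flat positions 0 (f0), 1 to 3 (f1), and 4 (f2), with f1.stringField at position 3.

// Sketch of the assumed fixtures (field names are an assumption, not taken from this page).
// Flink flattens the POJO's fields in sorted name order, so "f1" covers flat positions 1, 2, 3
// and "f1.stringField" maps to position 3, matching the FieldSet assertions above.
// Additionally needs: java.util.ArrayList, java.util.List, BasicTypeInfo, TupleTypeInfo, TypeExtractor.
public static class SomePojo {
    public double doubleField;
    public int intField;
    public String stringField;
}

private List<Tuple3<Long, SomePojo, String>> tuple3PojoData =
        new ArrayList<Tuple3<Long, SomePojo, String>>();

private TupleTypeInfo<Tuple3<Long, SomePojo, String>> tuple3PojoType =
        new TupleTypeInfo<Tuple3<Long, SomePojo, String>>(
                BasicTypeInfo.LONG_TYPE_INFO,
                TypeExtractor.createTypeInfo(SomePojo.class),
                BasicTypeInfo.STRING_TYPE_INFO);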
Use of org.apache.flink.optimizer.plan.SourcePlanNode in project flink by apache.
From the class PropertyDataSourceTest, method checkSinglePartitionedSource5.
@Test
public void checkSinglePartitionedSource5() {
    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple3<Long, SomePojo, String>> data = env.fromCollection(tuple3PojoData, tuple3PojoType);
    data.getSplitDataProperties().splitsPartitionedBy("f1.stringField");
    data.output(new DiscardingOutputFormat<Tuple3<Long, SomePojo, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(3)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.ANY_PARTITIONING);
    Assert.assertTrue(lprops.getGroupedFields() == null);
    Assert.assertTrue(lprops.getOrdering() == null);
}
Use of org.apache.flink.optimizer.plan.SourcePlanNode in project flink by apache.
From the class PropertyDataSourceTest, method checkCoPartitionedSources1.
@Test
public void checkCoPartitionedSources1() {
    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple2<Long, String>> data1 = env.readCsvFile("/some/path").types(Long.class, String.class);
    data1.getSplitDataProperties().splitsPartitionedBy("byDate", 0);

    DataSource<Tuple2<Long, String>> data2 = env.readCsvFile("/some/path").types(Long.class, String.class);
    data2.getSplitDataProperties().splitsPartitionedBy("byDate", 0);

    data1.union(data2).output(new DiscardingOutputFormat<Tuple2<Long, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode1 =
            (SourcePlanNode) ((NAryUnionPlanNode) sinkNode.getPredecessor()).getListOfInputs().get(0).getSource();
    SourcePlanNode sourceNode2 =
            (SourcePlanNode) ((NAryUnionPlanNode) sinkNode.getPredecessor()).getListOfInputs().get(1).getSource();

    GlobalProperties gprops1 = sourceNode1.getGlobalProperties();
    LocalProperties lprops1 = sourceNode1.getLocalProperties();
    GlobalProperties gprops2 = sourceNode2.getGlobalProperties();
    LocalProperties lprops2 = sourceNode2.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops1.getPartitioningFields().toArray())).equals(new FieldSet(0)));
    Assert.assertTrue(gprops1.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING);
    Assert.assertTrue(lprops1.getGroupedFields() == null);
    Assert.assertTrue(lprops1.getOrdering() == null);

    Assert.assertTrue((new FieldSet(gprops2.getPartitioningFields().toArray())).equals(new FieldSet(0)));
    Assert.assertTrue(gprops2.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING);
    Assert.assertTrue(lprops2.getGroupedFields() == null);
    Assert.assertTrue(lprops2.getOrdering() == null);

    Assert.assertTrue(gprops1.getCustomPartitioner().equals(gprops2.getCustomPartitioner()));
}
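What marks the two sources as co-partitioned is the shared partition-method id ("byDate") passed to splitsPartitionedBy(String, int...) on the same field; the final assertion checks that the optimizer derived equal custom partitioners for both inputs. The following is a hedged sketch of the complementary negative check, assuming that different ids yield custom partitioners that do not compare equal. It is not taken from this page, and the method name is hypothetical.

// Hedged sketch: with different partition-method ids, the derived custom partitioners
// are expected not to compare equal, so the sources are not treated as co-partitioned.
@Test
public void checkNotCoPartitionedSourcesSketch() {
    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple2<Long, String>> data1 = env.readCsvFile("/some/path").types(Long.class, String.class);
    data1.getSplitDataProperties().splitsPartitionedBy("byDate", 0);

    DataSource<Tuple2<Long, String>> data2 = env.readCsvFile("/some/path").types(Long.class, String.class);
    data2.getSplitDataProperties().splitsPartitionedBy("byCountry", 0);

    data1.union(data2).output(new DiscardingOutputFormat<Tuple2<Long, String>>());

    OptimizedPlan oPlan = compileNoStats(env.createProgramPlan());

    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode1 =
            (SourcePlanNode) ((NAryUnionPlanNode) sinkNode.getPredecessor()).getListOfInputs().get(0).getSource();
    SourcePlanNode sourceNode2 =
            (SourcePlanNode) ((NAryUnionPlanNode) sinkNode.getPredecessor()).getListOfInputs().get(1).getSource();

    GlobalProperties gprops1 = sourceNode1.getGlobalProperties();
    GlobalProperties gprops2 = sourceNode2.getGlobalProperties();

    // both sources are still custom-partitioned on field 0, but by different (incompatible) methods
    Assert.assertTrue(gprops1.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING);
    Assert.assertTrue(gprops2.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING);
    Assert.assertFalse(gprops1.getCustomPartitioner().equals(gprops2.getCustomPartitioner()));
}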
Use of org.apache.flink.optimizer.plan.SourcePlanNode in project flink by apache.
From the class PropertyDataSourceTest, method checkSinglePartitionedOrderedSource2.
@Test
public void checkSinglePartitionedOrderedSource2() {
    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple2<Long, String>> data = env.readCsvFile("/some/path").types(Long.class, String.class);
    data.getSplitDataProperties()
        .splitsPartitionedBy(1)
        .splitsOrderedBy(new int[] { 1, 0 }, new Order[] { Order.ASCENDING, Order.DESCENDING });
    data.output(new DiscardingOutputFormat<Tuple2<Long, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(1)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.ANY_PARTITIONING);
    Assert.assertTrue((new FieldSet(lprops.getGroupedFields().toArray())).equals(new FieldSet(1, 0)));
    Assert.assertTrue(lprops.getOrdering() == null);
}