Use of org.apache.flink.api.common.operators.util.FieldList in project flink by Apache.
The class WorksetIterationsJavaApiCompilerTest, method testJavaApiWithDeferredSoltionSetUpdateWithNonPreservingJoin:
@Test
public void testJavaApiWithDeferredSoltionSetUpdateWithNonPreservingJoin() {
    try {
        Plan plan = getJavaTestPlan(false, false);
        OptimizedPlan oPlan = compileNoStats(plan);
        OptimizerPlanNodeResolver resolver = getOptimizerPlanNodeResolver(oPlan);
        DualInputPlanNode joinWithInvariantNode = resolver.getNode(JOIN_WITH_INVARIANT_NAME);
        DualInputPlanNode joinWithSolutionSetNode = resolver.getNode(JOIN_WITH_SOLUTION_SET);
        SingleInputPlanNode worksetReducer = resolver.getNode(NEXT_WORKSET_REDUCER_NAME);
        // The iteration preserves partitioning in the reducer, so the first partitioning
        // happens outside the loop and the in-loop partitioning sits before the final reducer.
        // verify joinWithInvariant
        assertEquals(ShipStrategyType.FORWARD, joinWithInvariantNode.getInput1().getShipStrategy());
        assertEquals(ShipStrategyType.PARTITION_HASH, joinWithInvariantNode.getInput2().getShipStrategy());
        assertEquals(new FieldList(1, 2), joinWithInvariantNode.getKeysForInput1());
        assertEquals(new FieldList(1, 2), joinWithInvariantNode.getKeysForInput2());
        // verify joinWithSolutionSet
        assertEquals(ShipStrategyType.PARTITION_HASH, joinWithSolutionSetNode.getInput1().getShipStrategy());
        assertEquals(ShipStrategyType.FORWARD, joinWithSolutionSetNode.getInput2().getShipStrategy());
        assertEquals(new FieldList(1, 0), joinWithSolutionSetNode.getKeysForInput1());
        // verify reducer
        assertEquals(ShipStrategyType.PARTITION_HASH, worksetReducer.getInput().getShipStrategy());
        assertEquals(new FieldList(1, 2), worksetReducer.getKeys(0));
        // verify solution delta
        assertEquals(2, joinWithSolutionSetNode.getOutgoingChannels().size());
        assertEquals(ShipStrategyType.PARTITION_HASH, joinWithSolutionSetNode.getOutgoingChannels().get(0).getShipStrategy());
        assertEquals(ShipStrategyType.PARTITION_HASH, joinWithSolutionSetNode.getOutgoingChannels().get(1).getShipStrategy());
        new JobGraphGenerator().compileJobGraph(oPlan);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail("Test errored: " + e.getMessage());
    }
}
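The assertions above compare key lists by value. A minimal standalone sketch of how FieldList behaves, based on the way these tests use it (construction, positional access, iteration, value equality); the immutable addField is part of the Flink API, but treat the printed values as illustrative:

import org.apache.flink.api.common.operators.util.FieldList;

public class FieldListSketch {

    public static void main(String[] args) {
        // An ordered list of field positions, e.g. tuple fields 1 and 2.
        FieldList keys = new FieldList(1, 2);

        // Positional access and iteration, as the compatibility tests below use.
        System.out.println(keys.get(0)); // 1
        for (int field : keys) {
            System.out.println(field); // 1, then 2
        }

        // Value equality is what makes assertEquals(new FieldList(1, 2), ...) work.
        System.out.println(keys.equals(new FieldList(1, 2))); // true

        // addField returns a new FieldList; the original instance is unchanged.
        FieldList extended = keys.addField(0);
        System.out.println(extended.size()); // 3
        System.out.println(keys.size());     // 2
    }
}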
Use of org.apache.flink.api.common.operators.util.FieldList in project flink by Apache.
The class CoGroupGlobalPropertiesCompatibilityTest, method checkCompatiblePartitionings:
@Test
public void checkCompatiblePartitionings() {
    try {
        final FieldList keysLeft = new FieldList(1, 4);
        final FieldList keysRight = new FieldList(3, 1);
        CoGroupDescriptor descr = new CoGroupDescriptor(keysLeft, keysRight);

        // test compatible hash partitioning
        {
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setHashPartitioned(keysLeft);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setHashPartitioned(keysRight);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setHashPartitioned(keysLeft);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setHashPartitioned(keysRight);
            assertTrue(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }
        // test compatible custom partitioning
        {
            Partitioner<Object> part = new Partitioner<Object>() {
                @Override
                public int partition(Object key, int numPartitions) {
                    return 0;
                }
            };
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setCustomPartitioned(keysLeft, part);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setCustomPartitioned(keysRight, part);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setCustomPartitioned(keysLeft, part);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setCustomPartitioned(keysRight, part);
            assertTrue(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }
        // test custom partitioning matching any partitioning
        {
            Partitioner<Object> part = new Partitioner<Object>() {
                @Override
                public int partition(Object key, int numPartitions) {
                    return 0;
                }
            };
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setAnyPartitioning(keysLeft);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setAnyPartitioning(keysRight);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setCustomPartitioned(keysLeft, part);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setCustomPartitioned(keysRight, part);
            assertTrue(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }

        TestDistribution dist1 = new TestDistribution(1);
        TestDistribution dist2 = new TestDistribution(1);

        // test compatible range partitioning with one ordering
        {
            Ordering ordering1 = new Ordering();
            for (int field : keysLeft) {
                ordering1.appendOrdering(field, null, Order.ASCENDING);
            }
            Ordering ordering2 = new Ordering();
            for (int field : keysRight) {
                ordering2.appendOrdering(field, null, Order.ASCENDING);
            }
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setRangePartitioned(ordering1, dist1);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setRangePartitioned(ordering2, dist2);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setRangePartitioned(ordering1, dist1);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setRangePartitioned(ordering2, dist2);
            assertTrue(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }
        // test compatible range partitioning with two orderings
        {
            Ordering ordering1 = new Ordering();
            ordering1.appendOrdering(keysLeft.get(0), null, Order.DESCENDING);
            ordering1.appendOrdering(keysLeft.get(1), null, Order.ASCENDING);
            Ordering ordering2 = new Ordering();
            ordering2.appendOrdering(keysRight.get(0), null, Order.DESCENDING);
            ordering2.appendOrdering(keysRight.get(1), null, Order.ASCENDING);
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setRangePartitioned(ordering1, dist1);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setRangePartitioned(ordering2, dist2);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setRangePartitioned(ordering1, dist1);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setRangePartitioned(ordering2, dist2);
            assertTrue(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
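All five blocks above exercise the positive direction of areCompatible. As a hedged counterpoint (an assumption mirroring the structure above, not taken from this test), a sketch of a pairing that should be rejected: two distinct custom Partitioner instances, since custom partitionings are only co-located when both sides use the same partitioner:

// Hypothetical negative case: distinct Partitioner instances on the two inputs.
final FieldList keysLeft = new FieldList(1, 4);
final FieldList keysRight = new FieldList(3, 1);
CoGroupDescriptor descr = new CoGroupDescriptor(keysLeft, keysRight);
Partitioner<Object> partLeft = new Partitioner<Object>() {
    @Override
    public int partition(Object key, int numPartitions) {
        return 0;
    }
};
Partitioner<Object> partRight = new Partitioner<Object>() {
    @Override
    public int partition(Object key, int numPartitions) {
        return 0;
    }
};
RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
reqLeft.setCustomPartitioned(keysLeft, partLeft);
RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
reqRight.setCustomPartitioned(keysRight, partRight);
GlobalProperties propsLeft = new GlobalProperties();
propsLeft.setCustomPartitioned(keysLeft, partLeft);
GlobalProperties propsRight = new GlobalProperties();
propsRight.setCustomPartitioned(keysRight, partRight);
// Expected to fail compatibility: the two sides do not share a partitioner.
assertFalse(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));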
Use of org.apache.flink.api.common.operators.util.FieldList in project flink by Apache.
The class JoinGlobalPropertiesCompatibilityTest, method checkCompatiblePartitionings:
@Test
public void checkCompatiblePartitionings() {
    try {
        final FieldList keysLeft = new FieldList(1, 4);
        final FieldList keysRight = new FieldList(3, 1);
        SortMergeInnerJoinDescriptor descr = new SortMergeInnerJoinDescriptor(keysLeft, keysRight);

        // test compatible hash partitioning
        {
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setHashPartitioned(keysLeft);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setHashPartitioned(keysRight);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setHashPartitioned(keysLeft);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setHashPartitioned(keysRight);
            assertTrue(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }
        // test compatible custom partitioning
        {
            Partitioner<Object> part = new Partitioner<Object>() {
                @Override
                public int partition(Object key, int numPartitions) {
                    return 0;
                }
            };
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setCustomPartitioned(keysLeft, part);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setCustomPartitioned(keysRight, part);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setCustomPartitioned(keysLeft, part);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setCustomPartitioned(keysRight, part);
            assertTrue(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }
        // test custom partitioning matching any partitioning
        {
            Partitioner<Object> part = new Partitioner<Object>() {
                @Override
                public int partition(Object key, int numPartitions) {
                    return 0;
                }
            };
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setAnyPartitioning(keysLeft);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setAnyPartitioning(keysRight);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setCustomPartitioned(keysLeft, part);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setCustomPartitioned(keysRight, part);
            assertTrue(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }

        TestDistribution dist1 = new TestDistribution(1);
        TestDistribution dist2 = new TestDistribution(1);

        // test compatible range partitioning with one ordering
        {
            Ordering ordering1 = new Ordering();
            for (int field : keysLeft) {
                ordering1.appendOrdering(field, null, Order.ASCENDING);
            }
            Ordering ordering2 = new Ordering();
            for (int field : keysRight) {
                ordering2.appendOrdering(field, null, Order.ASCENDING);
            }
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setRangePartitioned(ordering1, dist1);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setRangePartitioned(ordering2, dist2);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setRangePartitioned(ordering1, dist1);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setRangePartitioned(ordering2, dist2);
            assertTrue(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }
        // test compatible range partitioning with two orderings
        {
            Ordering ordering1 = new Ordering();
            ordering1.appendOrdering(keysLeft.get(0), null, Order.DESCENDING);
            ordering1.appendOrdering(keysLeft.get(1), null, Order.ASCENDING);
            Ordering ordering2 = new Ordering();
            ordering2.appendOrdering(keysRight.get(0), null, Order.DESCENDING);
            ordering2.appendOrdering(keysRight.get(1), null, Order.ASCENDING);
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setRangePartitioned(ordering1, dist1);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setRangePartitioned(ordering2, dist2);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setRangePartitioned(ordering1, dist1);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setRangePartitioned(ordering2, dist2);
            assertTrue(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
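Both range-partitioning blocks build their key orderings incrementally. For reference, the loop over keysLeft in the "one ordering" case constructs the equivalent of the following; getInvolvedIndexes ties the Ordering back to a FieldList (the [1, 4] result is what the append order implies, stated here as an expectation rather than a verified output):

// Equivalent of the "one ordering" loop: ascending on field 1, then field 4
// (the contents of keysLeft), with no type class (null), as in the tests above.
Ordering ordering = new Ordering();
ordering.appendOrdering(1, null, Order.ASCENDING);
ordering.appendOrdering(4, null, Order.ASCENDING);
// The ordered field positions are exposed as a FieldList again.
FieldList fields = ordering.getInvolvedIndexes(); // expected: [1, 4]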
Use of org.apache.flink.api.common.operators.util.FieldList in project flink by Apache.
The class GroupReduceCompilationTest, method testGroupedReduceWithFieldPositionKeyCombinable:
@Test
public void testGroupedReduceWithFieldPositionKeyCombinable() {
    try {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(8);
        DataSet<Tuple2<String, Double>> data = env.readCsvFile("file:///will/never/be/read")
                .types(String.class, Double.class)
                .name("source")
                .setParallelism(6);
        GroupReduceOperator<Tuple2<String, Double>, Tuple2<String, Double>> reduced =
                data.groupBy(1).reduceGroup(new CombineReducer()).name("reducer");
        reduced.setCombinable(true);
        reduced.output(new DiscardingOutputFormat<Tuple2<String, Double>>()).name("sink");
        Plan p = env.createProgramPlan();
        OptimizedPlan op = compileNoStats(p);
        OptimizerPlanNodeResolver resolver = getOptimizerPlanNodeResolver(op);
        // get the original nodes
        SourcePlanNode sourceNode = resolver.getNode("source");
        SingleInputPlanNode reduceNode = resolver.getNode("reducer");
        SinkPlanNode sinkNode = resolver.getNode("sink");
        // get the combiner
        SingleInputPlanNode combineNode = (SingleInputPlanNode) reduceNode.getInput().getSource();
        // check wiring
        assertEquals(sourceNode, combineNode.getInput().getSource());
        assertEquals(reduceNode, sinkNode.getInput().getSource());
        // check that reducer and combiner use the expected sorted strategies
        assertEquals(DriverStrategy.SORTED_GROUP_REDUCE, reduceNode.getDriverStrategy());
        assertEquals(DriverStrategy.SORTED_GROUP_COMBINE, combineNode.getDriverStrategy());
        // check the keys
        assertEquals(new FieldList(1), reduceNode.getKeys(0));
        assertEquals(new FieldList(1), combineNode.getKeys(0));
        assertEquals(new FieldList(1), combineNode.getKeys(1));
        assertEquals(new FieldList(1), reduceNode.getInput().getLocalStrategyKeys());
        // check parallelism
        assertEquals(6, sourceNode.getParallelism());
        assertEquals(6, combineNode.getParallelism());
        assertEquals(8, reduceNode.getParallelism());
        assertEquals(8, sinkNode.getParallelism());
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail(e.getClass().getSimpleName() + " in test: " + e.getMessage());
    }
}
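The test relies on a CombineReducer helper defined elsewhere in GroupReduceCompilationTest. A minimal sketch of what a combinable group-reduce function with that signature can look like; the per-group summing logic is an illustrative assumption, not the test's actual body:

import org.apache.flink.api.common.functions.GroupCombineFunction;
import org.apache.flink.api.common.functions.GroupReduceFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;

// Implements both interfaces, which is what setCombinable(true) presupposes:
// combine() pre-aggregates on the sending side before the shuffle.
public static class CombineReducer
        implements GroupReduceFunction<Tuple2<String, Double>, Tuple2<String, Double>>,
                   GroupCombineFunction<Tuple2<String, Double>, Tuple2<String, Double>> {

    @Override
    public void reduce(Iterable<Tuple2<String, Double>> values, Collector<Tuple2<String, Double>> out) {
        String key = null;
        double sum = 0.0;
        for (Tuple2<String, Double> t : values) {
            key = t.f0;
            sum += t.f1;
        }
        out.collect(new Tuple2<String, Double>(key, sum));
    }

    @Override
    public void combine(Iterable<Tuple2<String, Double>> values, Collector<Tuple2<String, Double>> out) {
        // For a sum, the partial aggregate is the same computation as the final reduce.
        reduce(values, out);
    }
}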
Use of org.apache.flink.api.common.operators.util.FieldList in project flink by Apache.
The class GroupReduceCompilationTest, method testGroupedReduceWithSelectorFunctionKeyCombinable:
@Test
public void testGroupedReduceWithSelectorFunctionKeyCombinable() {
    try {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(8);
        DataSet<Tuple2<String, Double>> data = env.readCsvFile("file:///will/never/be/read")
                .types(String.class, Double.class)
                .name("source")
                .setParallelism(6);
        GroupReduceOperator<Tuple2<String, Double>, Tuple2<String, Double>> reduced = data
                .groupBy(new KeySelector<Tuple2<String, Double>, String>() {
                    public String getKey(Tuple2<String, Double> value) {
                        return value.f0;
                    }
                })
                .reduceGroup(new CombineReducer())
                .name("reducer");
        reduced.setCombinable(true);
        reduced.output(new DiscardingOutputFormat<Tuple2<String, Double>>()).name("sink");
        Plan p = env.createProgramPlan();
        OptimizedPlan op = compileNoStats(p);
        OptimizerPlanNodeResolver resolver = getOptimizerPlanNodeResolver(op);
        // get the original nodes
        SourcePlanNode sourceNode = resolver.getNode("source");
        SingleInputPlanNode reduceNode = resolver.getNode("reducer");
        SinkPlanNode sinkNode = resolver.getNode("sink");
        // get the combiner
        SingleInputPlanNode combineNode = (SingleInputPlanNode) reduceNode.getInput().getSource();
        // get the key extractor and projector
        SingleInputPlanNode keyExtractor = (SingleInputPlanNode) combineNode.getInput().getSource();
        SingleInputPlanNode keyProjector = (SingleInputPlanNode) sinkNode.getInput().getSource();
        // check wiring
        assertEquals(sourceNode, keyExtractor.getInput().getSource());
        assertEquals(keyProjector, sinkNode.getInput().getSource());
        // check that reducer and combiner use the expected sorted strategies
        assertEquals(DriverStrategy.SORTED_GROUP_REDUCE, reduceNode.getDriverStrategy());
        assertEquals(DriverStrategy.SORTED_GROUP_COMBINE, combineNode.getDriverStrategy());
        // check the keys
        assertEquals(new FieldList(0), reduceNode.getKeys(0));
        assertEquals(new FieldList(0), combineNode.getKeys(0));
        assertEquals(new FieldList(0), combineNode.getKeys(1));
        assertEquals(new FieldList(0), reduceNode.getInput().getLocalStrategyKeys());
        // check parallelism
        assertEquals(6, sourceNode.getParallelism());
        assertEquals(6, keyExtractor.getParallelism());
        assertEquals(6, combineNode.getParallelism());
        assertEquals(8, reduceNode.getParallelism());
        assertEquals(8, keyProjector.getParallelism());
        assertEquals(8, sinkNode.getParallelism());
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail(e.getClass().getSimpleName() + " in test: " + e.getMessage());
    }
}
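Compared with the field-position variant, every key assertion here is new FieldList(0): with a KeySelector the plan inserts a key extractor in front of the combiner (and a projector before the sink), and records travel as (key, value) pairs in which the extracted key occupies position 0. A rough sketch of the wrapping idea, reusing data from the test; the Tuple2 wrapper here is a conceptual stand-in, not the exact internal type Flink uses:

// Conceptual equivalent of the inserted key-extractor node: map each record
// to (extracted key, original record), so grouping keys refer to position 0.
DataSet<Tuple2<String, Tuple2<String, Double>>> withKeys = data
    .map(new MapFunction<Tuple2<String, Double>, Tuple2<String, Tuple2<String, Double>>>() {
        @Override
        public Tuple2<String, Tuple2<String, Double>> map(Tuple2<String, Double> value) {
            return new Tuple2<String, Tuple2<String, Double>>(value.f0, value);
        }
    });
// This is why reduceNode.getKeys(0) above equals new FieldList(0).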