Example 1 with Partitioner

Use of org.apache.flink.api.common.functions.Partitioner in project flink by apache.

Source: class JoinGlobalPropertiesCompatibilityTest, method checkCompatiblePartitionings.

@Test
public void checkCompatiblePartitionings() {
    try {
        final FieldList keysLeft = new FieldList(1, 4);
        final FieldList keysRight = new FieldList(3, 1);
        SortMergeInnerJoinDescriptor descr = new SortMergeInnerJoinDescriptor(keysLeft, keysRight);
        // test compatible hash partitioning
        {
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setHashPartitioned(keysLeft);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setHashPartitioned(keysRight);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setHashPartitioned(keysLeft);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setHashPartitioned(keysRight);
            assertTrue(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }
        // test compatible custom partitioning
        {
            Partitioner<Object> part = new Partitioner<Object>() {

                @Override
                public int partition(Object key, int numPartitions) {
                    return 0;
                }
            };
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setCustomPartitioned(keysLeft, part);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setCustomPartitioned(keysRight, part);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setCustomPartitioned(keysLeft, part);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setCustomPartitioned(keysRight, part);
            assertTrue(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }
        // test custom partitioning matching any partitioning
        {
            Partitioner<Object> part = new Partitioner<Object>() {

                @Override
                public int partition(Object key, int numPartitions) {
                    return 0;
                }
            };
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setAnyPartitioning(keysLeft);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setAnyPartitioning(keysRight);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setCustomPartitioned(keysLeft, part);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setCustomPartitioned(keysRight, part);
            assertTrue(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }
        TestDistribution dist1 = new TestDistribution(1);
        TestDistribution dist2 = new TestDistribution(1);
        // test compatible range partitioning with one ordering
        {
            Ordering ordering1 = new Ordering();
            for (int field : keysLeft) {
                ordering1.appendOrdering(field, null, Order.ASCENDING);
            }
            Ordering ordering2 = new Ordering();
            for (int field : keysRight) {
                ordering2.appendOrdering(field, null, Order.ASCENDING);
            }
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setRangePartitioned(ordering1, dist1);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setRangePartitioned(ordering2, dist2);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setRangePartitioned(ordering1, dist1);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setRangePartitioned(ordering2, dist2);
            assertTrue(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }
        // test compatible range partitioning with two orderings
        {
            Ordering ordering1 = new Ordering();
            ordering1.appendOrdering(keysLeft.get(0), null, Order.DESCENDING);
            ordering1.appendOrdering(keysLeft.get(1), null, Order.ASCENDING);
            Ordering ordering2 = new Ordering();
            ordering2.appendOrdering(keysRight.get(0), null, Order.DESCENDING);
            ordering2.appendOrdering(keysRight.get(1), null, Order.ASCENDING);
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setRangePartitioned(ordering1, dist1);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setRangePartitioned(ordering2, dist2);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setRangePartitioned(ordering1, dist1);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setRangePartitioned(ordering2, dist2);
            assertTrue(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used : RequestedGlobalProperties(org.apache.flink.optimizer.dataproperties.RequestedGlobalProperties) GlobalProperties(org.apache.flink.optimizer.dataproperties.GlobalProperties) Ordering(org.apache.flink.api.common.operators.Ordering) Partitioner(org.apache.flink.api.common.functions.Partitioner) FieldList(org.apache.flink.api.common.operators.util.FieldList) Test(org.junit.Test)
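
For orientation, the Partitioner contract exercised by this test is a single method, partition(key, numPartitions), which returns the target channel for a key; note that the compatible custom-partitioning case above deliberately passes the same Partitioner instance to both the left and right properties. Below is a minimal, hypothetical named implementation of that same interface (ModuloPartitioner is illustrative and not part of the Flink tests):

import org.apache.flink.api.common.functions.Partitioner;

// Hypothetical example: route integer keys to channels by modulo.
public class ModuloPartitioner implements Partitioner<Integer> {

    @Override
    public int partition(Integer key, int numPartitions) {
        // floorMod keeps the result in [0, numPartitions) even for negative keys.
        return Math.floorMod(key, numPartitions);
    }
}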

Example 2 with Partitioner

Use of org.apache.flink.api.common.functions.Partitioner in project flink by apache.

Source: class CoGroupGlobalPropertiesCompatibilityTest, method checkCompatiblePartitionings.

@Test
public void checkCompatiblePartitionings() {
    try {
        final FieldList keysLeft = new FieldList(1, 4);
        final FieldList keysRight = new FieldList(3, 1);
        CoGroupDescriptor descr = new CoGroupDescriptor(keysLeft, keysRight);
        // test compatible hash partitioning
        {
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setHashPartitioned(keysLeft);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setHashPartitioned(keysRight);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setHashPartitioned(keysLeft);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setHashPartitioned(keysRight);
            assertTrue(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }
        // test compatible custom partitioning
        {
            Partitioner<Object> part = new Partitioner<Object>() {

                @Override
                public int partition(Object key, int numPartitions) {
                    return 0;
                }
            };
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setCustomPartitioned(keysLeft, part);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setCustomPartitioned(keysRight, part);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setCustomPartitioned(keysLeft, part);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setCustomPartitioned(keysRight, part);
            assertTrue(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }
        // test custom partitioning matching any partitioning
        {
            Partitioner<Object> part = new Partitioner<Object>() {

                @Override
                public int partition(Object key, int numPartitions) {
                    return 0;
                }
            };
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setAnyPartitioning(keysLeft);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setAnyPartitioning(keysRight);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setCustomPartitioned(keysLeft, part);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setCustomPartitioned(keysRight, part);
            assertTrue(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }
        TestDistribution dist1 = new TestDistribution(1);
        TestDistribution dist2 = new TestDistribution(1);
        // test compatible range partitioning with one ordering
        {
            Ordering ordering1 = new Ordering();
            for (int field : keysLeft) {
                ordering1.appendOrdering(field, null, Order.ASCENDING);
            }
            Ordering ordering2 = new Ordering();
            for (int field : keysRight) {
                ordering2.appendOrdering(field, null, Order.ASCENDING);
            }
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setRangePartitioned(ordering1, dist1);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setRangePartitioned(ordering2, dist2);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setRangePartitioned(ordering1, dist1);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setRangePartitioned(ordering2, dist2);
            assertTrue(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }
        // test compatible range partitioning with two orderings
        {
            Ordering ordering1 = new Ordering();
            ordering1.appendOrdering(keysLeft.get(0), null, Order.DESCENDING);
            ordering1.appendOrdering(keysLeft.get(1), null, Order.ASCENDING);
            Ordering ordering2 = new Ordering();
            ordering2.appendOrdering(keysRight.get(0), null, Order.DESCENDING);
            ordering2.appendOrdering(keysRight.get(1), null, Order.ASCENDING);
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setRangePartitioned(ordering1, dist1);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setRangePartitioned(ordering2, dist2);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setRangePartitioned(ordering1, dist1);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setRangePartitioned(ordering2, dist2);
            assertTrue(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used : RequestedGlobalProperties(org.apache.flink.optimizer.dataproperties.RequestedGlobalProperties) GlobalProperties(org.apache.flink.optimizer.dataproperties.GlobalProperties) Ordering(org.apache.flink.api.common.operators.Ordering) Partitioner(org.apache.flink.api.common.functions.Partitioner) FieldList(org.apache.flink.api.common.operators.util.FieldList) Test(org.junit.Test)

Example 3 with Partitioner

Use of org.apache.flink.api.common.functions.Partitioner in project flink by apache.

Source: class CustomPartitioningTest, method testPartitionKeySelectorInvalidType.

@Test
public void testPartitionKeySelectorInvalidType() {
    try {
        final Partitioner<Integer> part = (Partitioner<Integer>) (Partitioner<?>) new TestPartitionerLong();
        final int parallelism = 4;
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(parallelism);
        DataSet<Pojo> data = env.fromElements(new Pojo()).rebalance();
        try {
            data.partitionCustom(part, new TestKeySelectorInt<Pojo>());
            fail("Should throw an exception");
        } catch (InvalidProgramException e) {
        // expected
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used : ExecutionEnvironment(org.apache.flink.api.java.ExecutionEnvironment) InvalidProgramException(org.apache.flink.api.common.InvalidProgramException) Partitioner(org.apache.flink.api.common.functions.Partitioner) Test(org.junit.Test)
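
For contrast with the failing call above, the sketch below shows a hypothetical, self-contained positive case in which the key selector's key type (Long) matches the partitioner's key type, so partitionCustom accepts it. The Event POJO and both anonymous functions are illustrative stand-ins, not the test's own TestPartitionerLong/TestKeySelectorInt helpers.

import org.apache.flink.api.common.functions.Partitioner;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.functions.KeySelector;

public class MatchingKeyTypeSketch {

    // Simple POJO with a long key field (illustrative).
    public static class Event {
        public long id;

        public Event() {
        }

        public Event(long id) {
            this.id = id;
        }
    }

    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        DataSet<Event> data = env.fromElements(new Event(1L), new Event(2L));

        // Key selector and partitioner agree on Long, so no InvalidProgramException is thrown.
        data.partitionCustom(
                new Partitioner<Long>() {
                    @Override
                    public int partition(Long key, int numPartitions) {
                        return (int) (key % numPartitions);
                    }
                },
                new KeySelector<Event, Long>() {
                    @Override
                    public Long getKey(Event value) {
                        return value.id;
                    }
                })
            .print();
    }
}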

Example 4 with Partitioner

Use of org.apache.flink.api.common.functions.Partitioner in project flink by apache.

Source: class DataStreamTest, method testPartitioning.

/**
 * Tests that {@link DataStream#keyBy} and {@link DataStream#partitionCustom(Partitioner, int)}
 * result in different and correct topologies. Does the same for the {@link ConnectedStreams}.
 */
@Test
public void testPartitioning() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<Tuple2<Long, Long>> src1 = env.fromElements(new Tuple2<>(0L, 0L));
    DataStream<Tuple2<Long, Long>> src2 = env.fromElements(new Tuple2<>(0L, 0L));
    ConnectedStreams<Tuple2<Long, Long>, Tuple2<Long, Long>> connected = src1.connect(src2);
    // Testing DataStream grouping
    DataStream<Tuple2<Long, Long>> group1 = src1.keyBy(0);
    DataStream<Tuple2<Long, Long>> group2 = src1.keyBy(1, 0);
    DataStream<Tuple2<Long, Long>> group3 = src1.keyBy("f0");
    DataStream<Tuple2<Long, Long>> group4 = src1.keyBy(new FirstSelector());
    int id1 = createDownStreamId(group1);
    int id2 = createDownStreamId(group2);
    int id3 = createDownStreamId(group3);
    int id4 = createDownStreamId(group4);
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src1.getId(), id1)));
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src1.getId(), id2)));
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src1.getId(), id3)));
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src1.getId(), id4)));
    assertTrue(isKeyed(group1));
    assertTrue(isKeyed(group2));
    assertTrue(isKeyed(group3));
    assertTrue(isKeyed(group4));
    // Testing DataStream partitioning
    DataStream<Tuple2<Long, Long>> partition1 = src1.keyBy(0);
    DataStream<Tuple2<Long, Long>> partition2 = src1.keyBy(1, 0);
    DataStream<Tuple2<Long, Long>> partition3 = src1.keyBy("f0");
    DataStream<Tuple2<Long, Long>> partition4 = src1.keyBy(new FirstSelector());
    int pid1 = createDownStreamId(partition1);
    int pid2 = createDownStreamId(partition2);
    int pid3 = createDownStreamId(partition3);
    int pid4 = createDownStreamId(partition4);
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src1.getId(), pid1)));
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src1.getId(), pid2)));
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src1.getId(), pid3)));
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src1.getId(), pid4)));
    assertTrue(isKeyed(partition1));
    assertTrue(isKeyed(partition3));
    assertTrue(isKeyed(partition2));
    assertTrue(isKeyed(partition4));
    // Testing DataStream custom partitioning
    Partitioner<Long> longPartitioner = new Partitioner<Long>() {

        @Override
        public int partition(Long key, int numPartitions) {
            return 100;
        }
    };
    DataStream<Tuple2<Long, Long>> customPartition1 = src1.partitionCustom(longPartitioner, 0);
    DataStream<Tuple2<Long, Long>> customPartition3 = src1.partitionCustom(longPartitioner, "f0");
    DataStream<Tuple2<Long, Long>> customPartition4 = src1.partitionCustom(longPartitioner, new FirstSelector());
    int cid1 = createDownStreamId(customPartition1);
    int cid2 = createDownStreamId(customPartition3);
    int cid3 = createDownStreamId(customPartition4);
    assertTrue(isCustomPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src1.getId(), cid1)));
    assertTrue(isCustomPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src1.getId(), cid2)));
    assertTrue(isCustomPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src1.getId(), cid3)));
    assertFalse(isKeyed(customPartition1));
    assertFalse(isKeyed(customPartition3));
    assertFalse(isKeyed(customPartition4));
    // Testing ConnectedStreams grouping
    ConnectedStreams<Tuple2<Long, Long>, Tuple2<Long, Long>> connectedGroup1 = connected.keyBy(0, 0);
    Integer downStreamId1 = createDownStreamId(connectedGroup1);
    ConnectedStreams<Tuple2<Long, Long>, Tuple2<Long, Long>> connectedGroup2 = connected.keyBy(new int[] { 0 }, new int[] { 0 });
    Integer downStreamId2 = createDownStreamId(connectedGroup2);
    ConnectedStreams<Tuple2<Long, Long>, Tuple2<Long, Long>> connectedGroup3 = connected.keyBy("f0", "f0");
    Integer downStreamId3 = createDownStreamId(connectedGroup3);
    ConnectedStreams<Tuple2<Long, Long>, Tuple2<Long, Long>> connectedGroup4 = connected.keyBy(new String[] { "f0" }, new String[] { "f0" });
    Integer downStreamId4 = createDownStreamId(connectedGroup4);
    ConnectedStreams<Tuple2<Long, Long>, Tuple2<Long, Long>> connectedGroup5 = connected.keyBy(new FirstSelector(), new FirstSelector());
    Integer downStreamId5 = createDownStreamId(connectedGroup5);
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src1.getId(), downStreamId1)));
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src2.getId(), downStreamId1)));
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src1.getId(), downStreamId2)));
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src2.getId(), downStreamId2)));
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src1.getId(), downStreamId3)));
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src2.getId(), downStreamId3)));
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src1.getId(), downStreamId4)));
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src2.getId(), downStreamId4)));
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src1.getId(), downStreamId5)));
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src2.getId(), downStreamId5)));
    assertTrue(isKeyed(connectedGroup1));
    assertTrue(isKeyed(connectedGroup2));
    assertTrue(isKeyed(connectedGroup3));
    assertTrue(isKeyed(connectedGroup4));
    assertTrue(isKeyed(connectedGroup5));
    // Testing ConnectedStreams partitioning
    ConnectedStreams<Tuple2<Long, Long>, Tuple2<Long, Long>> connectedPartition1 = connected.keyBy(0, 0);
    Integer connectDownStreamId1 = createDownStreamId(connectedPartition1);
    ConnectedStreams<Tuple2<Long, Long>, Tuple2<Long, Long>> connectedPartition2 = connected.keyBy(new int[] { 0 }, new int[] { 0 });
    Integer connectDownStreamId2 = createDownStreamId(connectedPartition2);
    ConnectedStreams<Tuple2<Long, Long>, Tuple2<Long, Long>> connectedPartition3 = connected.keyBy("f0", "f0");
    Integer connectDownStreamId3 = createDownStreamId(connectedPartition3);
    ConnectedStreams<Tuple2<Long, Long>, Tuple2<Long, Long>> connectedPartition4 = connected.keyBy(new String[] { "f0" }, new String[] { "f0" });
    Integer connectDownStreamId4 = createDownStreamId(connectedPartition4);
    ConnectedStreams<Tuple2<Long, Long>, Tuple2<Long, Long>> connectedPartition5 = connected.keyBy(new FirstSelector(), new FirstSelector());
    Integer connectDownStreamId5 = createDownStreamId(connectedPartition5);
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src1.getId(), connectDownStreamId1)));
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src2.getId(), connectDownStreamId1)));
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src1.getId(), connectDownStreamId2)));
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src2.getId(), connectDownStreamId2)));
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src1.getId(), connectDownStreamId3)));
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src2.getId(), connectDownStreamId3)));
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src1.getId(), connectDownStreamId4)));
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src2.getId(), connectDownStreamId4)));
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src1.getId(), connectDownStreamId5)));
    assertTrue(isPartitioned(getStreamGraph(env).getStreamEdgesOrThrow(src2.getId(), connectDownStreamId5)));
    assertTrue(isKeyed(connectedPartition1));
    assertTrue(isKeyed(connectedPartition2));
    assertTrue(isKeyed(connectedPartition3));
    assertTrue(isKeyed(connectedPartition4));
    assertTrue(isKeyed(connectedPartition5));
}
Also used : Tuple2(org.apache.flink.api.java.tuple.Tuple2) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) BroadcastPartitioner(org.apache.flink.streaming.runtime.partitioner.BroadcastPartitioner) ShufflePartitioner(org.apache.flink.streaming.runtime.partitioner.ShufflePartitioner) ForwardPartitioner(org.apache.flink.streaming.runtime.partitioner.ForwardPartitioner) GlobalPartitioner(org.apache.flink.streaming.runtime.partitioner.GlobalPartitioner) Partitioner(org.apache.flink.api.common.functions.Partitioner) StreamPartitioner(org.apache.flink.streaming.runtime.partitioner.StreamPartitioner) RebalancePartitioner(org.apache.flink.streaming.runtime.partitioner.RebalancePartitioner) KeyGroupStreamPartitioner(org.apache.flink.streaming.runtime.partitioner.KeyGroupStreamPartitioner) Test(org.junit.Test)
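
As a minimal standalone sketch of the custom-partitioning path this test asserts on, the job below uses the same partitionCustom(Partitioner, int) overload the test itself calls (assuming a Flink release where that overload is available; the job body is illustrative). Unlike keyBy, the result is only physically routed by the user-defined Partitioner and is not a keyed stream, which is what the assertFalse(isKeyed(...)) checks above verify.

import org.apache.flink.api.common.functions.Partitioner;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class StreamCustomPartitioningSketch {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple2<Long, Long>> src =
                env.fromElements(new Tuple2<>(0L, 0L), new Tuple2<>(1L, 1L));

        // Route records by the first tuple field through a user-defined Partitioner.
        DataStream<Tuple2<Long, Long>> custom = src.partitionCustom(
                new Partitioner<Long>() {
                    @Override
                    public int partition(Long key, int numPartitions) {
                        return (int) (key % numPartitions);
                    }
                },
                0);

        custom.print();
        env.execute("custom partitioning sketch");
    }
}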

Example 5 with Partitioner

Use of org.apache.flink.api.common.functions.Partitioner in project flink by apache.

Source: class JoinGlobalPropertiesCompatibilityTest, method checkInompatiblePartitionings.

@Test
public void checkInompatiblePartitionings() {
    try {
        final FieldList keysLeft = new FieldList(1);
        final FieldList keysRight = new FieldList(3);
        final Partitioner<Object> part = new Partitioner<Object>() {

            @Override
            public int partition(Object key, int numPartitions) {
                return 0;
            }
        };
        final Partitioner<Object> part2 = new Partitioner<Object>() {

            @Override
            public int partition(Object key, int numPartitions) {
                return 0;
            }
        };
        SortMergeInnerJoinDescriptor descr = new SortMergeInnerJoinDescriptor(keysLeft, keysRight);
        // test incompatible hash with custom partitioning
        {
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setAnyPartitioning(keysLeft);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setAnyPartitioning(keysRight);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setHashPartitioned(keysLeft);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setCustomPartitioned(keysRight, part);
            assertFalse(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }
        // test incompatible custom partitionings
        {
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setAnyPartitioning(keysLeft);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setAnyPartitioning(keysRight);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setCustomPartitioned(keysLeft, part);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setCustomPartitioned(keysRight, part2);
            assertFalse(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }
        TestDistribution dist1 = new TestDistribution(1);
        TestDistribution dist2 = new TestDistribution(1);
        // test incompatible range partitioning with different key size
        {
            Ordering ordering1 = new Ordering();
            for (int field : keysLeft) {
                ordering1.appendOrdering(field, null, Order.ASCENDING);
            }
            Ordering ordering2 = new Ordering();
            for (int field : keysRight) {
                ordering1.appendOrdering(field, null, Order.ASCENDING);
                ordering2.appendOrdering(field, null, Order.ASCENDING);
            }
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setRangePartitioned(ordering1, dist1);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setRangePartitioned(ordering2, dist2);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setRangePartitioned(ordering1, dist1);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setRangePartitioned(ordering2, dist2);
            assertFalse(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }
        // test incompatible range partitioning with different ordering
        {
            Ordering ordering1 = new Ordering();
            for (int field : keysLeft) {
                ordering1.appendOrdering(field, null, Order.ASCENDING);
            }
            Ordering ordering2 = new Ordering();
            for (int field : keysRight) {
                ordering2.appendOrdering(field, null, Order.DESCENDING);
            }
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setRangePartitioned(ordering1, dist1);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setRangePartitioned(ordering2, dist2);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setRangePartitioned(ordering1, dist1);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setRangePartitioned(ordering2, dist2);
            assertFalse(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }
        TestDistribution dist3 = new TestDistribution(1);
        TestDistribution dist4 = new TestDistribution(2);
        // test incompatible range partitioning with different distribution
        {
            Ordering ordering1 = new Ordering();
            for (int field : keysLeft) {
                ordering1.appendOrdering(field, null, Order.ASCENDING);
            }
            Ordering ordering2 = new Ordering();
            for (int field : keysRight) {
                ordering2.appendOrdering(field, null, Order.ASCENDING);
            }
            RequestedGlobalProperties reqLeft = new RequestedGlobalProperties();
            reqLeft.setRangePartitioned(ordering1, dist3);
            RequestedGlobalProperties reqRight = new RequestedGlobalProperties();
            reqRight.setRangePartitioned(ordering2, dist4);
            GlobalProperties propsLeft = new GlobalProperties();
            propsLeft.setRangePartitioned(ordering1, dist3);
            GlobalProperties propsRight = new GlobalProperties();
            propsRight.setRangePartitioned(ordering2, dist4);
            assertFalse(descr.areCompatible(reqLeft, reqRight, propsLeft, propsRight));
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used : RequestedGlobalProperties(org.apache.flink.optimizer.dataproperties.RequestedGlobalProperties) GlobalProperties(org.apache.flink.optimizer.dataproperties.GlobalProperties) Ordering(org.apache.flink.api.common.operators.Ordering) Partitioner(org.apache.flink.api.common.functions.Partitioner) FieldList(org.apache.flink.api.common.operators.util.FieldList) Test(org.junit.Test)
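
A pattern worth noting across these tests: the compatible custom-partitioning cases reuse a single Partitioner instance for both inputs, while the incompatible case above constructs two separate anonymous Partitioners that do not compare equal. Below is a minimal sketch of the compatible arrangement, reusing the same calls as the tests (the class and variable names are illustrative):

import org.apache.flink.api.common.functions.Partitioner;
import org.apache.flink.api.common.operators.util.FieldList;
import org.apache.flink.optimizer.dataproperties.GlobalProperties;

class SharedPartitionerSketch {

    static void declareCompatibleCustomPartitioning() {
        FieldList keysLeft = new FieldList(1);
        FieldList keysRight = new FieldList(3);

        // Create the custom Partitioner once ...
        Partitioner<Object> sharedPart = new Partitioner<Object>() {
            @Override
            public int partition(Object key, int numPartitions) {
                return 0;
            }
        };

        // ... and hand the same instance to both sides; two separately
        // instantiated anonymous Partitioners would not compare equal.
        GlobalProperties propsLeft = new GlobalProperties();
        propsLeft.setCustomPartitioned(keysLeft, sharedPart);

        GlobalProperties propsRight = new GlobalProperties();
        propsRight.setCustomPartitioned(keysRight, sharedPart);
    }
}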

Aggregations

Partitioner (org.apache.flink.api.common.functions.Partitioner): 10
Test (org.junit.Test): 10
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 5
Ordering (org.apache.flink.api.common.operators.Ordering): 4
FieldList (org.apache.flink.api.common.operators.util.FieldList): 4
ExecutionEnvironment (org.apache.flink.api.java.ExecutionEnvironment): 4
GlobalProperties (org.apache.flink.optimizer.dataproperties.GlobalProperties): 4
RequestedGlobalProperties (org.apache.flink.optimizer.dataproperties.RequestedGlobalProperties): 4
Plan (org.apache.flink.api.common.Plan): 3
OptimizedPlan (org.apache.flink.optimizer.plan.OptimizedPlan): 3
SingleInputPlanNode (org.apache.flink.optimizer.plan.SingleInputPlanNode): 3
SinkPlanNode (org.apache.flink.optimizer.plan.SinkPlanNode): 3
Tuple3 (org.apache.flink.api.java.tuple.Tuple3): 2
DualInputPlanNode (org.apache.flink.optimizer.plan.DualInputPlanNode): 2
JobGraphGenerator (org.apache.flink.optimizer.plantranslate.JobGraphGenerator): 2
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment): 2
InvalidProgramException (org.apache.flink.api.common.InvalidProgramException): 1
MapFunction (org.apache.flink.api.common.functions.MapFunction): 1
RichMapFunction (org.apache.flink.api.common.functions.RichMapFunction): 1
Tuple1 (org.apache.flink.api.java.tuple.Tuple1): 1