
Example 1 with CalciteConfig

Use of org.apache.flink.table.planner.calcite.CalciteConfig in project flink by apache, from the setup method of the class PushFilterIntoTableSourceScanRuleTest:

@Before
public void setup() {
    util = batchTestUtil(TableConfig.getDefault());
    ((BatchTableTestUtil) util).buildBatchProgram(FlinkBatchProgram.DEFAULT_REWRITE());
    CalciteConfig calciteConfig = TableConfigUtils.getCalciteConfig(util.tableEnv().getConfig());
    // Append the rules under test to the end of the batch optimization program.
    calciteConfig
            .getBatchProgram()
            .get()
            .addLast(
                    "rules",
                    FlinkHepRuleSetProgramBuilder.<BatchOptimizeContext>newBuilder()
                            .setHepRulesExecutionType(HEP_RULES_EXECUTION_TYPE.RULE_COLLECTION())
                            .setHepMatchOrder(HepMatchOrder.BOTTOM_UP)
                            .add(
                                    RuleSets.ofList(
                                            PushFilterIntoTableSourceScanRule.INSTANCE,
                                            CoreRules.FILTER_PROJECT_TRANSPOSE))
                            .build());
    // name: STRING, id: LONG, amount: INT, price: DOUBLE
    String ddl1 =
            "CREATE TABLE MyTable (\n"
                    + "  name STRING,\n"
                    + "  id bigint,\n"
                    + "  amount int,\n"
                    + "  price double\n"
                    + ") WITH (\n"
                    + " 'connector' = 'values',\n"
                    + " 'filterable-fields' = 'amount',\n"
                    + " 'bounded' = 'true'\n"
                    + ")";
    util.tableEnv().executeSql(ddl1);
    String ddl2 =
            "CREATE TABLE VirtualTable (\n"
                    + "  name STRING,\n"
                    + "  id bigint,\n"
                    + "  amount int,\n"
                    + "  virtualField as amount + 1,\n"
                    + "  price double\n"
                    + ") WITH (\n"
                    + " 'connector' = 'values',\n"
                    + " 'filterable-fields' = 'amount',\n"
                    + " 'bounded' = 'true'\n"
                    + ")";
    util.tableEnv().executeSql(ddl2);
}
Also used : BatchTableTestUtil(org.apache.flink.table.planner.utils.BatchTableTestUtil) CalciteConfig(org.apache.flink.table.planner.calcite.CalciteConfig) Before(org.junit.Before)
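
As a usage sketch (not part of the Flink sources): a hypothetical test method that could live in the same class and exercise the program built above. The method name and query are illustrative; verifyRelPlan is the plan-assertion method on Flink's planner TableTestUtil (exposed as verifyPlan in older releases).

@Test
public void testFilterPushdown() {
    // 'amount' is listed in 'filterable-fields', so its predicate should be
    // pushed into the values source scan, while the predicate on 'price'
    // remains as a Filter node in the optimized plan.
    util.verifyRelPlan("SELECT * FROM MyTable WHERE amount > 2 AND price > 10");
}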

Example 2 with CalciteConfig

Use of org.apache.flink.table.planner.calcite.CalciteConfig in project flink by apache, from the setup method of the class PushPartitionIntoTableSourceScanRuleTest:

@Override
public void setup() throws Exception {
    util().buildBatchProgram(FlinkBatchProgram.DEFAULT_REWRITE());
    CalciteConfig calciteConfig = TableConfigUtils.getCalciteConfig(util().tableEnv().getConfig());
    // Append the rules under test to the end of the batch optimization program.
    calciteConfig
            .getBatchProgram()
            .get()
            .addLast(
                    "rules",
                    FlinkHepRuleSetProgramBuilder.<BatchOptimizeContext>newBuilder()
                            .setHepRulesExecutionType(HEP_RULES_EXECUTION_TYPE.RULE_SEQUENCE())
                            .setHepMatchOrder(HepMatchOrder.BOTTOM_UP)
                            .add(
                                    RuleSets.ofList(
                                            CoreRules.FILTER_PROJECT_TRANSPOSE,
                                            PushPartitionIntoTableSourceScanRule.INSTANCE))
                            .build());
    // define ddl
    String ddlTemp =
            "CREATE TABLE MyTable (\n"
                    + "  id int,\n"
                    + "  name string,\n"
                    + "  part1 string,\n"
                    + "  part2 int)\n"
                    + "  partitioned by (part1, part2)\n"
                    + "  WITH (\n"
                    + " 'connector' = 'values',\n"
                    + " 'bounded' = 'true',\n"
                    + " 'partition-list' = '%s'"
                    + ")";
    String ddlTempWithVirtualColumn =
            "CREATE TABLE VirtualTable (\n"
                    + "  id int,\n"
                    + "  name string,\n"
                    + "  part1 string,\n"
                    + "  part2 int,\n"
                    + "  virtualField AS part2 + 1)\n"
                    + "  partitioned by (part1, part2)\n"
                    + "  WITH (\n"
                    + " 'connector' = 'values',\n"
                    + " 'bounded' = 'true',\n"
                    + " 'partition-list' = '%s'"
                    + ")";
    if (sourceFetchPartitions()) {
        String partitionString = "part1:A,part2:1;part1:A,part2:2;part1:B,part2:3;part1:C,part2:1";
        util().tableEnv().executeSql(String.format(ddlTemp, partitionString));
        util().tableEnv().executeSql(String.format(ddlTempWithVirtualColumn, partitionString));
    } else {
        TestValuesCatalog catalog = new TestValuesCatalog("test_catalog", "test_database", useCatalogFilter());
        util().tableEnv().registerCatalog("test_catalog", catalog);
        util().tableEnv().useCatalog("test_catalog");
        // register table without partitions
        util().tableEnv().executeSql(String.format(ddlTemp, ""));
        util().tableEnv().executeSql(String.format(ddlTempWithVirtualColumn, ""));
        ObjectPath mytablePath = ObjectPath.fromString("test_database.MyTable");
        ObjectPath virtualTablePath = ObjectPath.fromString("test_database.VirtualTable");
        // partition map
        List<Map<String, String>> partitions =
                Arrays.asList(
                        new HashMap<String, String>() {
                            {
                                put("part1", "A");
                                put("part2", "1");
                            }
                        },
                        new HashMap<String, String>() {
                            {
                                put("part1", "A");
                                put("part2", "2");
                            }
                        },
                        new HashMap<String, String>() {
                            {
                                put("part1", "B");
                                put("part2", "3");
                            }
                        },
                        new HashMap<String, String>() {
                            {
                                put("part1", "C");
                                put("part2", "1");
                            }
                        });
        for (Map<String, String> partition : partitions) {
            CatalogPartitionSpec catalogPartitionSpec = new CatalogPartitionSpec(partition);
            CatalogPartition catalogPartition = new CatalogPartitionImpl(new HashMap<>(), "");
            catalog.createPartition(mytablePath, catalogPartitionSpec, catalogPartition, true);
            catalog.createPartition(virtualTablePath, catalogPartitionSpec, catalogPartition, true);
        }
    }
}
Also used : ObjectPath(org.apache.flink.table.catalog.ObjectPath) CalciteConfig(org.apache.flink.table.planner.calcite.CalciteConfig) CatalogPartition(org.apache.flink.table.catalog.CatalogPartition) TestValuesCatalog(org.apache.flink.table.planner.factories.TestValuesCatalog) HashMap(java.util.HashMap) Map(java.util.Map) CatalogPartitionSpec(org.apache.flink.table.catalog.CatalogPartitionSpec) CatalogPartitionImpl(org.apache.flink.table.catalog.CatalogPartitionImpl)
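
A hypothetical usage sketch (not from the Flink sources) of how this setup is exercised; method name and query are illustrative:

@Test
public void testPartitionPruning() {
    // After PushPartitionIntoTableSourceScanRule fires, only the partitions
    // with part1 = 'A' (i.e. part2 = 1 and part2 = 2) should remain in the
    // table source scan, whether they come from the source itself or from
    // the TestValuesCatalog registered above.
    util().verifyRelPlan("SELECT * FROM MyTable WHERE part1 = 'A'");
}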

Example 3 with CalciteConfig

Use of org.apache.flink.table.planner.calcite.CalciteConfig in project flink by apache, from the setup method of the class PushLimitIntoTableSourceScanRuleTest:

@Override
public void setup() {
    util().buildBatchProgram(FlinkBatchProgram.DEFAULT_REWRITE());
    CalciteConfig calciteConfig = TableConfigUtils.getCalciteConfig(util().tableEnv().getConfig());
    // Append the rules under test, plus the converters needed to translate the
    // Calcite rels into Flink logical rels (FlinkRelNode).
    calciteConfig
            .getBatchProgram()
            .get()
            .addLast(
                    "rules",
                    FlinkHepRuleSetProgramBuilder.<BatchOptimizeContext>newBuilder()
                            .setHepRulesExecutionType(HEP_RULES_EXECUTION_TYPE.RULE_COLLECTION())
                            .setHepMatchOrder(HepMatchOrder.BOTTOM_UP)
                            .add(
                                    RuleSets.ofList(
                                            PushLimitIntoTableSourceScanRule.INSTANCE,
                                            CoreRules.SORT_PROJECT_TRANSPOSE,
                                            // converts the rels to FlinkRelNode
                                            FlinkLogicalSort.BATCH_CONVERTER(),
                                            FlinkLogicalTableSourceScan.CONVERTER()))
                            .build());
    String ddl =
            "CREATE TABLE LimitTable (\n"
                    + "  a int,\n"
                    + "  b bigint,\n"
                    + "  c string\n"
                    + ") WITH (\n"
                    + " 'connector' = 'values',\n"
                    + " 'bounded' = 'true'\n"
                    + ")";
    util().tableEnv().executeSql(ddl);
}
Also used : CalciteConfig(org.apache.flink.table.planner.calcite.CalciteConfig)
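
A hypothetical usage sketch (not from the Flink sources); the converter rules registered above turn the Sort and Scan into their FlinkLogical counterparts so the limit can be pushed down:

@Test
public void testLimitPushdown() {
    // The fetch count carried by the FlinkLogicalSort should be folded into
    // the bounded values source by PushLimitIntoTableSourceScanRule.
    util().verifyRelPlan("SELECT a, c FROM LimitTable LIMIT 10");
}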

Example 4 with CalciteConfig

Use of org.apache.flink.table.planner.calcite.CalciteConfig in project flink by apache, from the setup method of the class PushProjectIntoTableSourceScanRuleTest:

@Override
public void setup() {
    util().buildBatchProgram(FlinkBatchProgram.DEFAULT_REWRITE());
    CalciteConfig calciteConfig = TableConfigUtils.getCalciteConfig(util().tableEnv().getConfig());
    // Append the rule under test to the end of the batch optimization program.
    calciteConfig
            .getBatchProgram()
            .get()
            .addLast(
                    "rules",
                    FlinkHepRuleSetProgramBuilder.<BatchOptimizeContext>newBuilder()
                            .setHepRulesExecutionType(HEP_RULES_EXECUTION_TYPE.RULE_SEQUENCE())
                            .setHepMatchOrder(HepMatchOrder.BOTTOM_UP)
                            .add(RuleSets.ofList(PushProjectIntoTableSourceScanRule.INSTANCE))
                            .build());
    String ddl1 =
            "CREATE TABLE MyTable (\n"
                    + "  a int,\n"
                    + "  b bigint,\n"
                    + "  c string\n"
                    + ") WITH (\n"
                    + " 'connector' = 'values',\n"
                    + " 'bounded' = 'true'\n"
                    + ")";
    util().tableEnv().executeSql(ddl1);
    String ddl2 =
            "CREATE TABLE VirtualTable (\n"
                    + "  a int,\n"
                    + "  b bigint,\n"
                    + "  c string,\n"
                    + "  d as a + 1\n"
                    + ") WITH (\n"
                    + " 'connector' = 'values',\n"
                    + " 'bounded' = 'true'\n"
                    + ")";
    util().tableEnv().executeSql(ddl2);
    String ddl3 =
            "CREATE TABLE NestedTable (\n"
                    + "  id int,\n"
                    + "  deepNested row<nested1 row<name string, `value` int>, nested2 row<num int, flag boolean>>,\n"
                    + "  nested row<name string, `value` int>,\n"
                    + "  `deepNestedWith.` row<`.value` int, nested row<name string, `.value` int>>,\n"
                    + "  name string,\n"
                    + "  testMap Map<string, string>\n"
                    + ") WITH (\n"
                    + " 'connector' = 'values',\n"
                    + " 'nested-projection-supported' = 'true',"
                    + " 'bounded' = 'true'\n"
                    + ")";
    util().tableEnv().executeSql(ddl3);
    String ddl4 =
            "CREATE TABLE MetadataTable(\n"
                    + "  id int,\n"
                    + "  deepNested row<nested1 row<name string, `value` int>, nested2 row<num int, flag boolean>>,\n"
                    + "  metadata_1 int metadata,\n"
                    + "  metadata_2 string metadata\n"
                    + ") WITH ("
                    + " 'connector' = 'values',"
                    + " 'nested-projection-supported' = 'true',"
                    + " 'bounded' = 'true',\n"
                    + " 'readable-metadata' = 'metadata_1:INT, metadata_2:STRING, metadata_3:BIGINT'"
                    + ")";
    util().tableEnv().executeSql(ddl4);
    String ddl5 =
            "CREATE TABLE UpsertTable("
                    + "  id int,\n"
                    + "  deepNested row<nested1 row<name string, `value` int>, nested2 row<num int, flag boolean>>,\n"
                    + "  metadata_1 int metadata,\n"
                    + "  metadata_2 string metadata,\n"
                    + "  PRIMARY KEY(id, deepNested) NOT ENFORCED"
                    + ") WITH ("
                    + "  'connector' = 'values',"
                    + "  'nested-projection-supported' = 'true',"
                    + "  'bounded' = 'false',\n"
                    + "  'changelog-mode' = 'I,UB,D',"
                    + " 'readable-metadata' = 'metadata_1:INT, metadata_2:STRING, metadata_3:BIGINT'"
                    + ")";
    util().tableEnv().executeSql(ddl5);
    String ddl6 =
            "CREATE TABLE NestedItemTable (\n"
                    + "  `ID` INT,\n"
                    + "  `Timestamp` TIMESTAMP(3),\n"
                    + "  `Result` ROW<\n"
                    + "    `Mid` ROW<"
                    + "      `data_arr` ROW<`value` BIGINT> ARRAY,\n"
                    + "      `data_map` MAP<STRING, ROW<`value` BIGINT>>"
                    + "     >"
                    + "   >,\n"
                    + "   WATERMARK FOR `Timestamp` AS `Timestamp`\n"
                    + ") WITH (\n"
                    + " 'connector' = 'values',\n"
                    + " 'nested-projection-supported' = 'true',"
                    + " 'bounded' = 'true'\n"
                    + ")";
    util().tableEnv().executeSql(ddl6);
    String ddl7 =
            "CREATE TABLE ItemTable (\n"
                    + "  `ID` INT,\n"
                    + "  `Timestamp` TIMESTAMP(3),\n"
                    + "  `Result` ROW<\n"
                    + "    `data_arr` ROW<`value` BIGINT> ARRAY,\n"
                    + "    `data_map` MAP<STRING, ROW<`value` BIGINT>>>,\n"
                    + "  `outer_array` ARRAY<INT>,\n"
                    + "  `outer_map` MAP<STRING, STRING>,\n"
                    + "   WATERMARK FOR `Timestamp` AS `Timestamp`\n"
                    + ") WITH (\n"
                    + " 'connector' = 'values',\n"
                    + " 'bounded' = 'true'\n"
                    + ")";
    util().tableEnv().executeSql(ddl7);
}
Also used : CalciteConfig(org.apache.flink.table.planner.calcite.CalciteConfig)
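
A hypothetical usage sketch (not from the Flink sources) for the nested-projection case; method name and query are illustrative:

@Test
public void testNestedProjectionPushdown() {
    // Because NestedTable declares 'nested-projection-supported' = 'true',
    // only the accessed leaf fields (id and deepNested.nested1.name) should
    // remain in the produced type of the table source scan.
    util().verifyRelPlan("SELECT id, deepNested.nested1.name FROM NestedTable");
}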

Aggregations

CalciteConfig (org.apache.flink.table.planner.calcite.CalciteConfig): 4 usages
HashMap (java.util.HashMap): 1 usage
Map (java.util.Map): 1 usage
CatalogPartition (org.apache.flink.table.catalog.CatalogPartition): 1 usage
CatalogPartitionImpl (org.apache.flink.table.catalog.CatalogPartitionImpl): 1 usage
CatalogPartitionSpec (org.apache.flink.table.catalog.CatalogPartitionSpec): 1 usage
ObjectPath (org.apache.flink.table.catalog.ObjectPath): 1 usage
TestValuesCatalog (org.apache.flink.table.planner.factories.TestValuesCatalog): 1 usage
BatchTableTestUtil (org.apache.flink.table.planner.utils.BatchTableTestUtil): 1 usage
Before (org.junit.Before): 1 usage