Search in sources:

Example 1 with CatalogPartitionImpl

Use of org.apache.flink.table.catalog.CatalogPartitionImpl in project flink by apache.

The method convertAlterTableProps in the class HiveParserDDLSemanticAnalyzer:

private Operation convertAlterTableProps(CatalogBaseTable oldBaseTable, String tableName, Map<String, String> partSpec, Map<String, String> newProps) {
    ObjectIdentifier tableIdentifier = parseObjectIdentifier(tableName);
    CatalogTable oldTable = (CatalogTable) oldBaseTable;
    CatalogPartitionSpec catalogPartitionSpec = partSpec != null ? new CatalogPartitionSpec(partSpec) : null;
    CatalogPartition catalogPartition = partSpec != null ? getPartition(tableIdentifier, catalogPartitionSpec) : null;
    Map<String, String> props = new HashMap<>();
    if (catalogPartition != null) {
        props.putAll(catalogPartition.getProperties());
        props.putAll(newProps);
        return new AlterPartitionPropertiesOperation(tableIdentifier, catalogPartitionSpec, new CatalogPartitionImpl(props, catalogPartition.getComment()));
    } else {
        props.putAll(oldTable.getOptions());
        props.putAll(newProps);
        return new AlterTableOptionsOperation(tableIdentifier, oldTable.copy(props));
    }
}
Also used: AlterTableOptionsOperation (org.apache.flink.table.operations.ddl.AlterTableOptionsOperation), LinkedHashMap (java.util.LinkedHashMap), HashMap (java.util.HashMap), CatalogPartition (org.apache.flink.table.catalog.CatalogPartition), AlterPartitionPropertiesOperation (org.apache.flink.table.operations.ddl.AlterPartitionPropertiesOperation), CatalogTable (org.apache.flink.table.catalog.CatalogTable), CatalogPartitionSpec (org.apache.flink.table.catalog.CatalogPartitionSpec), ObjectIdentifier (org.apache.flink.table.catalog.ObjectIdentifier), CatalogPartitionImpl (org.apache.flink.table.catalog.CatalogPartitionImpl)
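Both branches follow the same merge-and-wrap pattern: copy the current properties, overlay the new ones, and rebuild the catalog object. A minimal sketch of that pattern for the partition branch, where withMergedProps is a hypothetical helper (not part of Flink) and its inputs are assumed:

// Hypothetical helper, not Flink API: overlay newProps on top of the current
// partition properties and wrap the result in a fresh CatalogPartitionImpl,
// keeping the existing comment (the same pattern as convertAlterTableProps above).
private static CatalogPartition withMergedProps(
        CatalogPartition current, Map<String, String> newProps) {
    Map<String, String> merged = new HashMap<>(current.getProperties());
    merged.putAll(newProps); // later puts win, so new values override existing ones
    return new CatalogPartitionImpl(merged, current.getComment());
}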

Example 2 with CatalogPartitionImpl

Use of org.apache.flink.table.catalog.CatalogPartitionImpl in project flink by apache.

The method getPartition in the class HiveCatalog:

@Override
public CatalogPartition getPartition(ObjectPath tablePath, CatalogPartitionSpec partitionSpec) throws PartitionNotExistException, CatalogException {
    checkNotNull(tablePath, "Table path cannot be null");
    checkNotNull(partitionSpec, "CatalogPartitionSpec cannot be null");
    try {
        Partition hivePartition = getHivePartition(tablePath, partitionSpec);
        Map<String, String> properties = hivePartition.getParameters();
        properties.put(SqlCreateHiveTable.TABLE_LOCATION_URI, hivePartition.getSd().getLocation());
        String comment = properties.remove(HiveCatalogConfig.COMMENT);
        return new CatalogPartitionImpl(properties, comment);
    } catch (NoSuchObjectException | MetaException | TableNotExistException | PartitionSpecInvalidException e) {
        throw new PartitionNotExistException(getName(), tablePath, partitionSpec, e);
    } catch (TException e) {
        throw new CatalogException(String.format("Failed to get partition %s of table %s", partitionSpec, tablePath), e);
    }
}
Also used: TException (org.apache.thrift.TException), Partition (org.apache.hadoop.hive.metastore.api.Partition), CatalogPartition (org.apache.flink.table.catalog.CatalogPartition), TableNotExistException (org.apache.flink.table.catalog.exceptions.TableNotExistException), CatalogException (org.apache.flink.table.catalog.exceptions.CatalogException), NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException), PartitionNotExistException (org.apache.flink.table.catalog.exceptions.PartitionNotExistException), PartitionSpecInvalidException (org.apache.flink.table.catalog.exceptions.PartitionSpecInvalidException), CatalogPartitionImpl (org.apache.flink.table.catalog.CatalogPartitionImpl), MetaException (org.apache.hadoop.hive.metastore.api.MetaException)
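From the caller's side, the returned object is a plain CatalogPartition: the storage location is exposed as an ordinary property and the comment through getComment(). A minimal caller sketch, assuming hiveCatalog is an already configured HiveCatalog and the table and partition names are made up:

ObjectPath tablePath = ObjectPath.fromString("default_database.orders"); // assumed table
CatalogPartitionSpec spec =
        new CatalogPartitionSpec(Collections.singletonMap("dt", "2024-01-01")); // assumed spec
try {
    CatalogPartition partition = hiveCatalog.getPartition(tablePath, spec);
    // the storage location was copied into the property map by getPartition above
    String location = partition.getProperties().get(SqlCreateHiveTable.TABLE_LOCATION_URI);
    String comment = partition.getComment();
} catch (PartitionNotExistException e) {
    // the spec did not resolve to an existing partition of the table
}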

Example 3 with CatalogPartitionImpl

Use of org.apache.flink.table.catalog.CatalogPartitionImpl in project flink by apache.

The method setup in the class PushPartitionIntoTableSourceScanRuleTest:

@Override
public void setup() throws Exception {
    util().buildBatchProgram(FlinkBatchProgram.DEFAULT_REWRITE());
    CalciteConfig calciteConfig = TableConfigUtils.getCalciteConfig(util().tableEnv().getConfig());
    calciteConfig.getBatchProgram().get().addLast("rules", FlinkHepRuleSetProgramBuilder.<BatchOptimizeContext>newBuilder().setHepRulesExecutionType(HEP_RULES_EXECUTION_TYPE.RULE_SEQUENCE()).setHepMatchOrder(HepMatchOrder.BOTTOM_UP).add(RuleSets.ofList(CoreRules.FILTER_PROJECT_TRANSPOSE, PushPartitionIntoTableSourceScanRule.INSTANCE)).build());
    // define ddl
    String ddlTemp = "CREATE TABLE MyTable (\n" + "  id int,\n" + "  name string,\n" + "  part1 string,\n" + "  part2 int)\n" + "  partitioned by (part1, part2)\n" + "  WITH (\n" + " 'connector' = 'values',\n" + " 'bounded' = 'true',\n" + " 'partition-list' = '%s'" + ")";
    String ddlTempWithVirtualColumn = "CREATE TABLE VirtualTable (\n" + "  id int,\n" + "  name string,\n" + "  part1 string,\n" + "  part2 int,\n" + "  virtualField AS part2 + 1)\n" + "  partitioned by (part1, part2)\n" + "  WITH (\n" + " 'connector' = 'values',\n" + " 'bounded' = 'true',\n" + " 'partition-list' = '%s'" + ")";
    if (sourceFetchPartitions()) {
        String partitionString = "part1:A,part2:1;part1:A,part2:2;part1:B,part2:3;part1:C,part2:1";
        util().tableEnv().executeSql(String.format(ddlTemp, partitionString));
        util().tableEnv().executeSql(String.format(ddlTempWithVirtualColumn, partitionString));
    } else {
        TestValuesCatalog catalog = new TestValuesCatalog("test_catalog", "test_database", useCatalogFilter());
        util().tableEnv().registerCatalog("test_catalog", catalog);
        util().tableEnv().useCatalog("test_catalog");
        // register table without partitions
        util().tableEnv().executeSql(String.format(ddlTemp, ""));
        util().tableEnv().executeSql(String.format(ddlTempWithVirtualColumn, ""));
        ObjectPath mytablePath = ObjectPath.fromString("test_database.MyTable");
        ObjectPath virtualTablePath = ObjectPath.fromString("test_database.VirtualTable");
        // partition map
        List<Map<String, String>> partitions = Arrays.asList(new HashMap<String, String>() {

            {
                put("part1", "A");
                put("part2", "1");
            }
        }, new HashMap<String, String>() {

            {
                put("part1", "A");
                put("part2", "2");
            }
        }, new HashMap<String, String>() {

            {
                put("part1", "B");
                put("part2", "3");
            }
        }, new HashMap<String, String>() {

            {
                put("part1", "C");
                put("part2", "1");
            }
        });
        for (Map<String, String> partition : partitions) {
            CatalogPartitionSpec catalogPartitionSpec = new CatalogPartitionSpec(partition);
            CatalogPartition catalogPartition = new CatalogPartitionImpl(new HashMap<>(), "");
            catalog.createPartition(mytablePath, catalogPartitionSpec, catalogPartition, true);
            catalog.createPartition(virtualTablePath, catalogPartitionSpec, catalogPartition, true);
        }
    }
}
Also used: ObjectPath (org.apache.flink.table.catalog.ObjectPath), CalciteConfig (org.apache.flink.table.planner.calcite.CalciteConfig), CatalogPartition (org.apache.flink.table.catalog.CatalogPartition), TestValuesCatalog (org.apache.flink.table.planner.factories.TestValuesCatalog), HashMap (java.util.HashMap), Map (java.util.Map), CatalogPartitionSpec (org.apache.flink.table.catalog.CatalogPartitionSpec), CatalogPartitionImpl (org.apache.flink.table.catalog.CatalogPartitionImpl)
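The four double-brace HashMap blocks only build the partition spec maps that mirror the partition-list string used in the first branch. Under the same assumptions (the test's catalog, table paths, and partition values), a hypothetical spec(...) helper expresses the registration loop more compactly:

// Hypothetical helper for this test, not Flink API.
private static Map<String, String> spec(String part1, String part2) {
    Map<String, String> m = new HashMap<>();
    m.put("part1", part1);
    m.put("part2", part2);
    return m;
}

// Registers the same four partitions as the loop above, each with empty properties.
for (Map<String, String> partition :
        Arrays.asList(spec("A", "1"), spec("A", "2"), spec("B", "3"), spec("C", "1"))) {
    CatalogPartitionSpec catalogPartitionSpec = new CatalogPartitionSpec(partition);
    CatalogPartition catalogPartition = new CatalogPartitionImpl(new HashMap<>(), "");
    catalog.createPartition(mytablePath, catalogPartitionSpec, catalogPartition, true);
    catalog.createPartition(virtualTablePath, catalogPartitionSpec, catalogPartition, true);
}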

Example 4 with CatalogPartitionImpl

Use of org.apache.flink.table.catalog.CatalogPartitionImpl in project flink by apache.

The method convertAlterTableOptions in the class SqlToOperationConverter:

private Operation convertAlterTableOptions(ObjectIdentifier tableIdentifier, CatalogTable oldTable, SqlAlterTableOptions alterTableOptions) {
    LinkedHashMap<String, String> partitionKVs = alterTableOptions.getPartitionKVs();
    // it's altering partitions
    if (partitionKVs != null) {
        CatalogPartitionSpec partitionSpec = new CatalogPartitionSpec(partitionKVs);
        CatalogPartition catalogPartition = catalogManager.getPartition(tableIdentifier, partitionSpec).orElseThrow(() -> new ValidationException(String.format("Partition %s of table %s doesn't exist", partitionSpec.getPartitionSpec(), tableIdentifier)));
        Map<String, String> newProps = new HashMap<>(catalogPartition.getProperties());
        newProps.putAll(OperationConverterUtils.extractProperties(alterTableOptions.getPropertyList()));
        return new AlterPartitionPropertiesOperation(tableIdentifier, partitionSpec, new CatalogPartitionImpl(newProps, catalogPartition.getComment()));
    } else {
        // it's altering a table
        Map<String, String> newOptions = new HashMap<>(oldTable.getOptions());
        newOptions.putAll(OperationConverterUtils.extractProperties(alterTableOptions.getPropertyList()));
        return new AlterTableOptionsOperation(tableIdentifier, oldTable.copy(newOptions));
    }
}
Also used: AlterTableOptionsOperation (org.apache.flink.table.operations.ddl.AlterTableOptionsOperation), ValidationException (org.apache.flink.table.api.ValidationException), LinkedHashMap (java.util.LinkedHashMap), HashMap (java.util.HashMap), CatalogPartition (org.apache.flink.table.catalog.CatalogPartition), AlterPartitionPropertiesOperation (org.apache.flink.table.operations.ddl.AlterPartitionPropertiesOperation), CatalogPartitionSpec (org.apache.flink.table.catalog.CatalogPartitionSpec), CatalogPartitionImpl (org.apache.flink.table.catalog.CatalogPartitionImpl)
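In SQL terms, the two branches correspond to ALTER TABLE ... SET statements with and without a PARTITION clause. The statements below are illustrative assumptions about the shapes that reach this converter, issued through a TableEnvironment named tableEnv:

// Partition spec present: converted into an AlterPartitionPropertiesOperation
// carrying a new CatalogPartitionImpl with the merged properties.
tableEnv.executeSql(
        "ALTER TABLE MyTable PARTITION (part1 = 'A', part2 = 1) SET ('k' = 'v')");

// No partition spec: converted into an AlterTableOptionsOperation with the
// merged table options.
tableEnv.executeSql("ALTER TABLE MyTable SET ('k' = 'v')");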

Example 5 with CatalogPartitionImpl

Use of org.apache.flink.table.catalog.CatalogPartitionImpl in project flink by apache.

The method createPartitionStats in the class CatalogStatisticsTest:

private void createPartitionStats(String part1, int part2, long rowCount) throws Exception {
    ObjectPath path = ObjectPath.fromString("default_database.PartT");
    LinkedHashMap<String, String> partSpecMap = new LinkedHashMap<>();
    partSpecMap.put("part1", part1);
    partSpecMap.put("part2", String.valueOf(part2));
    CatalogPartitionSpec partSpec = new CatalogPartitionSpec(partSpecMap);
    catalog.createPartition(path, partSpec, new CatalogPartitionImpl(new HashMap<>(), ""), true);
    catalog.alterPartitionStatistics(path, partSpec, new CatalogTableStatistics(rowCount, 10, 1000L, 2000L), true);
}
Also used: ObjectPath (org.apache.flink.table.catalog.ObjectPath), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), CatalogColumnStatisticsDataString (org.apache.flink.table.catalog.stats.CatalogColumnStatisticsDataString), CatalogPartitionSpec (org.apache.flink.table.catalog.CatalogPartitionSpec), CatalogPartitionImpl (org.apache.flink.table.catalog.CatalogPartitionImpl), CatalogTableStatistics (org.apache.flink.table.catalog.stats.CatalogTableStatistics)
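A usage sketch for this helper, inside a test method that likewise declares throws Exception: register statistics for two partitions (the values are hypothetical) and read one of them back through the Catalog API.

createPartitionStats("A", 1, 100L);
createPartitionStats("B", 2, 200L);

// Read the statistics back for the partition (part1='A', part2=1).
LinkedHashMap<String, String> lookup = new LinkedHashMap<>();
lookup.put("part1", "A");
lookup.put("part2", "1");
CatalogTableStatistics stats =
        catalog.getPartitionStatistics(
                ObjectPath.fromString("default_database.PartT"), new CatalogPartitionSpec(lookup));
// stats.getRowCount() is 100 for the partition registered above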

Aggregations

CatalogPartitionImpl (org.apache.flink.table.catalog.CatalogPartitionImpl): 7
HashMap (java.util.HashMap): 6
CatalogPartitionSpec (org.apache.flink.table.catalog.CatalogPartitionSpec): 6
LinkedHashMap (java.util.LinkedHashMap): 5
CatalogPartition (org.apache.flink.table.catalog.CatalogPartition): 5
ValidationException (org.apache.flink.table.api.ValidationException): 3
CatalogTable (org.apache.flink.table.catalog.CatalogTable): 3
ObjectIdentifier (org.apache.flink.table.catalog.ObjectIdentifier): 3
ObjectPath (org.apache.flink.table.catalog.ObjectPath): 3
ArrayList (java.util.ArrayList): 2
Map (java.util.Map): 2
CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable): 2
ContextResolvedTable (org.apache.flink.table.catalog.ContextResolvedTable): 2
AddPartitionsOperation (org.apache.flink.table.operations.ddl.AddPartitionsOperation): 2
AlterPartitionPropertiesOperation (org.apache.flink.table.operations.ddl.AlterPartitionPropertiesOperation): 2
AlterTableOptionsOperation (org.apache.flink.table.operations.ddl.AlterTableOptionsOperation): 2
List (java.util.List): 1
RelHint (org.apache.calcite.rel.hint.RelHint): 1
SqlNodeList (org.apache.calcite.sql.SqlNodeList): 1
SqlAddPartitions (org.apache.flink.sql.parser.ddl.SqlAddPartitions): 1