Example 6 with CatalogPartition

Use of org.apache.flink.table.catalog.CatalogPartition in project flink by apache, from the class SqlToOperationConverter, method convertAlterTableOptions.

private Operation convertAlterTableOptions(
        ObjectIdentifier tableIdentifier,
        CatalogTable oldTable,
        SqlAlterTableOptions alterTableOptions) {
    LinkedHashMap<String, String> partitionKVs = alterTableOptions.getPartitionKVs();
    // it's altering partitions
    if (partitionKVs != null) {
        CatalogPartitionSpec partitionSpec = new CatalogPartitionSpec(partitionKVs);
        CatalogPartition catalogPartition =
                catalogManager
                        .getPartition(tableIdentifier, partitionSpec)
                        .orElseThrow(
                                () -> new ValidationException(
                                        String.format(
                                                "Partition %s of table %s doesn't exist",
                                                partitionSpec.getPartitionSpec(),
                                                tableIdentifier)));
        Map<String, String> newProps = new HashMap<>(catalogPartition.getProperties());
        newProps.putAll(
                OperationConverterUtils.extractProperties(alterTableOptions.getPropertyList()));
        return new AlterPartitionPropertiesOperation(
                tableIdentifier,
                partitionSpec,
                new CatalogPartitionImpl(newProps, catalogPartition.getComment()));
    } else {
        // it's altering a table
        Map<String, String> newOptions = new HashMap<>(oldTable.getOptions());
        newOptions.putAll(
                OperationConverterUtils.extractProperties(alterTableOptions.getPropertyList()));
        return new AlterTableOptionsOperation(tableIdentifier, oldTable.copy(newOptions));
    }
}
Also used: AlterTableOptionsOperation (org.apache.flink.table.operations.ddl.AlterTableOptionsOperation), ValidationException (org.apache.flink.table.api.ValidationException), LinkedHashMap (java.util.LinkedHashMap), HashMap (java.util.HashMap), CatalogPartition (org.apache.flink.table.catalog.CatalogPartition), AlterPartitionPropertiesOperation (org.apache.flink.table.operations.ddl.AlterPartitionPropertiesOperation), CatalogPartitionSpec (org.apache.flink.table.catalog.CatalogPartitionSpec), CatalogPartitionImpl (org.apache.flink.table.catalog.CatalogPartitionImpl).
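
For context, a minimal sketch of SQL that exercises both branches of this converter; the table name, partition values, and option keys are made-up placeholders, and whether the PARTITION clause is accepted depends on the catalog and connector in use:

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class AlterTableOptionsSketch {
    public static void main(String[] args) {
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        // Without a PARTITION clause the statement is converted to an
        // AlterTableOptionsOperation (the else branch above).
        tEnv.executeSql("ALTER TABLE my_table SET ('k' = 'v')");
        // With a PARTITION clause it becomes an AlterPartitionPropertiesOperation,
        // provided the partition already exists in the catalog.
        tEnv.executeSql("ALTER TABLE my_table PARTITION (dt = '2021-01-01') SET ('k' = 'v')");
    }
}

Both statements assume my_table was created beforehand; the second additionally assumes a catalog that supports partitions, such as a HiveCatalog.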

Example 7 with CatalogPartition

Use of org.apache.flink.table.catalog.CatalogPartition in project flink by apache, from the class HiveCatalog, method alterPartition.

@Override
public void alterPartition(
        ObjectPath tablePath,
        CatalogPartitionSpec partitionSpec,
        CatalogPartition newPartition,
        boolean ignoreIfNotExists)
        throws PartitionNotExistException, CatalogException {
    checkNotNull(tablePath, "Table path cannot be null");
    checkNotNull(partitionSpec, "CatalogPartitionSpec cannot be null");
    checkNotNull(newPartition, "New partition cannot be null");
    try {
        Table hiveTable = getHiveTable(tablePath);
        boolean isHiveTable = isHiveTable(hiveTable.getParameters());
        if (!isHiveTable) {
            throw new CatalogException("Currently only supports partition for hive tables");
        }
        Partition hivePartition = getHivePartition(hiveTable, partitionSpec);
        if (hivePartition == null) {
            // the target partition doesn't exist
            if (ignoreIfNotExists) {
                return;
            }
            throw new PartitionNotExistException(getName(), tablePath, partitionSpec);
        }
        AlterTableOp op = HiveTableUtil.extractAlterTableOp(newPartition.getProperties());
        if (op == null) {
            throw new CatalogException(ALTER_TABLE_OP + " is missing for alter table operation");
        }
        alterTableViaProperties(
                op,
                null,
                null,
                hivePartition.getParameters(),
                newPartition.getProperties(),
                hivePartition.getSd());
        client.alter_partition(tablePath.getDatabaseName(), tablePath.getObjectName(), hivePartition);
    } catch (NoSuchObjectException e) {
        // the target table doesn't exist in the metastore
        if (!ignoreIfNotExists) {
            throw new PartitionNotExistException(getName(), tablePath, partitionSpec, e);
        }
    } catch (InvalidOperationException
            | MetaException
            | TableNotExistException
            | PartitionSpecInvalidException e) {
        throw new PartitionNotExistException(getName(), tablePath, partitionSpec, e);
    } catch (TException e) {
        throw new CatalogException(
                String.format(
                        "Failed to alter existing partition with new partition %s of table %s",
                        partitionSpec, tablePath),
                e);
    }
}
Also used: TException (org.apache.thrift.TException), Partition (org.apache.hadoop.hive.metastore.api.Partition), CatalogPartition (org.apache.flink.table.catalog.CatalogPartition), CatalogTable (org.apache.flink.table.catalog.CatalogTable), SqlCreateHiveTable (org.apache.flink.sql.parser.hive.ddl.SqlCreateHiveTable), Table (org.apache.hadoop.hive.metastore.api.Table), CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable), TableNotExistException (org.apache.flink.table.catalog.exceptions.TableNotExistException), CatalogException (org.apache.flink.table.catalog.exceptions.CatalogException), AlterTableOp (org.apache.flink.sql.parser.hive.ddl.SqlAlterHiveTable.AlterTableOp), InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException), NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException), PartitionNotExistException (org.apache.flink.table.catalog.exceptions.PartitionNotExistException), PartitionSpecInvalidException (org.apache.flink.table.catalog.exceptions.PartitionSpecInvalidException), MetaException (org.apache.hadoop.hive.metastore.api.MetaException).
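
As a usage illustration, a minimal sketch of invoking alterPartition directly through the catalog API. The catalog name, conf dir, database, table, partition values, and property keys are assumptions. Note that the method requires the alter-table-op marker in the new partition's properties (the extractAlterTableOp check above); the Hive-dialect planner normally sets it, so setting it by hand as shown here is purely illustrative:

import java.util.HashMap;
import java.util.Map;
import org.apache.flink.sql.parser.hive.ddl.SqlAlterHiveTable;
import org.apache.flink.table.catalog.CatalogPartition;
import org.apache.flink.table.catalog.CatalogPartitionImpl;
import org.apache.flink.table.catalog.CatalogPartitionSpec;
import org.apache.flink.table.catalog.ObjectPath;
import org.apache.flink.table.catalog.hive.HiveCatalog;

public class AlterPartitionSketch {
    public static void main(String[] args) throws Exception {
        // Placeholders: point the conf dir at a directory containing hive-site.xml.
        HiveCatalog catalog = new HiveCatalog("myhive", "default", "/opt/hive-conf");
        catalog.open();
        try {
            ObjectPath tablePath = new ObjectPath("default", "page_view");

            Map<String, String> spec = new HashMap<>();
            spec.put("dt", "2021-01-01");
            CatalogPartitionSpec partitionSpec = new CatalogPartitionSpec(spec);

            Map<String, String> props = new HashMap<>();
            props.put("some.custom.key", "some-value");
            // Without this marker the method above throws CatalogException
            // ("... is missing for alter table operation").
            props.put(
                    SqlAlterHiveTable.ALTER_TABLE_OP,
                    SqlAlterHiveTable.AlterTableOp.CHANGE_TBL_PROPS.name());
            CatalogPartition newPartition = new CatalogPartitionImpl(props, null);

            // ignoreIfNotExists = false: a missing partition raises
            // PartitionNotExistException instead of being silently ignored.
            catalog.alterPartition(tablePath, partitionSpec, newPartition, false);
        } finally {
            catalog.close();
        }
    }
}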

Example 8 with CatalogPartition

Use of org.apache.flink.table.catalog.CatalogPartition in project flink by apache, from the class HiveParserDDLSemanticAnalyzer, method convertAlterTableAddParts.

/**
 * Add one or more partitions to a table. Useful when the data has been copied to the right
 * location by some other process.
 */
private Operation convertAlterTableAddParts(String[] qualified, CommonTree ast) {
    // ^(TOK_ALTERTABLE_ADDPARTS identifier ifNotExists?
    // alterStatementSuffixAddPartitionsElement+)
    boolean ifNotExists = ast.getChild(0).getType() == HiveASTParser.TOK_IFNOTEXISTS;
    Table tab = getTable(new ObjectPath(qualified[0], qualified[1]));
    boolean isView = tab.isView();
    validateAlterTableType(tab);
    int numCh = ast.getChildCount();
    int start = ifNotExists ? 1 : 0;
    String currentLocation = null;
    Map<String, String> currentPartSpec = null;
    // The parser has already done some verification, so the order of tokens doesn't
    // need to be verified here.
    List<CatalogPartitionSpec> specs = new ArrayList<>();
    List<CatalogPartition> partitions = new ArrayList<>();
    for (int num = start; num < numCh; num++) {
        HiveParserASTNode child = (HiveParserASTNode) ast.getChild(num);
        switch (child.getToken().getType()) {
            case HiveASTParser.TOK_PARTSPEC:
                if (currentPartSpec != null) {
                    specs.add(new CatalogPartitionSpec(currentPartSpec));
                    Map<String, String> props = new HashMap<>();
                    if (currentLocation != null) {
                        props.put(TABLE_LOCATION_URI, currentLocation);
                    }
                    partitions.add(new CatalogPartitionImpl(props, null));
                    currentLocation = null;
                }
                currentPartSpec = getPartSpec(child);
                // validate reserved values
                validatePartitionValues(currentPartSpec);
                break;
            case HiveASTParser.TOK_PARTITIONLOCATION:
                // if location specified, set in partition
                if (isView) {
                    throw new ValidationException("LOCATION clause illegal for view partition");
                }
                currentLocation =
                        HiveParserBaseSemanticAnalyzer.unescapeSQLString(
                                child.getChild(0).getText());
                break;
            default:
                throw new ValidationException("Unknown child: " + child);
        }
    }
    // add the last one
    if (currentPartSpec != null) {
        specs.add(new CatalogPartitionSpec(currentPartSpec));
        Map<String, String> props = new HashMap<>();
        if (currentLocation != null) {
            props.put(TABLE_LOCATION_URI, currentLocation);
        }
        partitions.add(new CatalogPartitionImpl(props, null));
    }
    ObjectIdentifier tableIdentifier =
            tab.getDbName() == null
                    ? parseObjectIdentifier(tab.getTableName())
                    : catalogManager.qualifyIdentifier(
                            UnresolvedIdentifier.of(tab.getDbName(), tab.getTableName()));
    return new AddPartitionsOperation(tableIdentifier, ifNotExists, specs, partitions);
}
Also used: ObjectPath (org.apache.flink.table.catalog.ObjectPath), CatalogTable (org.apache.flink.table.catalog.CatalogTable), SqlCreateHiveTable (org.apache.flink.sql.parser.hive.ddl.SqlCreateHiveTable), Table (org.apache.hadoop.hive.ql.metadata.Table), ContextResolvedTable (org.apache.flink.table.catalog.ContextResolvedTable), CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable), HiveParserASTNode (org.apache.flink.table.planner.delegation.hive.copy.HiveParserASTNode), ValidationException (org.apache.flink.table.api.ValidationException), LinkedHashMap (java.util.LinkedHashMap), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), NotNullConstraint (org.apache.flink.table.planner.delegation.hive.copy.HiveParserBaseSemanticAnalyzer.NotNullConstraint), UniqueConstraint (org.apache.flink.table.api.constraints.UniqueConstraint), CatalogPartition (org.apache.flink.table.catalog.CatalogPartition), AddPartitionsOperation (org.apache.flink.table.operations.ddl.AddPartitionsOperation), CatalogPartitionSpec (org.apache.flink.table.catalog.CatalogPartitionSpec), CatalogPartitionImpl (org.apache.flink.table.catalog.CatalogPartitionImpl), ObjectIdentifier (org.apache.flink.table.catalog.ObjectIdentifier).
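
To see what this analyzer consumes, a minimal sketch of Hive-dialect DDL that would be converted into a single AddPartitionsOperation carrying two partition specs. Table and partition values are placeholders, and a HiveCatalog is assumed to be registered and set as the current catalog:

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.TableEnvironment;

public class AddPartitionsSketch {
    public static void main(String[] args) {
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
        // Route DDL through the Hive parser, and thus this semantic analyzer.
        tEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
        // Two PARTITION elements, the first with an explicit LOCATION: per the loop
        // above, the location lands in that partition's properties under the
        // location-URI key, while the second partition gets empty properties.
        tEnv.executeSql(
                "ALTER TABLE page_view ADD IF NOT EXISTS"
                        + " PARTITION (dt='2008-08-08', country='us') LOCATION '/path/to/part080808'"
                        + " PARTITION (dt='2008-08-09', country='us')");
    }
}

Because IF NOT EXISTS is present, ifNotExists is true in the resulting operation, so partitions that already exist are skipped rather than causing an error.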

Aggregations

CatalogPartition (org.apache.flink.table.catalog.CatalogPartition): 8
HashMap (java.util.HashMap): 5
CatalogPartitionImpl (org.apache.flink.table.catalog.CatalogPartitionImpl): 5
CatalogPartitionSpec (org.apache.flink.table.catalog.CatalogPartitionSpec): 5
CatalogTable (org.apache.flink.table.catalog.CatalogTable): 4
LinkedHashMap (java.util.LinkedHashMap): 3
ValidationException (org.apache.flink.table.api.ValidationException): 3
ObjectPath (org.apache.flink.table.catalog.ObjectPath): 3
CatalogException (org.apache.flink.table.catalog.exceptions.CatalogException): 3
TableNotExistException (org.apache.flink.table.catalog.exceptions.TableNotExistException): 3
AlterPartitionPropertiesOperation (org.apache.flink.table.operations.ddl.AlterPartitionPropertiesOperation): 3
AlterTableOptionsOperation (org.apache.flink.table.operations.ddl.AlterTableOptionsOperation): 3
ArrayList (java.util.ArrayList): 2
Map (java.util.Map): 2
SqlCreateHiveTable (org.apache.flink.sql.parser.hive.ddl.SqlCreateHiveTable): 2
CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable): 2
ContextResolvedTable (org.apache.flink.table.catalog.ContextResolvedTable): 2
ObjectIdentifier (org.apache.flink.table.catalog.ObjectIdentifier): 2
PartitionNotExistException (org.apache.flink.table.catalog.exceptions.PartitionNotExistException): 2
PartitionSpecInvalidException (org.apache.flink.table.catalog.exceptions.PartitionSpecInvalidException): 2