Example 26 with TableColumn

use of org.apache.flink.table.api.TableColumn in project flink by splunk.

the class OperationConverterUtils method convertAddReplaceColumns.

public static Operation convertAddReplaceColumns(ObjectIdentifier tableIdentifier, SqlAddReplaceColumns addReplaceColumns, CatalogTable catalogTable, SqlValidator sqlValidator) {
    // This is only used by the Hive dialect at the moment. In Hive, only non-partition columns
    // can be added/replaced, and users will only define non-partition columns in the new column
    // list. Therefore, we require that partition columns appear last in the schema (which is in
    // line with Hive). Otherwise, we won't be able to determine the column positions after the
    // non-partition columns are replaced.
    TableSchema oldSchema = catalogTable.getSchema();
    int numPartCol = catalogTable.getPartitionKeys().size();
    Set<String> lastCols = oldSchema.getTableColumns().subList(oldSchema.getFieldCount() - numPartCol, oldSchema.getFieldCount()).stream().map(TableColumn::getName).collect(Collectors.toSet());
    if (!lastCols.equals(new HashSet<>(catalogTable.getPartitionKeys()))) {
        throw new ValidationException("ADD/REPLACE COLUMNS on partitioned tables requires partition columns to appear last");
    }
    // set non-partition columns
    TableSchema.Builder builder = TableSchema.builder();
    if (!addReplaceColumns.isReplace()) {
        List<TableColumn> nonPartCols = oldSchema.getTableColumns().subList(0, oldSchema.getFieldCount() - numPartCol);
        for (TableColumn column : nonPartCols) {
            builder.add(column);
        }
        setWatermarkAndPK(builder, catalogTable.getSchema());
    }
    for (SqlNode sqlNode : addReplaceColumns.getNewColumns()) {
        builder.add(toTableColumn((SqlTableColumn) sqlNode, sqlValidator));
    }
    // set partition columns
    List<TableColumn> partCols = oldSchema.getTableColumns().subList(oldSchema.getFieldCount() - numPartCol, oldSchema.getFieldCount());
    for (TableColumn column : partCols) {
        builder.add(column);
    }
    // set properties
    Map<String, String> newProperties = new HashMap<>(catalogTable.getOptions());
    newProperties.putAll(extractProperties(addReplaceColumns.getProperties()));
    return new AlterTableSchemaOperation(tableIdentifier, new CatalogTableImpl(builder.build(), catalogTable.getPartitionKeys(), newProperties, catalogTable.getComment()));
}
Also used : ValidationException(org.apache.flink.table.api.ValidationException) TableSchema(org.apache.flink.table.api.TableSchema) HashMap(java.util.HashMap) TableColumn(org.apache.flink.table.api.TableColumn) SqlTableColumn(org.apache.flink.sql.parser.ddl.SqlTableColumn) CatalogTableImpl(org.apache.flink.table.catalog.CatalogTableImpl) AlterTableSchemaOperation(org.apache.flink.table.operations.ddl.AlterTableSchemaOperation) HashSet(java.util.HashSet) SqlNode(org.apache.calcite.sql.SqlNode)
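
For orientation, a minimal sketch (not part of the Flink sources) of the column ordering this method preserves. The table layout is hypothetical: two existing non-partition columns x and y, a newly added column z, and a partition column p, which must stay last. It additionally uses org.apache.flink.table.api.DataTypes.

// Hypothetical illustration of the schema produced by ADD COLUMNS on a partitioned table:
// existing non-partition columns first, newly added columns next, partition columns last.
TableSchema expected =
        TableSchema.builder()
                .add(TableColumn.physical("x", DataTypes.INT()))
                .add(TableColumn.physical("y", DataTypes.STRING()))
                // newly added column goes after the existing non-partition columns
                .add(TableColumn.physical("z", DataTypes.BIGINT()))
                // partition column remains last, as required by the check above
                .add(TableColumn.physical("p", DataTypes.STRING()))
                .build();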

Example 27 with TableColumn

use of org.apache.flink.table.api.TableColumn in project flink by splunk.

the class OperationConverterUtils method convertChangeColumn.

public static Operation convertChangeColumn(ObjectIdentifier tableIdentifier, SqlChangeColumn changeColumn, CatalogTable catalogTable, SqlValidator sqlValidator) {
    String oldName = changeColumn.getOldName().getSimple();
    if (catalogTable.getPartitionKeys().indexOf(oldName) >= 0) {
        // disallow changing partition columns
        throw new ValidationException("CHANGE COLUMN cannot be applied to partition columns");
    }
    TableSchema oldSchema = catalogTable.getSchema();
    boolean first = changeColumn.isFirst();
    String after = changeColumn.getAfter() == null ? null : changeColumn.getAfter().getSimple();
    TableColumn newTableColumn = toTableColumn(changeColumn.getNewColumn(), sqlValidator);
    TableSchema newSchema = changeColumn(oldSchema, oldName, newTableColumn, first, after);
    Map<String, String> newProperties = new HashMap<>(catalogTable.getOptions());
    newProperties.putAll(extractProperties(changeColumn.getProperties()));
    return new AlterTableSchemaOperation(tableIdentifier, new CatalogTableImpl(newSchema, catalogTable.getPartitionKeys(), newProperties, catalogTable.getComment()));
// TODO: handle watermark and constraints
}
Also used : ValidationException(org.apache.flink.table.api.ValidationException) TableSchema(org.apache.flink.table.api.TableSchema) HashMap(java.util.HashMap) CatalogTableImpl(org.apache.flink.table.catalog.CatalogTableImpl) AlterTableSchemaOperation(org.apache.flink.table.operations.ddl.AlterTableSchemaOperation) TableColumn(org.apache.flink.table.api.TableColumn) SqlTableColumn(org.apache.flink.sql.parser.ddl.SqlTableColumn)
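
The actual reordering is delegated to a changeColumn helper that is not shown in this excerpt. Below is a rough sketch of how such a helper could be written, assuming it removes the old column and re-inserts the replacement at the requested position (FIRST, AFTER <col>, or in place); the real Flink implementation may differ, for example in its validation and error messages. It uses java.util.ArrayList and java.util.List in addition to the classes listed above.

// Hypothetical sketch of a column-reordering helper; not the actual Flink code.
private static TableSchema changeColumnSketch(
        TableSchema oldSchema, String oldName, TableColumn newColumn, boolean first, String after) {
    List<TableColumn> columns = new ArrayList<>(oldSchema.getTableColumns());
    int oldIndex = -1;
    for (int i = 0; i < columns.size(); i++) {
        if (columns.get(i).getName().equals(oldName)) {
            oldIndex = i;
            break;
        }
    }
    if (oldIndex < 0) {
        throw new ValidationException("Old column " + oldName + " not found in table schema");
    }
    columns.remove(oldIndex);
    if (first) {
        // CHANGE COLUMN ... FIRST: move the new column to the front
        columns.add(0, newColumn);
    } else if (after != null) {
        // CHANGE COLUMN ... AFTER <col>: insert right after the named column
        int afterIndex = -1;
        for (int i = 0; i < columns.size(); i++) {
            if (columns.get(i).getName().equals(after)) {
                afterIndex = i;
                break;
            }
        }
        if (afterIndex < 0) {
            throw new ValidationException("Column " + after + " not found in table schema");
        }
        columns.add(afterIndex + 1, newColumn);
    } else {
        // no position given: keep the original position
        columns.add(oldIndex, newColumn);
    }
    TableSchema.Builder builder = TableSchema.builder();
    columns.forEach(builder::add);
    return builder.build();
}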

Example 28 with TableColumn

use of org.apache.flink.table.api.TableColumn in project flink-mirror by flink-ci.

the class HiveParserDDLSemanticAnalyzer method convertAlterTableModifyCols.

private Operation convertAlterTableModifyCols(CatalogBaseTable alteredTable, String tblName, HiveParserASTNode ast, boolean replace) throws SemanticException {
    List<FieldSchema> newCols = HiveParserBaseSemanticAnalyzer.getColumns((HiveParserASTNode) ast.getChild(0));
    boolean isCascade = false;
    if (null != ast.getFirstChildWithType(HiveASTParser.TOK_CASCADE)) {
        isCascade = true;
    }
    ObjectIdentifier tableIdentifier = parseObjectIdentifier(tblName);
    CatalogTable oldTable = (CatalogTable) alteredTable;
    // prepare properties
    Map<String, String> props = new HashMap<>(oldTable.getOptions());
    props.put(ALTER_TABLE_OP, ALTER_COLUMNS.name());
    if (isCascade) {
        props.put(ALTER_COL_CASCADE, "true");
    }
    TableSchema oldSchema = oldTable.getSchema();
    final int numPartCol = oldTable.getPartitionKeys().size();
    TableSchema.Builder builder = TableSchema.builder();
    // add existing non-part col if we're not replacing
    if (!replace) {
        List<TableColumn> nonPartCols = oldSchema.getTableColumns().subList(0, oldSchema.getFieldCount() - numPartCol);
        for (TableColumn column : nonPartCols) {
            builder.add(column);
        }
        setWatermarkAndPK(builder, oldSchema);
    }
    // add new cols
    for (FieldSchema col : newCols) {
        builder.add(TableColumn.physical(col.getName(), HiveTypeUtil.toFlinkType(TypeInfoUtils.getTypeInfoFromTypeString(col.getType()))));
    }
    // add part cols
    List<TableColumn> partCols = oldSchema.getTableColumns().subList(oldSchema.getFieldCount() - numPartCol, oldSchema.getFieldCount());
    for (TableColumn column : partCols) {
        builder.add(column);
    }
    return new AlterTableSchemaOperation(tableIdentifier, new CatalogTableImpl(builder.build(), oldTable.getPartitionKeys(), props, oldTable.getComment()));
}
Also used : TableSchema(org.apache.flink.table.api.TableSchema) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) CatalogTable(org.apache.flink.table.catalog.CatalogTable) TableColumn(org.apache.flink.table.api.TableColumn) NotNullConstraint(org.apache.flink.table.planner.delegation.hive.copy.HiveParserBaseSemanticAnalyzer.NotNullConstraint) UniqueConstraint(org.apache.flink.table.api.constraints.UniqueConstraint) CatalogTableImpl(org.apache.flink.table.catalog.CatalogTableImpl) AlterTableSchemaOperation(org.apache.flink.table.operations.ddl.AlterTableSchemaOperation) ObjectIdentifier(org.apache.flink.table.catalog.ObjectIdentifier)
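
As a small, self-contained illustration (not taken from the Flink sources) of the Hive-to-Flink column mapping used in the loop over newCols above, assuming a hypothetical new column named ts with Hive type bigint:

// Hypothetical example: convert one Hive column definition into a Flink TableColumn.
FieldSchema newCol = new FieldSchema("ts", "bigint", null);
TableColumn flinkCol =
        TableColumn.physical(
                newCol.getName(),
                HiveTypeUtil.toFlinkType(
                        TypeInfoUtils.getTypeInfoFromTypeString(newCol.getType())));
// flinkCol is a physical column "ts" of Flink type BIGINT.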

Aggregations

TableColumn (org.apache.flink.table.api.TableColumn): 28 usages
TableSchema (org.apache.flink.table.api.TableSchema): 25 usages
HashMap (java.util.HashMap): 15 usages
ValidationException (org.apache.flink.table.api.ValidationException): 12 usages
CatalogTableImpl (org.apache.flink.table.catalog.CatalogTableImpl): 12 usages
AlterTableSchemaOperation (org.apache.flink.table.operations.ddl.AlterTableSchemaOperation): 12 usages
SqlTableColumn (org.apache.flink.sql.parser.ddl.SqlTableColumn): 9 usages
UniqueConstraint (org.apache.flink.table.api.constraints.UniqueConstraint): 7 usages
LinkedHashMap (java.util.LinkedHashMap): 6 usages
CatalogTable (org.apache.flink.table.catalog.CatalogTable): 6 usages
ObjectIdentifier (org.apache.flink.table.catalog.ObjectIdentifier): 6 usages
NotNullConstraint (org.apache.flink.table.planner.delegation.hive.copy.HiveParserBaseSemanticAnalyzer.NotNullConstraint): 6 usages
DataType (org.apache.flink.table.types.DataType): 6 usages
ArrayList (java.util.ArrayList): 4 usages
HashSet (java.util.HashSet): 3 usages
SqlNode (org.apache.calcite.sql.SqlNode): 3 usages
SqlCreateHiveTable (org.apache.flink.sql.parser.hive.ddl.SqlCreateHiveTable): 3 usages
TableException (org.apache.flink.table.api.TableException): 3 usages
CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable): 3 usages
ContextResolvedTable (org.apache.flink.table.catalog.ContextResolvedTable): 3 usages