Example 1 with TableColumn

Use of org.apache.flink.table.api.TableColumn in the Apache Flink project.

From class HiveParserDDLSemanticAnalyzer, method convertAlterTableModifyCols:

private Operation convertAlterTableModifyCols(CatalogBaseTable alteredTable, String tblName, HiveParserASTNode ast, boolean replace) throws SemanticException {
    List<FieldSchema> newCols = HiveParserBaseSemanticAnalyzer.getColumns((HiveParserASTNode) ast.getChild(0));
    boolean isCascade = ast.getFirstChildWithType(HiveASTParser.TOK_CASCADE) != null;
    ObjectIdentifier tableIdentifier = parseObjectIdentifier(tblName);
    CatalogTable oldTable = (CatalogTable) alteredTable;
    // prepare properties
    Map<String, String> props = new HashMap<>(oldTable.getOptions());
    props.put(ALTER_TABLE_OP, ALTER_COLUMNS.name());
    if (isCascade) {
        props.put(ALTER_COL_CASCADE, "true");
    }
    TableSchema oldSchema = oldTable.getSchema();
    final int numPartCol = oldTable.getPartitionKeys().size();
    TableSchema.Builder builder = TableSchema.builder();
    // add existing non-part col if we're not replacing
    if (!replace) {
        List<TableColumn> nonPartCols = oldSchema.getTableColumns().subList(0, oldSchema.getFieldCount() - numPartCol);
        for (TableColumn column : nonPartCols) {
            builder.add(column);
        }
        setWatermarkAndPK(builder, oldSchema);
    }
    // add new cols
    for (FieldSchema col : newCols) {
        builder.add(TableColumn.physical(col.getName(), HiveTypeUtil.toFlinkType(TypeInfoUtils.getTypeInfoFromTypeString(col.getType()))));
    }
    // add part cols
    List<TableColumn> partCols = oldSchema.getTableColumns().subList(oldSchema.getFieldCount() - numPartCol, oldSchema.getFieldCount());
    for (TableColumn column : partCols) {
        builder.add(column);
    }
    return new AlterTableSchemaOperation(tableIdentifier, new CatalogTableImpl(builder.build(), oldTable.getPartitionKeys(), props, oldTable.getComment()));
}
Also used: TableSchema (org.apache.flink.table.api.TableSchema), LinkedHashMap (java.util.LinkedHashMap), HashMap (java.util.HashMap), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), CatalogTable (org.apache.flink.table.catalog.CatalogTable), TableColumn (org.apache.flink.table.api.TableColumn), NotNullConstraint (org.apache.flink.table.planner.delegation.hive.copy.HiveParserBaseSemanticAnalyzer.NotNullConstraint), UniqueConstraint (org.apache.flink.table.api.constraints.UniqueConstraint), CatalogTableImpl (org.apache.flink.table.catalog.CatalogTableImpl), AlterTableSchemaOperation (org.apache.flink.table.operations.ddl.AlterTableSchemaOperation), ObjectIdentifier (org.apache.flink.table.catalog.ObjectIdentifier)
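
The key invariant in this method is column order: existing non-partition columns come first, new columns are appended after them, and partition columns always return to the end. The following is a minimal, self-contained sketch of that rebuild against the public TableSchema/TableColumn API; the table layout (columns id and name plus one partition column dt) and the class name ModifyColsSketch are made up for illustration.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableColumn;
import org.apache.flink.table.api.TableSchema;

public class ModifyColsSketch {
    public static void main(String[] args) {
        // hypothetical old schema: two data columns followed by one partition column "dt"
        TableSchema oldSchema = TableSchema.builder()
                .add(TableColumn.physical("id", DataTypes.BIGINT()))
                .add(TableColumn.physical("name", DataTypes.STRING()))
                .add(TableColumn.physical("dt", DataTypes.STRING()))
                .build();
        int numPartCol = 1;

        TableSchema.Builder builder = TableSchema.builder();
        // keep existing non-partition columns (the !replace case above)
        oldSchema.getTableColumns().subList(0, oldSchema.getFieldCount() - numPartCol).forEach(builder::add);
        // append the new column after the existing data columns
        builder.add(TableColumn.physical("age", DataTypes.INT()));
        // partition columns always go back to the end
        oldSchema.getTableColumns().subList(oldSchema.getFieldCount() - numPartCol, oldSchema.getFieldCount()).forEach(builder::add);

        // prints: id, name, age, dt
        System.out.println(String.join(", ", builder.build().getFieldNames()));
    }
}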

Example 2 with TableColumn

Use of org.apache.flink.table.api.TableColumn in the Apache Flink project.

From class TableFormatFactoryBase, method deriveSchema:

/**
 * Finds the table schema that can be used for a format schema (without time attributes and
 * generated columns).
 */
public static TableSchema deriveSchema(Map<String, String> properties) {
    final DescriptorProperties descriptorProperties = new DescriptorProperties();
    descriptorProperties.putProperties(properties);
    final TableSchema.Builder builder = TableSchema.builder();
    final TableSchema tableSchema = descriptorProperties.getTableSchema(SCHEMA);
    for (int i = 0; i < tableSchema.getFieldCount(); i++) {
        final TableColumn tableColumn = tableSchema.getTableColumns().get(i);
        final String fieldName = tableColumn.getName();
        final DataType dataType = tableColumn.getType();
        if (!tableColumn.isPhysical()) {
            // skip non-physical columns
            continue;
        }
        final boolean isProctime = descriptorProperties.getOptionalBoolean(SCHEMA + '.' + i + '.' + SCHEMA_PROCTIME).orElse(false);
        final String timestampKey = SCHEMA + '.' + i + '.' + ROWTIME_TIMESTAMPS_TYPE;
        final boolean isRowtime = descriptorProperties.containsKey(timestampKey);
        if (!isProctime && !isRowtime) {
            // check for aliasing
            final String aliasName = descriptorProperties.getOptionalString(SCHEMA + '.' + i + '.' + SCHEMA_FROM).orElse(fieldName);
            builder.field(aliasName, dataType);
        } else if (isRowtime && descriptorProperties.isValue(timestampKey, ROWTIME_TIMESTAMPS_TYPE_VALUE_FROM_FIELD)) {
            // only use the rowtime attribute if it references a field
            final String aliasName = descriptorProperties.getString(SCHEMA + '.' + i + '.' + ROWTIME_TIMESTAMPS_FROM);
            builder.field(aliasName, dataType);
        }
    }
    return builder.build();
}
Also used: TableSchema (org.apache.flink.table.api.TableSchema), DescriptorProperties (org.apache.flink.table.descriptors.DescriptorProperties), DataType (org.apache.flink.table.types.DataType), TableColumn (org.apache.flink.table.api.TableColumn)
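
For context, deriveSchema consumes the legacy string-based descriptor properties. The sketch below shows roughly what such a property map looks like and how the rowtime column is mapped back to its source field. The key spellings (schema.N.data-type, rowtime.timestamps.type, rowtime.timestamps.from, and the from-field value) are my reading of the legacy Schema/Rowtime descriptor constants and should be treated as assumptions, since they varied across Flink versions.

import java.util.HashMap;
import java.util.Map;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.factories.TableFormatFactoryBase;

public class DeriveSchemaSketch {
    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        // assumed legacy descriptor keys; verify against your Flink version
        props.put("schema.0.name", "user_id");
        props.put("schema.0.data-type", "BIGINT");
        props.put("schema.1.name", "ts");
        props.put("schema.1.data-type", "TIMESTAMP(3)");
        props.put("schema.1.rowtime.timestamps.type", "from-field");
        props.put("schema.1.rowtime.timestamps.from", "event_time");

        // the rowtime column "ts" is replaced by its source field "event_time"
        TableSchema formatSchema = TableFormatFactoryBase.deriveSchema(props);
        // expected fields: user_id (BIGINT), event_time (TIMESTAMP(3))
        System.out.println(formatSchema);
    }
}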

Example 3 with TableColumn

Use of org.apache.flink.table.api.TableColumn in the Apache Flink project.

From class OperationConverterUtils, method convertChangeColumn:

public static Operation convertChangeColumn(ObjectIdentifier tableIdentifier, SqlChangeColumn changeColumn, CatalogTable catalogTable, SqlValidator sqlValidator) {
    String oldName = changeColumn.getOldName().getSimple();
    if (catalogTable.getPartitionKeys().contains(oldName)) {
        // disallow changing partition columns
        throw new ValidationException("CHANGE COLUMN cannot be applied to partition columns");
    }
    TableSchema oldSchema = catalogTable.getSchema();
    boolean first = changeColumn.isFirst();
    String after = changeColumn.getAfter() == null ? null : changeColumn.getAfter().getSimple();
    TableColumn newTableColumn = toTableColumn(changeColumn.getNewColumn(), sqlValidator);
    TableSchema newSchema = changeColumn(oldSchema, oldName, newTableColumn, first, after);
    Map<String, String> newProperties = new HashMap<>(catalogTable.getOptions());
    newProperties.putAll(extractProperties(changeColumn.getProperties()));
    // TODO: handle watermark and constraints
    return new AlterTableSchemaOperation(tableIdentifier, new CatalogTableImpl(newSchema, catalogTable.getPartitionKeys(), newProperties, catalogTable.getComment()));
}
Also used: ValidationException (org.apache.flink.table.api.ValidationException), TableSchema (org.apache.flink.table.api.TableSchema), HashMap (java.util.HashMap), CatalogTableImpl (org.apache.flink.table.catalog.CatalogTableImpl), AlterTableSchemaOperation (org.apache.flink.table.operations.ddl.AlterTableSchemaOperation), TableColumn (org.apache.flink.table.api.TableColumn), SqlTableColumn (org.apache.flink.sql.parser.ddl.SqlTableColumn)
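
The changeColumn helper called above (from OperationConverterUtils) renames/retypes a column and optionally repositions it. The sketch below is a simplified, hypothetical reimplementation of that reordering logic, not the actual Flink method; it only illustrates the FIRST/AFTER semantics.

import java.util.ArrayList;
import java.util.List;
import org.apache.flink.table.api.TableColumn;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.api.ValidationException;

public class ChangeColumnSketch {
    // hypothetical simplification of OperationConverterUtils.changeColumn
    static TableSchema changeColumn(TableSchema oldSchema, String oldName, TableColumn newColumn, boolean first, String after) {
        List<TableColumn> columns = new ArrayList<>(oldSchema.getTableColumns());
        int oldIndex = indexOf(columns, oldName);
        if (oldIndex < 0) {
            throw new ValidationException("Old column " + oldName + " not found");
        }
        columns.remove(oldIndex);
        if (first) {
            // FIRST: move to the front
            columns.add(0, newColumn);
        } else if (after != null) {
            // AFTER: insert right behind the referenced column
            int afterIndex = indexOf(columns, after);
            if (afterIndex < 0) {
                throw new ValidationException("Column " + after + " not found");
            }
            columns.add(afterIndex + 1, newColumn);
        } else {
            // neither FIRST nor AFTER: keep the original position
            columns.add(oldIndex, newColumn);
        }
        TableSchema.Builder builder = TableSchema.builder();
        columns.forEach(builder::add);
        return builder.build();
    }

    private static int indexOf(List<TableColumn> columns, String name) {
        for (int i = 0; i < columns.size(); i++) {
            if (columns.get(i).getName().equals(name)) {
                return i;
            }
        }
        return -1;
    }
}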

Example 4 with TableColumn

Use of org.apache.flink.table.api.TableColumn in the Apache Flink project.

From class HiveParserDDLSemanticAnalyzer, method convertAlterTableChangeCol:

private Operation convertAlterTableChangeCol(CatalogBaseTable alteredTable, String[] qualified, HiveParserASTNode ast) throws SemanticException {
    String newComment = null;
    boolean first = false;
    String flagCol = null;
    boolean isCascade = false;
    // col_old_name col_new_name column_type [COMMENT col_comment] [FIRST|AFTER column_name]
    // [CASCADE|RESTRICT]
    String oldColName = ast.getChild(0).getText();
    String newColName = ast.getChild(1).getText();
    String newType = HiveParserBaseSemanticAnalyzer.getTypeStringFromAST((HiveParserASTNode) ast.getChild(2));
    int childCount = ast.getChildCount();
    for (int i = 3; i < childCount; i++) {
        HiveParserASTNode child = (HiveParserASTNode) ast.getChild(i);
        switch(child.getToken().getType()) {
            case HiveASTParser.StringLiteral:
                newComment = HiveParserBaseSemanticAnalyzer.unescapeSQLString(child.getText());
                break;
            case HiveASTParser.TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION:
                flagCol = HiveParserBaseSemanticAnalyzer.unescapeIdentifier(child.getChild(0).getText());
                break;
            case HiveASTParser.KW_FIRST:
                first = true;
                break;
            case HiveASTParser.TOK_CASCADE:
                isCascade = true;
                break;
            case HiveASTParser.TOK_RESTRICT:
                break;
            default:
                throw new ValidationException("Unsupported token: " + child.getToken() + " for alter table");
        }
    }
    // Validate that the column rename is allowed (skewed columns cannot be renamed).
    Table tab = getTable(new ObjectPath(qualified[0], qualified[1]));
    SkewedInfo skewInfo = tab.getTTable().getSd().getSkewedInfo();
    if ((null != skewInfo) && (null != skewInfo.getSkewedColNames()) && skewInfo.getSkewedColNames().contains(oldColName)) {
        throw new ValidationException(oldColName + ErrorMsg.ALTER_TABLE_NOT_ALLOWED_RENAME_SKEWED_COLUMN.getMsg());
    }
    String tblName = HiveParserBaseSemanticAnalyzer.getDotName(qualified);
    ObjectIdentifier tableIdentifier = parseObjectIdentifier(tblName);
    CatalogTable oldTable = (CatalogTable) alteredTable;
    String oldName = HiveParserBaseSemanticAnalyzer.unescapeIdentifier(oldColName);
    String newName = HiveParserBaseSemanticAnalyzer.unescapeIdentifier(newColName);
    if (oldTable.getPartitionKeys().contains(oldName)) {
        // disallow changing partition columns
        throw new ValidationException("CHANGE COLUMN cannot be applied to partition columns");
    }
    TableSchema oldSchema = oldTable.getSchema();
    TableColumn newTableColumn = TableColumn.physical(newName, HiveTypeUtil.toFlinkType(TypeInfoUtils.getTypeInfoFromTypeString(newType)));
    TableSchema newSchema = OperationConverterUtils.changeColumn(oldSchema, oldName, newTableColumn, first, flagCol);
    Map<String, String> props = new HashMap<>(oldTable.getOptions());
    props.put(ALTER_TABLE_OP, ALTER_COLUMNS.name());
    if (isCascade) {
        props.put(ALTER_COL_CASCADE, "true");
    }
    return new AlterTableSchemaOperation(tableIdentifier, new CatalogTableImpl(newSchema, oldTable.getPartitionKeys(), props, oldTable.getComment()));
}
Also used: ObjectPath (org.apache.flink.table.catalog.ObjectPath), HiveParserASTNode (org.apache.flink.table.planner.delegation.hive.copy.HiveParserASTNode), ValidationException (org.apache.flink.table.api.ValidationException), CatalogTable (org.apache.flink.table.catalog.CatalogTable), SqlCreateHiveTable (org.apache.flink.sql.parser.hive.ddl.SqlCreateHiveTable), Table (org.apache.hadoop.hive.ql.metadata.Table), ContextResolvedTable (org.apache.flink.table.catalog.ContextResolvedTable), CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable), TableSchema (org.apache.flink.table.api.TableSchema), LinkedHashMap (java.util.LinkedHashMap), HashMap (java.util.HashMap), TableColumn (org.apache.flink.table.api.TableColumn), NotNullConstraint (org.apache.flink.table.planner.delegation.hive.copy.HiveParserBaseSemanticAnalyzer.NotNullConstraint), UniqueConstraint (org.apache.flink.table.api.constraints.UniqueConstraint), SkewedInfo (org.apache.hadoop.hive.metastore.api.SkewedInfo), CatalogTableImpl (org.apache.flink.table.catalog.CatalogTableImpl), AlterTableSchemaOperation (org.apache.flink.table.operations.ddl.AlterTableSchemaOperation), ObjectIdentifier (org.apache.flink.table.catalog.ObjectIdentifier)
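
This analyzer method is reached through Hive-dialect DDL. Here is a hedged end-to-end sketch: the catalog setup is elided (a HiveCatalog must already be registered and set as the current catalog for Hive-dialect DDL to resolve), and the database, table, and column names are invented.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.TableEnvironment;

public class ChangeColSketch {
    public static void main(String[] args) {
        TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
        // assumes a HiveCatalog has been registered and selected beforehand
        tableEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
        // col_old_name col_new_name column_type [COMMENT col_comment] [FIRST|AFTER column_name] [CASCADE|RESTRICT]
        tableEnv.executeSql("ALTER TABLE db1.tbl CHANGE COLUMN old_c new_c INT COMMENT 'renamed' AFTER other_c CASCADE");
    }
}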

Example 5 with TableColumn

Use of org.apache.flink.table.api.TableColumn in the Apache Flink project.

From class SchemaValidator, method deriveTableSinkSchema:

/**
 * Derives the table schema for a table sink. A sink ignores a proctime attribute and needs to
 * track the origin of a rowtime field.
 *
 * @deprecated This method combines two separate concepts of table schema and field mapping.
 *     This should be split into two methods once we have support for the corresponding
 *     interfaces (see FLINK-9870).
 */
@Deprecated
public static TableSchema deriveTableSinkSchema(DescriptorProperties properties) {
    TableSchema.Builder builder = TableSchema.builder();
    TableSchema tableSchema = properties.getTableSchema(SCHEMA);
    for (int i = 0; i < tableSchema.getFieldCount(); i++) {
        final TableColumn tableColumn = tableSchema.getTableColumns().get(i);
        final String fieldName = tableColumn.getName();
        final DataType dataType = tableColumn.getType();
        if (!tableColumn.isPhysical()) {
            // skip non-physical column
            continue;
        }
        boolean isProctime = properties.getOptionalBoolean(SCHEMA + "." + i + "." + SCHEMA_PROCTIME).orElse(false);
        String tsType = SCHEMA + "." + i + "." + ROWTIME_TIMESTAMPS_TYPE;
        boolean isRowtime = properties.containsKey(tsType);
        if (!isProctime && !isRowtime) {
            // check for aliasing
            String aliasName = properties.getOptionalString(SCHEMA + "." + i + "." + SCHEMA_FROM).orElse(fieldName);
            builder.field(aliasName, dataType);
        } else if (isRowtime) {
            // only use the rowtime attribute if it references a field
            switch (properties.getString(tsType)) {
                case ROWTIME_TIMESTAMPS_TYPE_VALUE_FROM_FIELD:
                    String field = properties.getString(SCHEMA + "." + i + "." + ROWTIME_TIMESTAMPS_FROM);
                    builder.field(field, dataType);
                    break;
                // other strategies would require inserting a computed timestamp into the output
                default:
                    throw new TableException(format("Unsupported rowtime type '%s' for sink" + " table schema. Currently only '%s' is supported for table sinks.", properties.getString(tsType), ROWTIME_TIMESTAMPS_TYPE_VALUE_FROM_FIELD));
            }
        }
    }
    return builder.build();
}
Also used: TableException (org.apache.flink.table.api.TableException), TableSchema (org.apache.flink.table.api.TableSchema), DataType (org.apache.flink.table.types.DataType), TableColumn (org.apache.flink.table.api.TableColumn)
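
To make the sink-side behavior concrete: given properties where column 1 is a proctime attribute and column 2 a from-field rowtime attribute, the derived sink schema drops the proctime column and replaces the rowtime column with its origin field. The property key spellings carry the same caveat as in the Example 2 sketch; they are assumptions about the legacy descriptor format.

import java.util.HashMap;
import java.util.Map;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.descriptors.DescriptorProperties;
import org.apache.flink.table.descriptors.SchemaValidator;

public class SinkSchemaSketch {
    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        props.put("schema.0.name", "user_id");
        props.put("schema.0.data-type", "BIGINT");
        // proctime attribute: ignored by the sink schema
        props.put("schema.1.name", "proc");
        props.put("schema.1.data-type", "TIMESTAMP(3)");
        props.put("schema.1.proctime", "true");
        // rowtime attribute: traced back to its origin field "event_time"
        props.put("schema.2.name", "rt");
        props.put("schema.2.data-type", "TIMESTAMP(3)");
        props.put("schema.2.rowtime.timestamps.type", "from-field");
        props.put("schema.2.rowtime.timestamps.from", "event_time");

        DescriptorProperties descriptorProperties = new DescriptorProperties();
        descriptorProperties.putProperties(props);
        TableSchema sinkSchema = SchemaValidator.deriveTableSinkSchema(descriptorProperties);
        // expected fields: user_id (BIGINT), event_time (TIMESTAMP(3))
        System.out.println(sinkSchema);
    }
}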

Aggregations

TableColumn (org.apache.flink.table.api.TableColumn): 7 usages
TableSchema (org.apache.flink.table.api.TableSchema): 7 usages
HashMap (java.util.HashMap): 4 usages
ValidationException (org.apache.flink.table.api.ValidationException): 4 usages
CatalogTableImpl (org.apache.flink.table.catalog.CatalogTableImpl): 4 usages
AlterTableSchemaOperation (org.apache.flink.table.operations.ddl.AlterTableSchemaOperation): 4 usages
SqlTableColumn (org.apache.flink.sql.parser.ddl.SqlTableColumn): 3 usages
LinkedHashMap (java.util.LinkedHashMap): 2 usages
UniqueConstraint (org.apache.flink.table.api.constraints.UniqueConstraint): 2 usages
CatalogTable (org.apache.flink.table.catalog.CatalogTable): 2 usages
ObjectIdentifier (org.apache.flink.table.catalog.ObjectIdentifier): 2 usages
NotNullConstraint (org.apache.flink.table.planner.delegation.hive.copy.HiveParserBaseSemanticAnalyzer.NotNullConstraint): 2 usages
DataType (org.apache.flink.table.types.DataType): 2 usages
HashSet (java.util.HashSet): 1 usage
SqlNode (org.apache.calcite.sql.SqlNode): 1 usage
SqlCreateHiveTable (org.apache.flink.sql.parser.hive.ddl.SqlCreateHiveTable): 1 usage
TableException (org.apache.flink.table.api.TableException): 1 usage
CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable): 1 usage
ContextResolvedTable (org.apache.flink.table.catalog.ContextResolvedTable): 1 usage
ObjectPath (org.apache.flink.table.catalog.ObjectPath): 1 usage