Example 76 with TableSchema

use of org.apache.flink.table.api.TableSchema in project flink by apache.

the class LookupJoinJsonPlanTest method testLegacyTableSourceException.

@Test
public void testLegacyTableSourceException() {
    TableSchema tableSchema = TableSchema.builder()
            .field("id", Types.INT)
            .field("name", Types.STRING)
            .field("age", Types.INT)
            .build();
    InMemoryLookupableTableSource.createTemporaryTable(
            tEnv,
            false,
            JavaConverters.asScalaIteratorConverter(new ArrayList<Row>().iterator()).asScala().toList(),
            tableSchema,
            "LookupTable",
            true);
    String sinkTableDdl = "CREATE TABLE MySink (\n"
            + "  a int,\n"
            + "  b varchar,"
            + "  c bigint,"
            + "  proctime timestamp(3),"
            + "  rowtime timestamp(3),"
            + "  id int,"
            + "  name varchar,"
            + "  age int"
            + ") with (\n"
            + "  'connector' = 'values',\n"
            + "  'table-sink-class' = 'DEFAULT')";
    tEnv.executeSql(sinkTableDdl);
    assertThatThrownBy(() -> util.verifyJsonPlan(
                    "INSERT INTO MySink SELECT * FROM MyTable AS T JOIN LookupTable "
                            + "FOR SYSTEM_TIME AS OF T.proctime AS D ON T.a = D.id"))
            .satisfies(anyCauseMatches(
                    ValidationException.class,
                    "TemporalTableSourceSpec can not be serialized."));
}
Also used : ValidationException(org.apache.flink.table.api.ValidationException) TableSchema(org.apache.flink.table.api.TableSchema) Row(org.apache.flink.types.Row) Test(org.junit.Test)
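
For comparison, the same three-column schema can be declared with the DataTypes factory instead of the legacy Types constants. A minimal sketch using the same public TableSchema builder (a broadly equivalent declaration, not part of the original test):

    // Same fields as above, declared via DataTypes (nullable by default).
    TableSchema tableSchema = TableSchema.builder()
            .field("id", DataTypes.INT())
            .field("name", DataTypes.STRING())
            .field("age", DataTypes.INT())
            .build();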

Example 77 with TableSchema

use of org.apache.flink.table.api.TableSchema in project flink by apache.

the class TableSchemaUtilsTest method testDropConstraint.

@Test
public void testDropConstraint() {
    TableSchema originalSchema = TableSchema.builder()
            .field("a", DataTypes.INT().notNull())
            .field("b", DataTypes.STRING())
            .field("c", DataTypes.INT(), "a + 1")
            .field("t", DataTypes.TIMESTAMP(3))
            .primaryKey("ct1", new String[] { "a" })
            .watermark("t", "t", DataTypes.TIMESTAMP(3))
            .build();
    TableSchema newSchema = TableSchemaUtils.dropConstraint(originalSchema, "ct1");
    TableSchema expectedSchema = TableSchema.builder()
            .field("a", DataTypes.INT().notNull())
            .field("b", DataTypes.STRING())
            .field("c", DataTypes.INT(), "a + 1")
            .field("t", DataTypes.TIMESTAMP(3))
            .watermark("t", "t", DataTypes.TIMESTAMP(3))
            .build();
    assertEquals(expectedSchema, newSchema);
    // Drop a non-existent constraint.
    exceptionRule.expect(ValidationException.class);
    exceptionRule.expectMessage("Constraint ct2 to drop does not exist");
    TableSchemaUtils.dropConstraint(originalSchema, "ct2");
}
Also used : TableSchema(org.apache.flink.table.api.TableSchema) Test(org.junit.Test)
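
A standalone usage sketch of the same utility (the field and constraint names here are illustrative, not taken from the Flink code base):

    TableSchema schema = TableSchema.builder()
            .field("a", DataTypes.INT().notNull())
            .field("b", DataTypes.STRING())
            .primaryKey("pk", new String[] { "a" })
            .build();
    // Removes only the named primary-key constraint; the field definitions are kept.
    TableSchema withoutPk = TableSchemaUtils.dropConstraint(schema, "pk");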

Example 78 with TableSchema

use of org.apache.flink.table.api.TableSchema in project flink by apache.

the class OperationConverterUtils method changeColumn.

// Change a column in the old table schema and return the updated table schema.
public static TableSchema changeColumn(TableSchema oldSchema, String oldName, TableColumn newTableColumn, boolean first, String after) {
    int oldIndex = Arrays.asList(oldSchema.getFieldNames()).indexOf(oldName);
    if (oldIndex < 0) {
        throw new ValidationException(String.format("Old column %s not found for CHANGE COLUMN", oldName));
    }
    List<TableColumn> tableColumns = oldSchema.getTableColumns();
    if ((!first && after == null) || oldName.equals(after)) {
        tableColumns.set(oldIndex, newTableColumn);
    } else {
        // need to change column position
        tableColumns.remove(oldIndex);
        if (first) {
            tableColumns.add(0, newTableColumn);
        } else {
            int newIndex = tableColumns.stream()
                    .map(TableColumn::getName)
                    .collect(Collectors.toList())
                    .indexOf(after);
            if (newIndex < 0) {
                throw new ValidationException(String.format("After column %s not found for CHANGE COLUMN", after));
            }
            tableColumns.add(newIndex + 1, newTableColumn);
        }
    }
    TableSchema.Builder builder = TableSchema.builder();
    for (TableColumn column : tableColumns) {
        builder.add(column);
    }
    setWatermarkAndPK(builder, oldSchema);
    return builder.build();
}
Also used : ValidationException(org.apache.flink.table.api.ValidationException) TableSchema(org.apache.flink.table.api.TableSchema) TableColumn(org.apache.flink.table.api.TableColumn) SqlTableColumn(org.apache.flink.sql.parser.ddl.SqlTableColumn)
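
A hedged usage sketch of changeColumn (the column names are illustrative, and TableColumn.physical is assumed to be available, as in recent Flink releases):

    TableSchema oldSchema = TableSchema.builder()
            .field("a", DataTypes.INT())
            .field("b", DataTypes.STRING())
            .build();
    // Rename "b" and move it to the front: first = true, after = null.
    TableColumn newColumn = TableColumn.physical("b_renamed", DataTypes.STRING());
    TableSchema updated = OperationConverterUtils.changeColumn(oldSchema, "b", newColumn, true, null);
    // Resulting field order: b_renamed, a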

Example 79 with TableSchema

use of org.apache.flink.table.api.TableSchema in project flink by apache.

the class OperationConverterUtils method convertAddReplaceColumns.

public static Operation convertAddReplaceColumns(ObjectIdentifier tableIdentifier, SqlAddReplaceColumns addReplaceColumns, CatalogTable catalogTable, SqlValidator sqlValidator) {
    // This is only used by the Hive dialect at the moment. In Hive, only non-partition
    // columns can be added/replaced, and users will only define non-partition columns in
    // the new column list. Therefore, we require that partition columns appear last in
    // the schema (which is in line with Hive). Otherwise, we would not be able to
    // determine the column positions after the non-partition columns are replaced.
    TableSchema oldSchema = catalogTable.getSchema();
    int numPartCol = catalogTable.getPartitionKeys().size();
    Set<String> lastCols = oldSchema.getTableColumns()
            .subList(oldSchema.getFieldCount() - numPartCol, oldSchema.getFieldCount())
            .stream()
            .map(TableColumn::getName)
            .collect(Collectors.toSet());
    if (!lastCols.equals(new HashSet<>(catalogTable.getPartitionKeys()))) {
        throw new ValidationException("ADD/REPLACE COLUMNS on partitioned tables requires partition columns to appear last");
    }
    // set non-partition columns
    TableSchema.Builder builder = TableSchema.builder();
    if (!addReplaceColumns.isReplace()) {
        List<TableColumn> nonPartCols = oldSchema.getTableColumns().subList(0, oldSchema.getFieldCount() - numPartCol);
        for (TableColumn column : nonPartCols) {
            builder.add(column);
        }
        setWatermarkAndPK(builder, catalogTable.getSchema());
    }
    for (SqlNode sqlNode : addReplaceColumns.getNewColumns()) {
        builder.add(toTableColumn((SqlTableColumn) sqlNode, sqlValidator));
    }
    // set partition columns
    List<TableColumn> partCols = oldSchema.getTableColumns()
            .subList(oldSchema.getFieldCount() - numPartCol, oldSchema.getFieldCount());
    for (TableColumn column : partCols) {
        builder.add(column);
    }
    // set properties
    Map<String, String> newProperties = new HashMap<>(catalogTable.getOptions());
    newProperties.putAll(extractProperties(addReplaceColumns.getProperties()));
    return new AlterTableSchemaOperation(
            tableIdentifier,
            new CatalogTableImpl(
                    builder.build(),
                    catalogTable.getPartitionKeys(),
                    newProperties,
                    catalogTable.getComment()));
}
Also used : ValidationException(org.apache.flink.table.api.ValidationException) TableSchema(org.apache.flink.table.api.TableSchema) HashMap(java.util.HashMap) TableColumn(org.apache.flink.table.api.TableColumn) SqlTableColumn(org.apache.flink.sql.parser.ddl.SqlTableColumn) SqlTableColumn(org.apache.flink.sql.parser.ddl.SqlTableColumn) CatalogTableImpl(org.apache.flink.table.catalog.CatalogTableImpl) AlterTableSchemaOperation(org.apache.flink.table.operations.ddl.AlterTableSchemaOperation) HashSet(java.util.HashSet) SqlNode(org.apache.calcite.sql.SqlNode)
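
The precondition above (partition columns must come last) can be exercised in isolation. A minimal sketch with illustrative column names, assuming the usual java.util and Collectors imports:

    TableSchema schema = TableSchema.builder()
            .field("user", DataTypes.STRING())
            .field("cnt", DataTypes.BIGINT())
            .field("dt", DataTypes.STRING()) // partition column, declared last
            .build();
    List<String> partitionKeys = Collections.singletonList("dt");
    int numPartCol = partitionKeys.size();
    Set<String> lastCols = schema.getTableColumns()
            .subList(schema.getFieldCount() - numPartCol, schema.getFieldCount())
            .stream()
            .map(TableColumn::getName)
            .collect(Collectors.toSet());
    // true here; false would trigger the ValidationException above
    boolean partitionColumnsLast = lastCols.equals(new HashSet<>(partitionKeys));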

Example 80 with TableSchema

use of org.apache.flink.table.api.TableSchema in project flink by apache.

the class SqlToOperationConverter method convertAlterTable.

/**
 * Converts an ALTER TABLE statement.
 */
private Operation convertAlterTable(SqlAlterTable sqlAlterTable) {
    UnresolvedIdentifier unresolvedIdentifier = UnresolvedIdentifier.of(sqlAlterTable.fullTableName());
    ObjectIdentifier tableIdentifier = catalogManager.qualifyIdentifier(unresolvedIdentifier);
    Optional<ContextResolvedTable> optionalCatalogTable = catalogManager.getTable(tableIdentifier);
    if (!optionalCatalogTable.isPresent() || optionalCatalogTable.get().isTemporary()) {
        throw new ValidationException(String.format("Table %s doesn't exist or is a temporary table.", tableIdentifier));
    }
    CatalogBaseTable baseTable = optionalCatalogTable.get().getTable();
    if (baseTable instanceof CatalogView) {
        throw new ValidationException("ALTER TABLE for a view is not allowed");
    }
    if (sqlAlterTable instanceof SqlAlterTableRename) {
        UnresolvedIdentifier newUnresolvedIdentifier = UnresolvedIdentifier.of(((SqlAlterTableRename) sqlAlterTable).fullNewTableName());
        ObjectIdentifier newTableIdentifier = catalogManager.qualifyIdentifier(newUnresolvedIdentifier);
        return new AlterTableRenameOperation(tableIdentifier, newTableIdentifier);
    } else if (sqlAlterTable instanceof SqlAlterTableOptions) {
        return convertAlterTableOptions(tableIdentifier, (CatalogTable) baseTable, (SqlAlterTableOptions) sqlAlterTable);
    } else if (sqlAlterTable instanceof SqlAlterTableReset) {
        return convertAlterTableReset(tableIdentifier, (CatalogTable) baseTable, (SqlAlterTableReset) sqlAlterTable);
    } else if (sqlAlterTable instanceof SqlAlterTableAddConstraint) {
        SqlTableConstraint constraint = ((SqlAlterTableAddConstraint) sqlAlterTable).getConstraint();
        validateTableConstraint(constraint);
        TableSchema oriSchema = TableSchema.fromResolvedSchema(
                baseTable.getUnresolvedSchema().resolve(catalogManager.getSchemaResolver()));
        // Sanity check for constraint.
        TableSchema.Builder builder = TableSchemaUtils.builderWithGivenSchema(oriSchema);
        if (constraint.getConstraintName().isPresent()) {
            builder.primaryKey(constraint.getConstraintName().get(), constraint.getColumnNames());
        } else {
            builder.primaryKey(constraint.getColumnNames());
        }
        builder.build();
        return new AlterTableAddConstraintOperation(
                tableIdentifier,
                constraint.getConstraintName().orElse(null),
                constraint.getColumnNames());
    } else if (sqlAlterTable instanceof SqlAlterTableDropConstraint) {
        SqlAlterTableDropConstraint dropConstraint = ((SqlAlterTableDropConstraint) sqlAlterTable);
        String constraintName = dropConstraint.getConstraintName().getSimple();
        TableSchema oriSchema = TableSchema.fromResolvedSchema(
                baseTable.getUnresolvedSchema().resolve(catalogManager.getSchemaResolver()));
        if (!oriSchema.getPrimaryKey().filter(pk -> pk.getName().equals(constraintName)).isPresent()) {
            throw new ValidationException(String.format("CONSTRAINT [%s] does not exist", constraintName));
        }
        return new AlterTableDropConstraintOperation(tableIdentifier, constraintName);
    } else if (sqlAlterTable instanceof SqlAddReplaceColumns) {
        return OperationConverterUtils.convertAddReplaceColumns(
                tableIdentifier,
                (SqlAddReplaceColumns) sqlAlterTable,
                (CatalogTable) baseTable,
                flinkPlanner.getOrCreateSqlValidator());
    } else if (sqlAlterTable instanceof SqlChangeColumn) {
        return OperationConverterUtils.convertChangeColumn(
                tableIdentifier,
                (SqlChangeColumn) sqlAlterTable,
                (CatalogTable) baseTable,
                flinkPlanner.getOrCreateSqlValidator());
    } else if (sqlAlterTable instanceof SqlAddPartitions) {
        List<CatalogPartitionSpec> specs = new ArrayList<>();
        List<CatalogPartition> partitions = new ArrayList<>();
        SqlAddPartitions addPartitions = (SqlAddPartitions) sqlAlterTable;
        for (int i = 0; i < addPartitions.getPartSpecs().size(); i++) {
            specs.add(new CatalogPartitionSpec(addPartitions.getPartitionKVs(i)));
            Map<String, String> props = OperationConverterUtils.extractProperties(addPartitions.getPartProps().get(i));
            partitions.add(new CatalogPartitionImpl(props, null));
        }
        return new AddPartitionsOperation(tableIdentifier, addPartitions.ifNotExists(), specs, partitions);
    } else if (sqlAlterTable instanceof SqlDropPartitions) {
        SqlDropPartitions dropPartitions = (SqlDropPartitions) sqlAlterTable;
        List<CatalogPartitionSpec> specs = new ArrayList<>();
        for (int i = 0; i < dropPartitions.getPartSpecs().size(); i++) {
            specs.add(new CatalogPartitionSpec(dropPartitions.getPartitionKVs(i)));
        }
        return new DropPartitionsOperation(tableIdentifier, dropPartitions.ifExists(), specs);
    } else if (sqlAlterTable instanceof SqlAlterTableCompact) {
        return convertAlterTableCompact(tableIdentifier, optionalCatalogTable.get(), (SqlAlterTableCompact) sqlAlterTable);
    } else {
        throw new ValidationException(String.format("[%s] needs to implement", sqlAlterTable.toSqlString(CalciteSqlDialect.DEFAULT)));
    }
}
Also used : ValidationException(org.apache.flink.table.api.ValidationException) TableSchema(org.apache.flink.table.api.TableSchema) SqlAlterTableReset(org.apache.flink.sql.parser.ddl.SqlAlterTableReset) ArrayList(java.util.ArrayList) AlterTableAddConstraintOperation(org.apache.flink.table.operations.ddl.AlterTableAddConstraintOperation) SqlAlterTableRename(org.apache.flink.sql.parser.ddl.SqlAlterTableRename) AlterTableDropConstraintOperation(org.apache.flink.table.operations.ddl.AlterTableDropConstraintOperation) SqlAddReplaceColumns(org.apache.flink.sql.parser.ddl.SqlAddReplaceColumns) SqlAddPartitions(org.apache.flink.sql.parser.ddl.SqlAddPartitions) SqlAlterTableAddConstraint(org.apache.flink.sql.parser.ddl.SqlAlterTableAddConstraint) SqlAlterTableCompact(org.apache.flink.sql.parser.ddl.SqlAlterTableCompact) ArrayList(java.util.ArrayList) List(java.util.List) SqlNodeList(org.apache.calcite.sql.SqlNodeList) SqlTableConstraint(org.apache.flink.sql.parser.ddl.constraint.SqlTableConstraint) CatalogView(org.apache.flink.table.catalog.CatalogView) AddPartitionsOperation(org.apache.flink.table.operations.ddl.AddPartitionsOperation) CatalogPartitionSpec(org.apache.flink.table.catalog.CatalogPartitionSpec) ObjectIdentifier(org.apache.flink.table.catalog.ObjectIdentifier) DropPartitionsOperation(org.apache.flink.table.operations.ddl.DropPartitionsOperation) CatalogBaseTable(org.apache.flink.table.catalog.CatalogBaseTable) UnresolvedIdentifier(org.apache.flink.table.catalog.UnresolvedIdentifier) CatalogTable(org.apache.flink.table.catalog.CatalogTable) ResolvedCatalogTable(org.apache.flink.table.catalog.ResolvedCatalogTable) SqlAlterTableAddConstraint(org.apache.flink.sql.parser.ddl.SqlAlterTableAddConstraint) SqlTableConstraint(org.apache.flink.sql.parser.ddl.constraint.SqlTableConstraint) RelHint(org.apache.calcite.rel.hint.RelHint) SqlAlterTableDropConstraint(org.apache.flink.sql.parser.ddl.SqlAlterTableDropConstraint) SqlChangeColumn(org.apache.flink.sql.parser.ddl.SqlChangeColumn) SqlAlterTableOptions(org.apache.flink.sql.parser.ddl.SqlAlterTableOptions) AlterTableRenameOperation(org.apache.flink.table.operations.ddl.AlterTableRenameOperation) SqlAlterTableDropConstraint(org.apache.flink.sql.parser.ddl.SqlAlterTableDropConstraint) ContextResolvedTable(org.apache.flink.table.catalog.ContextResolvedTable) Map(java.util.Map) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) CatalogPartitionImpl(org.apache.flink.table.catalog.CatalogPartitionImpl) SqlDropPartitions(org.apache.flink.sql.parser.ddl.SqlDropPartitions)
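
As a point of reference, the simplest branch above (RENAME) reduces to building an AlterTableRenameOperation from two qualified identifiers. A minimal sketch with hypothetical catalog, database, and table names:

    // Equivalent of: ALTER TABLE orders RENAME TO orders_v2
    ObjectIdentifier from = ObjectIdentifier.of("default_catalog", "default_database", "orders");
    ObjectIdentifier to = ObjectIdentifier.of("default_catalog", "default_database", "orders_v2");
    AlterTableRenameOperation op = new AlterTableRenameOperation(from, to);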

Aggregations

TableSchema (org.apache.flink.table.api.TableSchema) 86
Test (org.junit.Test) 54
HashMap (java.util.HashMap) 26
CatalogTableImpl (org.apache.flink.table.catalog.CatalogTableImpl) 21
SqlNode (org.apache.calcite.sql.SqlNode) 19
ObjectPath (org.apache.flink.table.catalog.ObjectPath) 19
CatalogTable (org.apache.flink.table.catalog.CatalogTable) 18
DataType (org.apache.flink.table.types.DataType) 16
ValidationException (org.apache.flink.table.api.ValidationException) 14
TableColumn (org.apache.flink.table.api.TableColumn) 10
UniqueConstraint (org.apache.flink.table.api.constraints.UniqueConstraint) 10
ArrayList (java.util.ArrayList) 9
List (java.util.List) 9
Map (java.util.Map) 9
FeatureOption (org.apache.flink.sql.parser.ddl.SqlTableLike.FeatureOption) 9
MergingStrategy (org.apache.flink.sql.parser.ddl.SqlTableLike.MergingStrategy) 9
CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable) 8
ObjectIdentifier (org.apache.flink.table.catalog.ObjectIdentifier) 8
Arrays (java.util.Arrays) 7
Configuration (org.apache.flink.configuration.Configuration) 7