Use of org.apache.flink.table.api.TableSchema in project flink by apache.
The class LookupJoinJsonPlanTest, method testLegacyTableSourceException.
@Test
public void testLegacyTableSourceException() {
TableSchema tableSchema =
        TableSchema.builder()
                .field("id", Types.INT)
                .field("name", Types.STRING)
                .field("age", Types.INT)
                .build();
InMemoryLookupableTableSource.createTemporaryTable(
        tEnv,
        false,
        JavaConverters.asScalaIteratorConverter(new ArrayList<Row>().iterator()).asScala().toList(),
        tableSchema,
        "LookupTable",
        true);
String sinkTableDdl =
        "CREATE TABLE MySink (\n"
                + " a int,\n"
                + " b varchar,"
                + " c bigint,"
                + " proctime timestamp(3),"
                + " rowtime timestamp(3),"
                + " id int,"
                + " name varchar,"
                + " age int"
                + ") with (\n"
                + " 'connector' = 'values',\n"
                + " 'table-sink-class' = 'DEFAULT')";
tEnv.executeSql(sinkTableDdl);
assertThatThrownBy(
        () -> util.verifyJsonPlan(
                "INSERT INTO MySink SELECT * FROM MyTable AS T JOIN LookupTable "
                        + "FOR SYSTEM_TIME AS OF T.proctime AS D ON T.a = D.id"))
        .satisfies(
                anyCauseMatches(
                        ValidationException.class,
                        "TemporalTableSourceSpec can not be serialized."));
}
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
The class TableSchemaUtilsTest, method testDropConstraint.
@Test
public void testDropConstraint() {
TableSchema originalSchema =
        TableSchema.builder()
                .field("a", DataTypes.INT().notNull())
                .field("b", DataTypes.STRING())
                .field("c", DataTypes.INT(), "a + 1")
                .field("t", DataTypes.TIMESTAMP(3))
                .primaryKey("ct1", new String[] { "a" })
                .watermark("t", "t", DataTypes.TIMESTAMP(3))
                .build();
TableSchema newSchema = TableSchemaUtils.dropConstraint(originalSchema, "ct1");
TableSchema expectedSchema =
        TableSchema.builder()
                .field("a", DataTypes.INT().notNull())
                .field("b", DataTypes.STRING())
                .field("c", DataTypes.INT(), "a + 1")
                .field("t", DataTypes.TIMESTAMP(3))
                .watermark("t", "t", DataTypes.TIMESTAMP(3))
                .build();
assertEquals(expectedSchema, newSchema);
// Drop a non-existent constraint.
exceptionRule.expect(ValidationException.class);
exceptionRule.expectMessage("Constraint ct2 to drop does not exist");
TableSchemaUtils.dropConstraint(originalSchema, "ct2");
}
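For reference, the same negative case can also be expressed with AssertJ's assertThatThrownBy, which the first snippet already uses; this is only a sketch and assumes AssertJ is on the test classpath.
// Sketch: assert the failure without the JUnit 4 ExpectedException rule.
assertThatThrownBy(() -> TableSchemaUtils.dropConstraint(originalSchema, "ct2"))
        .isInstanceOf(ValidationException.class)
        .hasMessageContaining("Constraint ct2 to drop does not exist");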
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
The class OperationConverterUtils, method changeColumn.
// Changes a column in the old table schema and returns the updated table schema.
public static TableSchema changeColumn(
        TableSchema oldSchema,
        String oldName,
        TableColumn newTableColumn,
        boolean first,
        String after) {
int oldIndex = Arrays.asList(oldSchema.getFieldNames()).indexOf(oldName);
if (oldIndex < 0) {
throw new ValidationException(String.format("Old column %s not found for CHANGE COLUMN", oldName));
}
List<TableColumn> tableColumns = oldSchema.getTableColumns();
if ((!first && after == null) || oldName.equals(after)) {
tableColumns.set(oldIndex, newTableColumn);
} else {
// need to change column position
tableColumns.remove(oldIndex);
if (first) {
tableColumns.add(0, newTableColumn);
} else {
int newIndex = tableColumns.stream().map(TableColumn::getName).collect(Collectors.toList()).indexOf(after);
if (newIndex < 0) {
throw new ValidationException(String.format("After column %s not found for CHANGE COLUMN", after));
}
tableColumns.add(newIndex + 1, newTableColumn);
}
}
TableSchema.Builder builder = TableSchema.builder();
for (TableColumn column : tableColumns) {
builder.add(column);
}
setWatermarkAndPK(builder, oldSchema);
return builder.build();
}
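A minimal usage sketch for changeColumn, assuming the TableColumn.physical factory and purely illustrative column names:
TableSchema oldSchema =
        TableSchema.builder()
                .field("a", DataTypes.INT())
                .field("b", DataTypes.STRING())
                .build();
// Replace column "b" with a BIGINT column of the same name and move it to the front.
TableColumn newColumn = TableColumn.physical("b", DataTypes.BIGINT());
TableSchema newSchema = OperationConverterUtils.changeColumn(oldSchema, "b", newColumn, true, null);
// newSchema now lists "b" (BIGINT) before "a" (INT).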
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
The class OperationConverterUtils, method convertAddReplaceColumns.
public static Operation convertAddReplaceColumns(
        ObjectIdentifier tableIdentifier,
        SqlAddReplaceColumns addReplaceColumns,
        CatalogTable catalogTable,
        SqlValidator sqlValidator) {
// This is only used by the Hive dialect at the moment. In Hive, only non-partition columns
// can be added/replaced, and users will only define non-partition columns in the new column
// list. Therefore, we require that partition columns appear last in the schema (which is in
// line with Hive). Otherwise, we won't be able to determine the column positions after the
// non-partition columns are replaced.
TableSchema oldSchema = catalogTable.getSchema();
int numPartCol = catalogTable.getPartitionKeys().size();
Set<String> lastCols =
        oldSchema.getTableColumns()
                .subList(oldSchema.getFieldCount() - numPartCol, oldSchema.getFieldCount())
                .stream()
                .map(TableColumn::getName)
                .collect(Collectors.toSet());
if (!lastCols.equals(new HashSet<>(catalogTable.getPartitionKeys()))) {
throw new ValidationException("ADD/REPLACE COLUMNS on partitioned tables requires partition columns to appear last");
}
// set non-partition columns
TableSchema.Builder builder = TableSchema.builder();
if (!addReplaceColumns.isReplace()) {
List<TableColumn> nonPartCols = oldSchema.getTableColumns().subList(0, oldSchema.getFieldCount() - numPartCol);
for (TableColumn column : nonPartCols) {
builder.add(column);
}
setWatermarkAndPK(builder, catalogTable.getSchema());
}
for (SqlNode sqlNode : addReplaceColumns.getNewColumns()) {
builder.add(toTableColumn((SqlTableColumn) sqlNode, sqlValidator));
}
// set partition columns
List<TableColumn> partCols =
        oldSchema.getTableColumns()
                .subList(oldSchema.getFieldCount() - numPartCol, oldSchema.getFieldCount());
for (TableColumn column : partCols) {
builder.add(column);
}
// set properties
Map<String, String> newProperties = new HashMap<>(catalogTable.getOptions());
newProperties.putAll(extractProperties(addReplaceColumns.getProperties()));
return new AlterTableSchemaOperation(
        tableIdentifier,
        new CatalogTableImpl(
                builder.build(),
                catalogTable.getPartitionKeys(),
                newProperties,
                catalogTable.getComment()));
}
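The check above only passes when the partition keys are the trailing columns of the schema. A standalone sketch of that invariant, with hypothetical column names:
TableSchema schema =
        TableSchema.builder()
                .field("id", DataTypes.INT())
                .field("name", DataTypes.STRING())
                .field("dt", DataTypes.STRING()) // partition column, declared last
                .build();
List<String> partitionKeys = Collections.singletonList("dt");
int numPartCol = partitionKeys.size();
Set<String> lastCols =
        schema.getTableColumns()
                .subList(schema.getFieldCount() - numPartCol, schema.getFieldCount())
                .stream()
                .map(TableColumn::getName)
                .collect(Collectors.toSet());
// true only because "dt" is declared last; otherwise convertAddReplaceColumns would throw.
boolean partitionColumnsLast = lastCols.equals(new HashSet<>(partitionKeys));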
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
The class SqlToOperationConverter, method convertAlterTable.
/**
 * Converts an ALTER TABLE statement.
 */
private Operation convertAlterTable(SqlAlterTable sqlAlterTable) {
UnresolvedIdentifier unresolvedIdentifier = UnresolvedIdentifier.of(sqlAlterTable.fullTableName());
ObjectIdentifier tableIdentifier = catalogManager.qualifyIdentifier(unresolvedIdentifier);
Optional<ContextResolvedTable> optionalCatalogTable = catalogManager.getTable(tableIdentifier);
if (!optionalCatalogTable.isPresent() || optionalCatalogTable.get().isTemporary()) {
throw new ValidationException(String.format("Table %s doesn't exist or is a temporary table.", tableIdentifier));
}
CatalogBaseTable baseTable = optionalCatalogTable.get().getTable();
if (baseTable instanceof CatalogView) {
throw new ValidationException("ALTER TABLE for a view is not allowed");
}
if (sqlAlterTable instanceof SqlAlterTableRename) {
UnresolvedIdentifier newUnresolvedIdentifier = UnresolvedIdentifier.of(((SqlAlterTableRename) sqlAlterTable).fullNewTableName());
ObjectIdentifier newTableIdentifier = catalogManager.qualifyIdentifier(newUnresolvedIdentifier);
return new AlterTableRenameOperation(tableIdentifier, newTableIdentifier);
} else if (sqlAlterTable instanceof SqlAlterTableOptions) {
return convertAlterTableOptions(tableIdentifier, (CatalogTable) baseTable, (SqlAlterTableOptions) sqlAlterTable);
} else if (sqlAlterTable instanceof SqlAlterTableReset) {
return convertAlterTableReset(tableIdentifier, (CatalogTable) baseTable, (SqlAlterTableReset) sqlAlterTable);
} else if (sqlAlterTable instanceof SqlAlterTableAddConstraint) {
SqlTableConstraint constraint = ((SqlAlterTableAddConstraint) sqlAlterTable).getConstraint();
validateTableConstraint(constraint);
TableSchema oriSchema =
        TableSchema.fromResolvedSchema(
                baseTable.getUnresolvedSchema().resolve(catalogManager.getSchemaResolver()));
// Sanity check for constraint.
TableSchema.Builder builder = TableSchemaUtils.builderWithGivenSchema(oriSchema);
if (constraint.getConstraintName().isPresent()) {
builder.primaryKey(constraint.getConstraintName().get(), constraint.getColumnNames());
} else {
builder.primaryKey(constraint.getColumnNames());
}
builder.build();
return new AlterTableAddConstraintOperation(tableIdentifier, constraint.getConstraintName().orElse(null), constraint.getColumnNames());
} else if (sqlAlterTable instanceof SqlAlterTableDropConstraint) {
SqlAlterTableDropConstraint dropConstraint = ((SqlAlterTableDropConstraint) sqlAlterTable);
String constraintName = dropConstraint.getConstraintName().getSimple();
TableSchema oriSchema =
        TableSchema.fromResolvedSchema(
                baseTable.getUnresolvedSchema().resolve(catalogManager.getSchemaResolver()));
if (!oriSchema.getPrimaryKey().filter(pk -> pk.getName().equals(constraintName)).isPresent()) {
throw new ValidationException(String.format("CONSTRAINT [%s] does not exist", constraintName));
}
return new AlterTableDropConstraintOperation(tableIdentifier, constraintName);
} else if (sqlAlterTable instanceof SqlAddReplaceColumns) {
return OperationConverterUtils.convertAddReplaceColumns(
        tableIdentifier,
        (SqlAddReplaceColumns) sqlAlterTable,
        (CatalogTable) baseTable,
        flinkPlanner.getOrCreateSqlValidator());
} else if (sqlAlterTable instanceof SqlChangeColumn) {
return OperationConverterUtils.convertChangeColumn(
        tableIdentifier,
        (SqlChangeColumn) sqlAlterTable,
        (CatalogTable) baseTable,
        flinkPlanner.getOrCreateSqlValidator());
} else if (sqlAlterTable instanceof SqlAddPartitions) {
List<CatalogPartitionSpec> specs = new ArrayList<>();
List<CatalogPartition> partitions = new ArrayList<>();
SqlAddPartitions addPartitions = (SqlAddPartitions) sqlAlterTable;
for (int i = 0; i < addPartitions.getPartSpecs().size(); i++) {
specs.add(new CatalogPartitionSpec(addPartitions.getPartitionKVs(i)));
Map<String, String> props = OperationConverterUtils.extractProperties(addPartitions.getPartProps().get(i));
partitions.add(new CatalogPartitionImpl(props, null));
}
return new AddPartitionsOperation(tableIdentifier, addPartitions.ifNotExists(), specs, partitions);
} else if (sqlAlterTable instanceof SqlDropPartitions) {
SqlDropPartitions dropPartitions = (SqlDropPartitions) sqlAlterTable;
List<CatalogPartitionSpec> specs = new ArrayList<>();
for (int i = 0; i < dropPartitions.getPartSpecs().size(); i++) {
specs.add(new CatalogPartitionSpec(dropPartitions.getPartitionKVs(i)));
}
return new DropPartitionsOperation(tableIdentifier, dropPartitions.ifExists(), specs);
} else if (sqlAlterTable instanceof SqlAlterTableCompact) {
return convertAlterTableCompact(tableIdentifier, optionalCatalogTable.get(), (SqlAlterTableCompact) sqlAlterTable);
} else {
throw new ValidationException(String.format("[%s] needs to implement", sqlAlterTable.toSqlString(CalciteSqlDialect.DEFAULT)));
}
}
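For orientation, a sketch of ALTER TABLE statements that would reach the individual branches above, assuming a TableEnvironment tEnv and a registered catalog table named orders (both hypothetical):
tEnv.executeSql("ALTER TABLE orders RENAME TO orders_v2");                // SqlAlterTableRename
tEnv.executeSql("ALTER TABLE orders_v2 SET ('sink.parallelism' = '4')");  // SqlAlterTableOptions
tEnv.executeSql("ALTER TABLE orders_v2 RESET ('sink.parallelism')");      // SqlAlterTableReset
tEnv.executeSql("ALTER TABLE orders_v2 ADD CONSTRAINT pk PRIMARY KEY (id) NOT ENFORCED"); // SqlAlterTableAddConstraint
tEnv.executeSql("ALTER TABLE orders_v2 DROP CONSTRAINT pk");              // SqlAlterTableDropConstraint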