Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.
From class SqlToOperationConverterTest, method testCreateTableLikeNestedWatermark.
@Test
public void testCreateTableLikeNestedWatermark() {
CatalogTable catalogTable =
        CatalogTable.of(
                Schema.newBuilder()
                        .column("f0", DataTypes.INT().notNull())
                        .column("f1", DataTypes.ROW(DataTypes.FIELD("tmstmp", DataTypes.TIMESTAMP(3))))
                        .build(),
                null,
                Collections.emptyList(),
                Collections.emptyMap());
catalogManager.createTable(catalogTable, ObjectIdentifier.of("builtin", "default", "sourceTable"), false);
final String sql = "create table derivedTable(\n" + " a int,\n" + " watermark for f1.t as f1.t - interval '5' second\n" + ")\n" + "like sourceTable";
assertThatThrownBy(() -> parseAndConvert(sql)).isInstanceOf(ValidationException.class).hasMessageContaining("The rowtime attribute field 'f1.t' is not defined in the table schema," + " at line 3, column 20\n" + "Nested field 't' was not found in a composite type:" + " ROW<`tmstmp` TIMESTAMP(3)>.");
}
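The failure above comes from referencing a nested field, t, that does not exist under f1 (only tmstmp does). As a related sketch, hedged and not taken from this test: a watermark can also be declared programmatically with the same Schema builder, on a top-level TIMESTAMP(3) column (the column name ts is illustrative).

// Sketch only: Schema.Builder#watermark takes a column name and a SQL expression.
Schema schemaWithWatermark =
        Schema.newBuilder()
                .column("ts", DataTypes.TIMESTAMP(3))
                .watermark("ts", "ts - INTERVAL '5' SECOND")
                .build();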
Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.
From class SqlToOperationConverterTest, method testCreateTableWithWatermark.
@Test
public void testCreateTableWithWatermark() throws FunctionAlreadyExistException, DatabaseNotExistException {
CatalogFunction cf = new CatalogFunctionImpl(JavaUserDefinedScalarFunctions.JavaFunc5.class.getName());
catalog.createFunction(ObjectPath.fromString("default.myfunc"), cf, true);
final String sql = "create table source_table(\n" + " a int,\n" + " b bigint,\n" + " c timestamp(3),\n" + " watermark for `c` as myfunc(c, 1) - interval '5' second\n" + ") with (\n" + " 'connector.type' = 'kafka')\n";
final FlinkPlannerImpl planner = getPlannerBySqlDialect(SqlDialect.DEFAULT);
final CalciteParser parser = getParserBySqlDialect(SqlDialect.DEFAULT);
SqlNode node = parser.parse(sql);
assertThat(node).isInstanceOf(SqlCreateTable.class);
Operation operation = SqlToOperationConverter.convert(planner, catalogManager, node).get();
assertThat(operation).isInstanceOf(CreateTableOperation.class);
CreateTableOperation op = (CreateTableOperation) operation;
CatalogTable catalogTable = op.getCatalogTable();
Map<String, String> properties = catalogTable.toProperties();
Map<String, String> expected = new HashMap<>();
expected.put("schema.0.name", "a");
expected.put("schema.0.data-type", "INT");
expected.put("schema.1.name", "b");
expected.put("schema.1.data-type", "BIGINT");
expected.put("schema.2.name", "c");
expected.put("schema.2.data-type", "TIMESTAMP(3)");
expected.put("schema.watermark.0.rowtime", "c");
expected.put("schema.watermark.0.strategy.expr", "`builtin`.`default`.`myfunc`(`c`, 1) - INTERVAL '5' SECOND");
expected.put("schema.watermark.0.strategy.data-type", "TIMESTAMP(3)");
expected.put("connector.type", "kafka");
assertThat(properties).isEqualTo(expected);
}
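The expected map is the serialized catalog-table format (schema.N.*, schema.watermark.0.*, plus the connector options). A hedged sketch, assuming a Flink release where the static CatalogTable.fromProperties factory is available and symmetric with toProperties: the same map can be deserialized back into a CatalogTable.

// Sketch only: `expected` is the map assembled in the test above.
CatalogTable roundTripped = CatalogTable.fromProperties(expected);
assertThat(roundTripped.getOptions()).containsEntry("connector.type", "kafka");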
Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.
From class SqlToOperationConverterTest, method testCreateTable.
@Test
public void testCreateTable() {
final String sql = "CREATE TABLE tbl1 (\n" + " a bigint,\n" + " b varchar, \n" + " c int, \n" + " d varchar" + ")\n" + " PARTITIONED BY (a, d)\n" + " with (\n" + " 'connector' = 'kafka', \n" + " 'kafka.topic' = 'log.test'\n" + ")\n";
FlinkPlannerImpl planner = getPlannerBySqlDialect(SqlDialect.DEFAULT);
final CalciteParser parser = getParserBySqlDialect(SqlDialect.DEFAULT);
Operation operation = parse(sql, planner, parser);
assertThat(operation).isInstanceOf(CreateTableOperation.class);
CreateTableOperation op = (CreateTableOperation) operation;
CatalogTable catalogTable = op.getCatalogTable();
assertThat(catalogTable.getPartitionKeys()).hasSameElementsAs(Arrays.asList("a", "d"));
assertThat(catalogTable.getSchema().getFieldNames()).isEqualTo(new String[] { "a", "b", "c", "d" });
assertThat(catalogTable.getSchema().getFieldDataTypes())
        .isEqualTo(
                new DataType[] {
                    DataTypes.BIGINT(),
                    DataTypes.VARCHAR(Integer.MAX_VALUE),
                    DataTypes.INT(),
                    DataTypes.VARCHAR(Integer.MAX_VALUE)
                });
}
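For orientation, a minimal sketch (not part of the test) of running the same DDL through a standard streaming TableEnvironment, which registers tbl1 in the current catalog:

// Sketch only, assuming a recent Flink release.
TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
tEnv.executeSql(sql); // the CREATE TABLE string from the test above
tEnv.executeSql("DESCRIBE tbl1").print();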
Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.
From class FlinkCalciteCatalogReaderTest, method testGetFlinkPreparingTableBase.
@Test
public void testGetFlinkPreparingTableBase() {
// Mock CatalogSchemaTable.
final ObjectIdentifier objectIdentifier = ObjectIdentifier.of("a", "b", "c");
final ResolvedSchema schema = new ResolvedSchema(Collections.emptyList(), Collections.emptyList(), null);
final CatalogTable catalogTable =
        ConnectorCatalogTable.source(
                new TestTableSource(true, TableSchema.fromResolvedSchema(schema)), true);
final ResolvedCatalogTable resolvedCatalogTable = new ResolvedCatalogTable(catalogTable, schema);
CatalogSchemaTable mockTable =
        new CatalogSchemaTable(
                ContextResolvedTable.permanent(
                        objectIdentifier,
                        CatalogManagerMocks.createEmptyCatalog(),
                        resolvedCatalogTable),
                FlinkStatistic.UNKNOWN(),
                true);
rootSchemaPlus.add(tableMockName, mockTable);
Prepare.PreparingTable preparingTable = catalogReader.getTable(Collections.singletonList(tableMockName));
assertTrue(preparingTable instanceof FlinkPreparingTableBase);
}
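The mock above wraps a ResolvedCatalogTable in a permanent ContextResolvedTable. As a minimal standalone sketch (the column name id is illustrative), a ResolvedCatalogTable simply pairs an unresolved CatalogTable with its ResolvedSchema:

// Sketch only: pairing a CatalogTable with a manually built ResolvedSchema.
CatalogTable origin =
        CatalogTable.of(
                Schema.newBuilder().column("id", DataTypes.BIGINT()).build(),
                null, // no comment
                Collections.emptyList(), // no partition keys
                Collections.emptyMap()); // no options
ResolvedSchema resolved = ResolvedSchema.of(Column.physical("id", DataTypes.BIGINT()));
ResolvedCatalogTable resolvedTable = new ResolvedCatalogTable(origin, resolved);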
Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.
From class SqlToOperationConverter, method convertAlterTable.
/**
 * Converts an ALTER TABLE statement into the corresponding Operation.
 */
private Operation convertAlterTable(SqlAlterTable sqlAlterTable) {
UnresolvedIdentifier unresolvedIdentifier = UnresolvedIdentifier.of(sqlAlterTable.fullTableName());
ObjectIdentifier tableIdentifier = catalogManager.qualifyIdentifier(unresolvedIdentifier);
Optional<ContextResolvedTable> optionalCatalogTable = catalogManager.getTable(tableIdentifier);
if (!optionalCatalogTable.isPresent() || optionalCatalogTable.get().isTemporary()) {
throw new ValidationException(String.format("Table %s doesn't exist or is a temporary table.", tableIdentifier));
}
CatalogBaseTable baseTable = optionalCatalogTable.get().getTable();
if (baseTable instanceof CatalogView) {
throw new ValidationException("ALTER TABLE for a view is not allowed");
}
if (sqlAlterTable instanceof SqlAlterTableRename) {
UnresolvedIdentifier newUnresolvedIdentifier = UnresolvedIdentifier.of(((SqlAlterTableRename) sqlAlterTable).fullNewTableName());
ObjectIdentifier newTableIdentifier = catalogManager.qualifyIdentifier(newUnresolvedIdentifier);
return new AlterTableRenameOperation(tableIdentifier, newTableIdentifier);
} else if (sqlAlterTable instanceof SqlAlterTableOptions) {
return convertAlterTableOptions(tableIdentifier, (CatalogTable) baseTable, (SqlAlterTableOptions) sqlAlterTable);
} else if (sqlAlterTable instanceof SqlAlterTableReset) {
return convertAlterTableReset(tableIdentifier, (CatalogTable) baseTable, (SqlAlterTableReset) sqlAlterTable);
} else if (sqlAlterTable instanceof SqlAlterTableAddConstraint) {
SqlTableConstraint constraint = ((SqlAlterTableAddConstraint) sqlAlterTable).getConstraint();
validateTableConstraint(constraint);
TableSchema oriSchema =
        TableSchema.fromResolvedSchema(
                baseTable.getUnresolvedSchema().resolve(catalogManager.getSchemaResolver()));
// Sanity check for constraint.
TableSchema.Builder builder = TableSchemaUtils.builderWithGivenSchema(oriSchema);
if (constraint.getConstraintName().isPresent()) {
builder.primaryKey(constraint.getConstraintName().get(), constraint.getColumnNames());
} else {
builder.primaryKey(constraint.getColumnNames());
}
builder.build();
return new AlterTableAddConstraintOperation(tableIdentifier, constraint.getConstraintName().orElse(null), constraint.getColumnNames());
} else if (sqlAlterTable instanceof SqlAlterTableDropConstraint) {
SqlAlterTableDropConstraint dropConstraint = (SqlAlterTableDropConstraint) sqlAlterTable;
String constraintName = dropConstraint.getConstraintName().getSimple();
TableSchema oriSchema =
        TableSchema.fromResolvedSchema(
                baseTable.getUnresolvedSchema().resolve(catalogManager.getSchemaResolver()));
if (!oriSchema.getPrimaryKey().filter(pk -> pk.getName().equals(constraintName)).isPresent()) {
throw new ValidationException(String.format("CONSTRAINT [%s] does not exist", constraintName));
}
return new AlterTableDropConstraintOperation(tableIdentifier, constraintName);
} else if (sqlAlterTable instanceof SqlAddReplaceColumns) {
return OperationConverterUtils.convertAddReplaceColumns(
        tableIdentifier,
        (SqlAddReplaceColumns) sqlAlterTable,
        (CatalogTable) baseTable,
        flinkPlanner.getOrCreateSqlValidator());
} else if (sqlAlterTable instanceof SqlChangeColumn) {
return OperationConverterUtils.convertChangeColumn(
        tableIdentifier,
        (SqlChangeColumn) sqlAlterTable,
        (CatalogTable) baseTable,
        flinkPlanner.getOrCreateSqlValidator());
} else if (sqlAlterTable instanceof SqlAddPartitions) {
List<CatalogPartitionSpec> specs = new ArrayList<>();
List<CatalogPartition> partitions = new ArrayList<>();
SqlAddPartitions addPartitions = (SqlAddPartitions) sqlAlterTable;
for (int i = 0; i < addPartitions.getPartSpecs().size(); i++) {
specs.add(new CatalogPartitionSpec(addPartitions.getPartitionKVs(i)));
Map<String, String> props = OperationConverterUtils.extractProperties(addPartitions.getPartProps().get(i));
partitions.add(new CatalogPartitionImpl(props, null));
}
return new AddPartitionsOperation(tableIdentifier, addPartitions.ifNotExists(), specs, partitions);
} else if (sqlAlterTable instanceof SqlDropPartitions) {
SqlDropPartitions dropPartitions = (SqlDropPartitions) sqlAlterTable;
List<CatalogPartitionSpec> specs = new ArrayList<>();
for (int i = 0; i < dropPartitions.getPartSpecs().size(); i++) {
specs.add(new CatalogPartitionSpec(dropPartitions.getPartitionKVs(i)));
}
return new DropPartitionsOperation(tableIdentifier, dropPartitions.ifExists(), specs);
} else if (sqlAlterTable instanceof SqlAlterTableCompact) {
return convertAlterTableCompact(tableIdentifier, optionalCatalogTable.get(), (SqlAlterTableCompact) sqlAlterTable);
} else {
throw new ValidationException(String.format("[%s] needs to implement", sqlAlterTable.toSqlString(CalciteSqlDialect.DEFAULT)));
}
}
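For orientation, a hedged sketch of SQL statements that exercise the first three branches above, assuming a TableEnvironment tEnv with a table tbl1 already registered:

tEnv.executeSql("ALTER TABLE tbl1 RENAME TO tbl2"); // SqlAlterTableRename
tEnv.executeSql("ALTER TABLE tbl2 SET ('connector.type' = 'kafka')"); // SqlAlterTableOptions
tEnv.executeSql("ALTER TABLE tbl2 RESET ('connector.type')"); // SqlAlterTableReset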