Use of org.apache.flink.sql.parser.ddl.constraint.SqlTableConstraint in project flink by apache.
The class SqlCreateHiveTable, method unparse.
@Override
public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
    writer.keyword("CREATE");
    if (isTemporary()) {
        writer.keyword("TEMPORARY");
    }
    if (isExternal) {
        writer.keyword("EXTERNAL");
    }
    writer.keyword("TABLE");
    if (ifNotExists) {
        writer.keyword("IF NOT EXISTS");
    }
    getTableName().unparse(writer, leftPrec, rightPrec);
    // columns
    SqlWriter.Frame frame = writer.startList(SqlWriter.FrameTypeEnum.create("sds"), "(", ")");
    unparseColumns(creationContext, origColList, writer, leftPrec, rightPrec);
    for (SqlTableConstraint tableConstraint : creationContext.constraints) {
        printIndent(writer);
        tableConstraint.getConstraintNameIdentifier().ifPresent(name -> {
            writer.keyword("CONSTRAINT");
            name.unparse(writer, leftPrec, rightPrec);
        });
        writer.keyword("PRIMARY KEY");
        SqlWriter.Frame pkFrame = writer.startList("(", ")");
        tableConstraint.getColumns().unparse(writer, leftPrec, rightPrec);
        writer.endList(pkFrame);
        creationContext.pkTrait.unparse(writer, leftPrec, rightPrec);
    }
    writer.newlineAndIndent();
    writer.endList(frame);
    // table comment
    getComment().ifPresent(c -> {
        writer.keyword("COMMENT");
        c.unparse(writer, leftPrec, rightPrec);
    });
    // partitions
    if (origPartColList.size() > 0) {
        writer.newlineAndIndent();
        writer.keyword("PARTITIONED BY");
        SqlWriter.Frame partitionedByFrame = writer.startList("(", ")");
        unparseColumns(creationContext, origPartColList, writer, leftPrec, rightPrec);
        writer.newlineAndIndent();
        writer.endList(partitionedByFrame);
    }
    // row format
    unparseRowFormat(writer, leftPrec, rightPrec);
    // stored as
    unparseStoredAs(writer, leftPrec, rightPrec);
    // location
    if (location != null) {
        writer.newlineAndIndent();
        writer.keyword("LOCATION");
        location.unparse(writer, leftPrec, rightPrec);
    }
    // properties
    if (originPropList.size() > 0) {
        writer.newlineAndIndent();
        writer.keyword("TBLPROPERTIES");
        unparsePropList(originPropList, writer, leftPrec, rightPrec);
    }
}
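For context, unparse is rarely invoked directly; Calcite's SqlNode.toSqlString builds the SqlWriter and delegates to the method above. A minimal sketch, assuming a parsed SqlCreateHiveTable node is at hand (the helper class and method names here are illustrative, not part of Flink):

import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.dialect.AnsiSqlDialect;

final class HiveDdlRendering {
    // toSqlString allocates a writer internally and invokes the node's
    // unparse method, i.e. the override shown above.
    static String render(SqlNode createHiveTable) {
        return createHiveTable.toSqlString(AnsiSqlDialect.DEFAULT).getSql();
    }
}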
Use of org.apache.flink.sql.parser.ddl.constraint.SqlTableConstraint in project flink by apache.
The class SqlCreateTable, method unparse.
@Override
public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
    writer.keyword("CREATE");
    if (isTemporary()) {
        writer.keyword("TEMPORARY");
    }
    writer.keyword("TABLE");
    if (isIfNotExists()) {
        writer.keyword("IF NOT EXISTS");
    }
    tableName.unparse(writer, leftPrec, rightPrec);
    if (columnList.size() > 0 || tableConstraints.size() > 0 || watermark != null) {
        SqlWriter.Frame frame = writer.startList(SqlWriter.FrameTypeEnum.create("sds"), "(", ")");
        for (SqlNode column : columnList) {
            printIndent(writer);
            column.unparse(writer, leftPrec, rightPrec);
        }
        if (tableConstraints.size() > 0) {
            for (SqlTableConstraint constraint : tableConstraints) {
                printIndent(writer);
                constraint.unparse(writer, leftPrec, rightPrec);
            }
        }
        if (watermark != null) {
            printIndent(writer);
            watermark.unparse(writer, leftPrec, rightPrec);
        }
        writer.newlineAndIndent();
        writer.endList(frame);
    }
    if (comment != null) {
        writer.newlineAndIndent();
        writer.keyword("COMMENT");
        comment.unparse(writer, leftPrec, rightPrec);
    }
    if (this.partitionKeyList.size() > 0) {
        writer.newlineAndIndent();
        writer.keyword("PARTITIONED BY");
        SqlWriter.Frame partitionedByFrame = writer.startList("(", ")");
        this.partitionKeyList.unparse(writer, leftPrec, rightPrec);
        writer.endList(partitionedByFrame);
        writer.newlineAndIndent();
    }
    if (this.propertyList.size() > 0) {
        writer.keyword("WITH");
        SqlWriter.Frame withFrame = writer.startList("(", ")");
        for (SqlNode property : propertyList) {
            printIndent(writer);
            property.unparse(writer, leftPrec, rightPrec);
        }
        writer.newlineAndIndent();
        writer.endList(withFrame);
    }
    if (this.tableLike != null) {
        writer.newlineAndIndent();
        this.tableLike.unparse(writer, leftPrec, rightPrec);
    }
}
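To make the clause order above concrete, here is a representative statement that exercises every branch of this method: column list, table constraint, watermark, comment, partition keys, WITH options, and a LIKE clause. The table names and the connector option are placeholders, not taken from Flink:

// Illustrative input only; names and the option key/value are placeholders.
String ddl =
        "CREATE TABLE orders (\n"
                + "  order_id BIGINT,\n"
                + "  order_time TIMESTAMP(3),\n"
                + "  PRIMARY KEY (order_id) NOT ENFORCED,\n"
                + "  WATERMARK FOR order_time AS order_time - INTERVAL '5' SECOND\n"
                + ") COMMENT 'order events'\n"
                + "PARTITIONED BY (order_id)\n"
                + "WITH ('connector' = 'kafka')\n"
                + "LIKE base_orders (EXCLUDING OPTIONS)";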
Use of org.apache.flink.sql.parser.ddl.constraint.SqlTableConstraint in project flink by apache.
The class SqlCreateTable, method getFullConstraints.
/** Returns the column constraints plus the table constraints. */
public List<SqlTableConstraint> getFullConstraints() {
    List<SqlTableConstraint> ret = new ArrayList<>();
    this.columnList.forEach(column -> {
        SqlTableColumn tableColumn = (SqlTableColumn) column;
        if (tableColumn instanceof SqlRegularColumn) {
            SqlRegularColumn regularColumn = (SqlRegularColumn) tableColumn;
            // ifPresent consumes the optional column-level constraint;
            // map would compute and then discard an Optional<Boolean>.
            regularColumn.getConstraint().ifPresent(ret::add);
        }
    });
    ret.addAll(this.tableConstraints);
    return ret;
}
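The combined list is what downstream conversion code scans for a primary key (see SqlCreateTableConverter below). A small usage sketch; the wrapper class and method name are illustrative:

import java.util.Optional;
import org.apache.flink.sql.parser.ddl.SqlCreateTable;
import org.apache.flink.sql.parser.ddl.constraint.SqlTableConstraint;

final class ConstraintLookup {
    // Finds the (at most one) primary-key constraint, whether it was
    // declared on a column or at table level.
    static Optional<SqlTableConstraint> findPrimaryKey(SqlCreateTable createTable) {
        return createTable.getFullConstraints().stream()
                .filter(SqlTableConstraint::isPrimaryKey)
                .findAny();
    }
}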
Use of org.apache.flink.sql.parser.ddl.constraint.SqlTableConstraint in project flink by apache.
The class SqlToOperationConverter, method convertAlterTable.
/** Converts an ALTER TABLE statement. */
private Operation convertAlterTable(SqlAlterTable sqlAlterTable) {
    UnresolvedIdentifier unresolvedIdentifier = UnresolvedIdentifier.of(sqlAlterTable.fullTableName());
    ObjectIdentifier tableIdentifier = catalogManager.qualifyIdentifier(unresolvedIdentifier);
    Optional<ContextResolvedTable> optionalCatalogTable = catalogManager.getTable(tableIdentifier);
    if (!optionalCatalogTable.isPresent() || optionalCatalogTable.get().isTemporary()) {
        throw new ValidationException(String.format("Table %s doesn't exist or is a temporary table.", tableIdentifier));
    }
    CatalogBaseTable baseTable = optionalCatalogTable.get().getTable();
    if (baseTable instanceof CatalogView) {
        throw new ValidationException("ALTER TABLE for a view is not allowed");
    }
    if (sqlAlterTable instanceof SqlAlterTableRename) {
        UnresolvedIdentifier newUnresolvedIdentifier = UnresolvedIdentifier.of(((SqlAlterTableRename) sqlAlterTable).fullNewTableName());
        ObjectIdentifier newTableIdentifier = catalogManager.qualifyIdentifier(newUnresolvedIdentifier);
        return new AlterTableRenameOperation(tableIdentifier, newTableIdentifier);
    } else if (sqlAlterTable instanceof SqlAlterTableOptions) {
        return convertAlterTableOptions(tableIdentifier, (CatalogTable) baseTable, (SqlAlterTableOptions) sqlAlterTable);
    } else if (sqlAlterTable instanceof SqlAlterTableReset) {
        return convertAlterTableReset(tableIdentifier, (CatalogTable) baseTable, (SqlAlterTableReset) sqlAlterTable);
    } else if (sqlAlterTable instanceof SqlAlterTableAddConstraint) {
        SqlTableConstraint constraint = ((SqlAlterTableAddConstraint) sqlAlterTable).getConstraint();
        validateTableConstraint(constraint);
        TableSchema oriSchema = TableSchema.fromResolvedSchema(baseTable.getUnresolvedSchema().resolve(catalogManager.getSchemaResolver()));
        // Sanity check for constraint.
        TableSchema.Builder builder = TableSchemaUtils.builderWithGivenSchema(oriSchema);
        if (constraint.getConstraintName().isPresent()) {
            builder.primaryKey(constraint.getConstraintName().get(), constraint.getColumnNames());
        } else {
            builder.primaryKey(constraint.getColumnNames());
        }
        builder.build();
        return new AlterTableAddConstraintOperation(tableIdentifier, constraint.getConstraintName().orElse(null), constraint.getColumnNames());
    } else if (sqlAlterTable instanceof SqlAlterTableDropConstraint) {
        SqlAlterTableDropConstraint dropConstraint = (SqlAlterTableDropConstraint) sqlAlterTable;
        String constraintName = dropConstraint.getConstraintName().getSimple();
        TableSchema oriSchema = TableSchema.fromResolvedSchema(baseTable.getUnresolvedSchema().resolve(catalogManager.getSchemaResolver()));
        if (!oriSchema.getPrimaryKey().filter(pk -> pk.getName().equals(constraintName)).isPresent()) {
            throw new ValidationException(String.format("CONSTRAINT [%s] does not exist", constraintName));
        }
        return new AlterTableDropConstraintOperation(tableIdentifier, constraintName);
    } else if (sqlAlterTable instanceof SqlAddReplaceColumns) {
        return OperationConverterUtils.convertAddReplaceColumns(tableIdentifier, (SqlAddReplaceColumns) sqlAlterTable, (CatalogTable) baseTable, flinkPlanner.getOrCreateSqlValidator());
    } else if (sqlAlterTable instanceof SqlChangeColumn) {
        return OperationConverterUtils.convertChangeColumn(tableIdentifier, (SqlChangeColumn) sqlAlterTable, (CatalogTable) baseTable, flinkPlanner.getOrCreateSqlValidator());
    } else if (sqlAlterTable instanceof SqlAddPartitions) {
        List<CatalogPartitionSpec> specs = new ArrayList<>();
        List<CatalogPartition> partitions = new ArrayList<>();
        SqlAddPartitions addPartitions = (SqlAddPartitions) sqlAlterTable;
        for (int i = 0; i < addPartitions.getPartSpecs().size(); i++) {
            specs.add(new CatalogPartitionSpec(addPartitions.getPartitionKVs(i)));
            Map<String, String> props = OperationConverterUtils.extractProperties(addPartitions.getPartProps().get(i));
            partitions.add(new CatalogPartitionImpl(props, null));
        }
        return new AddPartitionsOperation(tableIdentifier, addPartitions.ifNotExists(), specs, partitions);
    } else if (sqlAlterTable instanceof SqlDropPartitions) {
        SqlDropPartitions dropPartitions = (SqlDropPartitions) sqlAlterTable;
        List<CatalogPartitionSpec> specs = new ArrayList<>();
        for (int i = 0; i < dropPartitions.getPartSpecs().size(); i++) {
            specs.add(new CatalogPartitionSpec(dropPartitions.getPartitionKVs(i)));
        }
        return new DropPartitionsOperation(tableIdentifier, dropPartitions.ifExists(), specs);
    } else if (sqlAlterTable instanceof SqlAlterTableCompact) {
        return convertAlterTableCompact(tableIdentifier, optionalCatalogTable.get(), (SqlAlterTableCompact) sqlAlterTable);
    } else {
        throw new ValidationException(String.format("[%s] needs to implement", sqlAlterTable.toSqlString(CalciteSqlDialect.DEFAULT)));
    }
}
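Each branch above handles one ALTER TABLE variant. A few representative inputs; all table, column, constraint, and option names are placeholders:

final class AlterTableExamples {
    // Representative statements for the dispatch above; names are placeholders.
    static final String[] STATEMENTS = {
        "ALTER TABLE t1 RENAME TO t2",                                    // SqlAlterTableRename
        "ALTER TABLE t1 SET ('k' = 'v')",                                 // SqlAlterTableOptions
        "ALTER TABLE t1 RESET ('k')",                                     // SqlAlterTableReset
        "ALTER TABLE t1 ADD CONSTRAINT ct PRIMARY KEY (id) NOT ENFORCED", // SqlAlterTableAddConstraint
        "ALTER TABLE t1 DROP CONSTRAINT ct",                              // SqlAlterTableDropConstraint
        "ALTER TABLE t1 COMPACT",                                         // SqlAlterTableCompact
    };
}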
Use of org.apache.flink.sql.parser.ddl.constraint.SqlTableConstraint in project flink by apache.
The class SqlCreateTableConverter, method createCatalogTable.
private CatalogTable createCatalogTable(SqlCreateTable sqlCreateTable) {
    final TableSchema sourceTableSchema;
    final List<String> sourcePartitionKeys;
    final List<SqlTableLike.SqlTableLikeOption> likeOptions;
    final Map<String, String> sourceProperties;
    if (sqlCreateTable.getTableLike().isPresent()) {
        SqlTableLike sqlTableLike = sqlCreateTable.getTableLike().get();
        CatalogTable table = lookupLikeSourceTable(sqlTableLike);
        sourceTableSchema = TableSchema.fromResolvedSchema(table.getUnresolvedSchema().resolve(catalogManager.getSchemaResolver()));
        sourcePartitionKeys = table.getPartitionKeys();
        likeOptions = sqlTableLike.getOptions();
        sourceProperties = table.getOptions();
    } else {
        sourceTableSchema = TableSchema.builder().build();
        sourcePartitionKeys = Collections.emptyList();
        likeOptions = Collections.emptyList();
        sourceProperties = Collections.emptyMap();
    }
    Map<SqlTableLike.FeatureOption, SqlTableLike.MergingStrategy> mergingStrategies = mergeTableLikeUtil.computeMergingStrategies(likeOptions);
    Map<String, String> mergedOptions = mergeOptions(sqlCreateTable, sourceProperties, mergingStrategies);
    Optional<SqlTableConstraint> primaryKey = sqlCreateTable.getFullConstraints().stream().filter(SqlTableConstraint::isPrimaryKey).findAny();
    TableSchema mergedSchema = mergeTableLikeUtil.mergeTables(mergingStrategies, sourceTableSchema, sqlCreateTable.getColumnList().getList(), sqlCreateTable.getWatermark().map(Collections::singletonList).orElseGet(Collections::emptyList), primaryKey.orElse(null));
    List<String> partitionKeys = mergePartitions(sourcePartitionKeys, sqlCreateTable.getPartitionKeyList(), mergingStrategies);
    verifyPartitioningColumnsExist(mergedSchema, partitionKeys);
    String tableComment = sqlCreateTable.getComment().map(comment -> comment.getNlsString().getValue()).orElse(null);
    return new CatalogTableImpl(mergedSchema, partitionKeys, mergedOptions, tableComment);
}
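The per-feature merging strategies computed above are steered by the options inside the LIKE clause. An illustrative statement shape; the table names, option, and the particular option combination are placeholders:

// Illustrative only; the LIKE options below drive computeMergingStrategies.
String ddl =
        "CREATE TABLE derived_orders (\n"
                + "  extra_col STRING\n"
                + ") WITH ('connector' = 'kafka')\n"
                + "LIKE base_orders (\n"
                + "  INCLUDING ALL\n"
                + "  OVERWRITING OPTIONS\n"
                + "  EXCLUDING PARTITIONS\n"
                + ")";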