use of org.apache.flink.table.api.TableColumn in project flink by splunk.
the class OperationConverterUtils method convertAddReplaceColumns.
public static Operation convertAddReplaceColumns(
        ObjectIdentifier tableIdentifier,
        SqlAddReplaceColumns addReplaceColumns,
        CatalogTable catalogTable,
        SqlValidator sqlValidator) {
    // This is only used by the Hive dialect at the moment. In Hive, only non-partition columns
    // can be added/replaced, and users will only define non-partition columns in the new column
    // list. Therefore, we require that partition columns appear last in the schema (which is in
    // line with Hive). Otherwise, we won't be able to determine the column positions after the
    // non-partition columns are replaced.
    TableSchema oldSchema = catalogTable.getSchema();
    int numPartCol = catalogTable.getPartitionKeys().size();
    Set<String> lastCols =
            oldSchema
                    .getTableColumns()
                    .subList(oldSchema.getFieldCount() - numPartCol, oldSchema.getFieldCount())
                    .stream()
                    .map(TableColumn::getName)
                    .collect(Collectors.toSet());
    if (!lastCols.equals(new HashSet<>(catalogTable.getPartitionKeys()))) {
        throw new ValidationException(
                "ADD/REPLACE COLUMNS on partitioned tables requires partition columns to appear last");
    }
    // set non-partition columns
    TableSchema.Builder builder = TableSchema.builder();
    if (!addReplaceColumns.isReplace()) {
        List<TableColumn> nonPartCols =
                oldSchema.getTableColumns().subList(0, oldSchema.getFieldCount() - numPartCol);
        for (TableColumn column : nonPartCols) {
            builder.add(column);
        }
        setWatermarkAndPK(builder, catalogTable.getSchema());
    }
    for (SqlNode sqlNode : addReplaceColumns.getNewColumns()) {
        builder.add(toTableColumn((SqlTableColumn) sqlNode, sqlValidator));
    }
    // set partition columns
    List<TableColumn> partCols =
            oldSchema
                    .getTableColumns()
                    .subList(oldSchema.getFieldCount() - numPartCol, oldSchema.getFieldCount());
    for (TableColumn column : partCols) {
        builder.add(column);
    }
    // set properties
    Map<String, String> newProperties = new HashMap<>(catalogTable.getOptions());
    newProperties.putAll(extractProperties(addReplaceColumns.getProperties()));
    return new AlterTableSchemaOperation(
            tableIdentifier,
            new CatalogTableImpl(
                    builder.build(),
                    catalogTable.getPartitionKeys(),
                    newProperties,
                    catalogTable.getComment()));
}
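The key precondition here is that partition columns sit at the end of the schema. As a minimal, standalone sketch of that check (the column names, types, and class name below are invented for illustration and are not part of the method above), the trailing columns of a TableSchema can be compared against the partition keys like so:

import java.util.Set;
import java.util.stream.Collectors;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableColumn;
import org.apache.flink.table.api.TableSchema;

public class PartitionColumnsLastCheck {
    public static void main(String[] args) {
        // Hypothetical schema: two non-partition columns followed by one partition column ("dt").
        TableSchema schema =
                TableSchema.builder()
                        .add(TableColumn.physical("id", DataTypes.INT()))
                        .add(TableColumn.physical("name", DataTypes.STRING()))
                        .add(TableColumn.physical("dt", DataTypes.STRING()))
                        .build();
        int numPartCol = 1; // would come from catalogTable.getPartitionKeys().size()

        // Same extraction as in convertAddReplaceColumns: the trailing numPartCol columns
        // must be exactly the partition columns, otherwise the ValidationException is thrown.
        Set<String> lastCols =
                schema.getTableColumns()
                        .subList(schema.getFieldCount() - numPartCol, schema.getFieldCount())
                        .stream()
                        .map(TableColumn::getName)
                        .collect(Collectors.toSet());
        System.out.println(lastCols); // prints [dt]
    }
}

In practice this conversion path is reached from Hive-dialect DDL such as ALTER TABLE t ADD COLUMNS (...) or ALTER TABLE t REPLACE COLUMNS (...).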
use of org.apache.flink.table.api.TableColumn in project flink by splunk.
the class OperationConverterUtils method convertChangeColumn.
public static Operation convertChangeColumn(
        ObjectIdentifier tableIdentifier,
        SqlChangeColumn changeColumn,
        CatalogTable catalogTable,
        SqlValidator sqlValidator) {
    String oldName = changeColumn.getOldName().getSimple();
    if (catalogTable.getPartitionKeys().indexOf(oldName) >= 0) {
        // disallow changing partition columns
        throw new ValidationException("CHANGE COLUMN cannot be applied to partition columns");
    }
    TableSchema oldSchema = catalogTable.getSchema();
    boolean first = changeColumn.isFirst();
    String after = changeColumn.getAfter() == null ? null : changeColumn.getAfter().getSimple();
    TableColumn newTableColumn = toTableColumn(changeColumn.getNewColumn(), sqlValidator);
    TableSchema newSchema = changeColumn(oldSchema, oldName, newTableColumn, first, after);
    Map<String, String> newProperties = new HashMap<>(catalogTable.getOptions());
    newProperties.putAll(extractProperties(changeColumn.getProperties()));
    return new AlterTableSchemaOperation(
            tableIdentifier,
            new CatalogTableImpl(
                    newSchema,
                    catalogTable.getPartitionKeys(),
                    newProperties,
                    catalogTable.getComment()));
    // TODO: handle watermark and constraints
}
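The repositioning done by the private changeColumn helper (not shown on this page) is driven by the first/after flags of the CHANGE COLUMN statement. The sketch below is a hypothetical reimplementation of just that repositioning semantics on a plain list of TableColumns; it is not the actual Flink helper:

import java.util.ArrayList;
import java.util.List;

import org.apache.flink.table.api.TableColumn;

public class ChangeColumnPositionSketch {

    // Replace the column named oldName with newColumn, optionally moving it to the
    // front (first == true) or directly behind the column named 'after'.
    static List<TableColumn> reposition(
            List<TableColumn> columns,
            String oldName,
            TableColumn newColumn,
            boolean first,
            String after) {
        List<TableColumn> result = new ArrayList<>(columns);
        int oldIdx = indexOf(result, oldName);
        if (oldIdx < 0) {
            throw new IllegalArgumentException("Old column not found: " + oldName);
        }
        result.remove(oldIdx);
        final int insertAt;
        if (first) {
            insertAt = 0;
        } else if (after != null) {
            int afterIdx = indexOf(result, after);
            if (afterIdx < 0) {
                throw new IllegalArgumentException("AFTER column not found: " + after);
            }
            insertAt = afterIdx + 1;
        } else {
            insertAt = oldIdx; // neither FIRST nor AFTER: keep the original position
        }
        result.add(insertAt, newColumn);
        return result;
    }

    private static int indexOf(List<TableColumn> columns, String name) {
        for (int i = 0; i < columns.size(); i++) {
            if (columns.get(i).getName().equals(name)) {
                return i;
            }
        }
        return -1;
    }
}

As the TODO in the method above notes, watermarks and constraints are not yet handled by this conversion.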
use of org.apache.flink.table.api.TableColumn in project flink-mirror by flink-ci.
the class HiveParserDDLSemanticAnalyzer method convertAlterTableModifyCols.
private Operation convertAlterTableModifyCols(
        CatalogBaseTable alteredTable, String tblName, HiveParserASTNode ast, boolean replace)
        throws SemanticException {
    List<FieldSchema> newCols =
            HiveParserBaseSemanticAnalyzer.getColumns((HiveParserASTNode) ast.getChild(0));
    boolean isCascade = false;
    if (null != ast.getFirstChildWithType(HiveASTParser.TOK_CASCADE)) {
        isCascade = true;
    }
    ObjectIdentifier tableIdentifier = parseObjectIdentifier(tblName);
    CatalogTable oldTable = (CatalogTable) alteredTable;
    // prepare properties
    Map<String, String> props = new HashMap<>(oldTable.getOptions());
    props.put(ALTER_TABLE_OP, ALTER_COLUMNS.name());
    if (isCascade) {
        props.put(ALTER_COL_CASCADE, "true");
    }
    TableSchema oldSchema = oldTable.getSchema();
    final int numPartCol = oldTable.getPartitionKeys().size();
    TableSchema.Builder builder = TableSchema.builder();
    // add existing non-part col if we're not replacing
    if (!replace) {
        List<TableColumn> nonPartCols =
                oldSchema.getTableColumns().subList(0, oldSchema.getFieldCount() - numPartCol);
        for (TableColumn column : nonPartCols) {
            builder.add(column);
        }
        setWatermarkAndPK(builder, oldSchema);
    }
    // add new cols
    for (FieldSchema col : newCols) {
        builder.add(
                TableColumn.physical(
                        col.getName(),
                        HiveTypeUtil.toFlinkType(
                                TypeInfoUtils.getTypeInfoFromTypeString(col.getType()))));
    }
    // add part cols
    List<TableColumn> partCols =
            oldSchema
                    .getTableColumns()
                    .subList(oldSchema.getFieldCount() - numPartCol, oldSchema.getFieldCount());
    for (TableColumn column : partCols) {
        builder.add(column);
    }
    return new AlterTableSchemaOperation(
            tableIdentifier,
            new CatalogTableImpl(
                    builder.build(),
                    oldTable.getPartitionKeys(),
                    props,
                    oldTable.getComment()));
}
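The loop over newCols maps each Hive FieldSchema to a Flink TableColumn via the Hive type string. A minimal sketch of that mapping in isolation, assuming flink-connector-hive and the Hive serde classes are on the classpath (the column name, type string, and class name are made up for illustration):

import org.apache.flink.table.api.TableColumn;
import org.apache.flink.table.catalog.hive.util.HiveTypeUtil;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class HiveColumnMappingExample {
    public static void main(String[] args) {
        // Hypothetical Hive column: name "price", Hive type string "decimal(10,2)".
        FieldSchema col = new FieldSchema("price", "decimal(10,2)", null);

        // Same conversion as in convertAlterTableModifyCols:
        // Hive type string -> Hive TypeInfo -> Flink DataType -> physical TableColumn.
        TableColumn column =
                TableColumn.physical(
                        col.getName(),
                        HiveTypeUtil.toFlinkType(
                                TypeInfoUtils.getTypeInfoFromTypeString(col.getType())));
        System.out.println(column);
    }
}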