Use of org.apache.flink.table.api.TableColumn in project flink by apache.
The class HiveParserDDLSemanticAnalyzer, method convertAlterTableChangeCol:
private Operation convertAlterTableChangeCol(
        CatalogBaseTable alteredTable, String[] qualified, HiveParserASTNode ast)
        throws SemanticException {
    String newComment = null;
    boolean first = false;
    String flagCol = null;
    boolean isCascade = false;
    // col_old_name col_new_name column_type [COMMENT col_comment] [FIRST|AFTER column_name]
    // [CASCADE|RESTRICT]
    String oldColName = ast.getChild(0).getText();
    String newColName = ast.getChild(1).getText();
    String newType =
            HiveParserBaseSemanticAnalyzer.getTypeStringFromAST(
                    (HiveParserASTNode) ast.getChild(2));
    int childCount = ast.getChildCount();
    for (int i = 3; i < childCount; i++) {
        HiveParserASTNode child = (HiveParserASTNode) ast.getChild(i);
        switch (child.getToken().getType()) {
            case HiveASTParser.StringLiteral:
                newComment = HiveParserBaseSemanticAnalyzer.unescapeSQLString(child.getText());
                break;
            case HiveASTParser.TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION:
                flagCol =
                        HiveParserBaseSemanticAnalyzer.unescapeIdentifier(
                                child.getChild(0).getText());
                break;
            case HiveASTParser.KW_FIRST:
                first = true;
                break;
            case HiveASTParser.TOK_CASCADE:
                isCascade = true;
                break;
            case HiveASTParser.TOK_RESTRICT:
                break;
            default:
                throw new ValidationException(
                        "Unsupported token: " + child.getToken() + " for alter table");
        }
    }
    // Validate the operation of renaming a column name.
    Table tab = getTable(new ObjectPath(qualified[0], qualified[1]));
    SkewedInfo skewInfo = tab.getTTable().getSd().getSkewedInfo();
    if ((null != skewInfo)
            && (null != skewInfo.getSkewedColNames())
            && skewInfo.getSkewedColNames().contains(oldColName)) {
        throw new ValidationException(
                oldColName + ErrorMsg.ALTER_TABLE_NOT_ALLOWED_RENAME_SKEWED_COLUMN.getMsg());
    }
    String tblName = HiveParserBaseSemanticAnalyzer.getDotName(qualified);
    ObjectIdentifier tableIdentifier = parseObjectIdentifier(tblName);
    CatalogTable oldTable = (CatalogTable) alteredTable;
    String oldName = HiveParserBaseSemanticAnalyzer.unescapeIdentifier(oldColName);
    String newName = HiveParserBaseSemanticAnalyzer.unescapeIdentifier(newColName);
    if (oldTable.getPartitionKeys().contains(oldName)) {
        // disallow changing partition columns
        throw new ValidationException("CHANGE COLUMN cannot be applied to partition columns");
    }
    TableSchema oldSchema = oldTable.getSchema();
    TableColumn newTableColumn =
            TableColumn.physical(
                    newName,
                    HiveTypeUtil.toFlinkType(TypeInfoUtils.getTypeInfoFromTypeString(newType)));
    TableSchema newSchema =
            OperationConverterUtils.changeColumn(oldSchema, oldName, newTableColumn, first, flagCol);
    Map<String, String> props = new HashMap<>(oldTable.getOptions());
    props.put(ALTER_TABLE_OP, ALTER_COLUMNS.name());
    if (isCascade) {
        props.put(ALTER_COL_CASCADE, "true");
    }
    return new AlterTableSchemaOperation(
            tableIdentifier,
            new CatalogTableImpl(
                    newSchema, oldTable.getPartitionKeys(), props, oldTable.getComment()));
}
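For context, here is a hedged sketch of how this code path is typically reached from the Table API (not part of the excerpt above): switching the SQL dialect to HIVE routes ALTER TABLE ... CHANGE COLUMN statements through HiveParserDDLSemanticAnalyzer. The table and column names below are invented, and the statement assumes a Hive catalog is already registered and set as the current catalog.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.TableEnvironment;

public class AlterChangeColumnSketch {
    public static void main(String[] args) {
        TableEnvironment tEnv =
                TableEnvironment.create(EnvironmentSettings.newInstance().inBatchMode().build());
        // Hive-dialect DDL is parsed by the Hive parser stack shown above.
        tEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
        // Rename column "price" to "amount", change its type, add a comment, move it
        // after "id", and cascade the change to partition metadata (illustrative names).
        tEnv.executeSql(
                "ALTER TABLE orders CHANGE COLUMN price amount DOUBLE"
                        + " COMMENT 'renamed' AFTER id CASCADE");
    }
}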
Use of org.apache.flink.table.api.TableColumn in project flink by apache.
The class SchemaValidator, method deriveTableSinkSchema:
/**
* Derives the table schema for a table sink. A sink ignores a proctime attribute and needs to
* track the origin of a rowtime field.
*
* @deprecated This method combines two separate concepts of table schema and field mapping.
* This should be split into two methods once we have support for the corresponding
* interfaces (see FLINK-9870).
*/
@Deprecated
public static TableSchema deriveTableSinkSchema(DescriptorProperties properties) {
    TableSchema.Builder builder = TableSchema.builder();
    TableSchema tableSchema = properties.getTableSchema(SCHEMA);
    for (int i = 0; i < tableSchema.getFieldCount(); i++) {
        final TableColumn tableColumn = tableSchema.getTableColumns().get(i);
        final String fieldName = tableColumn.getName();
        final DataType dataType = tableColumn.getType();
        if (!tableColumn.isPhysical()) {
            // skip non-physical columns
            continue;
        }
        boolean isProctime =
                properties
                        .getOptionalBoolean(SCHEMA + "." + i + "." + SCHEMA_PROCTIME)
                        .orElse(false);
        String tsType = SCHEMA + "." + i + "." + ROWTIME_TIMESTAMPS_TYPE;
        boolean isRowtime = properties.containsKey(tsType);
        if (!isProctime && !isRowtime) {
            // check for aliasing
            String aliasName =
                    properties
                            .getOptionalString(SCHEMA + "." + i + "." + SCHEMA_FROM)
                            .orElse(fieldName);
            builder.field(aliasName, dataType);
        } else if (isRowtime) {
            // only use the rowtime attribute if it references a field
            switch (properties.getString(tsType)) {
                case ROWTIME_TIMESTAMPS_TYPE_VALUE_FROM_FIELD:
                    String field =
                            properties.getString(SCHEMA + "." + i + "." + ROWTIME_TIMESTAMPS_FROM);
                    builder.field(field, dataType);
                    break;
                // insert the timestamp into the output
                default:
                    throw new TableException(
                            format(
                                    "Unsupported rowtime type '%s' for sink table schema. "
                                            + "Currently only '%s' is supported for table sinks.",
                                    dataType, ROWTIME_TIMESTAMPS_TYPE_VALUE_FROM_FIELD));
            }
        }
    }
    return builder.build();
}
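As a rough illustration of what deriveTableSinkSchema consumes, the sketch below flattens a purely physical schema into DescriptorProperties and derives the sink schema from it. The field names are invented, and the usage is only a sketch of this deprecated descriptor API; exact property keys and behavior may differ between Flink versions.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.descriptors.DescriptorProperties;
import org.apache.flink.table.descriptors.SchemaValidator;

public class SinkSchemaSketch {
    public static void main(String[] args) {
        DescriptorProperties props = new DescriptorProperties(true);
        // Flatten a purely physical schema under the "schema" key prefix. With no
        // proctime or rowtime keys present, deriveTableSinkSchema should return the
        // physical fields unchanged.
        props.putTableSchema(
                "schema",
                TableSchema.builder()
                        .field("id", DataTypes.BIGINT())
                        .field("name", DataTypes.STRING())
                        .build());
        TableSchema sinkSchema = SchemaValidator.deriveTableSinkSchema(props);
        System.out.println(sinkSchema);
    }
}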
Use of org.apache.flink.table.api.TableColumn in project flink by apache.
The class OperationConverterUtils, method changeColumn:
// change a column in the old table schema and return the updated table schema
public static TableSchema changeColumn(
        TableSchema oldSchema,
        String oldName,
        TableColumn newTableColumn,
        boolean first,
        String after) {
    int oldIndex = Arrays.asList(oldSchema.getFieldNames()).indexOf(oldName);
    if (oldIndex < 0) {
        throw new ValidationException(
                String.format("Old column %s not found for CHANGE COLUMN", oldName));
    }
    List<TableColumn> tableColumns = oldSchema.getTableColumns();
    if ((!first && after == null) || oldName.equals(after)) {
        tableColumns.set(oldIndex, newTableColumn);
    } else {
        // need to change column position
        tableColumns.remove(oldIndex);
        if (first) {
            tableColumns.add(0, newTableColumn);
        } else {
            int newIndex =
                    tableColumns.stream()
                            .map(TableColumn::getName)
                            .collect(Collectors.toList())
                            .indexOf(after);
            if (newIndex < 0) {
                throw new ValidationException(
                        String.format("After column %s not found for CHANGE COLUMN", after));
            }
            tableColumns.add(newIndex + 1, newTableColumn);
        }
    }
    TableSchema.Builder builder = TableSchema.builder();
    for (TableColumn column : tableColumns) {
        builder.add(column);
    }
    setWatermarkAndPK(builder, oldSchema);
    return builder.build();
}
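A minimal standalone sketch of calling changeColumn directly. Assumptions: OperationConverterUtils lives in the planner module and its package has differed between Flink versions, so the import below may need adjusting; the schema and column names are invented for illustration.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableColumn;
import org.apache.flink.table.api.TableSchema;
// Assumed location; adjust to the OperationConverterUtils package of your Flink version.
import org.apache.flink.table.planner.utils.OperationConverterUtils;

public class ChangeColumnSketch {
    public static void main(String[] args) {
        TableSchema oldSchema =
                TableSchema.builder()
                        .field("id", DataTypes.INT())
                        .field("name", DataTypes.STRING())
                        .build();
        // Rename "name" to "user_name" and move it to the first position.
        TableColumn renamed = TableColumn.physical("user_name", DataTypes.STRING());
        TableSchema newSchema =
                OperationConverterUtils.changeColumn(oldSchema, "name", renamed, true, null);
        System.out.println(newSchema); // expected field order: user_name, id
    }
}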
Use of org.apache.flink.table.api.TableColumn in project flink by apache.
The class OperationConverterUtils, method convertAddReplaceColumns:
public static Operation convertAddReplaceColumns(
        ObjectIdentifier tableIdentifier,
        SqlAddReplaceColumns addReplaceColumns,
        CatalogTable catalogTable,
        SqlValidator sqlValidator) {
    // This is only used by the Hive dialect at the moment. In Hive, only non-partition columns
    // can be added/replaced, and users will only define non-partition columns in the new column
    // list. Therefore, we require that partition columns appear last in the schema (which is in
    // line with Hive). Otherwise, we won't be able to determine the column positions after the
    // non-partition columns are replaced.
    TableSchema oldSchema = catalogTable.getSchema();
    int numPartCol = catalogTable.getPartitionKeys().size();
    Set<String> lastCols =
            oldSchema.getTableColumns()
                    .subList(oldSchema.getFieldCount() - numPartCol, oldSchema.getFieldCount())
                    .stream()
                    .map(TableColumn::getName)
                    .collect(Collectors.toSet());
    if (!lastCols.equals(new HashSet<>(catalogTable.getPartitionKeys()))) {
        throw new ValidationException(
                "ADD/REPLACE COLUMNS on partitioned tables requires partition columns to appear last");
    }
    // set non-partition columns
    TableSchema.Builder builder = TableSchema.builder();
    if (!addReplaceColumns.isReplace()) {
        List<TableColumn> nonPartCols =
                oldSchema.getTableColumns().subList(0, oldSchema.getFieldCount() - numPartCol);
        for (TableColumn column : nonPartCols) {
            builder.add(column);
        }
        setWatermarkAndPK(builder, catalogTable.getSchema());
    }
    for (SqlNode sqlNode : addReplaceColumns.getNewColumns()) {
        builder.add(toTableColumn((SqlTableColumn) sqlNode, sqlValidator));
    }
    // set partition columns
    List<TableColumn> partCols =
            oldSchema.getTableColumns()
                    .subList(oldSchema.getFieldCount() - numPartCol, oldSchema.getFieldCount());
    for (TableColumn column : partCols) {
        builder.add(column);
    }
    // set properties
    Map<String, String> newProperties = new HashMap<>(catalogTable.getOptions());
    newProperties.putAll(extractProperties(addReplaceColumns.getProperties()));
    return new AlterTableSchemaOperation(
            tableIdentifier,
            new CatalogTableImpl(
                    builder.build(),
                    catalogTable.getPartitionKeys(),
                    newProperties,
                    catalogTable.getComment()));
}
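To make the "partition columns last" requirement concrete, here is a small sketch with invented field names. For a table partitioned by dt, the converter only accepts schemas whose partition columns trail all non-partition columns, because that is the prefix it replaces or extends.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;

public class PartitionColumnsLastSketch {
    public static void main(String[] args) {
        // Accepted by convertAddReplaceColumns: the partition column "dt" is last, so the
        // non-partition prefix (id, msg) can be replaced or extended unambiguously.
        TableSchema ok =
                TableSchema.builder()
                        .field("id", DataTypes.BIGINT())
                        .field("msg", DataTypes.STRING())
                        .field("dt", DataTypes.STRING()) // partition key
                        .build();
        // Rejected: a partition column that is not last makes the new column positions
        // ambiguous, so the converter throws a ValidationException.
        TableSchema rejected =
                TableSchema.builder()
                        .field("dt", DataTypes.STRING()) // partition key
                        .field("id", DataTypes.BIGINT())
                        .field("msg", DataTypes.STRING())
                        .build();
        System.out.println(ok + "\n" + rejected);
    }
}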
Use of org.apache.flink.table.api.TableColumn in project pulsar-flink by streamnative.
The class FlinkPulsarTableITest, method testBasicFunctioning:
@Test(timeout = 40 * 1000L)
public void testBasicFunctioning() throws Exception {
    StreamExecutionEnvironment see = StreamExecutionEnvironment.getExecutionEnvironment();
    see.setParallelism(1);
    StreamTableEnvironment tEnv = StreamTableEnvironment.create(see);
    String table = newTopic();
    String tableName = TopicName.get(table).getLocalName();
    sendTypedMessages(table, SchemaType.BOOLEAN, BOOLEAN_LIST, Optional.empty());
    TableSchema tSchema = getTableSchema(table);
    List<String> columns = new ArrayList<>();
    for (TableColumn tableColumn : tSchema.getTableColumns()) {
        final String column =
                MessageFormat.format(
                        " `{0}` {1}",
                        tableColumn.getName(),
                        tableColumn.getType().getLogicalType().asSerializableString());
        columns.add(column);
    }
    tEnv.executeSql(createTableSql(tableName, table, tSchema, "atomic")).print();
    Table t = tEnv.sqlQuery("select `value` from " + tableName);
    tEnv.toDataStream(t, Boolean.class)
            .map(new FailingIdentityMapper<>(BOOLEAN_LIST.size()))
            .addSink(new SingletonStreamSink.StringSink<>())
            .setParallelism(1);
    TestUtils.tryExecute(see, "basic functionality");
    SingletonStreamSink.compareWithList(
            BOOLEAN_LIST.subList(0, BOOLEAN_LIST.size() - 1).stream()
                    .map(Objects::toString)
                    .collect(Collectors.toList()));
}
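The column-DDL formatting used in the loop above can be seen in isolation with a plain TableColumn. A tiny sketch, with an arbitrary column name, assuming the same Flink table API types:

import java.text.MessageFormat;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableColumn;

public class ColumnDdlSketch {
    public static void main(String[] args) {
        TableColumn column = TableColumn.physical("value", DataTypes.BOOLEAN());
        // Mirrors the test's formatting and prints: `value` BOOLEAN
        System.out.println(
                MessageFormat.format(
                        " `{0}` {1}",
                        column.getName(),
                        column.getType().getLogicalType().asSerializableString()));
    }
}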