Use of org.apache.flink.table.api.TableSchema in project flink by apache.
From the class HiveTableUtil, method createTableSchema.
/**
 * Create a Flink TableSchema from a Hive table's columns and partition keys.
 */
public static TableSchema createTableSchema(
        List<FieldSchema> cols,
        List<FieldSchema> partitionKeys,
        Set<String> notNullColumns,
        UniqueConstraint primaryKey) {
    // Partition keys are appended after the regular columns.
    List<FieldSchema> allCols = new ArrayList<>(cols);
    allCols.addAll(partitionKeys);
    String[] colNames = new String[allCols.size()];
    DataType[] colTypes = new DataType[allCols.size()];
    for (int i = 0; i < allCols.size(); i++) {
        FieldSchema fs = allCols.get(i);
        colNames[i] = fs.getName();
        // Map the Hive type string to the corresponding Flink DataType.
        colTypes[i] =
                HiveTypeUtil.toFlinkType(TypeInfoUtils.getTypeInfoFromTypeString(fs.getType()));
        if (notNullColumns.contains(colNames[i])) {
            colTypes[i] = colTypes[i].notNull();
        }
    }
    TableSchema.Builder builder = TableSchema.builder().fields(colNames, colTypes);
    if (primaryKey != null) {
        builder.primaryKey(primaryKey.getName(), primaryKey.getColumns().toArray(new String[0]));
    }
    return builder.build();
}
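A minimal sketch of how this helper might be invoked. The column names, types, and the NOT NULL set below are invented for illustration; FieldSchema here is the Hive metastore class org.apache.hadoop.hive.metastore.api.FieldSchema.

// Build a schema with two regular columns, one partition key, and a NOT NULL
// constraint on "id". All names and types here are hypothetical.
List<FieldSchema> cols = Arrays.asList(
        new FieldSchema("id", "bigint", null),
        new FieldSchema("name", "string", null));
List<FieldSchema> partKeys =
        Collections.singletonList(new FieldSchema("dt", "string", null));
TableSchema schema = HiveTableUtil.createTableSchema(
        cols,
        partKeys,
        Collections.singleton("id"), // "id" becomes BIGINT NOT NULL
        null);                       // no primary key
// Resulting schema: id BIGINT NOT NULL, name STRING, dt STRING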
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
From the class HiveSourceBuilder, method getProducedRowType.
private RowType getProducedRowType() {
    TableSchema producedSchema;
    if (projectedFields == null) {
        // No projection pushed down: produce the full table schema.
        producedSchema = fullSchema;
    } else {
        // Keep only the projected field indices, in projection order.
        String[] fullNames = fullSchema.getFieldNames();
        DataType[] fullTypes = fullSchema.getFieldDataTypes();
        producedSchema = TableSchema.builder()
                .fields(
                        Arrays.stream(projectedFields).mapToObj(i -> fullNames[i]).toArray(String[]::new),
                        Arrays.stream(projectedFields).mapToObj(i -> fullTypes[i]).toArray(DataType[]::new))
                .build();
    }
    return (RowType) producedSchema.toRowDataType().bridgedTo(RowData.class).getLogicalType();
}
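To make the projection logic concrete, here is a small self-contained sketch; the schema and indices are made up for illustration and are not part of the Flink code above.

// Suppose the full schema is (a INT, b STRING, c DOUBLE) and the planner
// pushed down the projection {2, 0}, i.e. SELECT c, a FROM t.
TableSchema fullSchema = TableSchema.builder()
        .fields(
                new String[] {"a", "b", "c"},
                new DataType[] {DataTypes.INT(), DataTypes.STRING(), DataTypes.DOUBLE()})
        .build();
int[] projectedFields = {2, 0};
String[] names = fullSchema.getFieldNames();
DataType[] types = fullSchema.getFieldDataTypes();
TableSchema produced = TableSchema.builder()
        .fields(
                Arrays.stream(projectedFields).mapToObj(i -> names[i]).toArray(String[]::new),
                Arrays.stream(projectedFields).mapToObj(i -> types[i]).toArray(DataType[]::new))
        .build();
// produced is (c DOUBLE, a INT): fields are dropped and reordered to match
// the projection, so the source only emits the selected columns.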
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
From the class HiveParserDDLSemanticAnalyzer, method convertAlterTableChangeCol.
private Operation convertAlterTableChangeCol(
        CatalogBaseTable alteredTable, String[] qualified, HiveParserASTNode ast)
        throws SemanticException {
    String newComment = null;
    boolean first = false;
    String flagCol = null;
    boolean isCascade = false;
    // col_old_name col_new_name column_type [COMMENT col_comment] [FIRST|AFTER column_name]
    // [CASCADE|RESTRICT]
    String oldColName = ast.getChild(0).getText();
    String newColName = ast.getChild(1).getText();
    String newType =
            HiveParserBaseSemanticAnalyzer.getTypeStringFromAST((HiveParserASTNode) ast.getChild(2));
    int childCount = ast.getChildCount();
    for (int i = 3; i < childCount; i++) {
        HiveParserASTNode child = (HiveParserASTNode) ast.getChild(i);
        switch (child.getToken().getType()) {
            case HiveASTParser.StringLiteral:
                newComment = HiveParserBaseSemanticAnalyzer.unescapeSQLString(child.getText());
                break;
            case HiveASTParser.TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION:
                flagCol =
                        HiveParserBaseSemanticAnalyzer.unescapeIdentifier(
                                child.getChild(0).getText());
                break;
            case HiveASTParser.KW_FIRST:
                first = true;
                break;
            case HiveASTParser.TOK_CASCADE:
                isCascade = true;
                break;
            case HiveASTParser.TOK_RESTRICT:
                break;
            default:
                throw new ValidationException(
                        "Unsupported token: " + child.getToken() + " for alter table");
        }
    }
    // Validate the operation of renaming a column name.
    Table tab = getTable(new ObjectPath(qualified[0], qualified[1]));
    SkewedInfo skewInfo = tab.getTTable().getSd().getSkewedInfo();
    if ((null != skewInfo)
            && (null != skewInfo.getSkewedColNames())
            && skewInfo.getSkewedColNames().contains(oldColName)) {
        throw new ValidationException(
                oldColName + ErrorMsg.ALTER_TABLE_NOT_ALLOWED_RENAME_SKEWED_COLUMN.getMsg());
    }
    String tblName = HiveParserBaseSemanticAnalyzer.getDotName(qualified);
    ObjectIdentifier tableIdentifier = parseObjectIdentifier(tblName);
    CatalogTable oldTable = (CatalogTable) alteredTable;
    String oldName = HiveParserBaseSemanticAnalyzer.unescapeIdentifier(oldColName);
    String newName = HiveParserBaseSemanticAnalyzer.unescapeIdentifier(newColName);
    if (oldTable.getPartitionKeys().contains(oldName)) {
        // disallow changing partition columns
        throw new ValidationException("CHANGE COLUMN cannot be applied to partition columns");
    }
    TableSchema oldSchema = oldTable.getSchema();
    TableColumn newTableColumn =
            TableColumn.physical(
                    newName,
                    HiveTypeUtil.toFlinkType(TypeInfoUtils.getTypeInfoFromTypeString(newType)));
    TableSchema newSchema =
            OperationConverterUtils.changeColumn(oldSchema, oldName, newTableColumn, first, flagCol);
    Map<String, String> props = new HashMap<>(oldTable.getOptions());
    props.put(ALTER_TABLE_OP, ALTER_COLUMNS.name());
    if (isCascade) {
        props.put(ALTER_COL_CASCADE, "true");
    }
    return new AlterTableSchemaOperation(
            tableIdentifier,
            new CatalogTableImpl(
                    newSchema, oldTable.getPartitionKeys(), props, oldTable.getComment()));
}
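For reference, a hedged sketch of the kind of DDL that reaches this converter; the table and column names are invented, and a registered HiveCatalog with the Hive dialect enabled is assumed.

// Hypothetical setup: a HiveCatalog must already be registered and current.
TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
tEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
tEnv.executeSql("CREATE TABLE users (id BIGINT, name STRING) PARTITIONED BY (dt STRING)");
// Rename `name` to `full_name`, attach a comment, move it to the front,
// and cascade the metadata change to existing partitions.
tEnv.executeSql(
        "ALTER TABLE users CHANGE COLUMN name full_name STRING "
                + "COMMENT 'display name' FIRST CASCADE");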
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
From the class HiveParserDDLSemanticAnalyzer, method convertCreateView.
private Operation convertCreateView(HiveParserASTNode ast) throws SemanticException {
    String[] qualTabName =
            HiveParserBaseSemanticAnalyzer.getQualifiedTableName(
                    (HiveParserASTNode) ast.getChild(0));
    String dbDotTable = HiveParserBaseSemanticAnalyzer.getDotName(qualTabName);
    List<FieldSchema> cols = null;
    boolean ifNotExists = false;
    boolean isAlterViewAs = false;
    String comment = null;
    HiveParserASTNode selectStmt = null;
    Map<String, String> tblProps = null;
    boolean isMaterialized =
            ast.getToken().getType() == HiveASTParser.TOK_CREATE_MATERIALIZED_VIEW;
    if (isMaterialized) {
        handleUnsupportedOperation("MATERIALIZED VIEW is not supported");
    }
    HiveParserStorageFormat storageFormat = new HiveParserStorageFormat(conf);
    LOG.info("Creating view " + dbDotTable + " position=" + ast.getCharPositionInLine());
    int numCh = ast.getChildCount();
    for (int num = 1; num < numCh; num++) {
        HiveParserASTNode child = (HiveParserASTNode) ast.getChild(num);
        if (storageFormat.fillStorageFormat(child)) {
            handleUnsupportedOperation("FILE FORMAT for view is not supported");
        }
        switch (child.getToken().getType()) {
            case HiveASTParser.TOK_IFNOTEXISTS:
                ifNotExists = true;
                break;
            case HiveASTParser.TOK_REWRITE_ENABLED:
                handleUnsupportedOperation("MATERIALIZED VIEW REWRITE is not supported");
                break;
            case HiveASTParser.TOK_ORREPLACE:
                handleUnsupportedOperation("CREATE OR REPLACE VIEW is not supported");
                break;
            case HiveASTParser.TOK_QUERY:
                selectStmt = child;
                break;
            case HiveASTParser.TOK_TABCOLNAME:
                cols = HiveParserBaseSemanticAnalyzer.getColumns(child);
                break;
            case HiveASTParser.TOK_TABLECOMMENT:
                comment =
                        HiveParserBaseSemanticAnalyzer.unescapeSQLString(
                                child.getChild(0).getText());
                break;
            case HiveASTParser.TOK_TABLEPROPERTIES:
                tblProps = getProps((HiveParserASTNode) child.getChild(0));
                break;
            case HiveASTParser.TOK_TABLEROWFORMAT:
                handleUnsupportedOperation("ROW FORMAT for view is not supported");
                break;
            case HiveASTParser.TOK_TABLESERIALIZER:
                handleUnsupportedOperation("SERDE for view is not supported");
                break;
            case HiveASTParser.TOK_TABLELOCATION:
                handleUnsupportedOperation("LOCATION for view is not supported");
                break;
            case HiveASTParser.TOK_VIEWPARTCOLS:
                handleUnsupportedOperation("PARTITION COLUMN for view is not supported");
                break;
            default:
                throw new ValidationException("Unknown AST node for CREATE/ALTER VIEW: " + child);
        }
    }
    if (ast.getToken().getType() == HiveASTParser.TOK_ALTERVIEW
            && ast.getChild(1).getType() == HiveASTParser.TOK_QUERY) {
        isAlterViewAs = true;
    }
    queryState.setCommandType(HiveOperation.CREATEVIEW);
    HiveParserCreateViewInfo createViewInfo =
            new HiveParserCreateViewInfo(dbDotTable, cols, selectStmt);
    hiveParser.analyzeCreateView(createViewInfo, context, queryState, hiveShim);
    ObjectIdentifier viewIdentifier = parseObjectIdentifier(createViewInfo.getCompoundName());
    TableSchema schema =
            HiveTableUtil.createTableSchema(
                    createViewInfo.getSchema(), Collections.emptyList(), Collections.emptySet(), null);
    Map<String, String> props = new HashMap<>();
    if (isAlterViewAs) {
        // ALTER VIEW ... AS keeps the existing view's options and comment.
        CatalogBaseTable baseTable = getCatalogBaseTable(viewIdentifier);
        props.putAll(baseTable.getOptions());
        comment = baseTable.getComment();
    } else if (tblProps != null) {
        props.putAll(tblProps);
    }
    CatalogView catalogView =
            new CatalogViewImpl(
                    createViewInfo.getOriginalText(),
                    createViewInfo.getExpandedText(),
                    schema,
                    props,
                    comment);
    if (isAlterViewAs) {
        return new AlterViewAsOperation(viewIdentifier, catalogView);
    } else {
        return new CreateViewOperation(viewIdentifier, catalogView, ifNotExists, false);
    }
}
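Concrete statements that exercise this path might look like the following; the view and table names are invented, and the same Hive-dialect setup as the earlier sketch is assumed.

// CREATE VIEW with a comment and table properties (all names hypothetical).
tEnv.executeSql(
        "CREATE VIEW IF NOT EXISTS active_users "
                + "COMMENT 'users seen today' "
                + "TBLPROPERTIES ('owner'='etl') "
                + "AS SELECT id, full_name FROM users WHERE dt = '2024-01-01'");
// ALTER VIEW ... AS replaces the query but, per the converter above,
// re-uses the existing view's options and comment.
tEnv.executeSql("ALTER VIEW active_users AS SELECT id FROM users");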
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
From the class HiveInputFormatPartitionReaderITCase, method testReadFormat.
private void testReadFormat(TableEnvironment tableEnv, HiveCatalog hiveCatalog, String format)
        throws Exception {
    String tableName = prepareData(tableEnv, format);
    ObjectPath tablePath = new ObjectPath("default", tableName);
    TableSchema tableSchema = hiveCatalog.getTable(tablePath).getSchema();
    // create partition reader
    HiveInputFormatPartitionReader partitionReader =
            new HiveInputFormatPartitionReader(
                    new Configuration(),
                    new JobConf(hiveCatalog.getHiveConf()),
                    hiveCatalog.getHiveVersion(),
                    tablePath,
                    tableSchema.getFieldDataTypes(),
                    tableSchema.getFieldNames(),
                    Collections.emptyList(),
                    null,
                    false);
    Table hiveTable = hiveCatalog.getHiveTable(tablePath);
    // create HiveTablePartition to read from
    HiveTablePartition tablePartition =
            new HiveTablePartition(
                    hiveTable.getSd(),
                    HiveReflectionUtils.getTableMetadata(
                            HiveShimLoader.loadHiveShim(hiveCatalog.getHiveVersion()), hiveTable));
    partitionReader.open(Collections.singletonList(tablePartition));
    GenericRowData reuse = new GenericRowData(tableSchema.getFieldCount());
    int count = 0;
    // this follows the way the partition reader is used during lookup join
    while (partitionReader.read(reuse) != null) {
        count++;
    }
    assertEquals(
            CollectionUtil.iteratorToList(tableEnv.executeSql("select * from " + tableName).collect()).size(),
            count);
}
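The test depends on a prepareData helper that is not shown in this snippet. A plausible sketch, assuming the Hive dialect is active in tableEnv; the table name, columns, and row values are invented.

// Hypothetical helper: creates a Hive table in the given storage format and
// inserts a couple of rows so the partition reader has data to count.
private String prepareData(TableEnvironment tableEnv, String format) throws Exception {
    String tableName = "partition_reader_test_" + format;
    tableEnv.executeSql(
            String.format("CREATE TABLE %s (x INT, y STRING) STORED AS %s", tableName, format));
    tableEnv.executeSql(String.format("INSERT INTO %s VALUES (1, 'a'), (2, 'b')", tableName))
            .await();
    return tableName;
}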