Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.
From the class HiveCatalogDataTypeTest, the method testNonSupportedVarBinaryDataTypes:
@Test
public void testNonSupportedVarBinaryDataTypes() throws Exception {
    DataType[] types = new DataType[] { DataTypes.VARBINARY(20) };
    CatalogTable table = createCatalogTable(types);
    catalog.createDatabase(db1, createDb(), false);
    exception.expect(UnsupportedOperationException.class);
    catalog.createTable(path1, table, false);
}
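The createCatalogTable helper is not part of this snippet. A minimal sketch of what it might look like, assuming it simply wraps the given data types in a one-column-per-type schema (the column names, comment, and helper body are assumptions, not the test's actual implementation):
// Hypothetical sketch: build a CatalogTable whose schema has one column per data type.
private CatalogTable createCatalogTable(DataType[] types) {
    String[] colNames = new String[types.length];
    for (int i = 0; i < types.length; i++) {
        colNames[i] = "col" + i;
    }
    TableSchema schema = TableSchema.builder().fields(colNames, types).build();
    return new CatalogTableImpl(schema, Collections.emptyMap(), "test comment");
}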
Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.
From the class HiveCatalogTest, the method testAlterFlinkNonManagedTableToHiveTable:
@Test
public void testAlterFlinkNonManagedTableToHiveTable() throws Exception {
    Map<String, String> originOptions =
            Collections.singletonMap(FactoryUtil.CONNECTOR.key(), DataGenTableSourceFactory.IDENTIFIER);
    CatalogTable originTable = new CatalogTableImpl(schema, originOptions, "Flink non-managed table");
    hiveCatalog.createTable(tablePath, originTable, false);
    Map<String, String> newOptions = getLegacyFileSystemConnectorOptions("/test_path");
    newOptions.put(FactoryUtil.CONNECTOR.key(), SqlCreateHiveTable.IDENTIFIER);
    CatalogTable newTable = new CatalogTableImpl(schema, newOptions, "Hive table");
    assertThatThrownBy(() -> hiveCatalog.alterTable(tablePath, newTable, false))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessageContaining(
                    "Changing catalog table type is not allowed. "
                            + "Existing table type is 'FLINK_NON_MANAGED_TABLE', but new table type is 'HIVE_TABLE'");
}
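The getLegacyFileSystemConnectorOptions helper used here and in the next test is also not shown. A plausible sketch, assuming it returns a mutable map keyed by the legacy 'connector.*' properties (the same keys appear in the testCsvTableViaAPI snippet further down; the helper body itself is an assumption):
// Hypothetical sketch: legacy filesystem connector options using the old 'connector.*' property keys.
private static Map<String, String> getLegacyFileSystemConnectorOptions(String path) {
    Map<String, String> options = new HashMap<>();
    options.put("connector.type", "filesystem");
    options.put("connector.path", path);
    return options;
}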
Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.
From the class HiveCatalogTest, the method testAlterHiveTableToFlinkNonManagedTable:
@Test
public void testAlterHiveTableToFlinkNonManagedTable() throws Exception {
    Map<String, String> originOptions = getLegacyFileSystemConnectorOptions("/test_path");
    originOptions.put(FactoryUtil.CONNECTOR.key(), SqlCreateHiveTable.IDENTIFIER);
    CatalogTable originTable = new CatalogTableImpl(schema, originOptions, "Hive table");
    hiveCatalog.createTable(tablePath, originTable, false);
    Map<String, String> newOptions =
            Collections.singletonMap(FactoryUtil.CONNECTOR.key(), DataGenTableSourceFactory.IDENTIFIER);
    CatalogTable newTable = new CatalogTableImpl(schema, newOptions, "Flink managed table");
    assertThatThrownBy(() -> hiveCatalog.alterTable(tablePath, newTable, false))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessageContaining(
                    "Changing catalog table type is not allowed. "
                            + "Existing table type is 'HIVE_TABLE', but new table type is 'FLINK_NON_MANAGED_TABLE'");
}
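Both alter tests build their tables with the CatalogTableImpl constructor, which is deprecated in more recent Flink releases. A minimal sketch of the same kind of table described through the CatalogTable.of factory and the newer Schema API (the column names here are illustrative, not taken from the tests):
// Minimal sketch using the non-deprecated factory: CatalogTable.of(schema, comment, partitionKeys, options).
Schema schema =
        Schema.newBuilder()
                .column("name", DataTypes.STRING())
                .column("age", DataTypes.INT())
                .build();
Map<String, String> options =
        Collections.singletonMap(FactoryUtil.CONNECTOR.key(), DataGenTableSourceFactory.IDENTIFIER);
CatalogTable table = CatalogTable.of(schema, "Flink non-managed table", Collections.emptyList(), options);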
Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.
From the class HiveParserDDLSemanticAnalyzer, the method convertAlterTableChangeCol:
private Operation convertAlterTableChangeCol(CatalogBaseTable alteredTable, String[] qualified, HiveParserASTNode ast) throws SemanticException {
    String newComment = null;
    boolean first = false;
    String flagCol = null;
    boolean isCascade = false;
    // col_old_name col_new_name column_type [COMMENT col_comment] [FIRST|AFTER column_name]
    // [CASCADE|RESTRICT]
    String oldColName = ast.getChild(0).getText();
    String newColName = ast.getChild(1).getText();
    String newType = HiveParserBaseSemanticAnalyzer.getTypeStringFromAST((HiveParserASTNode) ast.getChild(2));
    int childCount = ast.getChildCount();
    for (int i = 3; i < childCount; i++) {
        HiveParserASTNode child = (HiveParserASTNode) ast.getChild(i);
        switch (child.getToken().getType()) {
            case HiveASTParser.StringLiteral:
                newComment = HiveParserBaseSemanticAnalyzer.unescapeSQLString(child.getText());
                break;
            case HiveASTParser.TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION:
                flagCol = HiveParserBaseSemanticAnalyzer.unescapeIdentifier(child.getChild(0).getText());
                break;
            case HiveASTParser.KW_FIRST:
                first = true;
                break;
            case HiveASTParser.TOK_CASCADE:
                isCascade = true;
                break;
            case HiveASTParser.TOK_RESTRICT:
                break;
            default:
                throw new ValidationException("Unsupported token: " + child.getToken() + " for alter table");
        }
    }
    // Validate the operation of renaming a column name.
    Table tab = getTable(new ObjectPath(qualified[0], qualified[1]));
    SkewedInfo skewInfo = tab.getTTable().getSd().getSkewedInfo();
    if ((null != skewInfo) && (null != skewInfo.getSkewedColNames()) && skewInfo.getSkewedColNames().contains(oldColName)) {
        throw new ValidationException(oldColName + ErrorMsg.ALTER_TABLE_NOT_ALLOWED_RENAME_SKEWED_COLUMN.getMsg());
    }
    String tblName = HiveParserBaseSemanticAnalyzer.getDotName(qualified);
    ObjectIdentifier tableIdentifier = parseObjectIdentifier(tblName);
    CatalogTable oldTable = (CatalogTable) alteredTable;
    String oldName = HiveParserBaseSemanticAnalyzer.unescapeIdentifier(oldColName);
    String newName = HiveParserBaseSemanticAnalyzer.unescapeIdentifier(newColName);
    if (oldTable.getPartitionKeys().contains(oldName)) {
        // disallow changing partition columns
        throw new ValidationException("CHANGE COLUMN cannot be applied to partition columns");
    }
    TableSchema oldSchema = oldTable.getSchema();
    TableColumn newTableColumn = TableColumn.physical(newName, HiveTypeUtil.toFlinkType(TypeInfoUtils.getTypeInfoFromTypeString(newType)));
    TableSchema newSchema = OperationConverterUtils.changeColumn(oldSchema, oldName, newTableColumn, first, flagCol);
    Map<String, String> props = new HashMap<>(oldTable.getOptions());
    props.put(ALTER_TABLE_OP, ALTER_COLUMNS.name());
    if (isCascade) {
        props.put(ALTER_COL_CASCADE, "true");
    }
    return new AlterTableSchemaOperation(tableIdentifier, new CatalogTableImpl(newSchema, oldTable.getPartitionKeys(), props, oldTable.getComment()));
}
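For context, this converter handles the Hive-dialect ALTER TABLE ... CHANGE COLUMN statement described by the grammar comment at the top of the method. A minimal usage sketch, assuming a TableEnvironment already backed by a HiveCatalog (the database, table, and column names are placeholders):
// Illustrative only: a Hive-dialect CHANGE COLUMN statement that this converter
// turns into an AlterTableSchemaOperation.
tableEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
tableEnv.executeSql(
        "ALTER TABLE db1.tbl1 CHANGE COLUMN old_col new_col STRING COMMENT 'renamed column' AFTER other_col CASCADE");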
Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.
From the class HiveCatalogITCase, the method testCsvTableViaAPI:
@Test
public void testCsvTableViaAPI() throws Exception {
    TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
    tableEnv.getConfig().addConfiguration(new Configuration().set(CoreOptions.DEFAULT_PARALLELISM, 1));
    tableEnv.registerCatalog("myhive", hiveCatalog);
    tableEnv.useCatalog("myhive");
    final TableSchema schema =
            TableSchema.builder().field("name", DataTypes.STRING()).field("age", DataTypes.INT()).build();
    final Map<String, String> sourceOptions = new HashMap<>();
    sourceOptions.put("connector.type", "filesystem");
    sourceOptions.put("connector.path", getClass().getResource("/csv/test.csv").getPath());
    sourceOptions.put("format.type", "csv");
    CatalogTable source = new CatalogTableImpl(schema, sourceOptions, "Comment.");
    Path p = Paths.get(tempFolder.newFolder().getAbsolutePath(), "test.csv");
    final Map<String, String> sinkOptions = new HashMap<>();
    sinkOptions.put("connector.type", "filesystem");
    sinkOptions.put("connector.path", p.toAbsolutePath().toString());
    sinkOptions.put("format.type", "csv");
    CatalogTable sink = new CatalogTableImpl(schema, sinkOptions, "Comment.");
    hiveCatalog.createTable(new ObjectPath(HiveCatalog.DEFAULT_DB, sourceTableName), source, false);
    hiveCatalog.createTable(new ObjectPath(HiveCatalog.DEFAULT_DB, sinkTableName), sink, false);
    Table t = tableEnv.sqlQuery(String.format("select * from myhive.`default`.%s", sourceTableName));
    List<Row> result = CollectionUtil.iteratorToList(t.execute().collect());
    result.sort(Comparator.comparing(String::valueOf));
    // assert query result
    assertThat(result).containsExactly(Row.of("1", 1), Row.of("2", 2), Row.of("3", 3));
    tableEnv.executeSql(
                    String.format(
                            "insert into myhive.`default`.%s select * from myhive.`default`.%s",
                            sinkTableName, sourceTableName))
            .await();
    // assert written result
    File resultFile = new File(p.toAbsolutePath().toString());
    BufferedReader reader = new BufferedReader(new FileReader(resultFile));
    String readLine;
    for (int i = 0; i < 3; i++) {
        readLine = reader.readLine();
        assertThat(readLine).isEqualTo(String.format("%d,%d", i + 1, i + 1));
    }
    // No more line
    assertThat(reader.readLine()).isNull();
    tableEnv.executeSql(String.format("DROP TABLE %s", sourceTableName));
    tableEnv.executeSql(String.format("DROP TABLE %s", sinkTableName));
}
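This test registers the source and sink through the catalog API using the legacy 'connector.type'/'format.type' properties. A rough sketch of the DDL route with the current filesystem connector, shown for comparison (the table name and path are placeholders, and the options use the newer 'connector'/'format' keys rather than the legacy ones above):
// Rough sketch of the DDL-based equivalent with the current filesystem connector.
tableEnv.executeSql(
        "CREATE TABLE csv_source (name STRING, age INT) WITH ("
                + "'connector' = 'filesystem',"
                + "'path' = '/path/to/test.csv',"
                + "'format' = 'csv')");
tableEnv.executeSql("SELECT * FROM csv_source").print();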