Use of org.apache.flink.table.catalog.CatalogView in project flink by apache.
The class HiveTableUtil, method instantiateHiveTable.
public static Table instantiateHiveTable(
        ObjectPath tablePath, CatalogBaseTable table, HiveConf hiveConf, boolean managedTable) {
    final boolean isView = table instanceof CatalogView;
    // let Hive set default parameters for us, e.g. serialization.format
    Table hiveTable =
            org.apache.hadoop.hive.ql.metadata.Table.getEmptyTable(
                    tablePath.getDatabaseName(), tablePath.getObjectName());
    hiveTable.setCreateTime((int) (System.currentTimeMillis() / 1000));
    Map<String, String> properties = new HashMap<>(table.getOptions());
    if (managedTable) {
        properties.put(CONNECTOR.key(), ManagedTableFactory.DEFAULT_IDENTIFIER);
    }
    // Table comment
    if (table.getComment() != null) {
        properties.put(HiveCatalogConfig.COMMENT, table.getComment());
    }
    boolean isHiveTable = HiveCatalog.isHiveTable(properties);
    // Hive table's StorageDescriptor
    StorageDescriptor sd = hiveTable.getSd();
    HiveTableUtil.setDefaultStorageFormat(sd, hiveConf);
    // we always store schema as properties for views, because a view schema may not be
    // mappable to a Hive schema. This also means views created by Flink cannot be used
    // in Hive, which is fine because Hive cannot understand the expanded query anyway
    if (isHiveTable && !isView) {
        HiveTableUtil.initiateTableFromProperties(hiveTable, properties, hiveConf);
        List<FieldSchema> allColumns = HiveTableUtil.createHiveColumns(table.getSchema());
        // Table columns and partition keys
        if (table instanceof CatalogTable) {
            CatalogTable catalogTable = (CatalogTable) table;
            if (catalogTable.isPartitioned()) {
                int partitionKeySize = catalogTable.getPartitionKeys().size();
                List<FieldSchema> regularColumns =
                        allColumns.subList(0, allColumns.size() - partitionKeySize);
                List<FieldSchema> partitionColumns =
                        allColumns.subList(
                                allColumns.size() - partitionKeySize, allColumns.size());
                sd.setCols(regularColumns);
                hiveTable.setPartitionKeys(partitionColumns);
            } else {
                sd.setCols(allColumns);
                hiveTable.setPartitionKeys(new ArrayList<>());
            }
        } else {
            sd.setCols(allColumns);
        }
        // Table properties
        hiveTable.getParameters().putAll(properties);
    } else {
        DescriptorProperties tableSchemaProps = new DescriptorProperties(true);
        tableSchemaProps.putTableSchema(Schema.SCHEMA, table.getSchema());
        if (table instanceof CatalogTable) {
            tableSchemaProps.putPartitionKeys(((CatalogTable) table).getPartitionKeys());
        }
        properties.putAll(tableSchemaProps.asMap());
        properties = maskFlinkProperties(properties);
        // we may need to explicitly set the is_generic flag in the following cases:
        // 1. the user doesn't specify 'connector' or 'connector.type' when creating a
        // table; without the flag such a table would be treated as a Hive table upon
        // retrieval
        // 2. when creating views, which don't have connector properties
        if (isView
                || (!properties.containsKey(FLINK_PROPERTY_PREFIX + CONNECTOR.key())
                        && !properties.containsKey(FLINK_PROPERTY_PREFIX + CONNECTOR_TYPE))) {
            properties.put(IS_GENERIC, "true");
        }
        hiveTable.setParameters(properties);
    }
    if (isView) {
        // TODO: [FLINK-12398] Support partitioned view in catalog API
        hiveTable.setPartitionKeys(new ArrayList<>());
        CatalogView view = (CatalogView) table;
        hiveTable.setViewOriginalText(view.getOriginalQuery());
        hiveTable.setViewExpandedText(view.getExpandedQuery());
        hiveTable.setTableType(TableType.VIRTUAL_VIEW.name());
    }
    return hiveTable;
}
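For context, a minimal calling sketch, not taken from the Flink sources: the view definition, ObjectPath, and query strings below are made up for illustration, and the CatalogViewImpl constructor order reflects the legacy catalog API used above.

// Hypothetical usage sketch: turn a Flink CatalogView into a Hive metastore Table.
ObjectPath viewPath = new ObjectPath("default", "my_view");
CatalogView view =
        new CatalogViewImpl(
                "SELECT x FROM t",                    // original query
                "SELECT `t`.`x` FROM `default`.`t`",  // expanded query
                TableSchema.builder().field("x", DataTypes.INT()).build(),
                new HashMap<>(),                      // options
                "a demo view");                       // comment
Table hiveView =
        HiveTableUtil.instantiateHiveTable(viewPath, view, new HiveConf(), false);
// the result carries the view marker and both query texts
assert TableType.VIRTUAL_VIEW.name().equals(hiveView.getTableType());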
Use of org.apache.flink.table.catalog.CatalogView in project flink by apache.
The class HiveCatalogHiveMetadataTest, method testViewCompatibility.
@Test
public void testViewCompatibility() throws Exception {
    // we always store view schema via properties now
    // make sure non-generic views created previously can still be used
    catalog.createDatabase(db1, createDb(), false);
    Table hiveView =
            org.apache.hadoop.hive.ql.metadata.Table.getEmptyTable(
                    path1.getDatabaseName(), path1.getObjectName());
    // mark as a view
    hiveView.setTableType(TableType.VIRTUAL_VIEW.name());
    final String originQuery = "view origin query";
    final String expandedQuery = "view expanded query";
    hiveView.setViewOriginalText(originQuery);
    hiveView.setViewExpandedText(expandedQuery);
    // set schema in SD
    Schema schema =
            Schema.newBuilder()
                    .fromFields(
                            new String[] {"i", "s"},
                            new AbstractDataType[] {DataTypes.INT(), DataTypes.STRING()})
                    .build();
    List<FieldSchema> fields = new ArrayList<>();
    for (Schema.UnresolvedColumn column : schema.getColumns()) {
        String name = column.getName();
        DataType type = (DataType) ((Schema.UnresolvedPhysicalColumn) column).getDataType();
        fields.add(
                new FieldSchema(
                        name, HiveTypeUtil.toHiveTypeInfo(type, true).getTypeName(), null));
    }
    hiveView.getSd().setCols(fields);
    // test mark as non-generic with is_generic
    hiveView.getParameters().put(CatalogPropertiesUtil.IS_GENERIC, "false");
    // add some other properties
    hiveView.getParameters().put("k1", "v1");
    ((HiveCatalog) catalog).client.createTable(hiveView);
    CatalogBaseTable baseTable = catalog.getTable(path1);
    assertTrue(baseTable instanceof CatalogView);
    CatalogView catalogView = (CatalogView) baseTable;
    assertEquals(schema, catalogView.getUnresolvedSchema());
    assertEquals(originQuery, catalogView.getOriginalQuery());
    assertEquals(expandedQuery, catalogView.getExpandedQuery());
    assertEquals("v1", catalogView.getOptions().get("k1"));
    // test mark as non-generic with connector
    hiveView.setDbName(path3.getDatabaseName());
    hiveView.setTableName(path3.getObjectName());
    hiveView.getParameters().remove(CatalogPropertiesUtil.IS_GENERIC);
    hiveView.getParameters().put(CONNECTOR.key(), IDENTIFIER);
    ((HiveCatalog) catalog).client.createTable(hiveView);
    baseTable = catalog.getTable(path3);
    assertTrue(baseTable instanceof CatalogView);
    catalogView = (CatalogView) baseTable;
    assertEquals(schema, catalogView.getUnresolvedSchema());
    assertEquals(originQuery, catalogView.getOriginalQuery());
    assertEquals(expandedQuery, catalogView.getExpandedQuery());
    assertEquals("v1", catalogView.getOptions().get("k1"));
}
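Both retrieval checks in the test follow the same fetch-then-cast pattern. A small hedged helper, not part of the test class and named here only for illustration, makes that pattern reusable:

// Hypothetical convenience helper, assuming the Catalog API shown above:
// fetch a path and fail fast unless it resolves to a CatalogView.
static CatalogView getViewOrFail(Catalog catalog, ObjectPath path) throws Exception {
    CatalogBaseTable t = catalog.getTable(path);
    if (!(t instanceof CatalogView)) {
        throw new IllegalStateException(path.getFullName() + " is not a view");
    }
    return (CatalogView) t;
}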
Use of org.apache.flink.table.catalog.CatalogView in project flink by apache.
The class HiveParserDDLSemanticAnalyzer, method convertDropTable.
private Operation convertDropTable(HiveParserASTNode ast, TableType expectedType) {
    String tableName =
            HiveParserBaseSemanticAnalyzer.getUnescapedName(
                    (HiveParserASTNode) ast.getChild(0));
    boolean ifExists = (ast.getFirstChildWithType(HiveASTParser.TOK_IFEXISTS) != null);
    ObjectIdentifier identifier = parseObjectIdentifier(tableName);
    CatalogBaseTable baseTable = getCatalogBaseTable(identifier, true);
    if (expectedType == TableType.VIRTUAL_VIEW) {
        if (baseTable instanceof CatalogTable) {
            throw new ValidationException("DROP VIEW for a table is not allowed");
        }
        return new DropViewOperation(identifier, ifExists, false);
    } else {
        if (baseTable instanceof CatalogView) {
            throw new ValidationException("DROP TABLE for a view is not allowed");
        }
        return new DropTableOperation(identifier, ifExists, false);
    }
}
Use of org.apache.flink.table.catalog.CatalogView in project flink by apache.
The class HiveParserDDLSemanticAnalyzer, method getAlteredTable.
private CatalogBaseTable getAlteredTable(String tableName, boolean expectView) {
    ObjectIdentifier objectIdentifier = parseObjectIdentifier(tableName);
    CatalogBaseTable catalogBaseTable = getCatalogBaseTable(objectIdentifier);
    if (expectView) {
        if (catalogBaseTable instanceof CatalogTable) {
            throw new ValidationException("ALTER VIEW for a table is not allowed");
        }
    } else {
        if (catalogBaseTable instanceof CatalogView) {
            throw new ValidationException("ALTER TABLE for a view is not allowed");
        }
    }
    return catalogBaseTable;
}
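convertDropTable and getAlteredTable enforce the same symmetry: a statement aimed at views must not resolve to a CatalogTable, and vice versa. One way to see both checks fire is through Hive-dialect SQL; the following is a hedged negative-test sketch, assuming a tableEnv configured for the Hive dialect as in the ITCase below, and is not taken from the Flink test suite:

// Hypothetical negative test: both statements should be rejected with
// ValidationException by the two methods above.
tableEnv.executeSql("create table t1 (x int)");
tableEnv.executeSql("create view v1 as select x from t1");
try {
    tableEnv.executeSql("drop view t1"); // DROP VIEW aimed at a table
} catch (ValidationException e) {
    // expected: "DROP VIEW for a table is not allowed"
}
try {
    tableEnv.executeSql("alter table v1 rename to v2"); // ALTER TABLE aimed at a view
} catch (ValidationException e) {
    // expected: "ALTER TABLE for a view is not allowed"
}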
Use of org.apache.flink.table.catalog.CatalogView in project flink by apache.
The class HiveDialectITCase, method testView.
@Test
public void testView() throws Exception {
    tableEnv.executeSql("create table tbl (x int,y string)");
    // create
    tableEnv.executeSql(
            "create view v(vx) comment 'v comment' tblproperties ('k1'='v1') as select x from tbl");
    ObjectPath viewPath = new ObjectPath("default", "v");
    CatalogBaseTable catalogBaseTable = hiveCatalog.getTable(viewPath);
    assertTrue(catalogBaseTable instanceof CatalogView);
    assertEquals("vx", catalogBaseTable.getUnresolvedSchema().getColumns().get(0).getName());
    assertEquals("v1", catalogBaseTable.getOptions().get("k1"));
    // change properties
    tableEnv.executeSql("alter view v set tblproperties ('k1'='v11')");
    catalogBaseTable = hiveCatalog.getTable(viewPath);
    assertEquals("v11", catalogBaseTable.getOptions().get("k1"));
    // change query
    tableEnv.executeSql("alter view v as select y from tbl");
    catalogBaseTable = hiveCatalog.getTable(viewPath);
    assertEquals("y", catalogBaseTable.getUnresolvedSchema().getColumns().get(0).getName());
    // rename
    tableEnv.executeSql("alter view v rename to v1");
    viewPath = new ObjectPath("default", "v1");
    assertTrue(hiveCatalog.tableExists(viewPath));
    // drop
    tableEnv.executeSql("drop view v1");
    assertFalse(hiveCatalog.tableExists(viewPath));
}
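The statements above use Hive DDL syntax (tblproperties, alter view ... rename), so the test relies on a TableEnvironment switched to the Hive dialect. A minimal setup sketch, assuming an already constructed hiveCatalog; the catalog name "hive" and the rest of the wiring are assumptions, not the test's actual harness:

// Hypothetical setup mirroring what a Hive dialect test typically needs.
TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
tableEnv.registerCatalog("hive", hiveCatalog);
tableEnv.useCatalog("hive");
tableEnv.getConfig().setSqlDialect(SqlDialect.HIVE); // enable Hive DDL parsing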