Example 1 with CatalogBaseTable

Use of org.apache.flink.table.catalog.CatalogBaseTable in the Apache Flink project.

From the class HiveTableUtil, method instantiateHiveTable:

public static Table instantiateHiveTable(ObjectPath tablePath, CatalogBaseTable table, HiveConf hiveConf, boolean managedTable) {
    final boolean isView = table instanceof CatalogView;
    // let Hive set default parameters for us, e.g. serialization.format
    Table hiveTable = org.apache.hadoop.hive.ql.metadata.Table.getEmptyTable(tablePath.getDatabaseName(), tablePath.getObjectName());
    hiveTable.setCreateTime((int) (System.currentTimeMillis() / 1000));
    Map<String, String> properties = new HashMap<>(table.getOptions());
    if (managedTable) {
        properties.put(CONNECTOR.key(), ManagedTableFactory.DEFAULT_IDENTIFIER);
    }
    // Table comment
    if (table.getComment() != null) {
        properties.put(HiveCatalogConfig.COMMENT, table.getComment());
    }
    boolean isHiveTable = HiveCatalog.isHiveTable(properties);
    // Hive table's StorageDescriptor
    StorageDescriptor sd = hiveTable.getSd();
    HiveTableUtil.setDefaultStorageFormat(sd, hiveConf);
    // for Hive tables that are not views, populate the StorageDescriptor directly;
    // generic tables and all views store the Flink schema as properties instead,
    // because Hive cannot understand the expanded query anyway
    if (isHiveTable && !isView) {
        HiveTableUtil.initiateTableFromProperties(hiveTable, properties, hiveConf);
        List<FieldSchema> allColumns = HiveTableUtil.createHiveColumns(table.getSchema());
        // Table columns and partition keys
        if (table instanceof CatalogTable) {
            CatalogTable catalogTable = (CatalogTable) table;
            if (catalogTable.isPartitioned()) {
                int partitionKeySize = catalogTable.getPartitionKeys().size();
                List<FieldSchema> regularColumns = allColumns.subList(0, allColumns.size() - partitionKeySize);
                List<FieldSchema> partitionColumns = allColumns.subList(allColumns.size() - partitionKeySize, allColumns.size());
                sd.setCols(regularColumns);
                hiveTable.setPartitionKeys(partitionColumns);
            } else {
                sd.setCols(allColumns);
                hiveTable.setPartitionKeys(new ArrayList<>());
            }
        } else {
            sd.setCols(allColumns);
        }
        // Table properties
        hiveTable.getParameters().putAll(properties);
    } else {
        DescriptorProperties tableSchemaProps = new DescriptorProperties(true);
        tableSchemaProps.putTableSchema(Schema.SCHEMA, table.getSchema());
        if (table instanceof CatalogTable) {
            tableSchemaProps.putPartitionKeys(((CatalogTable) table).getPartitionKeys());
        }
        properties.putAll(tableSchemaProps.asMap());
        properties = maskFlinkProperties(properties);
        // mark the table as generic in the following cases:
        // 1. the table has neither a 'connector' nor a 'connector.type' property
        // 2. when creating views, which don't have connector properties
        if (isView || (!properties.containsKey(FLINK_PROPERTY_PREFIX + CONNECTOR.key()) && !properties.containsKey(FLINK_PROPERTY_PREFIX + CONNECTOR_TYPE))) {
            properties.put(IS_GENERIC, "true");
        }
        hiveTable.setParameters(properties);
    }
    if (isView) {
        // TODO: [FLINK-12398] Support partitioned view in catalog API
        hiveTable.setPartitionKeys(new ArrayList<>());
        CatalogView view = (CatalogView) table;
        hiveTable.setViewOriginalText(view.getOriginalQuery());
        hiveTable.setViewExpandedText(view.getExpandedQuery());
        hiveTable.setTableType(TableType.VIRTUAL_VIEW.name());
    }
    return hiveTable;
}
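
For context, a minimal sketch of how this utility might be invoked from a catalog implementation. This is illustrative only: hiveConf and the metastore client are assumed to already exist (as they do inside HiveCatalog), and the table definition follows the CatalogTableImpl pattern that Example 3 below uses.

// Illustrative caller sketch (hypothetical names; hiveConf and client come from the
// surrounding HiveCatalog in real code).
TableSchema schema = TableSchema.builder()
        .field("id", DataTypes.INT())
        .field("name", DataTypes.STRING())
        .build();
CatalogTable catalogTable = new CatalogTableImpl(schema, Collections.emptyMap(), "example table");
// Convert the Flink-side definition into a Hive metastore Table (managedTable = false)
// and persist it through the metastore client.
Table hiveTable = HiveTableUtil.instantiateHiveTable(
        new ObjectPath("default", "example"), catalogTable, hiveConf, false);
client.createTable(hiveTable);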
Also used: CatalogTable (org.apache.flink.table.catalog.CatalogTable), SqlAlterHiveTable (org.apache.flink.sql.parser.hive.ddl.SqlAlterHiveTable), CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable), Table (org.apache.hadoop.hive.metastore.api.Table), HashMap (java.util.HashMap), DescriptorProperties (org.apache.flink.table.descriptors.DescriptorProperties), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor), UniqueConstraint (org.apache.flink.table.api.constraints.UniqueConstraint), CatalogView (org.apache.flink.table.catalog.CatalogView)

Example 2 with CatalogBaseTable

Use of org.apache.flink.table.catalog.CatalogBaseTable in the Apache Flink project.

From the class HiveCatalogHiveMetadataTest, method testViewCompatibility:

@Test
public void testViewCompatibility() throws Exception {
    // we always store view schema via properties now
    // make sure non-generic views created previously can still be used
    catalog.createDatabase(db1, createDb(), false);
    Table hiveView = org.apache.hadoop.hive.ql.metadata.Table.getEmptyTable(path1.getDatabaseName(), path1.getObjectName());
    // mark as a view
    hiveView.setTableType(TableType.VIRTUAL_VIEW.name());
    final String originQuery = "view origin query";
    final String expandedQuery = "view expanded query";
    hiveView.setViewOriginalText(originQuery);
    hiveView.setViewExpandedText(expandedQuery);
    // set schema in SD
    Schema schema = Schema.newBuilder().fromFields(new String[] { "i", "s" }, new AbstractDataType[] { DataTypes.INT(), DataTypes.STRING() }).build();
    List<FieldSchema> fields = new ArrayList<>();
    for (Schema.UnresolvedColumn column : schema.getColumns()) {
        String name = column.getName();
        DataType type = (DataType) ((Schema.UnresolvedPhysicalColumn) column).getDataType();
        fields.add(new FieldSchema(name, HiveTypeUtil.toHiveTypeInfo(type, true).getTypeName(), null));
    }
    hiveView.getSd().setCols(fields);
    // test mark as non-generic with is_generic
    hiveView.getParameters().put(CatalogPropertiesUtil.IS_GENERIC, "false");
    // add some other properties
    hiveView.getParameters().put("k1", "v1");
    ((HiveCatalog) catalog).client.createTable(hiveView);
    CatalogBaseTable baseTable = catalog.getTable(path1);
    assertTrue(baseTable instanceof CatalogView);
    CatalogView catalogView = (CatalogView) baseTable;
    assertEquals(schema, catalogView.getUnresolvedSchema());
    assertEquals(originQuery, catalogView.getOriginalQuery());
    assertEquals(expandedQuery, catalogView.getExpandedQuery());
    assertEquals("v1", catalogView.getOptions().get("k1"));
    // test mark as non-generic with connector
    hiveView.setDbName(path3.getDatabaseName());
    hiveView.setTableName(path3.getObjectName());
    hiveView.getParameters().remove(CatalogPropertiesUtil.IS_GENERIC);
    hiveView.getParameters().put(CONNECTOR.key(), IDENTIFIER);
    ((HiveCatalog) catalog).client.createTable(hiveView);
    baseTable = catalog.getTable(path3);
    assertTrue(baseTable instanceof CatalogView);
    catalogView = (CatalogView) baseTable;
    assertEquals(schema, catalogView.getUnresolvedSchema());
    assertEquals(originQuery, catalogView.getOriginalQuery());
    assertEquals(expandedQuery, catalogView.getExpandedQuery());
    assertEquals("v1", catalogView.getOptions().get("k1"));
}
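
The compatibility rule this test exercises can be summarized in a small hypothetical helper (not part of Flink's API): a pre-existing metastore view counts as non-generic if it either carries is_generic=false or declares a connector, which are exactly the two markers the test sets.

// Hypothetical helper, for illustration only; the string keys stand in for
// CatalogPropertiesUtil.IS_GENERIC and CONNECTOR.key().
static boolean isLegacyNonGeneric(Map<String, String> hiveTableParams) {
    return "false".equals(hiveTableParams.get("is_generic")) // first case in the test
            || hiveTableParams.containsKey("connector");     // second case in the test
}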
Also used: CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable), CatalogTable (org.apache.flink.table.catalog.CatalogTable), SqlCreateHiveTable (org.apache.flink.sql.parser.hive.ddl.SqlCreateHiveTable), SqlAlterHiveTable (org.apache.flink.sql.parser.hive.ddl.SqlAlterHiveTable), Table (org.apache.hadoop.hive.metastore.api.Table), AbstractDataType (org.apache.flink.table.types.AbstractDataType), Schema (org.apache.flink.table.api.Schema), TableSchema (org.apache.flink.table.api.TableSchema), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), ArrayList (java.util.ArrayList), DataType (org.apache.flink.table.types.DataType), CatalogColumnStatisticsDataString (org.apache.flink.table.catalog.stats.CatalogColumnStatisticsDataString), CatalogView (org.apache.flink.table.catalog.CatalogView), Test (org.junit.Test)

Example 3 with CatalogBaseTable

Use of org.apache.flink.table.catalog.CatalogBaseTable in the Apache Flink project.

From the class HiveCatalogTest, method testCreateAndGetFlinkManagedTable:

@Test
public void testCreateAndGetFlinkManagedTable() throws Exception {
    CatalogTable table = new CatalogTableImpl(schema, Collections.emptyMap(), "Flink managed table");
    hiveCatalog.createTable(tablePath, table, false);
    Table hiveTable = hiveCatalog.getHiveTable(tablePath);
    assertThat(hiveTable.getParameters()).containsEntry(FLINK_PROPERTY_PREFIX + CONNECTOR.key(), ManagedTableFactory.DEFAULT_IDENTIFIER);
    CatalogBaseTable retrievedTable = hiveCatalog.instantiateCatalogTable(hiveTable);
    assertThat(retrievedTable.getOptions()).isEmpty();
}
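
The two assertions reflect property masking: Flink-specific options are written to the metastore under FLINK_PROPERTY_PREFIX so that plain Hive clients ignore them, and the managed-table connector entry is stripped again when the catalog table is instantiated, which is why the retrieved options are empty. Below is a conceptual sketch of the masking step (not Flink's actual implementation; "flink." stands in for FLINK_PROPERTY_PREFIX).

// Conceptual sketch of maskFlinkProperties: prefix every Flink option on write;
// reading the table back strips the prefix again.
static Map<String, String> maskFlinkProperties(Map<String, String> options) {
    Map<String, String> masked = new HashMap<>(options.size());
    options.forEach((k, v) -> masked.put("flink." + k, v));
    return masked;
}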
Also used: CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable), CatalogTable (org.apache.flink.table.catalog.CatalogTable), SqlCreateHiveTable (org.apache.flink.sql.parser.hive.ddl.SqlCreateHiveTable), Table (org.apache.hadoop.hive.metastore.api.Table), CatalogTableImpl (org.apache.flink.table.catalog.CatalogTableImpl), Test (org.junit.Test)

Example 4 with CatalogBaseTable

Use of org.apache.flink.table.catalog.CatalogBaseTable in the Apache Flink project.

From the class HiveParserDDLSemanticAnalyzer, method convertDropTable:

private Operation convertDropTable(HiveParserASTNode ast, TableType expectedType) {
    String tableName = HiveParserBaseSemanticAnalyzer.getUnescapedName((HiveParserASTNode) ast.getChild(0));
    boolean ifExists = (ast.getFirstChildWithType(HiveASTParser.TOK_IFEXISTS) != null);
    ObjectIdentifier identifier = parseObjectIdentifier(tableName);
    CatalogBaseTable baseTable = getCatalogBaseTable(identifier, true);
    if (expectedType == TableType.VIRTUAL_VIEW) {
        if (baseTable instanceof CatalogTable) {
            throw new ValidationException("DROP VIEW for a table is not allowed");
        }
        return new DropViewOperation(identifier, ifExists, false);
    } else {
        if (baseTable instanceof CatalogView) {
            throw new ValidationException("DROP TABLE for a view is not allowed");
        }
        return new DropTableOperation(identifier, ifExists, false);
    }
}
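
In user-facing terms, the type check makes mismatched Hive-dialect DDL fail fast. Sketched outcomes (illustrative object names):

// DROP TABLE my_view;  -> ValidationException: "DROP TABLE for a view is not allowed"
// DROP VIEW my_table;  -> ValidationException: "DROP VIEW for a table is not allowed"
// DROP TABLE my_table; -> new DropTableOperation(identifier, ifExists, false)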
Also used: CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable), ValidationException (org.apache.flink.table.api.ValidationException), DropViewOperation (org.apache.flink.table.operations.ddl.DropViewOperation), DropTableOperation (org.apache.flink.table.operations.ddl.DropTableOperation), CatalogTable (org.apache.flink.table.catalog.CatalogTable), CatalogView (org.apache.flink.table.catalog.CatalogView), ObjectIdentifier (org.apache.flink.table.catalog.ObjectIdentifier)

Example 5 with CatalogBaseTable

Use of org.apache.flink.table.catalog.CatalogBaseTable in the Apache Flink project.

From the class HiveParserDDLSemanticAnalyzer, method getAlteredTable:

private CatalogBaseTable getAlteredTable(String tableName, boolean expectView) {
    ObjectIdentifier objectIdentifier = parseObjectIdentifier(tableName);
    CatalogBaseTable catalogBaseTable = getCatalogBaseTable(objectIdentifier);
    if (expectView) {
        if (catalogBaseTable instanceof CatalogTable) {
            throw new ValidationException("ALTER VIEW for a table is not allowed");
        }
    } else {
        if (catalogBaseTable instanceof CatalogView) {
            throw new ValidationException("ALTER TABLE for a view is not allowed");
        }
    }
    return catalogBaseTable;
}
Also used: CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable), ValidationException (org.apache.flink.table.api.ValidationException), CatalogTable (org.apache.flink.table.catalog.CatalogTable), CatalogView (org.apache.flink.table.catalog.CatalogView), ObjectIdentifier (org.apache.flink.table.catalog.ObjectIdentifier)

Aggregations

CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable): 34 usages
ObjectPath (org.apache.flink.table.catalog.ObjectPath): 17 usages
CatalogTable (org.apache.flink.table.catalog.CatalogTable): 15 usages
Test (org.junit.Test): 14 usages
ValidationException (org.apache.flink.table.api.ValidationException): 11 usages
ObjectIdentifier (org.apache.flink.table.catalog.ObjectIdentifier): 10 usages
CatalogView (org.apache.flink.table.catalog.CatalogView): 9 usages
TableSchema (org.apache.flink.table.api.TableSchema): 8 usages
Table (org.apache.hadoop.hive.metastore.api.Table): 7 usages
HashMap (java.util.HashMap): 6 usages
SqlCreateHiveTable (org.apache.flink.sql.parser.hive.ddl.SqlCreateHiveTable): 6 usages
UniqueConstraint (org.apache.flink.table.api.constraints.UniqueConstraint): 5 usages
ContextResolvedTable (org.apache.flink.table.catalog.ContextResolvedTable): 5 usages
LinkedHashMap (java.util.LinkedHashMap): 4 usages
Map (java.util.Map): 4 usages
CatalogTableImpl (org.apache.flink.table.catalog.CatalogTableImpl): 4 usages
AlterViewAsOperation (org.apache.flink.table.operations.ddl.AlterViewAsOperation): 4 usages
ArrayList (java.util.ArrayList): 3 usages
TableEnvironment (org.apache.flink.table.api.TableEnvironment): 3 usages
CatalogException (org.apache.flink.table.catalog.exceptions.CatalogException): 3 usages