Example 16 with ObjectPath

Use of org.apache.flink.table.catalog.ObjectPath in project flink by apache.

From the class HiveDialectITCase, method testCreateTableAs.

@Test
public void testCreateTableAs() throws Exception {
    tableEnv.executeSql("create table src (x int,y string)");
    tableEnv.executeSql("create table tbl1 as select x from src group by x").await();
    Table hiveTable = hiveCatalog.getHiveTable(new ObjectPath("default", "tbl1"));
    assertEquals(1, hiveTable.getSd().getCols().size());
    assertEquals("x", hiveTable.getSd().getCols().get(0).getName());
    assertEquals("int", hiveTable.getSd().getCols().get(0).getType());
    tableEnv.executeSql("create table default.tbl2 stored as orc as select x,max(y) as m from src group by x order by x limit 1").await();
    hiveTable = hiveCatalog.getHiveTable(new ObjectPath("default", "tbl2"));
    assertEquals(2, hiveTable.getSd().getCols().size());
    assertEquals("x", hiveTable.getSd().getCols().get(0).getName());
    assertEquals("m", hiveTable.getSd().getCols().get(1).getName());
    assertEquals(OrcSerde.class.getName(), hiveTable.getSd().getSerdeInfo().getSerializationLib());
    assertEquals(OrcInputFormat.class.getName(), hiveTable.getSd().getInputFormat());
    assertEquals(OrcOutputFormat.class.getName(), hiveTable.getSd().getOutputFormat());
}
Also used : ObjectPath(org.apache.flink.table.catalog.ObjectPath) CatalogTable(org.apache.flink.table.catalog.CatalogTable) SqlCreateHiveTable(org.apache.flink.sql.parser.hive.ddl.SqlCreateHiveTable) CatalogBaseTable(org.apache.flink.table.catalog.CatalogBaseTable) Table(org.apache.hadoop.hive.metastore.api.Table) OrcSerde(org.apache.hadoop.hive.ql.io.orc.OrcSerde) OrcInputFormat(org.apache.hadoop.hive.ql.io.orc.OrcInputFormat) OrcOutputFormat(org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat) Test(org.junit.Test)
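
The ObjectPath passed to getHiveTable above simply pairs a database name with an object (table or view) name. A minimal sketch of its accessors, with illustrative values:

// Sketch: the ObjectPath API used throughout these examples; values are illustrative.
ObjectPath path = new ObjectPath("default", "tbl1");
assertEquals("default", path.getDatabaseName());
assertEquals("tbl1", path.getObjectName());
// getFullName() joins both parts with a dot; fromString() parses that form back
assertEquals("default.tbl1", path.getFullName());
assertEquals(path, ObjectPath.fromString("default.tbl1"));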

Example 17 with ObjectPath

Use of org.apache.flink.table.catalog.ObjectPath in project flink by apache.

From the class HiveDialectITCase, method testAlterTable.

@Test
public void testAlterTable() throws Exception {
    tableEnv.executeSql("create table tbl (x int) tblproperties('k1'='v1')");
    tableEnv.executeSql("alter table tbl rename to tbl1");
    ObjectPath tablePath = new ObjectPath("default", "tbl1");
    // change properties
    tableEnv.executeSql("alter table `default`.tbl1 set tblproperties ('k2'='v2')");
    Table hiveTable = hiveCatalog.getHiveTable(tablePath);
    assertEquals("v1", hiveTable.getParameters().get("k1"));
    assertEquals("v2", hiveTable.getParameters().get("k2"));
    // change location
    String newLocation = warehouse + "/tbl1_new_location";
    tableEnv.executeSql(String.format("alter table default.tbl1 set location '%s'", newLocation));
    hiveTable = hiveCatalog.getHiveTable(tablePath);
    assertEquals(newLocation, locationPath(hiveTable.getSd().getLocation()));
    // change file format
    tableEnv.executeSql("alter table tbl1 set fileformat orc");
    hiveTable = hiveCatalog.getHiveTable(tablePath);
    assertEquals(OrcSerde.class.getName(), hiveTable.getSd().getSerdeInfo().getSerializationLib());
    assertEquals(OrcInputFormat.class.getName(), hiveTable.getSd().getInputFormat());
    assertEquals(OrcOutputFormat.class.getName(), hiveTable.getSd().getOutputFormat());
    // change serde
    tableEnv.executeSql(String.format("alter table tbl1 set serde '%s' with serdeproperties('%s'='%s')", LazyBinarySerDe.class.getName(), serdeConstants.FIELD_DELIM, "\u0001"));
    hiveTable = hiveCatalog.getHiveTable(tablePath);
    assertEquals(LazyBinarySerDe.class.getName(), hiveTable.getSd().getSerdeInfo().getSerializationLib());
    assertEquals("\u0001", hiveTable.getSd().getSerdeInfo().getParameters().get(serdeConstants.FIELD_DELIM));
    // replace columns
    tableEnv.executeSql("alter table tbl1 replace columns (t tinyint,s smallint,i int,b bigint,f float,d double,num decimal," + "ts timestamp,dt date,str string,var varchar(10),ch char(123),bool boolean,bin binary)");
    hiveTable = hiveCatalog.getHiveTable(tablePath);
    assertEquals(14, hiveTable.getSd().getColsSize());
    assertEquals("varchar(10)", hiveTable.getSd().getCols().get(10).getType());
    assertEquals("char(123)", hiveTable.getSd().getCols().get(11).getType());
    tableEnv.executeSql("alter table tbl1 replace columns (a array<array<int>>,s struct<f1:struct<f11:int,f12:binary>, f2:map<double,date>>," + "m map<char(5),map<timestamp,decimal(20,10)>>)");
    hiveTable = hiveCatalog.getHiveTable(tablePath);
    assertEquals("array<array<int>>", hiveTable.getSd().getCols().get(0).getType());
    assertEquals("struct<f1:struct<f11:int,f12:binary>,f2:map<double,date>>", hiveTable.getSd().getCols().get(1).getType());
    assertEquals("map<char(5),map<timestamp,decimal(20,10)>>", hiveTable.getSd().getCols().get(2).getType());
    // add columns
    tableEnv.executeSql("alter table tbl1 add columns (x int,y int)");
    hiveTable = hiveCatalog.getHiveTable(tablePath);
    assertEquals(5, hiveTable.getSd().getColsSize());
    // change column
    tableEnv.executeSql("alter table tbl1 change column x x1 string comment 'new x col'");
    hiveTable = hiveCatalog.getHiveTable(tablePath);
    assertEquals(5, hiveTable.getSd().getColsSize());
    FieldSchema newField = hiveTable.getSd().getCols().get(3);
    assertEquals("x1", newField.getName());
    assertEquals("string", newField.getType());
    tableEnv.executeSql("alter table tbl1 change column y y int first");
    hiveTable = hiveCatalog.getHiveTable(tablePath);
    newField = hiveTable.getSd().getCols().get(0);
    assertEquals("y", newField.getName());
    assertEquals("int", newField.getType());
    tableEnv.executeSql("alter table tbl1 change column x1 x2 timestamp after y");
    hiveTable = hiveCatalog.getHiveTable(tablePath);
    newField = hiveTable.getSd().getCols().get(1);
    assertEquals("x2", newField.getName());
    assertEquals("timestamp", newField.getType());
    // add/replace columns cascade
    tableEnv.executeSql("create table tbl2 (x int) partitioned by (dt date,id bigint)");
    tableEnv.executeSql("alter table tbl2 add partition (dt='2020-01-23',id=1) partition (dt='2020-04-24',id=2)");
    CatalogPartitionSpec partitionSpec1 = new CatalogPartitionSpec(new LinkedHashMap<String, String>() {
        {
            put("dt", "2020-01-23");
            put("id", "1");
        }
    });
    CatalogPartitionSpec partitionSpec2 = new CatalogPartitionSpec(new LinkedHashMap<String, String>() {
        {
            put("dt", "2020-04-24");
            put("id", "2");
        }
    });
    tableEnv.executeSql("alter table tbl2 replace columns (ti tinyint,d decimal) cascade");
    ObjectPath tablePath2 = new ObjectPath("default", "tbl2");
    hiveTable = hiveCatalog.getHiveTable(tablePath2);
    Partition hivePartition = hiveCatalog.getHivePartition(hiveTable, partitionSpec1);
    assertEquals(2, hivePartition.getSd().getColsSize());
    hivePartition = hiveCatalog.getHivePartition(hiveTable, partitionSpec2);
    assertEquals(2, hivePartition.getSd().getColsSize());
    tableEnv.executeSql("alter table tbl2 add columns (ch char(5),vch varchar(9)) cascade");
    hivePartition = hiveCatalog.getHivePartition(hiveTable, partitionSpec1);
    assertEquals(4, hivePartition.getSd().getColsSize());
    hivePartition = hiveCatalog.getHivePartition(hiveTable, partitionSpec2);
    assertEquals(4, hivePartition.getSd().getColsSize());
    // change column cascade
    tableEnv.executeSql("alter table tbl2 change column ch ch char(10) cascade");
    hivePartition = hiveCatalog.getHivePartition(hiveTable, partitionSpec1);
    assertEquals("char(10)", hivePartition.getSd().getCols().get(2).getType());
    hivePartition = hiveCatalog.getHivePartition(hiveTable, partitionSpec2);
    assertEquals("char(10)", hivePartition.getSd().getCols().get(2).getType());
    tableEnv.executeSql("alter table tbl2 change column vch str string first cascade");
    hivePartition = hiveCatalog.getHivePartition(hiveTable, partitionSpec1);
    assertEquals("str", hivePartition.getSd().getCols().get(0).getName());
    hivePartition = hiveCatalog.getHivePartition(hiveTable, partitionSpec2);
    assertEquals("str", hivePartition.getSd().getCols().get(0).getName());
}
Also used : Partition(org.apache.hadoop.hive.metastore.api.Partition) ObjectPath(org.apache.flink.table.catalog.ObjectPath) CatalogTable(org.apache.flink.table.catalog.CatalogTable) SqlCreateHiveTable(org.apache.flink.sql.parser.hive.ddl.SqlCreateHiveTable) CatalogBaseTable(org.apache.flink.table.catalog.CatalogBaseTable) Table(org.apache.hadoop.hive.metastore.api.Table) OrcSerde(org.apache.hadoop.hive.ql.io.orc.OrcSerde) OrcInputFormat(org.apache.hadoop.hive.ql.io.orc.OrcInputFormat) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) LazyBinarySerDe(org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe) OrcOutputFormat(org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat) CatalogPartitionSpec(org.apache.flink.table.catalog.CatalogPartitionSpec) Test(org.junit.Test)
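
A side note on the partition specs above: the double-brace LinkedHashMap initialization allocates an anonymous inner class per spec. A sketch of a plainer equivalent that builds the same CatalogPartitionSpec:

// Sketch: the same partition spec without the double-brace idiom.
LinkedHashMap<String, String> spec = new LinkedHashMap<>();
spec.put("dt", "2020-01-23");
spec.put("id", "1");
CatalogPartitionSpec partitionSpec1 = new CatalogPartitionSpec(spec);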

Example 18 with ObjectPath

Use of org.apache.flink.table.catalog.ObjectPath in project flink by apache.

From the class HiveDialectITCase, method testView.

@Test
public void testView() throws Exception {
    tableEnv.executeSql("create table tbl (x int,y string)");
    // create
    tableEnv.executeSql("create view v(vx) comment 'v comment' tblproperties ('k1'='v1') as select x from tbl");
    ObjectPath viewPath = new ObjectPath("default", "v");
    CatalogBaseTable catalogBaseTable = hiveCatalog.getTable(viewPath);
    assertTrue(catalogBaseTable instanceof CatalogView);
    assertEquals("vx", catalogBaseTable.getUnresolvedSchema().getColumns().get(0).getName());
    assertEquals("v1", catalogBaseTable.getOptions().get("k1"));
    // change properties
    tableEnv.executeSql("alter view v set tblproperties ('k1'='v11')");
    catalogBaseTable = hiveCatalog.getTable(viewPath);
    assertEquals("v11", catalogBaseTable.getOptions().get("k1"));
    // change query
    tableEnv.executeSql("alter view v as select y from tbl");
    catalogBaseTable = hiveCatalog.getTable(viewPath);
    assertEquals("y", catalogBaseTable.getUnresolvedSchema().getColumns().get(0).getName());
    // rename
    tableEnv.executeSql("alter view v rename to v1");
    viewPath = new ObjectPath("default", "v1");
    assertTrue(hiveCatalog.tableExists(viewPath));
    // drop
    tableEnv.executeSql("drop view v1");
    assertFalse(hiveCatalog.tableExists(viewPath));
}
Also used : ObjectPath(org.apache.flink.table.catalog.ObjectPath) CatalogBaseTable(org.apache.flink.table.catalog.CatalogBaseTable) CatalogView(org.apache.flink.table.catalog.CatalogView) Test(org.junit.Test)
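
The instanceof check on CatalogView can also be expressed through the table kind. A sketch, assuming the CatalogBaseTable.TableKind accessor available in recent Flink releases:

// Sketch: distinguishing views from tables without instanceof.
CatalogBaseTable t = hiveCatalog.getTable(viewPath);
assertEquals(CatalogBaseTable.TableKind.VIEW, t.getTableKind());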

Example 19 with ObjectPath

Use of org.apache.flink.table.catalog.ObjectPath in project flink by apache.

From the class HiveDialectITCase, method testCreateTable.

@Test
public void testCreateTable() throws Exception {
    // external table: partitioned, with a custom location and table properties
    String location = warehouse + "/external_location";
    tableEnv.executeSql(String.format("create external table tbl1 (d decimal(10,0),ts timestamp) partitioned by (p string) location '%s' tblproperties('k1'='v1')", location));
    Table hiveTable = hiveCatalog.getHiveTable(new ObjectPath("default", "tbl1"));
    assertEquals(TableType.EXTERNAL_TABLE.toString(), hiveTable.getTableType());
    assertEquals(1, hiveTable.getPartitionKeysSize());
    assertEquals(location, locationPath(hiveTable.getSd().getLocation()));
    assertEquals("v1", hiveTable.getParameters().get("k1"));
    assertFalse(hiveTable.getParameters().containsKey(SqlCreateHiveTable.TABLE_LOCATION_URI));
    tableEnv.executeSql("create table tbl2 (s struct<ts:timestamp,bin:binary>) stored as orc");
    hiveTable = hiveCatalog.getHiveTable(new ObjectPath("default", "tbl2"));
    assertEquals(TableType.MANAGED_TABLE.toString(), hiveTable.getTableType());
    assertEquals(OrcSerde.class.getName(), hiveTable.getSd().getSerdeInfo().getSerializationLib());
    assertEquals(OrcInputFormat.class.getName(), hiveTable.getSd().getInputFormat());
    assertEquals(OrcOutputFormat.class.getName(), hiveTable.getSd().getOutputFormat());
    tableEnv.executeSql("create table tbl3 (m map<timestamp,binary>) partitioned by (p1 bigint,p2 tinyint) " + "row format serde 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe'");
    hiveTable = hiveCatalog.getHiveTable(new ObjectPath("default", "tbl3"));
    assertEquals(2, hiveTable.getPartitionKeysSize());
    assertEquals(LazyBinarySerDe.class.getName(), hiveTable.getSd().getSerdeInfo().getSerializationLib());
    tableEnv.executeSql("create table tbl4 (x int,y smallint) row format delimited fields terminated by '|' lines terminated by '\n'");
    hiveTable = hiveCatalog.getHiveTable(new ObjectPath("default", "tbl4"));
    assertEquals("|", hiveTable.getSd().getSerdeInfo().getParameters().get(serdeConstants.FIELD_DELIM));
    assertEquals("|", hiveTable.getSd().getSerdeInfo().getParameters().get(serdeConstants.SERIALIZATION_FORMAT));
    assertEquals("\n", hiveTable.getSd().getSerdeInfo().getParameters().get(serdeConstants.LINE_DELIM));
    tableEnv.executeSql("create table tbl5 (m map<bigint,string>) row format delimited collection items terminated by ';' " + "map keys terminated by ':'");
    hiveTable = hiveCatalog.getHiveTable(new ObjectPath("default", "tbl5"));
    assertEquals(";", hiveTable.getSd().getSerdeInfo().getParameters().get(serdeConstants.COLLECTION_DELIM));
    assertEquals(":", hiveTable.getSd().getSerdeInfo().getParameters().get(serdeConstants.MAPKEY_DELIM));
    // 'create table if not exists' must leave the existing table untouched,
    // so the create time should not change
    int createdTimeForTableExists = hiveTable.getCreateTime();
    tableEnv.executeSql("create table if not exists tbl5 (m map<bigint,string>)");
    hiveTable = hiveCatalog.getHiveTable(new ObjectPath("default", "tbl5"));
    assertEquals(createdTimeForTableExists, hiveTable.getCreateTime());
    // test describe table
    Parser parser = ((TableEnvironmentInternal) tableEnv).getParser();
    DescribeTableOperation operation = (DescribeTableOperation) parser.parse("desc tbl1").get(0);
    assertFalse(operation.isExtended());
    assertEquals(ObjectIdentifier.of(hiveCatalog.getName(), "default", "tbl1"), operation.getSqlIdentifier());
    operation = (DescribeTableOperation) parser.parse("describe default.tbl2").get(0);
    assertFalse(operation.isExtended());
    assertEquals(ObjectIdentifier.of(hiveCatalog.getName(), "default", "tbl2"), operation.getSqlIdentifier());
    operation = (DescribeTableOperation) parser.parse("describe extended tbl3").get(0);
    assertTrue(operation.isExtended());
    assertEquals(ObjectIdentifier.of(hiveCatalog.getName(), "default", "tbl3"), operation.getSqlIdentifier());
}
Also used : ObjectPath(org.apache.flink.table.catalog.ObjectPath) CatalogTable(org.apache.flink.table.catalog.CatalogTable) SqlCreateHiveTable(org.apache.flink.sql.parser.hive.ddl.SqlCreateHiveTable) CatalogBaseTable(org.apache.flink.table.catalog.CatalogBaseTable) Table(org.apache.hadoop.hive.metastore.api.Table) TableEnvironmentInternal(org.apache.flink.table.api.internal.TableEnvironmentInternal) OrcSerde(org.apache.hadoop.hive.ql.io.orc.OrcSerde) OrcInputFormat(org.apache.hadoop.hive.ql.io.orc.OrcInputFormat) LazyBinarySerDe(org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe) DescribeTableOperation(org.apache.flink.table.operations.DescribeTableOperation) OrcOutputFormat(org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat) HiveParser(org.apache.flink.table.planner.delegation.hive.HiveParser) Parser(org.apache.flink.table.delegation.Parser) Test(org.junit.Test)
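
The describe assertions above compare against ObjectIdentifier, which scopes an ObjectPath with a catalog name. A minimal sketch of how the two relate:

// Sketch: ObjectIdentifier adds the catalog name on top of ObjectPath.
ObjectIdentifier id = ObjectIdentifier.of(hiveCatalog.getName(), "default", "tbl1");
// toObjectPath() drops the catalog part again
assertEquals(new ObjectPath("default", "tbl1"), id.toObjectPath());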

Example 20 with ObjectPath

Use of org.apache.flink.table.catalog.ObjectPath in project flink by apache.

From the class CatalogTableStats, method register2Catalog.

public void register2Catalog(TableEnvironment tEnv, String table) {
    tEnv.getCatalog(tEnv.getCurrentCatalog()).ifPresent(catalog -> {
        try {
            // table-level statistics; the final 'false' is ignoreIfNotExists,
            // so a missing table surfaces as an exception wrapped below
            catalog.alterTableStatistics(new ObjectPath(tEnv.getCurrentDatabase(), table), catalogTableStatistics, false);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    });
    tEnv.getCatalog(tEnv.getCurrentCatalog()).ifPresent(catalog -> {
        try {
            // column-level statistics for the same table
            catalog.alterTableColumnStatistics(new ObjectPath(tEnv.getCurrentDatabase(), table), catalogColumnStatistics, false);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    });
}
Also used : ObjectPath(org.apache.flink.table.catalog.ObjectPath)
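
For context, a hedged sketch of calling the same catalog API directly, outside the CatalogTableStats helper; the table name and statistics values here are hypothetical, since catalogTableStatistics is initialized elsewhere in the class:

// Sketch: registering table statistics inline; values are illustrative, not from the source.
public void registerStatsInline(TableEnvironment tEnv) throws Exception {
    Catalog catalog = tEnv.getCatalog(tEnv.getCurrentCatalog()).orElseThrow(IllegalStateException::new);
    // "my_table" is a hypothetical table in the current database
    ObjectPath tablePath = new ObjectPath(tEnv.getCurrentDatabase(), "my_table");
    // rowCount=100, fileCount=10, totalSize=1024 bytes, rawDataSize=0
    catalog.alterTableStatistics(tablePath, new CatalogTableStatistics(100, 10, 1024, 0), false);
}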

Aggregations

ObjectPath (org.apache.flink.table.catalog.ObjectPath): 81
Test (org.junit.Test): 52
CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable): 32
CatalogTable (org.apache.flink.table.catalog.CatalogTable): 29
HashMap (java.util.HashMap): 21
CatalogTableImpl (org.apache.flink.table.catalog.CatalogTableImpl): 20
TableSchema (org.apache.flink.table.api.TableSchema): 19
TableEnvironment (org.apache.flink.table.api.TableEnvironment): 17
CatalogPartitionSpec (org.apache.flink.table.catalog.CatalogPartitionSpec): 12
Table (org.apache.hadoop.hive.metastore.api.Table): 12
Configuration (org.apache.flink.configuration.Configuration): 11
SqlCreateHiveTable (org.apache.flink.sql.parser.hive.ddl.SqlCreateHiveTable): 11
TableNotExistException (org.apache.flink.table.catalog.exceptions.TableNotExistException): 9
ArrayList (java.util.ArrayList): 8
Map (java.util.Map): 8
GenericInMemoryCatalog (org.apache.flink.table.catalog.GenericInMemoryCatalog): 8
LinkedHashMap (java.util.LinkedHashMap): 7
Catalog (org.apache.flink.table.catalog.Catalog): 7
ContextResolvedTable (org.apache.flink.table.catalog.ContextResolvedTable): 6
ObjectIdentifier (org.apache.flink.table.catalog.ObjectIdentifier): 6