Example 66 with ObjectPath

Use of org.apache.flink.table.catalog.ObjectPath in project flink by apache.

From the class HiveDialectITCase, method testAlterPartition.

@Test
public void testAlterPartition() throws Exception {
    tableEnv.executeSql("create table tbl (x tinyint,y string) partitioned by (p1 bigint,p2 date)");
    tableEnv.executeSql("alter table tbl add partition (p1=1000,p2='2020-05-01') partition (p1=2000,p2='2020-01-01')");
    CatalogPartitionSpec spec1 = new CatalogPartitionSpec(new LinkedHashMap<String, String>() {

        {
            put("p1", "1000");
            put("p2", "2020-05-01");
        }
    });
    CatalogPartitionSpec spec2 = new CatalogPartitionSpec(new LinkedHashMap<String, String>() {

        {
            put("p1", "2000");
            put("p2", "2020-01-01");
        }
    });
    ObjectPath tablePath = new ObjectPath("default", "tbl");
    Table hiveTable = hiveCatalog.getHiveTable(tablePath);
    // change location
    String location = warehouse + "/new_part_location";
    tableEnv.executeSql(String.format("alter table tbl partition (p1=1000,p2='2020-05-01') set location '%s'", location));
    Partition partition = hiveCatalog.getHivePartition(hiveTable, spec1);
    assertEquals(location, locationPath(partition.getSd().getLocation()));
    // change file format
    tableEnv.executeSql("alter table tbl partition (p1=2000,p2='2020-01-01') set fileformat rcfile");
    partition = hiveCatalog.getHivePartition(hiveTable, spec2);
    assertEquals(LazyBinaryColumnarSerDe.class.getName(), partition.getSd().getSerdeInfo().getSerializationLib());
    assertEquals(RCFileInputFormat.class.getName(), partition.getSd().getInputFormat());
    assertEquals(RCFileOutputFormat.class.getName(), partition.getSd().getOutputFormat());
    // change serde
    tableEnv.executeSql(String.format("alter table tbl partition (p1=1000,p2='2020-05-01') set serde '%s' with serdeproperties('%s'='%s')", LazyBinarySerDe.class.getName(), serdeConstants.LINE_DELIM, "\n"));
    partition = hiveCatalog.getHivePartition(hiveTable, spec1);
    assertEquals(LazyBinarySerDe.class.getName(), partition.getSd().getSerdeInfo().getSerializationLib());
    assertEquals("\n", partition.getSd().getSerdeInfo().getParameters().get(serdeConstants.LINE_DELIM));
}
Also used : Partition (org.apache.hadoop.hive.metastore.api.Partition), RCFileOutputFormat (org.apache.hadoop.hive.ql.io.RCFileOutputFormat), ObjectPath (org.apache.flink.table.catalog.ObjectPath), CatalogTable (org.apache.flink.table.catalog.CatalogTable), SqlCreateHiveTable (org.apache.flink.sql.parser.hive.ddl.SqlCreateHiveTable), CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable), Table (org.apache.hadoop.hive.metastore.api.Table), RCFileInputFormat (org.apache.hadoop.hive.ql.io.RCFileInputFormat), LazyBinaryColumnarSerDe (org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe), LazyBinarySerDe (org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe), CatalogPartitionSpec (org.apache.flink.table.catalog.CatalogPartitionSpec), Test (org.junit.Test)
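
A minimal sketch follows, assuming catalog is any registered org.apache.flink.table.catalog.Catalog instance (getHiveTable and getHivePartition above are HiveCatalog-specific helpers): it reads the same partition metadata through the generic Catalog API. The method name readPartitionProps is hypothetical.

import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.flink.table.catalog.Catalog;
import org.apache.flink.table.catalog.CatalogPartition;
import org.apache.flink.table.catalog.CatalogPartitionSpec;
import org.apache.flink.table.catalog.ObjectPath;

static Map<String, String> readPartitionProps(Catalog catalog) throws Exception {
    // same partition spec as spec1 in the test above
    Map<String, String> keys = new LinkedHashMap<>();
    keys.put("p1", "1000");
    keys.put("p2", "2020-05-01");
    CatalogPartitionSpec spec = new CatalogPartitionSpec(keys);
    ObjectPath tablePath = new ObjectPath("default", "tbl");
    // getPartition throws PartitionNotExistException if the spec does not resolve
    CatalogPartition partition = catalog.getPartition(tablePath, spec);
    // partition-level metadata (location, serde settings, ...) is exposed as a properties map
    return partition.getProperties();
}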

Example 67 with ObjectPath

Use of org.apache.flink.table.catalog.ObjectPath in project flink by apache.

From the class HiveDialectITCase, method testShowPartitions.

@Test
public void testShowPartitions() throws Exception {
    tableEnv.executeSql("create table tbl (x int,y binary) partitioned by (dt date, country string)");
    tableEnv.executeSql("alter table tbl add partition (dt='2020-04-30',country='china') partition (dt='2020-04-30',country='us')");
    ObjectPath tablePath = new ObjectPath("default", "tbl");
    assertEquals(2, hiveCatalog.listPartitions(tablePath).size());
    // show all partitions
    List<Row> partitions = CollectionUtil.iteratorToList(tableEnv.executeSql("show partitions tbl").collect());
    assertEquals(2, partitions.size());
    assertTrue(partitions.toString().contains("dt=2020-04-30/country=china"));
    assertTrue(partitions.toString().contains("dt=2020-04-30/country=us"));
    // filter with a partial spec: only dt is pinned
    partitions = CollectionUtil.iteratorToList(tableEnv.executeSql("show partitions tbl partition (dt='2020-04-30')").collect());
    assertEquals(2, partitions.size());
    assertTrue(partitions.toString().contains("dt=2020-04-30/country=china"));
    assertTrue(partitions.toString().contains("dt=2020-04-30/country=us"));
    partitions = CollectionUtil.iteratorToList(tableEnv.executeSql("show partitions tbl partition (country='china')").collect());
    assertEquals(1, partitions.size());
    assertTrue(partitions.toString().contains("dt=2020-04-30/country=china"));
    partitions = CollectionUtil.iteratorToList(tableEnv.executeSql("show partitions tbl partition (dt='2020-04-30',country='china')").collect());
    assertEquals(1, partitions.size());
    assertTrue(partitions.toString().contains("dt=2020-04-30/country=china"));
    // a spec that matches no existing partition yields an empty result
    partitions = CollectionUtil.iteratorToList(tableEnv.executeSql("show partitions tbl partition (dt='2020-05-01',country='japan')").collect());
    assertEquals(0, partitions.size());
    // specifying non-partition columns (de/city) is expected to fail
    try {
        CollectionUtil.iteratorToList(tableEnv.executeSql("show partitions tbl partition (de='2020-04-30',city='china')").collect());
    } catch (TableException e) {
        assertEquals(String.format("Could not execute SHOW PARTITIONS %s.%s PARTITION (de=2020-04-30, city=china)", hiveCatalog.getName(), tablePath), e.getMessage());
    }
    tableEnv.executeSql("alter table tbl drop partition (dt='2020-04-30',country='china'),partition (dt='2020-04-30',country='us')");
    assertEquals(0, hiveCatalog.listPartitions(tablePath).size());
    tableEnv.executeSql("drop table tbl");
    tableEnv.executeSql("create table tbl (x int,y binary) partitioned by (dt timestamp, country string)");
    tableEnv.executeSql("alter table tbl add partition (dt='2020-04-30 01:02:03',country='china') partition (dt='2020-04-30 04:05:06',country='us')");
    partitions = CollectionUtil.iteratorToList(tableEnv.executeSql("show partitions tbl").collect());
    assertEquals(2, partitions.size());
    assertTrue(partitions.toString().contains("dt=2020-04-30 01:02:03/country=china"));
    assertTrue(partitions.toString().contains("dt=2020-04-30 04:05:06/country=us"));
    partitions = CollectionUtil.iteratorToList(tableEnv.executeSql("show partitions tbl partition (dt='2020-04-30 01:02:03')").collect());
    assertEquals(1, partitions.size());
    assertTrue(partitions.toString().contains("dt=2020-04-30 01:02:03/country=china"));
    partitions = CollectionUtil.iteratorToList(tableEnv.executeSql("show partitions tbl partition (dt='2020-04-30 04:05:06')").collect());
    assertEquals(1, partitions.size());
    assertTrue(partitions.toString().contains("dt=2020-04-30 04:05:06/country=us"));
    partitions = CollectionUtil.iteratorToList(tableEnv.executeSql("show partitions tbl partition (dt='2020-04-30 01:02:03',country='china')").collect());
    assertEquals(1, partitions.size());
    assertTrue(partitions.toString().contains("dt=2020-04-30 01:02:03/country=china"));
}
Also used : ObjectPath (org.apache.flink.table.catalog.ObjectPath), TableException (org.apache.flink.table.api.TableException), Row (org.apache.flink.types.Row), Test (org.junit.Test)
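
A minimal sketch, again assuming catalog is any registered Catalog: the programmatic equivalent of the partial-spec filtering that SHOW PARTITIONS ... PARTITION (...) performs above. The method name partitionsForDay is hypothetical.

import java.util.Collections;
import java.util.List;
import org.apache.flink.table.catalog.Catalog;
import org.apache.flink.table.catalog.CatalogPartitionSpec;
import org.apache.flink.table.catalog.ObjectPath;

static List<CatalogPartitionSpec> partitionsForDay(Catalog catalog) throws Exception {
    ObjectPath tablePath = new ObjectPath("default", "tbl");
    // partial spec: only dt is pinned, country is left free
    CatalogPartitionSpec partialSpec = new CatalogPartitionSpec(Collections.singletonMap("dt", "2020-04-30"));
    // returns the specs of every partition whose key/value pairs contain dt=2020-04-30
    return catalog.listPartitions(tablePath, partialSpec);
}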

Example 68 with ObjectPath

Use of org.apache.flink.table.catalog.ObjectPath in project flink by apache.

From the class HiveRunnerITCase, method testDynamicPartition.

@Test
public void testDynamicPartition() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        tableEnv.executeSql("create table db1.src (x int, y string, z double)");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "src").addRow(new Object[] { 1, "a", 1.1 }).addRow(new Object[] { 2, "a", 2.2 }).addRow(new Object[] { 3, "b", 3.3 }).commit();
        tableEnv.executeSql("create table db1.dest (x int) partitioned by (p1 string, p2 double)");
        tableEnv.executeSql("insert into db1.dest select * from db1.src").await();
        assertEquals(3, hiveCatalog.listPartitions(new ObjectPath("db1", "dest")).size());
        verifyHiveQueryResult("select * from db1.dest", Arrays.asList("1\ta\t1.1", "2\ta\t2.2", "3\tb\t3.3"));
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
Also used : ObjectPath (org.apache.flink.table.catalog.ObjectPath), TableEnvironment (org.apache.flink.table.api.TableEnvironment), Test (org.junit.Test)

Example 69 with ObjectPath

Use of org.apache.flink.table.catalog.ObjectPath in project flink by apache.

From the class HiveRunnerITCase, method testPartialDynamicPartition.

@Test
public void testPartialDynamicPartition() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        tableEnv.executeSql("create table db1.src (x int, y string)");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "src").addRow(new Object[] { 1, "a" }).addRow(new Object[] { 2, "b" }).commit();
        tableEnv.executeSql("create table db1.dest (x int) partitioned by (p1 double, p2 string)");
        tableEnv.executeSql("insert into db1.dest partition (p1=1.1,p2) select x,y from db1.src").await();
        assertEquals(2, hiveCatalog.listPartitions(new ObjectPath("db1", "dest")).size());
        verifyHiveQueryResult("select * from db1.dest", Arrays.asList("1\t1.1\ta", "2\t1.1\tb"));
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
Also used : ObjectPath (org.apache.flink.table.catalog.ObjectPath), TableEnvironment (org.apache.flink.table.api.TableEnvironment), Test (org.junit.Test)
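
A short sketch of inspecting the partitions that the dynamic inserts above create; it prints each spec map, e.g. {p1=1.1, p2=a} and {p1=1.1, p2=b} for testPartialDynamicPartition. The method name printPartitionSpecs is hypothetical.

import org.apache.flink.table.catalog.Catalog;
import org.apache.flink.table.catalog.CatalogPartitionSpec;
import org.apache.flink.table.catalog.ObjectPath;

static void printPartitionSpecs(Catalog catalog) throws Exception {
    for (CatalogPartitionSpec spec : catalog.listPartitions(new ObjectPath("db1", "dest"))) {
        // each CatalogPartitionSpec wraps an ordered map of partition keys to values
        System.out.println(spec.getPartitionSpec());
    }
}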

Example 70 with ObjectPath

Use of org.apache.flink.table.catalog.ObjectPath in project flink by apache.

From the class MySqlCatalogITCase, method testGetTable.

@Test
public void testGetTable() throws TableNotExistException {
    CatalogBaseTable table = catalog.getTable(new ObjectPath(TEST_DB, TEST_TABLE_ALL_TYPES));
    assertEquals(TABLE_SCHEMA, table.getUnresolvedSchema());
}
Also used : CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable), ObjectPath (org.apache.flink.table.catalog.ObjectPath), Test (org.junit.Test)
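
For contrast, a minimal sketch of the failure path: getTable throws TableNotExistException rather than returning null when the ObjectPath does not resolve. The table name and method name here are placeholders; the Catalog interface also offers tableExists(ObjectPath) for this kind of check.

import org.apache.flink.table.catalog.Catalog;
import org.apache.flink.table.catalog.ObjectPath;
import org.apache.flink.table.catalog.exceptions.TableNotExistException;

static boolean canResolve(Catalog catalog, String db) {
    try {
        catalog.getTable(new ObjectPath(db, "no_such_table"));
        return true;
    } catch (TableNotExistException e) {
        // expected when the path does not resolve to a table or view
        return false;
    }
}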

Aggregations

ObjectPath (org.apache.flink.table.catalog.ObjectPath): 81 usages
Test (org.junit.Test): 52 usages
CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable): 32 usages
CatalogTable (org.apache.flink.table.catalog.CatalogTable): 29 usages
HashMap (java.util.HashMap): 21 usages
CatalogTableImpl (org.apache.flink.table.catalog.CatalogTableImpl): 20 usages
TableSchema (org.apache.flink.table.api.TableSchema): 19 usages
TableEnvironment (org.apache.flink.table.api.TableEnvironment): 17 usages
CatalogPartitionSpec (org.apache.flink.table.catalog.CatalogPartitionSpec): 12 usages
Table (org.apache.hadoop.hive.metastore.api.Table): 12 usages
Configuration (org.apache.flink.configuration.Configuration): 11 usages
SqlCreateHiveTable (org.apache.flink.sql.parser.hive.ddl.SqlCreateHiveTable): 11 usages
TableNotExistException (org.apache.flink.table.catalog.exceptions.TableNotExistException): 9 usages
ArrayList (java.util.ArrayList): 8 usages
Map (java.util.Map): 8 usages
GenericInMemoryCatalog (org.apache.flink.table.catalog.GenericInMemoryCatalog): 8 usages
LinkedHashMap (java.util.LinkedHashMap): 7 usages
Catalog (org.apache.flink.table.catalog.Catalog): 7 usages
ContextResolvedTable (org.apache.flink.table.catalog.ContextResolvedTable): 6 usages
ObjectIdentifier (org.apache.flink.table.catalog.ObjectIdentifier): 6 usages
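
To close, a quick sketch of the ObjectPath value type itself; the database and table names are placeholders.

import org.apache.flink.table.catalog.ObjectPath;

static void objectPathBasics() {
    ObjectPath path = new ObjectPath("db1", "orders");
    String db = path.getDatabaseName(); // "db1"
    String table = path.getObjectName(); // "orders"
    String full = path.getFullName(); // "db1.orders"
    // the same path can be parsed back from its dotted string form
    ObjectPath parsed = ObjectPath.fromString(full);
}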