Use of org.apache.flink.table.catalog.ObjectPath in project flink by apache.
From class TableEnvHiveConnectorITCase, method testNonExistingPartitionFolder.
@Test
public void testNonExistingPartitionFolder() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        tableEnv.executeSql("create table db1.part (x int) partitioned by (p int)");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "part").addRow(new Object[] {1}).commit("p=1");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "part").addRow(new Object[] {2}).commit("p=2");
        // p=3 is registered in the metastore but has no backing folder
        tableEnv.executeSql("alter table db1.part add partition (p=3)");
        // remove the folder of partition p=2 directly on the file system
        Path toRemove = new Path(hiveCatalog.getHiveTable(new ObjectPath("db1", "part")).getSd().getLocation(), "p=2");
        FileSystem fs = toRemove.getFileSystem(hiveCatalog.getHiveConf());
        fs.delete(toRemove, true);
        // only the intact partition p=1 should be read back
        List<Row> results = CollectionUtil.iteratorToList(tableEnv.sqlQuery("select * from db1.part").execute().collect());
        assertEquals("[+I[1, 1]]", results.toString());
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
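For reference, a minimal sketch of the ObjectPath API itself, as used throughout the tests on this page; the database and table names are placeholders:

import org.apache.flink.table.catalog.ObjectPath;

public class ObjectPathSketch {
    public static void main(String[] args) {
        // an ObjectPath identifies an object (table, view, function) within a database
        ObjectPath path = new ObjectPath("db1", "part");
        System.out.println(path.getDatabaseName()); // db1
        System.out.println(path.getObjectName());   // part
        System.out.println(path.getFullName());     // db1.part

        // parse the dotted "db.object" form back into an ObjectPath
        ObjectPath parsed = ObjectPath.fromString("db1.part");
        System.out.println(parsed.equals(path));    // true
    }
}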
Use of org.apache.flink.table.catalog.ObjectPath in project flink by apache.
From class TableEnvHiveConnectorITCase, method testPKConstraint.
@Test
public void testPKConstraint() throws Exception {
    // PK constraints are supported since Hive 2.1.0, but they cannot be RELY in
    // 2.x versions, so only test against 3.x.
    Assume.assumeTrue(HiveVersionTestUtil.HIVE_310_OR_LATER);
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        // a RELY PK constraint should show up in the table schema
        tableEnv.executeSql("create table db1.tbl1 (x tinyint,y smallint,z int, primary key (x,z) disable novalidate rely)");
        CatalogBaseTable catalogTable = hiveCatalog.getTable(new ObjectPath("db1", "tbl1"));
        TableSchema tableSchema = catalogTable.getSchema();
        assertTrue(tableSchema.getPrimaryKey().isPresent());
        UniqueConstraint pk = tableSchema.getPrimaryKey().get();
        assertEquals(2, pk.getColumns().size());
        assertTrue(pk.getColumns().containsAll(Arrays.asList("x", "z")));
        // a NORELY PK constraint should not show up in the table schema
        tableEnv.executeSql("create table db1.tbl2 (x tinyint,y smallint, primary key (x) disable norely)");
        catalogTable = hiveCatalog.getTable(new ObjectPath("db1", "tbl2"));
        tableSchema = catalogTable.getSchema();
        assertFalse(tableSchema.getPrimaryKey().isPresent());
        // a table without a PK should report no primary key
        tableEnv.executeSql("create table db1.tbl3 (x tinyint)");
        catalogTable = hiveCatalog.getTable(new ObjectPath("db1", "tbl3"));
        tableSchema = catalogTable.getSchema();
        assertFalse(tableSchema.getPrimaryKey().isPresent());
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
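The pattern this test exercises — looking a table up by ObjectPath and inspecting its primary key — can be sketched outside the test harness roughly as follows; the Catalog argument stands in for an already-configured HiveCatalog:

import java.util.Optional;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.api.constraints.UniqueConstraint;
import org.apache.flink.table.catalog.Catalog;
import org.apache.flink.table.catalog.ObjectPath;
import org.apache.flink.table.catalog.exceptions.TableNotExistException;

public class PrimaryKeySketch {
    // prints the primary key of the given table, if the catalog reports one
    static void printPrimaryKey(Catalog catalog, ObjectPath path) throws TableNotExistException {
        TableSchema schema = catalog.getTable(path).getSchema();
        Optional<UniqueConstraint> pk = schema.getPrimaryKey();
        pk.ifPresent(c -> System.out.println("PK " + c.getName() + " on columns " + c.getColumns()));
    }
}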
Use of org.apache.flink.table.catalog.ObjectPath in project flink by apache.
From class TableEnvHiveConnectorITCase, method testNotNullConstraints.
@Test
public void testNotNullConstraints() throws Exception {
    Assume.assumeTrue(HiveVersionTestUtil.HIVE_310_OR_LATER);
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        tableEnv.executeSql("create table db1.tbl (x int,y bigint not null enable rely,z string not null enable norely)");
        CatalogBaseTable catalogTable = hiveCatalog.getTable(new ObjectPath("db1", "tbl"));
        TableSchema tableSchema = catalogTable.getSchema();
        assertTrue("By default columns should be nullable", tableSchema.getFieldDataTypes()[0].getLogicalType().isNullable());
        assertFalse("NOT NULL columns should be reflected in table schema", tableSchema.getFieldDataTypes()[1].getLogicalType().isNullable());
        assertTrue("NOT NULL NORELY columns should be considered nullable", tableSchema.getFieldDataTypes()[2].getLogicalType().isNullable());
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
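The nullability checks above generalize to a small helper that reports the nullability of every column of a table; again the Catalog argument is assumed to be a configured catalog instance:

import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.catalog.Catalog;
import org.apache.flink.table.catalog.ObjectPath;
import org.apache.flink.table.catalog.exceptions.TableNotExistException;
import org.apache.flink.table.types.DataType;

public class NullabilitySketch {
    // prints each column name together with the nullability recorded in the schema
    static void printNullability(Catalog catalog, ObjectPath path) throws TableNotExistException {
        TableSchema schema = catalog.getTable(path).getSchema();
        String[] names = schema.getFieldNames();
        DataType[] types = schema.getFieldDataTypes();
        for (int i = 0; i < names.length; i++) {
            System.out.printf("%s nullable=%b%n", names[i], types[i].getLogicalType().isNullable());
        }
    }
}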
Use of org.apache.flink.table.catalog.ObjectPath in project flink by apache.
From class TableEnvHiveConnectorITCase, method testParquetNameMapping.
@Test
public void testParquetNameMapping() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        tableEnv.executeSql("create table db1.t1 (x int,y int) stored as parquet");
        tableEnv.executeSql("insert into table db1.t1 values (1,10),(2,20)").await();
        // create a second table over the same Parquet files, but with the columns declared in reverse order
        Table hiveTable = hiveCatalog.getHiveTable(new ObjectPath("db1", "t1"));
        String location = hiveTable.getSd().getLocation();
        tableEnv.executeSql(String.format("create table db1.t2 (y int,x int) stored as parquet location '%s'", location));
        // force the fallback mapred reader, which must map columns by name rather than by position
        tableEnv.getConfig().getConfiguration().setBoolean(HiveOptions.TABLE_EXEC_HIVE_FALLBACK_MAPRED_READER, true);
        assertEquals("[+I[1], +I[2]]", CollectionUtil.iteratorToList(tableEnv.sqlQuery("select x from db1.t1").execute().collect()).toString());
        assertEquals("[+I[1], +I[2]]", CollectionUtil.iteratorToList(tableEnv.sqlQuery("select x from db1.t2").execute().collect()).toString());
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
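The reader toggle used above can also be set through the option's string key; a sketch, assuming "table.exec.hive.fallback-mapred-reader" is the key that HiveOptions.TABLE_EXEC_HIVE_FALLBACK_MAPRED_READER maps to:

import org.apache.flink.connectors.hive.HiveOptions;
import org.apache.flink.table.api.TableEnvironment;

public class ReaderToggleSketch {
    static void enableFallbackReader(TableEnvironment tableEnv) {
        // typed form, via the option constant
        tableEnv.getConfig().getConfiguration()
                .setBoolean(HiveOptions.TABLE_EXEC_HIVE_FALLBACK_MAPRED_READER, true);
        // equivalent, via the plain string key (assumed to match the constant above)
        tableEnv.getConfig().getConfiguration()
                .setString("table.exec.hive.fallback-mapred-reader", "true");
    }
}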
Use of org.apache.flink.table.catalog.ObjectPath in project flink by apache.
From class HiveDialectITCase, method testCreateTableWithConstraints.
@Test
public void testCreateTableWithConstraints() throws Exception {
    Assume.assumeTrue(HiveVersionTestUtil.HIVE_310_OR_LATER);
    tableEnv.executeSql(
            "create table tbl (x int,y int not null disable novalidate rely,z int not null disable novalidate norely,"
                    + "constraint pk_name primary key (x) disable rely)");
    CatalogTable catalogTable = (CatalogTable) hiveCatalog.getTable(new ObjectPath("default", "tbl"));
    TableSchema tableSchema = catalogTable.getSchema();
    assertTrue("PK not present", tableSchema.getPrimaryKey().isPresent());
    assertEquals("pk_name", tableSchema.getPrimaryKey().get().getName());
    assertFalse("PK cannot be null", tableSchema.getFieldDataTypes()[0].getLogicalType().isNullable());
    assertFalse("RELY NOT NULL should be reflected in schema", tableSchema.getFieldDataTypes()[1].getLogicalType().isNullable());
    assertTrue("NORELY NOT NULL shouldn't be reflected in schema", tableSchema.getFieldDataTypes()[2].getLogicalType().isNullable());
}
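The constraint DDL above is Hive syntax, so HiveDialectITCase runs with the Hive dialect enabled. A minimal sketch of wiring a TableEnvironment up that way, assuming an already-constructed HiveCatalog instance and an arbitrary catalog name of "hive":

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;

public class HiveDialectSketch {
    static TableEnvironment createHiveDialectEnv(HiveCatalog hiveCatalog) {
        TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
        tableEnv.registerCatalog("hive", hiveCatalog); // the name "hive" is an arbitrary choice
        tableEnv.useCatalog("hive");
        // switch the SQL parser to Hive dialect so DDL like DISABLE NOVALIDATE RELY is accepted
        tableEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
        return tableEnv;
    }
}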