Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
Example from the class HiveCatalogITCase, method testTableWithPrimaryKey.
@Test
public void testTableWithPrimaryKey() {
    TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
    tableEnv.getConfig().getConfiguration().setInteger(TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM, 1);
    tableEnv.registerCatalog("catalog1", hiveCatalog);
    tableEnv.useCatalog("catalog1");
    // DDL with a named, NOT ENFORCED primary key constraint on the uuid column.
    final String createTable =
            "CREATE TABLE pk_src (\n"
                    + " uuid varchar(40) not null,\n"
                    + " price DECIMAL(10, 2),\n"
                    + " currency STRING,\n"
                    + " ts6 TIMESTAMP(6),\n"
                    + " ts AS CAST(ts6 AS TIMESTAMP(3)),\n"
                    + " WATERMARK FOR ts AS ts,\n"
                    + " constraint ct1 PRIMARY KEY(uuid) NOT ENFORCED)\n"
                    + " WITH (\n"
                    + " 'connector.type' = 'filesystem',"
                    + " 'connector.path' = 'file://fakePath',"
                    + " 'format.type' = 'csv')";
    tableEnv.executeSql(createTable);
    // Read the schema back from the catalog and check the constraint survived the round trip.
    TableSchema tableSchema =
            tableEnv.getCatalog(tableEnv.getCurrentCatalog())
                    .map(catalog -> {
                        try {
                            final ObjectPath tablePath =
                                    ObjectPath.fromString(catalog.getDefaultDatabase() + '.' + "pk_src");
                            return catalog.getTable(tablePath).getSchema();
                        } catch (TableNotExistException e) {
                            return null;
                        }
                    })
                    .orElse(null);
    assertThat(tableSchema).isNotNull();
    assertThat(tableSchema.getPrimaryKey())
            .hasValue(UniqueConstraint.primaryKey("ct1", Collections.singletonList("uuid")));
    tableEnv.executeSql("DROP TABLE pk_src");
}
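The snippets on this page rely on a hiveCatalog instance supplied by the test harness (HiveTestUtils). The sketch below is a minimal, self-contained equivalent of that setup; the catalog name, default database, and Hive conf directory are placeholder assumptions, not values taken from the Flink tests.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;

public class HiveCatalogSetupSketch {
    public static void main(String[] args) {
        // Create a TableEnvironment (batch mode here; the test above uses streaming mode).
        TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
        // Point the catalog at an existing Hive Metastore via its conf directory (placeholder path).
        HiveCatalog hiveCatalog = new HiveCatalog("myhive", "default", "/opt/hive/conf");
        tableEnv.registerCatalog("myhive", hiveCatalog);
        tableEnv.useCatalog("myhive");
    }
}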
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
Example from the class HiveRunnerITCase, method testWriteComplexType.
@Test
public void testWriteComplexType() throws Exception {
    TableEnvironment tableEnv = HiveTestUtils.createTableEnvWithHiveCatalog(hiveCatalog);
    // Build one row containing an array, a map and a nested struct.
    Row row = new Row(3);
    Object[] array = new Object[] { 1, 2, 3 };
    Map<Integer, String> map = new HashMap<>();
    map.put(1, "a");
    map.put(2, "b");
    Row struct = new Row(2);
    struct.setField(0, 3);
    struct.setField(1, "c");
    row.setField(0, array);
    row.setField(1, map);
    row.setField(2, struct);
    TestCollectionTableFactory.reset();
    TestCollectionTableFactory.initData(Collections.singletonList(row));
    tableEnv.executeSql(
            "create table default_catalog.default_database.complexSrc (a array<int>,m map<int, string>,s row<f1 int,f2 string>) "
                    + "with ('connector'='COLLECTION','is-bounded' = 'true')");
    // The destination table uses Hive DDL, so switch to the HIVE dialect for the CREATE.
    tableEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
    tableEnv.executeSql("create table dest (a array<int>,m map<int, string>,s struct<f1:int,f2:string>)");
    try {
        tableEnv.getConfig().setSqlDialect(SqlDialect.DEFAULT);
        tableEnv.executeSql("insert into dest select * from default_catalog.default_database.complexSrc").await();
        List<String> result = hiveShell.executeQuery("select * from dest");
        assertEquals(1, result.size());
        assertEquals("[1,2,3]\t{1:\"a\",2:\"b\"}\t{\"f1\":3,\"f2\":\"c\"}", result.get(0));
    } finally {
        tableEnv.executeSql("drop table dest");
    }
}
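As a hedged variant (not part of the test above), the written Hive table could also be read back through the same TableEnvironment instead of hiveShell. The fragment below assumes the tableEnv and dest table from the method and uses org.apache.flink.util.CloseableIterator and org.apache.flink.types.Row.

tableEnv.getConfig().setSqlDialect(SqlDialect.DEFAULT);
try (CloseableIterator<Row> it = tableEnv.executeSql("select * from dest").collect()) {
    while (it.hasNext()) {
        // Prints the collected rows, roughly +I[[1, 2, 3], {1=a, 2=b}, +I[3, c]]
        System.out.println(it.next());
    }
}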
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
Example from the class HiveRunnerITCase, method testStaticPartition.
@Test
public void testStaticPartition() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        tableEnv.executeSql("create table db1.src (x int)");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "src")
                .addRow(new Object[] { 1 })
                .addRow(new Object[] { 2 })
                .commit();
        tableEnv.executeSql("create table db1.dest (x int) partitioned by (p1 string, p2 double)");
        // Static partition insert: both partition values are fixed in the PARTITION clause.
        tableEnv.executeSql("insert into db1.dest partition (p1='1\\'1', p2=1.1) select x from db1.src").await();
        assertEquals(1, hiveCatalog.listPartitions(new ObjectPath("db1", "dest")).size());
        verifyHiveQueryResult("select * from db1.dest", Arrays.asList("1\t1'1\t1.1", "2\t1'1\t1.1"));
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
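For contrast, a dynamic-partition counterpart might look like the sketch below. This is an illustration only, not part of the Flink test: the partition values are taken from trailing columns of the SELECT instead of the PARTITION clause.

// Assumes the db1.src and db1.dest tables created in the test above.
tableEnv.executeSql(
        "insert into db1.dest select x, cast(x as string) as p1, cast(1.1 as double) as p2 from db1.src")
    .await();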
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
Example from the class HiveRunnerITCase, method testInsertIntoNonPartitionTable.
@Test
public void testInsertIntoNonPartitionTable() throws Exception {
    List<Row> toWrite = generateRecords(5);
    TestCollectionTableFactory.reset();
    TestCollectionTableFactory.initData(toWrite);
    TableEnvironment tableEnv = HiveTestUtils.createTableEnvWithHiveCatalog(hiveCatalog);
    tableEnv.executeSql(
            "create table default_catalog.default_database.src (i int,l bigint,d double,s string) "
                    + "with ('connector'='COLLECTION','is-bounded' = 'true')");
    tableEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
    tableEnv.executeSql("create table dest (i int,l bigint,d double,s string)");
    try {
        tableEnv.getConfig().setSqlDialect(SqlDialect.DEFAULT);
        tableEnv.executeSql("insert into dest select * from default_catalog.default_database.src").await();
        verifyWrittenData(toWrite, hiveShell.executeQuery("select * from dest"));
    } finally {
        tableEnv.executeSql("drop table dest");
    }
}
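Outside the test harness, the TestCollectionTableFactory source could be replaced with a small bounded table built via fromValues. The sketch below is an assumption for illustration (view name and literal rows invented); it uses org.apache.flink.table.api.Table and the statically imported org.apache.flink.table.api.Expressions.row, and assumes the DEFAULT SQL dialect and the dest table from the test above.

// Build a bounded in-memory source with the same column types (int, bigint, double, string).
Table source = tableEnv.fromValues(
        row(1, 1L, 1.1d, "a"),
        row(2, 2L, 2.2d, "b"));
tableEnv.createTemporaryView("collection_src", source);
tableEnv.executeSql("insert into dest select * from collection_src").await();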
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
Example from the class TableEnvHiveConnectorITCase, method testRegexSerDe.
@Test
public void testRegexSerDe() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        tableEnv.executeSql(
                "create table db1.src (x int,y string) "
                        + "row format serde 'org.apache.hadoop.hive.serde2.RegexSerDe' "
                        + "with serdeproperties ('input.regex'='([\\\\d]+)\\u0001([\\\\S]+)')");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "src")
                .addRow(new Object[] { 1, "a" })
                .addRow(new Object[] { 2, "ab" })
                .commit();
        assertEquals(
                "[+I[1, a], +I[2, ab]]",
                CollectionUtil.iteratorToList(
                                tableEnv.sqlQuery("select * from db1.src order by x").execute().collect())
                        .toString());
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
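To make the input.regex serde property above concrete, the small sketch below checks the same kind of pattern with java.util.regex: a run of digits, the Ctrl-A (\u0001) delimiter that the text inserter writes, then a non-whitespace field. The sample line is an invented assumption, not data from the test.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class RegexSerDeSketch {
    public static void main(String[] args) {
        // Effectively the same pattern as the serde property: ([\d]+)\u0001([\S]+)
        Pattern p = Pattern.compile("([\\d]+)\u0001([\\S]+)");
        Matcher m = p.matcher("1\u0001a"); // a Ctrl-A separated sample line (assumption)
        if (m.matches()) {
            int x = Integer.parseInt(m.group(1)); // 1
            String y = m.group(2);                // "a"
            System.out.println(x + ", " + y);
        }
    }
}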