Use of org.apache.flink.table.catalog.ObjectPath in project flink by apache.
Example from the class HiveCatalogTest, method testCreateGenericTable.
@Test
public void testCreateGenericTable() {
    // Without 'connector'='hive', the table is stored as a generic Flink table.
    Table hiveTable =
            HiveTableUtil.instantiateHiveTable(
                    new ObjectPath("test", "test"),
                    new CatalogTableImpl(schema, getLegacyFileSystemConnectorOptions("/test_path"), null),
                    HiveTestUtils.createHiveConf(),
                    false);
    Map<String, String> prop = hiveTable.getParameters();
    assertThat(HiveCatalog.isHiveTable(prop)).isFalse();
    assertThat(prop.keySet())
            .allMatch(k -> k.startsWith(CatalogPropertiesUtil.FLINK_PROPERTY_PREFIX));
}
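For reference, a minimal sketch of the ObjectPath API that all of these examples revolve around: the class is a simple (database, object) pair with accessors for each part and the dotted full name. ObjectPathDemo is a hypothetical wrapper class for illustration.

import org.apache.flink.table.catalog.ObjectPath;

public class ObjectPathDemo {
    public static void main(String[] args) {
        // Construct from explicit database and object names, as the test above does.
        ObjectPath path = new ObjectPath("test", "test");
        System.out.println(path.getDatabaseName()); // test
        System.out.println(path.getObjectName());   // test
        System.out.println(path.getFullName());     // test.test

        // Or parse a "database.object" string.
        ObjectPath parsed = ObjectPath.fromString("db1.dest");
        System.out.println(parsed.getDatabaseName()); // db1
    }
}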
Use of org.apache.flink.table.catalog.ObjectPath in project flink by apache.
Example from the class HiveCatalogTest, method testCreateHiveTable.
@Test
public void testCreateHiveTable() {
    Map<String, String> options = getLegacyFileSystemConnectorOptions("/test_path");
    // Marking the connector as 'hive' makes this a native Hive table.
    options.put(FactoryUtil.CONNECTOR.key(), SqlCreateHiveTable.IDENTIFIER);
    Table hiveTable =
            HiveTableUtil.instantiateHiveTable(
                    new ObjectPath("test", "test"),
                    new CatalogTableImpl(schema, options, null),
                    HiveTestUtils.createHiveConf(),
                    false);
    Map<String, String> prop = hiveTable.getParameters();
    assertThat(HiveCatalog.isHiveTable(prop)).isTrue();
    assertThat(prop.keySet())
            .noneMatch(k -> k.startsWith(CatalogPropertiesUtil.FLINK_PROPERTY_PREFIX));
}
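The only difference from testCreateGenericTable is the 'connector'='hive' marker option; without it, the table's Flink options are persisted under a prefix so plain Hive clients ignore them. A hedged sketch of that prefix convention follows: the prefixOptions helper is hypothetical, and "flink." is an assumed value for CatalogPropertiesUtil.FLINK_PROPERTY_PREFIX, not a confirmed constant.

import java.util.HashMap;
import java.util.Map;
import java.util.stream.Collectors;

public class FlinkPropertyPrefixDemo {
    // Assumed value of CatalogPropertiesUtil.FLINK_PROPERTY_PREFIX.
    private static final String FLINK_PROPERTY_PREFIX = "flink.";

    // Hypothetical helper mirroring how a generic table's Flink options
    // end up prefixed in the Hive table parameters.
    static Map<String, String> prefixOptions(Map<String, String> options) {
        return options.entrySet().stream()
                .collect(Collectors.toMap(
                        e -> FLINK_PROPERTY_PREFIX + e.getKey(), Map.Entry::getValue));
    }

    public static void main(String[] args) {
        Map<String, String> options = new HashMap<>();
        options.put("connector", "filesystem");
        options.put("path", "/test_path");
        // Every key now starts with "flink.", which is what testCreateGenericTable
        // asserts; testCreateHiveTable asserts the opposite because
        // 'connector'='hive' yields a native Hive table.
        prefixOptions(options).forEach((k, v) -> System.out.println(k + "=" + v));
    }
}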
Use of org.apache.flink.table.catalog.ObjectPath in project flink by apache.
Example from the class HiveRunnerITCase, method testStaticPartition.
@Test
public void testStaticPartition() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        tableEnv.executeSql("create table db1.src (x int)");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "src")
                .addRow(new Object[] {1})
                .addRow(new Object[] {2})
                .commit();
        tableEnv.executeSql("create table db1.dest (x int) partitioned by (p1 string, p2 double)");
        tableEnv.executeSql("insert into db1.dest partition (p1='1\\'1', p2=1.1) select x from db1.src")
                .await();
        assertEquals(1, hiveCatalog.listPartitions(new ObjectPath("db1", "dest")).size());
        verifyHiveQueryResult("select * from db1.dest", Arrays.asList("1\t1'1\t1.1", "2\t1'1\t1.1"));
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
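The partition assertion goes through the generic Catalog interface rather than Hive APIs. A minimal sketch, assuming an already-opened catalog (the HiveCatalog from the test would do); ListPartitionsDemo and printPartitions are hypothetical names for illustration.

import org.apache.flink.table.catalog.Catalog;
import org.apache.flink.table.catalog.CatalogPartitionSpec;
import org.apache.flink.table.catalog.ObjectPath;

import java.util.List;

public class ListPartitionsDemo {
    // Sketch: list the partitions of db1.dest and print each spec.
    // testStaticPartition expects exactly one spec here, since both
    // rows were inserted into the same static partition.
    static void printPartitions(Catalog catalog) throws Exception {
        List<CatalogPartitionSpec> partitions =
                catalog.listPartitions(new ObjectPath("db1", "dest"));
        for (CatalogPartitionSpec spec : partitions) {
            System.out.println(spec.getPartitionSpec()); // e.g. {p1=1'1, p2=1.1}
        }
    }
}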
Use of org.apache.flink.table.catalog.ObjectPath in project flink by apache.
Example from the class HiveSinkCompactionITCase, method tearDown.
@After
public void tearDown() throws TableNotExistException {
    if (hiveCatalog != null) {
        hiveCatalog.dropTable(new ObjectPath(tEnv().getCurrentDatabase(), "sink_table"), true);
        hiveCatalog.close();
    }
}
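A short sketch of the drop-and-close pattern above, assuming an open Catalog instance; CatalogCleanupDemo and cleanup are hypothetical names. The second argument of dropTable is ignoreIfNotExists, which is why the teardown can run even when the sink table was never created.

import org.apache.flink.table.catalog.Catalog;
import org.apache.flink.table.catalog.ObjectPath;

public class CatalogCleanupDemo {
    // Sketch: drop a table if present, then release catalog resources.
    static void cleanup(Catalog catalog, String database, String table) throws Exception {
        // true = ignoreIfNotExists: dropping a missing table is a no-op
        // instead of throwing TableNotExistException.
        catalog.dropTable(new ObjectPath(database, table), true);
        catalog.close();
    }
}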
Use of org.apache.flink.table.catalog.ObjectPath in project flink by apache.
Example from the class HiveSourceITCase, method testRegularRead.
@Test
public void testRegularRead() throws Exception {
    // test non-partitioned table
    ObjectPath tablePath = new ObjectPath("default", "tbl1");
    Map<String, String> tableOptions = new HashMap<>();
    tableOptions.put(CONNECTOR.key(), IDENTIFIER);
    hiveCatalog.createTable(
            tablePath,
            new CatalogTableImpl(TableSchema.builder().field("i", DataTypes.INT()).build(), tableOptions, null),
            false);
    HiveTestUtils.createTextTableInserter(hiveCatalog, tablePath.getDatabaseName(), tablePath.getObjectName())
            .addRow(new Object[] {1})
            .addRow(new Object[] {2})
            .commit();
    StreamExecutionEnvironment streamEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    streamEnv.setParallelism(1);
    HiveSource<RowData> hiveSource = new HiveSourceBuilder(
                    new JobConf(hiveCatalog.getHiveConf()), new Configuration(), HiveShimLoader.getHiveVersion(),
                    tablePath.getDatabaseName(), tablePath.getObjectName(), Collections.emptyMap())
            .buildWithDefaultBulkFormat();
    List<RowData> results = CollectionUtil.iteratorToList(
            streamEnv.fromSource(hiveSource, WatermarkStrategy.noWatermarks(), "HiveSource-tbl1").executeAndCollect());
    assertEquals(2, results.size());
    assertEquals(1, results.get(0).getInt(0));
    assertEquals(2, results.get(1).getInt(0));
    hiveCatalog.dropTable(tablePath, false);
    // test partitioned table
    tablePath = new ObjectPath("default", "tbl2");
    hiveCatalog.createTable(
            tablePath,
            new CatalogTableImpl(
                    TableSchema.builder().field("i", DataTypes.INT()).field("p", DataTypes.STRING()).build(),
                    Collections.singletonList("p"), tableOptions, null),
            false);
    HiveTestUtils.createTextTableInserter(hiveCatalog, tablePath.getDatabaseName(), tablePath.getObjectName())
            .addRow(new Object[] {1})
            .addRow(new Object[] {2})
            .commit("p='a'");
    hiveSource = new HiveSourceBuilder(
                    new JobConf(hiveCatalog.getHiveConf()), new Configuration(), HiveShimLoader.getHiveVersion(),
                    tablePath.getDatabaseName(), tablePath.getObjectName(), Collections.emptyMap())
            .setLimit(1L)
            .buildWithDefaultBulkFormat();
    results = CollectionUtil.iteratorToList(
            streamEnv.fromSource(hiveSource, WatermarkStrategy.noWatermarks(), "HiveSource-tbl2").executeAndCollect());
    assertEquals(1, results.size());
    assertEquals(1, results.get(0).getInt(0));
    assertEquals("a", results.get(0).getString(1).toString());
    HiveTestUtils.createTextTableInserter(hiveCatalog, tablePath.getDatabaseName(), tablePath.getObjectName())
            .addRow(new Object[] {3})
            .commit("p='b'");
    LinkedHashMap<String, String> spec = new LinkedHashMap<>();
    spec.put("p", "b");
    hiveSource = new HiveSourceBuilder(
                    new JobConf(hiveCatalog.getHiveConf()), new Configuration(), null,
                    tablePath.getDatabaseName(), tablePath.getObjectName(), Collections.emptyMap())
            .setPartitions(Collections.singletonList(HiveTablePartition.ofPartition(
                    hiveCatalog.getHiveConf(), hiveCatalog.getHiveVersion(),
                    tablePath.getDatabaseName(), tablePath.getObjectName(), spec)))
            .buildWithDefaultBulkFormat();
    results = CollectionUtil.iteratorToList(
            streamEnv.fromSource(hiveSource, WatermarkStrategy.noWatermarks(), "HiveSource-tbl2").executeAndCollect());
    assertEquals(1, results.size());
    assertEquals(3, results.get(0).getInt(0));
    assertEquals("b", results.get(0).getString(1).toString());
    hiveCatalog.dropTable(tablePath, false);
}
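The same read-and-collect pattern appears three times above. A condensed sketch of just that pattern, assuming a HiveSource<RowData> built elsewhere with HiveSourceBuilder as in the test; BoundedReadDemo and readAll are hypothetical names.

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.connectors.hive.HiveSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.data.RowData;
import org.apache.flink.util.CollectionUtil;

import java.util.List;

public class BoundedReadDemo {
    // Sketch: run a bounded read of a Hive source and gather all rows locally.
    static List<RowData> readAll(HiveSource<RowData> hiveSource) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1); // a single reader keeps row order deterministic, as in the test
        return CollectionUtil.iteratorToList(
                env.fromSource(hiveSource, WatermarkStrategy.noWatermarks(), "HiveSource")
                        .executeAndCollect());
    }
}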